1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
9
10 This file is part of GCC.
11
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
15 any later version.
16
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
25
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include <signal.h>
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
38 #include "recog.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "tree.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "optabs.h"
45 #include "flags.h"
46 #include "reload.h"
47 #include "tm_p.h"
48 #include "ggc.h"
49 #include "gstab.h"
50 #include "hashtab.h"
51 #include "debug.h"
52 #include "target.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
59 #include "bitmap.h"
60 #include "diagnostic.h"
61
62 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
63 #define UNSPEC_ADDRESS_P(X) \
64 (GET_CODE (X) == UNSPEC \
65 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
66 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
67
68 /* Extract the symbol or label from UNSPEC wrapper X. */
69 #define UNSPEC_ADDRESS(X) \
70 XVECEXP (X, 0, 0)
71
72 /* Extract the symbol type from UNSPEC wrapper X. */
73 #define UNSPEC_ADDRESS_TYPE(X) \
74 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
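/* As an illustrative sketch (not taken from the original sources): a
   GP-relative reference to "foo" can be wrapped as

     (unspec:SI [(symbol_ref:SI ("foo"))]
                (UNSPEC_ADDRESS_FIRST + SYMBOL_GP_RELATIVE))

   for which UNSPEC_ADDRESS_P is true, UNSPEC_ADDRESS returns the inner
   symbol_ref, and UNSPEC_ADDRESS_TYPE returns SYMBOL_GP_RELATIVE.  */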
75
76 /* The maximum distance between the top of the stack frame and the
77 value $sp has when we save and restore registers.
78
79 The value for normal-mode code must be a SMALL_OPERAND and must
80 preserve the maximum stack alignment. We therefore use a value
81 of 0x7ff0 in this case.
82
83 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
84 up to 0x7f8 bytes and can usually save or restore all the registers
85 that we need to save or restore. (Note that we can only use these
86 instructions for o32, for which the stack alignment is 8 bytes.)
87
88 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
89 RESTORE are not available. We can then use unextended instructions
90 to save and restore registers, and to allocate and deallocate the top
91 part of the frame. */
92 #define MIPS_MAX_FIRST_STACK_STEP \
93 (!TARGET_MIPS16 ? 0x7ff0 \
94 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
95 : TARGET_64BIT ? 0x100 : 0x400)
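/* For example (a sketch only, not part of the original comments): with a
   normal-mode frame of 0x9000 bytes, the prologue would first drop $sp by
   MIPS_MAX_FIRST_STACK_STEP (0x7ff0), save registers at small offsets from
   the new $sp, and then subtract the remaining 0x1010 bytes in a second
   step.  */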
96
97 /* True if INSN is a mips.md pattern or asm statement. */
98 #define USEFUL_INSN_P(INSN) \
99 (INSN_P (INSN) \
100 && GET_CODE (PATTERN (INSN)) != USE \
101 && GET_CODE (PATTERN (INSN)) != CLOBBER \
102 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
103 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
104
105 /* If INSN is a delayed branch sequence, return the first instruction
106 in the sequence, otherwise return INSN itself. */
107 #define SEQ_BEGIN(INSN) \
108 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
109 ? XVECEXP (PATTERN (INSN), 0, 0) \
110 : (INSN))
111
112 /* Likewise for the last instruction in a delayed branch sequence. */
113 #define SEQ_END(INSN) \
114 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
115 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
116 : (INSN))
117
118 /* Execute the following loop body with SUBINSN set to each instruction
119 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
120 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
121 for ((SUBINSN) = SEQ_BEGIN (INSN); \
122 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
123 (SUBINSN) = NEXT_INSN (SUBINSN))
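/* A minimal usage sketch (hypothetical; not part of this file): count the
   "useful" instructions in the current function, looking inside
   delayed-branch SEQUENCEs as well:

     rtx insn, subinsn;
     int count = 0;

     for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
       FOR_EACH_SUBINSN (subinsn, insn)
	 if (USEFUL_INSN_P (subinsn))
	   count++;
  */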
124
125 /* True if bit BIT is set in VALUE. */
126 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
127
128 /* Classifies an address.
129
130 ADDRESS_REG
131 A natural register + offset address. The register satisfies
132 mips_valid_base_register_p and the offset is a const_arith_operand.
133
134 ADDRESS_LO_SUM
135 A LO_SUM rtx. The first operand is a valid base register and
136 the second operand is a symbolic address.
137
138 ADDRESS_CONST_INT
139 A signed 16-bit constant address.
140
141 ADDRESS_SYMBOLIC:
142        A constant symbolic address. */
143 enum mips_address_type {
144 ADDRESS_REG,
145 ADDRESS_LO_SUM,
146 ADDRESS_CONST_INT,
147 ADDRESS_SYMBOLIC
148 };
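/* For example (illustrative only): (plus (reg $sp) (const_int 16)) would be
   classified as ADDRESS_REG, (lo_sum (reg $at) (symbol_ref "x")) as
   ADDRESS_LO_SUM, (const_int 64) as ADDRESS_CONST_INT, and a bare
   (symbol_ref "x") as ADDRESS_SYMBOLIC.  */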
149
150 /* Classifies the prototype of a builtin function. */
151 enum mips_function_type
152 {
153 MIPS_V2SF_FTYPE_V2SF,
154 MIPS_V2SF_FTYPE_V2SF_V2SF,
155 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
156 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
157 MIPS_V2SF_FTYPE_SF_SF,
158 MIPS_INT_FTYPE_V2SF_V2SF,
159 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
160 MIPS_INT_FTYPE_SF_SF,
161 MIPS_INT_FTYPE_DF_DF,
162 MIPS_SF_FTYPE_V2SF,
163 MIPS_SF_FTYPE_SF,
164 MIPS_SF_FTYPE_SF_SF,
165 MIPS_DF_FTYPE_DF,
166 MIPS_DF_FTYPE_DF_DF,
167
168 /* For MIPS DSP ASE */
169 MIPS_DI_FTYPE_DI_SI,
170 MIPS_DI_FTYPE_DI_SI_SI,
171 MIPS_DI_FTYPE_DI_V2HI_V2HI,
172 MIPS_DI_FTYPE_DI_V4QI_V4QI,
173 MIPS_SI_FTYPE_DI_SI,
174 MIPS_SI_FTYPE_PTR_SI,
175 MIPS_SI_FTYPE_SI,
176 MIPS_SI_FTYPE_SI_SI,
177 MIPS_SI_FTYPE_V2HI,
178 MIPS_SI_FTYPE_V2HI_V2HI,
179 MIPS_SI_FTYPE_V4QI,
180 MIPS_SI_FTYPE_V4QI_V4QI,
181 MIPS_SI_FTYPE_VOID,
182 MIPS_V2HI_FTYPE_SI,
183 MIPS_V2HI_FTYPE_SI_SI,
184 MIPS_V2HI_FTYPE_V2HI,
185 MIPS_V2HI_FTYPE_V2HI_SI,
186 MIPS_V2HI_FTYPE_V2HI_V2HI,
187 MIPS_V2HI_FTYPE_V4QI,
188 MIPS_V2HI_FTYPE_V4QI_V2HI,
189 MIPS_V4QI_FTYPE_SI,
190 MIPS_V4QI_FTYPE_V2HI_V2HI,
191 MIPS_V4QI_FTYPE_V4QI_SI,
192 MIPS_V4QI_FTYPE_V4QI_V4QI,
193 MIPS_VOID_FTYPE_SI_SI,
194 MIPS_VOID_FTYPE_V2HI_V2HI,
195 MIPS_VOID_FTYPE_V4QI_V4QI,
196
197 /* For MIPS DSP REV 2 ASE. */
198 MIPS_V4QI_FTYPE_V4QI,
199 MIPS_SI_FTYPE_SI_SI_SI,
200 MIPS_DI_FTYPE_DI_USI_USI,
201 MIPS_DI_FTYPE_SI_SI,
202 MIPS_DI_FTYPE_USI_USI,
203 MIPS_V2HI_FTYPE_SI_SI_SI,
204
205 /* The last type. */
206 MIPS_MAX_FTYPE_MAX
207 };
208
209 /* Specifies how a builtin function should be converted into rtl. */
210 enum mips_builtin_type
211 {
212 /* The builtin corresponds directly to an .md pattern. The return
213 value is mapped to operand 0 and the arguments are mapped to
214 operands 1 and above. */
215 MIPS_BUILTIN_DIRECT,
216
217 /* The builtin corresponds directly to an .md pattern. There is no return
218 value and the arguments are mapped to operands 0 and above. */
219 MIPS_BUILTIN_DIRECT_NO_TARGET,
220
221 /* The builtin corresponds to a comparison instruction followed by
222 a mips_cond_move_tf_ps pattern. The first two arguments are the
223 values to compare and the second two arguments are the vector
224 operands for the movt.ps or movf.ps instruction (in assembly order). */
225 MIPS_BUILTIN_MOVF,
226 MIPS_BUILTIN_MOVT,
227
228 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
229 of this instruction is the result of the comparison, which has mode
230 CCV2 or CCV4. The function arguments are mapped to operands 1 and
231 above. The function's return value is an SImode boolean that is
232 true under the following conditions:
233
234 MIPS_BUILTIN_CMP_ANY: one of the registers is true
235 MIPS_BUILTIN_CMP_ALL: all of the registers are true
236 MIPS_BUILTIN_CMP_LOWER: the first register is true
237 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
238 MIPS_BUILTIN_CMP_ANY,
239 MIPS_BUILTIN_CMP_ALL,
240 MIPS_BUILTIN_CMP_UPPER,
241 MIPS_BUILTIN_CMP_LOWER,
242
243 /* As above, but the instruction only sets a single $fcc register. */
244 MIPS_BUILTIN_CMP_SINGLE,
245
246 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
247 MIPS_BUILTIN_BPOSGE32
248 };
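/* A hedged example of MIPS_BUILTIN_DIRECT (illustrative, not a statement
   about every builtin handled below): a two-operand DSP builtin such as
   __builtin_mips_addq_ph (a, b) expands to its .md pattern with the
   function's return value as operand 0 and A and B as operands 1 and 2
   respectively.  */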
249
250 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
251 #define MIPS_FP_CONDITIONS(MACRO) \
252 MACRO (f), \
253 MACRO (un), \
254 MACRO (eq), \
255 MACRO (ueq), \
256 MACRO (olt), \
257 MACRO (ult), \
258 MACRO (ole), \
259 MACRO (ule), \
260 MACRO (sf), \
261 MACRO (ngle), \
262 MACRO (seq), \
263 MACRO (ngl), \
264 MACRO (lt), \
265 MACRO (nge), \
266 MACRO (le), \
267 MACRO (ngt)
268
269 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
270 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
271 enum mips_fp_condition {
272 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
273 };
274
275 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
276 #define STRINGIFY(X) #X
277 static const char *const mips_fp_conditions[] = {
278 MIPS_FP_CONDITIONS (STRINGIFY)
279 };
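/* Expanding MIPS_FP_CONDITIONS as above gives MIPS_FP_COND_f == 0,
   MIPS_FP_COND_un == 1, and so on up to MIPS_FP_COND_ngt == 15, with
   mips_fp_conditions[MIPS_FP_COND_<X>] holding the string "<X>" used in the
   corresponding c.<X>.fmt mnemonic.  */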
280
281 /* A function to save or store a register. The first argument is the
282 register and the second is the stack slot. */
283 typedef void (*mips_save_restore_fn) (rtx, rtx);
284
285 struct mips16_constant;
286 struct mips_arg_info;
287 struct mips_address_info;
288 struct mips_integer_op;
289 struct mips_sim;
290
291 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
292 static bool mips_classify_address (struct mips_address_info *, rtx,
293 enum machine_mode, int);
294 static bool mips_cannot_force_const_mem (rtx);
295 static bool mips_use_blocks_for_constant_p (enum machine_mode, const_rtx);
296 static int mips_symbol_insns (enum mips_symbol_type, enum machine_mode);
297 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
298 static rtx mips_force_temporary (rtx, rtx);
299 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
300 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
301 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
302 static unsigned int mips_build_lower (struct mips_integer_op *,
303 unsigned HOST_WIDE_INT);
304 static unsigned int mips_build_integer (struct mips_integer_op *,
305 unsigned HOST_WIDE_INT);
306 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
307 static int m16_check_op (rtx, int, int, int);
308 static bool mips_rtx_costs (rtx, int, int, int *);
309 static int mips_address_cost (rtx);
310 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
311 static void mips_load_call_address (rtx, rtx, int);
312 static bool mips_function_ok_for_sibcall (tree, tree);
313 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
314 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
315 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
316 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
317 tree, int, struct mips_arg_info *);
318 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
319 static void mips_set_architecture (const struct mips_cpu_info *);
320 static void mips_set_tune (const struct mips_cpu_info *);
321 static bool mips_handle_option (size_t, const char *, int);
322 static struct machine_function *mips_init_machine_status (void);
323 static void print_operand_reloc (FILE *, rtx, enum mips_symbol_context,
324 const char **);
325 static void mips_file_start (void);
326 static int mips_small_data_pattern_1 (rtx *, void *);
327 static int mips_rewrite_small_data_1 (rtx *, void *);
328 static bool mips_function_has_gp_insn (void);
329 static unsigned int mips_global_pointer (void);
330 static bool mips_save_reg_p (unsigned int);
331 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
332 mips_save_restore_fn);
333 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
334 static void mips_output_cplocal (void);
335 static void mips_emit_loadgp (void);
336 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
337 static void mips_set_frame_expr (rtx);
338 static rtx mips_frame_set (rtx, rtx);
339 static void mips_save_reg (rtx, rtx);
340 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
341 static void mips_restore_reg (rtx, rtx);
342 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
343 HOST_WIDE_INT, tree);
344 static section *mips_select_rtx_section (enum machine_mode, rtx,
345 unsigned HOST_WIDE_INT);
346 static section *mips_function_rodata_section (tree);
347 static bool mips_in_small_data_p (const_tree);
348 static bool mips_use_anchors_for_symbol_p (const_rtx);
349 static int mips_fpr_return_fields (const_tree, tree *);
350 static bool mips_return_in_msb (const_tree);
351 static rtx mips_return_fpr_pair (enum machine_mode mode,
352 enum machine_mode mode1, HOST_WIDE_INT,
353 enum machine_mode mode2, HOST_WIDE_INT);
354 static rtx mips16_gp_pseudo_reg (void);
355 static void mips16_fp_args (FILE *, int, int);
356 static void build_mips16_function_stub (FILE *);
357 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
358 static void dump_constants (struct mips16_constant *, rtx);
359 static int mips16_insn_length (rtx);
360 static int mips16_rewrite_pool_refs (rtx *, void *);
361 static void mips16_lay_out_constants (void);
362 static void mips_sim_reset (struct mips_sim *);
363 static void mips_sim_init (struct mips_sim *, state_t);
364 static void mips_sim_next_cycle (struct mips_sim *);
365 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
366 static int mips_sim_wait_regs_2 (rtx *, void *);
367 static void mips_sim_wait_regs_1 (rtx *, void *);
368 static void mips_sim_wait_regs (struct mips_sim *, rtx);
369 static void mips_sim_wait_units (struct mips_sim *, rtx);
370 static void mips_sim_wait_insn (struct mips_sim *, rtx);
371 static void mips_sim_record_set (rtx, const_rtx, void *);
372 static void mips_sim_issue_insn (struct mips_sim *, rtx);
373 static void mips_sim_issue_nop (struct mips_sim *);
374 static void mips_sim_finish_insn (struct mips_sim *, rtx);
375 static void vr4130_avoid_branch_rt_conflict (rtx);
376 static void vr4130_align_insns (void);
377 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
378 static void mips_avoid_hazards (void);
379 static void mips_reorg (void);
380 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
381 static bool mips_matching_cpu_name_p (const char *, const char *);
382 static const struct mips_cpu_info *mips_parse_cpu (const char *);
383 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
384 static bool mips_return_in_memory (const_tree, const_tree);
385 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
386 static void mips_macc_chains_record (rtx);
387 static void mips_macc_chains_reorder (rtx *, int);
388 static void vr4130_true_reg_dependence_p_1 (rtx, const_rtx, void *);
389 static bool vr4130_true_reg_dependence_p (rtx);
390 static bool vr4130_swap_insns_p (rtx, rtx);
391 static void vr4130_reorder (rtx *, int);
392 static void mips_promote_ready (rtx *, int, int);
393 static void mips_sched_init (FILE *, int, int);
394 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
395 static int mips_variable_issue (FILE *, int, rtx, int);
396 static int mips_adjust_cost (rtx, rtx, rtx, int);
397 static int mips_issue_rate (void);
398 static int mips_multipass_dfa_lookahead (void);
399 static void mips_init_libfuncs (void);
400 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
401 tree, int *, int);
402 static tree mips_build_builtin_va_list (void);
403 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
404 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
405 const_tree, bool);
406 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
407 const_tree, bool);
408 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
409 tree, bool);
410 static bool mips_valid_pointer_mode (enum machine_mode);
411 static bool mips_vector_mode_supported_p (enum machine_mode);
412 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
413 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
414 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
415 static void mips_init_builtins (void);
416 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
417 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
418 enum insn_code, enum mips_fp_condition,
419 rtx, tree);
420 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
421 enum insn_code, enum mips_fp_condition,
422 rtx, tree);
423 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
424 static void mips_encode_section_info (tree, rtx, int);
425 static void mips_extra_live_on_entry (bitmap);
426 static int mips_comp_type_attributes (const_tree, const_tree);
427 static void mips_set_mips16_mode (int);
428 static void mips_set_current_function (tree);
429 static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
430 static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
431 static void mips_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
432
433 /* Structure to be filled in by compute_frame_size with register
434 save masks, and offsets for the current function. */
435
436 struct mips_frame_info GTY(())
437 {
438 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
439 HOST_WIDE_INT var_size; /* # bytes that variables take up */
440 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
441 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
442 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
443 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
444 unsigned int mask; /* mask of saved gp registers */
445 unsigned int fmask; /* mask of saved fp registers */
446 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
447 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
448 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
449 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
450 bool initialized; /* true if frame size already calculated */
451 int num_gp; /* number of gp registers saved */
452 int num_fp; /* number of fp registers saved */
453 };
454
455 struct machine_function GTY(()) {
456 /* Pseudo-reg holding the value of $28 in a mips16 function which
457 refers to GP relative global variables. */
458 rtx mips16_gp_pseudo_rtx;
459
460 /* The number of extra stack bytes taken up by register varargs.
461 This area is allocated by the callee at the very top of the frame. */
462 int varargs_size;
463
464 /* Current frame information, calculated by compute_frame_size. */
465 struct mips_frame_info frame;
466
467 /* The register to use as the global pointer within this function. */
468 unsigned int global_pointer;
469
470 /* True if mips_adjust_insn_length should ignore an instruction's
471 hazard attribute. */
472 bool ignore_hazard_length_p;
473
474 /* True if the whole function is suitable for .set noreorder and
475 .set nomacro. */
476 bool all_noreorder_p;
477
478 /* True if the function is known to have an instruction that needs $gp. */
479 bool has_gp_insn_p;
480
481 /* True if we have emitted an instruction to initialize
482 mips16_gp_pseudo_rtx. */
483 bool initialized_mips16_gp_pseudo_p;
484 };
485
486 /* Information about a single argument. */
487 struct mips_arg_info
488 {
489 /* True if the argument is passed in a floating-point register, or
490 would have been if we hadn't run out of registers. */
491 bool fpr_p;
492
493 /* The number of words passed in registers, rounded up. */
494 unsigned int reg_words;
495
496 /* For EABI, the offset of the first register from GP_ARG_FIRST or
497 FP_ARG_FIRST. For other ABIs, the offset of the first register from
498 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
499 comment for details).
500
501 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
502 on the stack. */
503 unsigned int reg_offset;
504
505 /* The number of words that must be passed on the stack, rounded up. */
506 unsigned int stack_words;
507
508 /* The offset from the start of the stack overflow area of the argument's
509 first stack word. Only meaningful when STACK_WORDS is nonzero. */
510 unsigned int stack_offset;
511 };
512
513
514 /* Information about an address described by mips_address_type.
515
516 ADDRESS_CONST_INT
517 No fields are used.
518
519 ADDRESS_REG
520 REG is the base register and OFFSET is the constant offset.
521
522 ADDRESS_LO_SUM
523 REG is the register that contains the high part of the address,
524 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
525 is the type of OFFSET's symbol.
526
527 ADDRESS_SYMBOLIC
528 SYMBOL_TYPE is the type of symbol being referenced. */
529
530 struct mips_address_info
531 {
532 enum mips_address_type type;
533 rtx reg;
534 rtx offset;
535 enum mips_symbol_type symbol_type;
536 };
537
538
539 /* One stage in a constant building sequence. These sequences have
540 the form:
541
542 A = VALUE[0]
543 A = A CODE[1] VALUE[1]
544 A = A CODE[2] VALUE[2]
545 ...
546
547 where A is an accumulator, each CODE[i] is a binary rtl operation
548 and each VALUE[i] is a constant integer. */
549 struct mips_integer_op {
550 enum rtx_code code;
551 unsigned HOST_WIDE_INT value;
552 };
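/* A worked example (illustrative, not from the original comments): the
   32-bit constant 0x12345678 can be built in two steps,

     A = 0x12340000		(loaded with LUI)
     A = A IOR 0x5678		(ORI)

   which corresponds to a two-entry array of mips_integer_op, the first
   entry holding the initial value and the second holding { IOR, 0x5678 }.  */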
553
554
555 /* The largest number of operations needed to load an integer constant.
556 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
557 When the lowest bit is clear, we can try, but reject a sequence with
558 an extra SLL at the end. */
559 #define MIPS_MAX_INTEGER_OPS 7
560
561 /* Information about a MIPS16e SAVE or RESTORE instruction. */
562 struct mips16e_save_restore_info {
563 /* The number of argument registers saved by a SAVE instruction.
564 0 for RESTORE instructions. */
565 unsigned int nargs;
566
567 /* Bit X is set if the instruction saves or restores GPR X. */
568 unsigned int mask;
569
570 /* The total number of bytes to allocate. */
571 HOST_WIDE_INT size;
572 };
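/* For instance (a sketch only): a SAVE instruction that stores $16, $17 and
   $31 and allocates 32 bytes of stack would be described by nargs == 0,
   a mask with bits 16, 17 and 31 set, and size == 32.  */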
573
574 /* Global variables for machine-dependent things. */
575
576 /* Threshold for data being put into the small data/bss area, instead
577 of the normal data area. */
578 int mips_section_threshold = -1;
579
580 /* Count the number of .file directives, so that .loc is up to date. */
581 int num_source_filenames = 0;
582
 583 /* Count the number of sdb-related labels generated (to find block
 584    start and end boundaries). */
585 int sdb_label_count = 0;
586
587 /* Next label # for each statement for Silicon Graphics IRIS systems. */
588 int sym_lineno = 0;
589
590 /* Name of the file containing the current function. */
591 const char *current_function_file = "";
592
593 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
594 int set_noreorder;
595 int set_noat;
596 int set_nomacro;
597 int set_volatile;
598
599 /* The next branch instruction is a branch likely, not branch normal. */
600 int mips_branch_likely;
601
602 /* The operands passed to the last cmpMM expander. */
603 rtx cmp_operands[2];
604
605 /* The target cpu for code generation. */
606 enum processor_type mips_arch;
607 const struct mips_cpu_info *mips_arch_info;
608
609 /* The target cpu for optimization and scheduling. */
610 enum processor_type mips_tune;
611 const struct mips_cpu_info *mips_tune_info;
612
613 /* Which instruction set architecture to use. */
614 int mips_isa;
615
616 /* Which ABI to use. */
617 int mips_abi = MIPS_ABI_DEFAULT;
618
619 /* Cost information to use. */
620 const struct mips_rtx_cost_data *mips_cost;
621
622 /* Remember the ambient target flags, excluding mips16. */
623 static int mips_base_target_flags;
624 /* The mips16 command-line target flags only. */
625 static bool mips_base_mips16;
626 /* Similar copies of option settings. */
627 static int mips_base_schedule_insns; /* flag_schedule_insns */
628 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
629 static int mips_base_align_loops; /* align_loops */
630 static int mips_base_align_jumps; /* align_jumps */
631 static int mips_base_align_functions; /* align_functions */
632 static GTY(()) int mips16_flipper;
633
 634 /* The -mcode-readable setting. */
635 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
636
637 /* The architecture selected by -mipsN. */
638 static const struct mips_cpu_info *mips_isa_info;
639
640 /* If TRUE, we split addresses into their high and low parts in the RTL. */
641 int mips_split_addresses;
642
643 /* Mode used for saving/restoring general purpose registers. */
644 static enum machine_mode gpr_mode;
645
646 /* Array giving truth value on whether or not a given hard register
647 can support a given mode. */
648 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
649
650 /* List of all MIPS punctuation characters used by print_operand. */
651 char mips_print_operand_punct[256];
652
653 /* Map GCC register number to debugger register number. */
654 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
655 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
656
657 /* A copy of the original flag_delayed_branch: see override_options. */
658 static int mips_flag_delayed_branch;
659
660 static GTY (()) int mips_output_filename_first_time = 1;
661
662 /* mips_split_p[X] is true if symbols of type X can be split by
663 mips_split_symbol(). */
664 bool mips_split_p[NUM_SYMBOL_TYPES];
665
666 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
667 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
668 if they are matched by a special .md file pattern. */
669 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
670
671 /* Likewise for HIGHs. */
672 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
673
674 /* Map hard register number to register class */
675 const enum reg_class mips_regno_to_class[] =
676 {
677 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
678 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
679 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
680 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
681 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
682 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
683 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
684 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
685 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
686 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
687 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
688 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
689 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
690 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
691 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
692 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
693 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
694 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
695 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
696 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
697 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
698 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
699 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
700 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
701 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
702 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
703 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
704 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
705 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
706 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
707 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
708 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
709 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
710 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
711 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
712 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
713 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
714 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
715 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
716 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
717 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
718 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
719 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
720 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
721 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
722 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
723 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
724 };
725
726 /* Table of machine dependent attributes. */
727 const struct attribute_spec mips_attribute_table[] =
728 {
729 { "long_call", 0, 0, false, true, true, NULL },
730 { "far", 0, 0, false, true, true, NULL },
731 { "near", 0, 0, false, true, true, NULL },
732 /* Switch MIPS16 ASE on and off per-function. */
733 { "mips16", 0, 0, false, true, true, NULL },
734 { "nomips16", 0, 0, false, true, true, NULL },
735 { NULL, 0, 0, false, false, false, NULL }
736 };
737 \f
738 /* A table describing all the processors gcc knows about. Names are
739 matched in the order listed. The first mention of an ISA level is
740 taken as the canonical name for that ISA.
741
742 To ease comparison, please keep this table in the same order as
743 gas's mips_cpu_info_table[]. Please also make sure that
744 MIPS_ISA_LEVEL_SPEC handles all -march options correctly. */
745 const struct mips_cpu_info mips_cpu_info_table[] = {
746 /* Entries for generic ISAs */
747 { "mips1", PROCESSOR_R3000, 1 },
748 { "mips2", PROCESSOR_R6000, 2 },
749 { "mips3", PROCESSOR_R4000, 3 },
750 { "mips4", PROCESSOR_R8000, 4 },
751 { "mips32", PROCESSOR_4KC, 32 },
752 { "mips32r2", PROCESSOR_M4K, 33 },
753 { "mips64", PROCESSOR_5KC, 64 },
754
755 /* MIPS I */
756 { "r3000", PROCESSOR_R3000, 1 },
757 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
758 { "r3900", PROCESSOR_R3900, 1 },
759
760 /* MIPS II */
761 { "r6000", PROCESSOR_R6000, 2 },
762
763 /* MIPS III */
764 { "r4000", PROCESSOR_R4000, 3 },
765 { "vr4100", PROCESSOR_R4100, 3 },
766 { "vr4111", PROCESSOR_R4111, 3 },
767 { "vr4120", PROCESSOR_R4120, 3 },
768 { "vr4130", PROCESSOR_R4130, 3 },
769 { "vr4300", PROCESSOR_R4300, 3 },
770 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
771 { "r4600", PROCESSOR_R4600, 3 },
772 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
773 { "r4650", PROCESSOR_R4650, 3 },
774
775 /* MIPS IV */
776 { "r8000", PROCESSOR_R8000, 4 },
777 { "vr5000", PROCESSOR_R5000, 4 },
778 { "vr5400", PROCESSOR_R5400, 4 },
779 { "vr5500", PROCESSOR_R5500, 4 },
780 { "rm7000", PROCESSOR_R7000, 4 },
781 { "rm9000", PROCESSOR_R9000, 4 },
782
783 /* MIPS32 */
784 { "4kc", PROCESSOR_4KC, 32 },
785 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
786 { "4kp", PROCESSOR_4KP, 32 },
787 { "4ksc", PROCESSOR_4KC, 32 },
788
789 /* MIPS32 Release 2 */
790 { "m4k", PROCESSOR_M4K, 33 },
791 { "4kec", PROCESSOR_4KC, 33 },
792 { "4kem", PROCESSOR_4KC, 33 },
793 { "4kep", PROCESSOR_4KP, 33 },
794 { "4ksd", PROCESSOR_4KC, 33 },
795
796 { "24kc", PROCESSOR_24KC, 33 },
797 { "24kf2_1", PROCESSOR_24KF2_1, 33 },
798 { "24kf", PROCESSOR_24KF2_1, 33 },
799 { "24kf1_1", PROCESSOR_24KF1_1, 33 },
800 { "24kfx", PROCESSOR_24KF1_1, 33 },
801 { "24kx", PROCESSOR_24KF1_1, 33 },
802
803 { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
804 { "24kef2_1", PROCESSOR_24KF2_1, 33 },
805 { "24kef", PROCESSOR_24KF2_1, 33 },
806 { "24kef1_1", PROCESSOR_24KF1_1, 33 },
807 { "24kefx", PROCESSOR_24KF1_1, 33 },
808 { "24kex", PROCESSOR_24KF1_1, 33 },
809
810 { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
811 { "34kf2_1", PROCESSOR_24KF2_1, 33 },
812 { "34kf", PROCESSOR_24KF2_1, 33 },
813 { "34kf1_1", PROCESSOR_24KF1_1, 33 },
814 { "34kfx", PROCESSOR_24KF1_1, 33 },
815 { "34kx", PROCESSOR_24KF1_1, 33 },
816
817 { "74kc", PROCESSOR_74KC, 33 }, /* 74K with DSPr2 */
818 { "74kf2_1", PROCESSOR_74KF2_1, 33 },
819 { "74kf", PROCESSOR_74KF2_1, 33 },
820 { "74kf1_1", PROCESSOR_74KF1_1, 33 },
821 { "74kfx", PROCESSOR_74KF1_1, 33 },
822 { "74kx", PROCESSOR_74KF1_1, 33 },
823 { "74kf3_2", PROCESSOR_74KF3_2, 33 },
824
825 /* MIPS64 */
826 { "5kc", PROCESSOR_5KC, 64 },
827 { "5kf", PROCESSOR_5KF, 64 },
828 { "20kc", PROCESSOR_20KC, 64 },
829 { "sb1", PROCESSOR_SB1, 64 },
830 { "sb1a", PROCESSOR_SB1A, 64 },
831 { "sr71000", PROCESSOR_SR71000, 64 },
832
833 /* End marker */
834 { 0, 0, 0 }
835 };
836
837 /* Default costs. If these are used for a processor we should look
838 up the actual costs. */
839 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
840 COSTS_N_INSNS (7), /* fp_mult_sf */ \
841 COSTS_N_INSNS (8), /* fp_mult_df */ \
842 COSTS_N_INSNS (23), /* fp_div_sf */ \
843 COSTS_N_INSNS (36), /* fp_div_df */ \
844 COSTS_N_INSNS (10), /* int_mult_si */ \
845 COSTS_N_INSNS (10), /* int_mult_di */ \
846 COSTS_N_INSNS (69), /* int_div_si */ \
847 COSTS_N_INSNS (69), /* int_div_di */ \
848 2, /* branch_cost */ \
849 4 /* memory_latency */
850
851 /* Need to replace these with the costs of calling the appropriate
852 libgcc routine. */
853 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
854 COSTS_N_INSNS (256), /* fp_mult_sf */ \
855 COSTS_N_INSNS (256), /* fp_mult_df */ \
856 COSTS_N_INSNS (256), /* fp_div_sf */ \
857 COSTS_N_INSNS (256) /* fp_div_df */
858
859 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
860 {
861 COSTS_N_INSNS (1), /* fp_add */
862 COSTS_N_INSNS (1), /* fp_mult_sf */
863 COSTS_N_INSNS (1), /* fp_mult_df */
864 COSTS_N_INSNS (1), /* fp_div_sf */
865 COSTS_N_INSNS (1), /* fp_div_df */
866 COSTS_N_INSNS (1), /* int_mult_si */
867 COSTS_N_INSNS (1), /* int_mult_di */
868 COSTS_N_INSNS (1), /* int_div_si */
869 COSTS_N_INSNS (1), /* int_div_di */
870 2, /* branch_cost */
871 4 /* memory_latency */
872 };
873
874 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
875 {
876 { /* R3000 */
877 COSTS_N_INSNS (2), /* fp_add */
878 COSTS_N_INSNS (4), /* fp_mult_sf */
879 COSTS_N_INSNS (5), /* fp_mult_df */
880 COSTS_N_INSNS (12), /* fp_div_sf */
881 COSTS_N_INSNS (19), /* fp_div_df */
882 COSTS_N_INSNS (12), /* int_mult_si */
883 COSTS_N_INSNS (12), /* int_mult_di */
884 COSTS_N_INSNS (35), /* int_div_si */
885 COSTS_N_INSNS (35), /* int_div_di */
886 1, /* branch_cost */
887 4 /* memory_latency */
888
889 },
890 { /* 4KC */
891 SOFT_FP_COSTS,
892 COSTS_N_INSNS (6), /* int_mult_si */
893 COSTS_N_INSNS (6), /* int_mult_di */
894 COSTS_N_INSNS (36), /* int_div_si */
895 COSTS_N_INSNS (36), /* int_div_di */
896 1, /* branch_cost */
897 4 /* memory_latency */
898 },
899 { /* 4KP */
900 SOFT_FP_COSTS,
901 COSTS_N_INSNS (36), /* int_mult_si */
902 COSTS_N_INSNS (36), /* int_mult_di */
903 COSTS_N_INSNS (37), /* int_div_si */
904 COSTS_N_INSNS (37), /* int_div_di */
905 1, /* branch_cost */
906 4 /* memory_latency */
907 },
908 { /* 5KC */
909 SOFT_FP_COSTS,
910 COSTS_N_INSNS (4), /* int_mult_si */
911 COSTS_N_INSNS (11), /* int_mult_di */
912 COSTS_N_INSNS (36), /* int_div_si */
913 COSTS_N_INSNS (68), /* int_div_di */
914 1, /* branch_cost */
915 4 /* memory_latency */
916 },
917 { /* 5KF */
918 COSTS_N_INSNS (4), /* fp_add */
919 COSTS_N_INSNS (4), /* fp_mult_sf */
920 COSTS_N_INSNS (5), /* fp_mult_df */
921 COSTS_N_INSNS (17), /* fp_div_sf */
922 COSTS_N_INSNS (32), /* fp_div_df */
923 COSTS_N_INSNS (4), /* int_mult_si */
924 COSTS_N_INSNS (11), /* int_mult_di */
925 COSTS_N_INSNS (36), /* int_div_si */
926 COSTS_N_INSNS (68), /* int_div_di */
927 1, /* branch_cost */
928 4 /* memory_latency */
929 },
930 { /* 20KC */
931 COSTS_N_INSNS (4), /* fp_add */
932 COSTS_N_INSNS (4), /* fp_mult_sf */
933 COSTS_N_INSNS (5), /* fp_mult_df */
934 COSTS_N_INSNS (17), /* fp_div_sf */
935 COSTS_N_INSNS (32), /* fp_div_df */
936 COSTS_N_INSNS (4), /* int_mult_si */
937 COSTS_N_INSNS (7), /* int_mult_di */
938 COSTS_N_INSNS (42), /* int_div_si */
939 COSTS_N_INSNS (72), /* int_div_di */
940 1, /* branch_cost */
941 4 /* memory_latency */
942 },
943 { /* 24KC */
944 SOFT_FP_COSTS,
945 COSTS_N_INSNS (5), /* int_mult_si */
946 COSTS_N_INSNS (5), /* int_mult_di */
947 COSTS_N_INSNS (41), /* int_div_si */
948 COSTS_N_INSNS (41), /* int_div_di */
949 1, /* branch_cost */
950 4 /* memory_latency */
951 },
952 { /* 24KF2_1 */
953 COSTS_N_INSNS (8), /* fp_add */
954 COSTS_N_INSNS (8), /* fp_mult_sf */
955 COSTS_N_INSNS (10), /* fp_mult_df */
956 COSTS_N_INSNS (34), /* fp_div_sf */
957 COSTS_N_INSNS (64), /* fp_div_df */
958 COSTS_N_INSNS (5), /* int_mult_si */
959 COSTS_N_INSNS (5), /* int_mult_di */
960 COSTS_N_INSNS (41), /* int_div_si */
961 COSTS_N_INSNS (41), /* int_div_di */
962 1, /* branch_cost */
963 4 /* memory_latency */
964 },
965 { /* 24KF1_1 */
966 COSTS_N_INSNS (4), /* fp_add */
967 COSTS_N_INSNS (4), /* fp_mult_sf */
968 COSTS_N_INSNS (5), /* fp_mult_df */
969 COSTS_N_INSNS (17), /* fp_div_sf */
970 COSTS_N_INSNS (32), /* fp_div_df */
971 COSTS_N_INSNS (5), /* int_mult_si */
972 COSTS_N_INSNS (5), /* int_mult_di */
973 COSTS_N_INSNS (41), /* int_div_si */
974 COSTS_N_INSNS (41), /* int_div_di */
975 1, /* branch_cost */
976 4 /* memory_latency */
977 },
978 { /* 74KC */
979 SOFT_FP_COSTS,
980 COSTS_N_INSNS (5), /* int_mult_si */
981 COSTS_N_INSNS (5), /* int_mult_di */
982 COSTS_N_INSNS (41), /* int_div_si */
983 COSTS_N_INSNS (41), /* int_div_di */
984 1, /* branch_cost */
985 4 /* memory_latency */
986 },
987 { /* 74KF2_1 */
988 COSTS_N_INSNS (8), /* fp_add */
989 COSTS_N_INSNS (8), /* fp_mult_sf */
990 COSTS_N_INSNS (10), /* fp_mult_df */
991 COSTS_N_INSNS (34), /* fp_div_sf */
992 COSTS_N_INSNS (64), /* fp_div_df */
993 COSTS_N_INSNS (5), /* int_mult_si */
994 COSTS_N_INSNS (5), /* int_mult_di */
995 COSTS_N_INSNS (41), /* int_div_si */
996 COSTS_N_INSNS (41), /* int_div_di */
997 1, /* branch_cost */
998 4 /* memory_latency */
999 },
1000 { /* 74KF1_1 */
1001 COSTS_N_INSNS (4), /* fp_add */
1002 COSTS_N_INSNS (4), /* fp_mult_sf */
1003 COSTS_N_INSNS (5), /* fp_mult_df */
1004 COSTS_N_INSNS (17), /* fp_div_sf */
1005 COSTS_N_INSNS (32), /* fp_div_df */
1006 COSTS_N_INSNS (5), /* int_mult_si */
1007 COSTS_N_INSNS (5), /* int_mult_di */
1008 COSTS_N_INSNS (41), /* int_div_si */
1009 COSTS_N_INSNS (41), /* int_div_di */
1010 1, /* branch_cost */
1011 4 /* memory_latency */
1012 },
1013 { /* 74KF3_2 */
1014 COSTS_N_INSNS (6), /* fp_add */
1015 COSTS_N_INSNS (6), /* fp_mult_sf */
1016 COSTS_N_INSNS (7), /* fp_mult_df */
1017 COSTS_N_INSNS (25), /* fp_div_sf */
1018 COSTS_N_INSNS (48), /* fp_div_df */
1019 COSTS_N_INSNS (5), /* int_mult_si */
1020 COSTS_N_INSNS (5), /* int_mult_di */
1021 COSTS_N_INSNS (41), /* int_div_si */
1022 COSTS_N_INSNS (41), /* int_div_di */
1023 1, /* branch_cost */
1024 4 /* memory_latency */
1025 },
1026 { /* M4k */
1027 DEFAULT_COSTS
1028 },
1029 { /* R3900 */
1030 COSTS_N_INSNS (2), /* fp_add */
1031 COSTS_N_INSNS (4), /* fp_mult_sf */
1032 COSTS_N_INSNS (5), /* fp_mult_df */
1033 COSTS_N_INSNS (12), /* fp_div_sf */
1034 COSTS_N_INSNS (19), /* fp_div_df */
1035 COSTS_N_INSNS (2), /* int_mult_si */
1036 COSTS_N_INSNS (2), /* int_mult_di */
1037 COSTS_N_INSNS (35), /* int_div_si */
1038 COSTS_N_INSNS (35), /* int_div_di */
1039 1, /* branch_cost */
1040 4 /* memory_latency */
1041 },
1042 { /* R6000 */
1043 COSTS_N_INSNS (3), /* fp_add */
1044 COSTS_N_INSNS (5), /* fp_mult_sf */
1045 COSTS_N_INSNS (6), /* fp_mult_df */
1046 COSTS_N_INSNS (15), /* fp_div_sf */
1047 COSTS_N_INSNS (16), /* fp_div_df */
1048 COSTS_N_INSNS (17), /* int_mult_si */
1049 COSTS_N_INSNS (17), /* int_mult_di */
1050 COSTS_N_INSNS (38), /* int_div_si */
1051 COSTS_N_INSNS (38), /* int_div_di */
1052 2, /* branch_cost */
1053 6 /* memory_latency */
1054 },
1055 { /* R4000 */
1056 COSTS_N_INSNS (6), /* fp_add */
1057 COSTS_N_INSNS (7), /* fp_mult_sf */
1058 COSTS_N_INSNS (8), /* fp_mult_df */
1059 COSTS_N_INSNS (23), /* fp_div_sf */
1060 COSTS_N_INSNS (36), /* fp_div_df */
1061 COSTS_N_INSNS (10), /* int_mult_si */
1062 COSTS_N_INSNS (10), /* int_mult_di */
1063 COSTS_N_INSNS (69), /* int_div_si */
1064 COSTS_N_INSNS (69), /* int_div_di */
1065 2, /* branch_cost */
1066 6 /* memory_latency */
1067 },
1068 { /* R4100 */
1069 DEFAULT_COSTS
1070 },
1071 { /* R4111 */
1072 DEFAULT_COSTS
1073 },
1074 { /* R4120 */
1075 DEFAULT_COSTS
1076 },
1077 { /* R4130 */
1078 /* The only costs that appear to be updated here are
1079 integer multiplication. */
1080 SOFT_FP_COSTS,
1081 COSTS_N_INSNS (4), /* int_mult_si */
1082 COSTS_N_INSNS (6), /* int_mult_di */
1083 COSTS_N_INSNS (69), /* int_div_si */
1084 COSTS_N_INSNS (69), /* int_div_di */
1085 1, /* branch_cost */
1086 4 /* memory_latency */
1087 },
1088 { /* R4300 */
1089 DEFAULT_COSTS
1090 },
1091 { /* R4600 */
1092 DEFAULT_COSTS
1093 },
1094 { /* R4650 */
1095 DEFAULT_COSTS
1096 },
1097 { /* R5000 */
1098 COSTS_N_INSNS (6), /* fp_add */
1099 COSTS_N_INSNS (4), /* fp_mult_sf */
1100 COSTS_N_INSNS (5), /* fp_mult_df */
1101 COSTS_N_INSNS (23), /* fp_div_sf */
1102 COSTS_N_INSNS (36), /* fp_div_df */
1103 COSTS_N_INSNS (5), /* int_mult_si */
1104 COSTS_N_INSNS (5), /* int_mult_di */
1105 COSTS_N_INSNS (36), /* int_div_si */
1106 COSTS_N_INSNS (36), /* int_div_di */
1107 1, /* branch_cost */
1108 4 /* memory_latency */
1109 },
1110 { /* R5400 */
1111 COSTS_N_INSNS (6), /* fp_add */
1112 COSTS_N_INSNS (5), /* fp_mult_sf */
1113 COSTS_N_INSNS (6), /* fp_mult_df */
1114 COSTS_N_INSNS (30), /* fp_div_sf */
1115 COSTS_N_INSNS (59), /* fp_div_df */
1116 COSTS_N_INSNS (3), /* int_mult_si */
1117 COSTS_N_INSNS (4), /* int_mult_di */
1118 COSTS_N_INSNS (42), /* int_div_si */
1119 COSTS_N_INSNS (74), /* int_div_di */
1120 1, /* branch_cost */
1121 4 /* memory_latency */
1122 },
1123 { /* R5500 */
1124 COSTS_N_INSNS (6), /* fp_add */
1125 COSTS_N_INSNS (5), /* fp_mult_sf */
1126 COSTS_N_INSNS (6), /* fp_mult_df */
1127 COSTS_N_INSNS (30), /* fp_div_sf */
1128 COSTS_N_INSNS (59), /* fp_div_df */
1129 COSTS_N_INSNS (5), /* int_mult_si */
1130 COSTS_N_INSNS (9), /* int_mult_di */
1131 COSTS_N_INSNS (42), /* int_div_si */
1132 COSTS_N_INSNS (74), /* int_div_di */
1133 1, /* branch_cost */
1134 4 /* memory_latency */
1135 },
1136 { /* R7000 */
1137 /* The only costs that are changed here are
1138 integer multiplication. */
1139 COSTS_N_INSNS (6), /* fp_add */
1140 COSTS_N_INSNS (7), /* fp_mult_sf */
1141 COSTS_N_INSNS (8), /* fp_mult_df */
1142 COSTS_N_INSNS (23), /* fp_div_sf */
1143 COSTS_N_INSNS (36), /* fp_div_df */
1144 COSTS_N_INSNS (5), /* int_mult_si */
1145 COSTS_N_INSNS (9), /* int_mult_di */
1146 COSTS_N_INSNS (69), /* int_div_si */
1147 COSTS_N_INSNS (69), /* int_div_di */
1148 1, /* branch_cost */
1149 4 /* memory_latency */
1150 },
1151 { /* R8000 */
1152 DEFAULT_COSTS
1153 },
1154 { /* R9000 */
1155 /* The only costs that are changed here are
1156 integer multiplication. */
1157 COSTS_N_INSNS (6), /* fp_add */
1158 COSTS_N_INSNS (7), /* fp_mult_sf */
1159 COSTS_N_INSNS (8), /* fp_mult_df */
1160 COSTS_N_INSNS (23), /* fp_div_sf */
1161 COSTS_N_INSNS (36), /* fp_div_df */
1162 COSTS_N_INSNS (3), /* int_mult_si */
1163 COSTS_N_INSNS (8), /* int_mult_di */
1164 COSTS_N_INSNS (69), /* int_div_si */
1165 COSTS_N_INSNS (69), /* int_div_di */
1166 1, /* branch_cost */
1167 4 /* memory_latency */
1168 },
1169 { /* SB1 */
1170 /* These costs are the same as the SB-1A below. */
1171 COSTS_N_INSNS (4), /* fp_add */
1172 COSTS_N_INSNS (4), /* fp_mult_sf */
1173 COSTS_N_INSNS (4), /* fp_mult_df */
1174 COSTS_N_INSNS (24), /* fp_div_sf */
1175 COSTS_N_INSNS (32), /* fp_div_df */
1176 COSTS_N_INSNS (3), /* int_mult_si */
1177 COSTS_N_INSNS (4), /* int_mult_di */
1178 COSTS_N_INSNS (36), /* int_div_si */
1179 COSTS_N_INSNS (68), /* int_div_di */
1180 1, /* branch_cost */
1181 4 /* memory_latency */
1182 },
1183 { /* SB1-A */
1184 /* These costs are the same as the SB-1 above. */
1185 COSTS_N_INSNS (4), /* fp_add */
1186 COSTS_N_INSNS (4), /* fp_mult_sf */
1187 COSTS_N_INSNS (4), /* fp_mult_df */
1188 COSTS_N_INSNS (24), /* fp_div_sf */
1189 COSTS_N_INSNS (32), /* fp_div_df */
1190 COSTS_N_INSNS (3), /* int_mult_si */
1191 COSTS_N_INSNS (4), /* int_mult_di */
1192 COSTS_N_INSNS (36), /* int_div_si */
1193 COSTS_N_INSNS (68), /* int_div_di */
1194 1, /* branch_cost */
1195 4 /* memory_latency */
1196 },
1197 { /* SR71000 */
1198 DEFAULT_COSTS
1199 },
1200 };
1201
1202 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
1203 mips16e_s2_s8_regs[X], it must also save the registers in indexes
1204 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
1205 static const unsigned char mips16e_s2_s8_regs[] = {
1206 30, 23, 22, 21, 20, 19, 18
1207 };
1208 static const unsigned char mips16e_a0_a3_regs[] = {
1209 4, 5, 6, 7
1210 };
1211
1212 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
1213 ordered from the uppermost in memory to the lowest in memory. */
1214 static const unsigned char mips16e_save_restore_regs[] = {
1215 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
1216 };
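/* Illustrative reading of the tables above (not from the original
   comments): if a SAVE stores $21, which is mips16e_s2_s8_regs[3], it must
   also store $20, $19 and $18 (indexes 4 to 6); likewise a SAVE that stores
   $5 must also store $6 and $7.  */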
1217 \f
1218 /* Nonzero if -march should decide the default value of
1219 MASK_SOFT_FLOAT_ABI. */
1220 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
1221 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
1222 #endif
1223 \f
1224 /* Initialize the GCC target structure. */
1225 #undef TARGET_ASM_ALIGNED_HI_OP
1226 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1227 #undef TARGET_ASM_ALIGNED_SI_OP
1228 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1229 #undef TARGET_ASM_ALIGNED_DI_OP
1230 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1231
1232 #undef TARGET_ASM_FUNCTION_PROLOGUE
1233 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1234 #undef TARGET_ASM_FUNCTION_EPILOGUE
1235 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1236 #undef TARGET_ASM_SELECT_RTX_SECTION
1237 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1238 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1239 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1240
1241 #undef TARGET_SCHED_INIT
1242 #define TARGET_SCHED_INIT mips_sched_init
1243 #undef TARGET_SCHED_REORDER
1244 #define TARGET_SCHED_REORDER mips_sched_reorder
1245 #undef TARGET_SCHED_REORDER2
1246 #define TARGET_SCHED_REORDER2 mips_sched_reorder
1247 #undef TARGET_SCHED_VARIABLE_ISSUE
1248 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1249 #undef TARGET_SCHED_ADJUST_COST
1250 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1251 #undef TARGET_SCHED_ISSUE_RATE
1252 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1253 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1254 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1255 mips_multipass_dfa_lookahead
1256
1257 #undef TARGET_DEFAULT_TARGET_FLAGS
1258 #define TARGET_DEFAULT_TARGET_FLAGS \
1259 (TARGET_DEFAULT \
1260 | TARGET_CPU_DEFAULT \
1261 | TARGET_ENDIAN_DEFAULT \
1262 | TARGET_FP_EXCEPTIONS_DEFAULT \
1263 | MASK_CHECK_ZERO_DIV \
1264 | MASK_FUSED_MADD)
1265 #undef TARGET_HANDLE_OPTION
1266 #define TARGET_HANDLE_OPTION mips_handle_option
1267
1268 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1269 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1270
1271 #undef TARGET_SET_CURRENT_FUNCTION
1272 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
1273
1274 #undef TARGET_VALID_POINTER_MODE
1275 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1276 #undef TARGET_RTX_COSTS
1277 #define TARGET_RTX_COSTS mips_rtx_costs
1278 #undef TARGET_ADDRESS_COST
1279 #define TARGET_ADDRESS_COST mips_address_cost
1280
1281 #undef TARGET_IN_SMALL_DATA_P
1282 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1283
1284 #undef TARGET_MACHINE_DEPENDENT_REORG
1285 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1286
1287 #undef TARGET_ASM_FILE_START
1288 #define TARGET_ASM_FILE_START mips_file_start
1289 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1290 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1291
1292 #undef TARGET_INIT_LIBFUNCS
1293 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1294
1295 #undef TARGET_BUILD_BUILTIN_VA_LIST
1296 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1297 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1298 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1299
1300 #undef TARGET_PROMOTE_FUNCTION_ARGS
1301 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
1302 #undef TARGET_PROMOTE_FUNCTION_RETURN
1303 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
1304 #undef TARGET_PROMOTE_PROTOTYPES
1305 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
1306
1307 #undef TARGET_RETURN_IN_MEMORY
1308 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1309 #undef TARGET_RETURN_IN_MSB
1310 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1311
1312 #undef TARGET_ASM_OUTPUT_MI_THUNK
1313 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1314 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1315 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1316
1317 #undef TARGET_SETUP_INCOMING_VARARGS
1318 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1319 #undef TARGET_STRICT_ARGUMENT_NAMING
1320 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1321 #undef TARGET_MUST_PASS_IN_STACK
1322 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1323 #undef TARGET_PASS_BY_REFERENCE
1324 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1325 #undef TARGET_CALLEE_COPIES
1326 #define TARGET_CALLEE_COPIES mips_callee_copies
1327 #undef TARGET_ARG_PARTIAL_BYTES
1328 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1329
1330 #undef TARGET_MODE_REP_EXTENDED
1331 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1332
1333 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1334 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1335
1336 #undef TARGET_INIT_BUILTINS
1337 #define TARGET_INIT_BUILTINS mips_init_builtins
1338 #undef TARGET_EXPAND_BUILTIN
1339 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1340
1341 #undef TARGET_HAVE_TLS
1342 #define TARGET_HAVE_TLS HAVE_AS_TLS
1343
1344 #undef TARGET_CANNOT_FORCE_CONST_MEM
1345 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1346
1347 #undef TARGET_ENCODE_SECTION_INFO
1348 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1349
1350 #undef TARGET_ATTRIBUTE_TABLE
1351 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1352
1353 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1354 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1355
1356 #undef TARGET_MIN_ANCHOR_OFFSET
1357 #define TARGET_MIN_ANCHOR_OFFSET -32768
1358 #undef TARGET_MAX_ANCHOR_OFFSET
1359 #define TARGET_MAX_ANCHOR_OFFSET 32767
1360 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1361 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1362 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1363 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1364
1365 #undef TARGET_COMP_TYPE_ATTRIBUTES
1366 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
1367
1368 #ifdef HAVE_AS_DTPRELWORD
1369 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1370 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
1371 #endif
1372
1373 struct gcc_target targetm = TARGET_INITIALIZER;
1374
1375
1376 /* Predicates to test for presence of "near" and "far"/"long_call"
1377 attributes on the given TYPE. */
1378
1379 static bool
1380 mips_near_type_p (const_tree type)
1381 {
1382 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1383 }
1384
1385 static bool
1386 mips_far_type_p (const_tree type)
1387 {
1388 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1389 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1390 }
1391
1392 /* Similar predicates for "mips16"/"nomips16" attributes. */
1393
1394 static bool
1395 mips_mips16_type_p (const_tree type)
1396 {
1397 return lookup_attribute ("mips16", TYPE_ATTRIBUTES (type)) != NULL;
1398 }
1399
1400 static bool
1401 mips_nomips16_type_p (const_tree type)
1402 {
1403 return lookup_attribute ("nomips16", TYPE_ATTRIBUTES (type)) != NULL;
1404 }
1405
1406 /* Return 0 if the attributes for two types are incompatible, 1 if they
1407 are compatible, and 2 if they are nearly compatible (which causes a
1408 warning to be generated). */
1409
1410 static int
1411 mips_comp_type_attributes (const_tree type1, const_tree type2)
1412 {
1413 /* Check for mismatch of non-default calling convention. */
1414 if (TREE_CODE (type1) != FUNCTION_TYPE)
1415 return 1;
1416
1417 /* Disallow mixed near/far attributes. */
1418 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1419 return 0;
1420 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1421 return 0;
1422
1423 /* Mips16/nomips16 attributes must match exactly. */
1424 if (mips_nomips16_type_p (type1) != mips_nomips16_type_p (type2)
1425 || mips_mips16_type_p (type1) != mips_mips16_type_p (type2))
1426 return 0;
1427
1428 return 1;
1429 }
1430 \f
1431 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1432 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1433
1434 static void
1435 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1436 {
1437 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1438 {
1439 *base_ptr = XEXP (x, 0);
1440 *offset_ptr = INTVAL (XEXP (x, 1));
1441 }
1442 else
1443 {
1444 *base_ptr = x;
1445 *offset_ptr = 0;
1446 }
1447 }
1448 \f
1449 /* Return true if SYMBOL_REF X is associated with a global symbol
1450 (in the STB_GLOBAL sense). */
1451
1452 static bool
1453 mips_global_symbol_p (const_rtx x)
1454 {
1455 const_tree const decl = SYMBOL_REF_DECL (x);
1456
1457 if (!decl)
1458 return !SYMBOL_REF_LOCAL_P (x);
1459
1460 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1461 or weak symbols. Relocations in the object file will be against
1462 the target symbol, so it's that symbol's binding that matters here. */
1463 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1464 }
1465
1466 /* Return true if SYMBOL_REF X binds locally. */
1467
1468 static bool
1469 mips_symbol_binds_local_p (const_rtx x)
1470 {
1471 return (SYMBOL_REF_DECL (x)
1472 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1473 : SYMBOL_REF_LOCAL_P (x));
1474 }
1475
1476 /* Return true if rtx constants of mode MODE should be put into a small
1477 data section. */
1478
1479 static bool
1480 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1481 {
1482 return (!TARGET_EMBEDDED_DATA
1483 && TARGET_LOCAL_SDATA
1484 && GET_MODE_SIZE (mode) <= mips_section_threshold);
1485 }
1486
1487 /* Return the method that should be used to access SYMBOL_REF or
1488 LABEL_REF X in context CONTEXT. */
1489
1490 static enum mips_symbol_type
1491 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1492 {
1493 if (TARGET_RTP_PIC)
1494 return SYMBOL_GOT_DISP;
1495
1496 if (GET_CODE (x) == LABEL_REF)
1497 {
1498 /* LABEL_REFs are used for jump tables as well as text labels.
1499 Only return SYMBOL_PC_RELATIVE if we know the label is in
1500 the text section. */
1501 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1502 return SYMBOL_PC_RELATIVE;
1503 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1504 return SYMBOL_GOT_PAGE_OFST;
1505 return SYMBOL_ABSOLUTE;
1506 }
1507
1508 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1509
1510 if (SYMBOL_REF_TLS_MODEL (x))
1511 return SYMBOL_TLS;
1512
1513 if (CONSTANT_POOL_ADDRESS_P (x))
1514 {
1515 if (TARGET_MIPS16_TEXT_LOADS)
1516 return SYMBOL_PC_RELATIVE;
1517
1518 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1519 return SYMBOL_PC_RELATIVE;
1520
1521 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1522 return SYMBOL_GP_RELATIVE;
1523 }
1524
1525 /* Do not use small-data accesses for weak symbols; they may end up
1526 being zero. */
1527 if (TARGET_GPOPT
1528 && SYMBOL_REF_SMALL_P (x)
1529 && !SYMBOL_REF_WEAK (x))
1530 return SYMBOL_GP_RELATIVE;
1531
1532 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1533 is in effect. */
1534 if (TARGET_ABICALLS
1535 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1536 {
1537 /* There are three cases to consider:
1538
1539 - o32 PIC (either with or without explicit relocs)
1540 - n32/n64 PIC without explicit relocs
1541 - n32/n64 PIC with explicit relocs
1542
1543 In the first case, both local and global accesses will use an
1544 R_MIPS_GOT16 relocation. We must correctly predict which of
1545 the two semantics (local or global) the assembler and linker
1546 will apply. The choice depends on the symbol's binding rather
1547 than its visibility.
1548
1549 In the second case, the assembler will not use R_MIPS_GOT16
1550 relocations, but it chooses between local and global accesses
1551 in the same way as for o32 PIC.
1552
1553 In the third case we have more freedom since both forms of
1554 access will work for any kind of symbol. However, there seems
1555 little point in doing things differently. */
1556 if (mips_global_symbol_p (x))
1557 return SYMBOL_GOT_DISP;
1558
1559 return SYMBOL_GOT_PAGE_OFST;
1560 }
1561
1562 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1563 return SYMBOL_FORCE_TO_MEM;
1564 return SYMBOL_ABSOLUTE;
1565 }
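
/* As a rough illustration of the classification above: a MIPS16
constant-pool entry is usually SYMBOL_PC_RELATIVE, a non-weak symbol
placed in the small data section is SYMBOL_GP_RELATIVE, and under
TARGET_ABICALLS a globally-binding symbol is SYMBOL_GOT_DISP while a
locally-binding one is SYMBOL_GOT_PAGE_OFST. The exact answer also
depends on the option checks made above. */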
1566
1567 /* Classify symbolic expression X, given that it appears in context
1568 CONTEXT. */
1569
1570 static enum mips_symbol_type
1571 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1572 {
1573 rtx offset;
1574
1575 split_const (x, &x, &offset);
1576 if (UNSPEC_ADDRESS_P (x))
1577 return UNSPEC_ADDRESS_TYPE (x);
1578
1579 return mips_classify_symbol (x, context);
1580 }
1581
1582 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1583 is the alignment (in bytes) of SYMBOL_REF X. */
1584
1585 static bool
1586 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1587 {
1588 /* If for some reason we can't get the alignment for the
1589 symbol, initializing this to one means we will only accept
1590 a zero offset. */
1591 HOST_WIDE_INT align = 1;
1592 tree t;
1593
1594 /* Get the alignment of the symbol we're referring to. */
1595 t = SYMBOL_REF_DECL (x);
1596 if (t)
1597 align = DECL_ALIGN_UNIT (t);
1598
1599 return offset >= 0 && offset < align;
1600 }
1601
1602 /* Return true if X is a symbolic constant that can be used in context
1603 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1604
1605 bool
1606 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1607 enum mips_symbol_type *symbol_type)
1608 {
1609 rtx offset;
1610
1611 split_const (x, &x, &offset);
1612 if (UNSPEC_ADDRESS_P (x))
1613 {
1614 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1615 x = UNSPEC_ADDRESS (x);
1616 }
1617 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1618 {
1619 *symbol_type = mips_classify_symbol (x, context);
1620 if (*symbol_type == SYMBOL_TLS)
1621 return false;
1622 }
1623 else
1624 return false;
1625
1626 if (offset == const0_rtx)
1627 return true;
1628
1629 /* Check whether a nonzero offset is valid for the underlying
1630 relocations. */
1631 switch (*symbol_type)
1632 {
1633 case SYMBOL_ABSOLUTE:
1634 case SYMBOL_FORCE_TO_MEM:
1635 case SYMBOL_32_HIGH:
1636 case SYMBOL_64_HIGH:
1637 case SYMBOL_64_MID:
1638 case SYMBOL_64_LOW:
1639 /* If the target has 64-bit pointers and the object file only
1640 supports 32-bit symbols, the values of those symbols will be
1641 sign-extended. In this case we can't allow an arbitrary offset
1642 in case the 32-bit value X + OFFSET has a different sign from X. */
1643 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1644 return offset_within_block_p (x, INTVAL (offset));
1645
1646 /* In other cases the relocations can handle any offset. */
1647 return true;
1648
1649 case SYMBOL_PC_RELATIVE:
1650 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1651 In this case, we no longer have access to the underlying constant,
1652 but the original symbol-based access was known to be valid. */
1653 if (GET_CODE (x) == LABEL_REF)
1654 return true;
1655
1656 /* Fall through. */
1657
1658 case SYMBOL_GP_RELATIVE:
1659 /* Make sure that the offset refers to something within the
1660 same object block. This should guarantee that the final
1661 PC- or GP-relative offset is within the 16-bit limit. */
1662 return offset_within_block_p (x, INTVAL (offset));
1663
1664 case SYMBOL_GOT_PAGE_OFST:
1665 case SYMBOL_GOTOFF_PAGE:
1666 /* If the symbol is global, the GOT entry will contain the symbol's
1667 address, and we will apply a 16-bit offset after loading it.
1668 If the symbol is local, the linker should provide enough local
1669 GOT entries for a 16-bit offset, but larger offsets may lead
1670 to GOT overflow. */
1671 return SMALL_INT (offset);
1672
1673 case SYMBOL_TPREL:
1674 case SYMBOL_DTPREL:
1675 /* There is no carry between the HI and LO REL relocations, so the
1676 offset is only valid if we know it won't lead to such a carry. */
1677 return mips_offset_within_alignment_p (x, INTVAL (offset));
1678
1679 case SYMBOL_GOT_DISP:
1680 case SYMBOL_GOTOFF_DISP:
1681 case SYMBOL_GOTOFF_CALL:
1682 case SYMBOL_GOTOFF_LOADGP:
1683 case SYMBOL_TLSGD:
1684 case SYMBOL_TLSLDM:
1685 case SYMBOL_GOTTPREL:
1686 case SYMBOL_TLS:
1687 case SYMBOL_HALF:
1688 return false;
1689 }
1690 gcc_unreachable ();
1691 }
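
/* For example, an offset such as "sym + 4" is normally acceptable for
SYMBOL_GP_RELATIVE provided it stays within sym's object block,
whereas something like "sym + 0x12345" is rejected for
SYMBOL_GOT_PAGE_OFST because it does not satisfy the SMALL_INT check
above. */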
1692
1693
1694 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1695
1696 int
1697 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1698 {
1699 if (!HARD_REGISTER_NUM_P (regno))
1700 {
1701 if (!strict)
1702 return true;
1703 regno = reg_renumber[regno];
1704 }
1705
1706 /* These fake registers will be eliminated to either the stack or
1707 hard frame pointer, both of which are usually valid base registers.
1708 Reload deals with the cases where the eliminated form isn't valid. */
1709 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1710 return true;
1711
1712 /* In mips16 mode, the stack pointer can only address word and doubleword
1713 values, nothing smaller. There are two problems here:
1714
1715 (a) Instantiating virtual registers can introduce new uses of the
1716 stack pointer. If these virtual registers are valid addresses,
1717 the stack pointer should be too.
1718
1719 (b) Most uses of the stack pointer are not made explicit until
1720 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1721 We don't know until that stage whether we'll be eliminating to the
1722 stack pointer (which needs the restriction) or the hard frame
1723 pointer (which doesn't).
1724
1725 All in all, it seems more consistent to only enforce this restriction
1726 during and after reload. */
1727 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1728 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1729
1730 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1731 }
1732
1733
1734 /* Return true if X is a valid base register for the given mode.
1735 Allow only hard registers if STRICT. */
1736
1737 static bool
1738 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1739 {
1740 if (!strict && GET_CODE (x) == SUBREG)
1741 x = SUBREG_REG (x);
1742
1743 return (REG_P (x)
1744 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1745 }
1746
1747
1748 /* Return true if X is a valid address for machine mode MODE. If it is,
1749 fill in INFO appropriately. STRICT is true if we should only accept
1750 hard base registers. */
1751
1752 static bool
1753 mips_classify_address (struct mips_address_info *info, rtx x,
1754 enum machine_mode mode, int strict)
1755 {
1756 switch (GET_CODE (x))
1757 {
1758 case REG:
1759 case SUBREG:
1760 info->type = ADDRESS_REG;
1761 info->reg = x;
1762 info->offset = const0_rtx;
1763 return mips_valid_base_register_p (info->reg, mode, strict);
1764
1765 case PLUS:
1766 info->type = ADDRESS_REG;
1767 info->reg = XEXP (x, 0);
1768 info->offset = XEXP (x, 1);
1769 return (mips_valid_base_register_p (info->reg, mode, strict)
1770 && const_arith_operand (info->offset, VOIDmode));
1771
1772 case LO_SUM:
1773 info->type = ADDRESS_LO_SUM;
1774 info->reg = XEXP (x, 0);
1775 info->offset = XEXP (x, 1);
1776 /* We have to trust the creator of the LO_SUM to do something vaguely
1777 sane. Target-independent code that creates a LO_SUM should also
1778 create and verify the matching HIGH. Target-independent code that
1779 adds an offset to a LO_SUM must prove that the offset will not
1780 induce a carry. Failure to do either of these things would be
1781 a bug, and we are not required to check for it here. The MIPS
1782 backend itself should only create LO_SUMs for valid symbolic
1783 constants, with the high part being either a HIGH or a copy
1784 of _gp. */
1785 info->symbol_type
1786 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
1787 return (mips_valid_base_register_p (info->reg, mode, strict)
1788 && mips_symbol_insns (info->symbol_type, mode) > 0
1789 && mips_lo_relocs[info->symbol_type] != 0);
1790
1791 case CONST_INT:
1792 /* Small-integer addresses don't occur very often, but they
1793 are legitimate if $0 is a valid base register. */
1794 info->type = ADDRESS_CONST_INT;
1795 return !TARGET_MIPS16 && SMALL_INT (x);
1796
1797 case CONST:
1798 case LABEL_REF:
1799 case SYMBOL_REF:
1800 info->type = ADDRESS_SYMBOLIC;
1801 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1802 &info->symbol_type)
1803 && mips_symbol_insns (info->symbol_type, mode) > 0
1804 && !mips_split_p[info->symbol_type]);
1805
1806 default:
1807 return false;
1808 }
1809 }
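
/* The accepted addresses therefore take roughly these forms:

ADDRESS_REG (reg) or (plus (reg) (const_int 16))
ADDRESS_LO_SUM (lo_sum (reg) (symbol_ref "x"))
ADDRESS_CONST_INT (const_int 16)
ADDRESS_SYMBOLIC (symbol_ref "x")

where the particular register and constants are only examples. */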
1810
1811 /* Return true if X is a thread-local symbol. */
1812
1813 static bool
1814 mips_tls_operand_p (rtx x)
1815 {
1816 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1817 }
1818
1819 /* A for_each_rtx callback for mips_cannot_force_const_mem; return
true if *X is a TLS symbol. */
1820
1821 static int
1822 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1823 {
1824 return mips_tls_operand_p (*x);
1825 }
1826
1827 /* Return true if X cannot be forced into a constant pool. */
1828
1829 static bool
1830 mips_cannot_force_const_mem (rtx x)
1831 {
1832 rtx base, offset;
1833
1834 if (!TARGET_MIPS16)
1835 {
1836 /* As an optimization, reject constants that mips_legitimize_move
1837 can expand inline.
1838
1839 Suppose we have a multi-instruction sequence that loads constant C
1840 into register R. If R does not get allocated a hard register, and
1841 R is used in an operand that allows both registers and memory
1842 references, reload will consider forcing C into memory and using
1843 one of the instruction's memory alternatives. Returning false
1844 here will force it to use an input reload instead. */
1845 if (GET_CODE (x) == CONST_INT)
1846 return true;
1847
1848 split_const (x, &base, &offset);
1849 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1850 return true;
1851 }
1852
1853 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1854 return true;
1855
1856 return false;
1857 }
1858
1859 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1860 constants when we're using a per-function constant pool. */
1861
1862 static bool
1863 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1864 const_rtx x ATTRIBUTE_UNUSED)
1865 {
1866 return !TARGET_MIPS16_PCREL_LOADS;
1867 }
1868 \f
1869 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1870 single instruction. We rely on the fact that, in the worst case,
1871 all instructions involved in a MIPS16 address calculation are
1872 extended ones. */
1873
1874 static int
1875 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1876 {
1877 switch (type)
1878 {
1879 case SYMBOL_ABSOLUTE:
1880 /* When using 64-bit symbols, we need 5 preparatory instructions,
1881 such as:
1882
1883 lui $at,%highest(symbol)
1884 daddiu $at,$at,%higher(symbol)
1885 dsll $at,$at,16
1886 daddiu $at,$at,%hi(symbol)
1887 dsll $at,$at,16
1888
1889 The final address is then $at + %lo(symbol). With 32-bit
1890 symbols we just need a preparatory lui for normal mode and
1891 a preparatory "li; sll" for MIPS16. */
1892 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1893
1894 case SYMBOL_GP_RELATIVE:
1895 /* Treat GP-relative accesses as taking a single instruction on
1896 MIPS16 too; the copy of $gp can often be shared. */
1897 return 1;
1898
1899 case SYMBOL_PC_RELATIVE:
1900 /* PC-relative constants can only be used with addiupc,
1901 lwpc and ldpc. */
1902 if (mode == MAX_MACHINE_MODE
1903 || GET_MODE_SIZE (mode) == 4
1904 || GET_MODE_SIZE (mode) == 8)
1905 return 1;
1906
1907 /* The constant must be loaded using addiupc first. */
1908 return 0;
1909
1910 case SYMBOL_FORCE_TO_MEM:
1911 /* The constant must be loaded from the constant pool. */
1912 return 0;
1913
1914 case SYMBOL_GOT_DISP:
1915 /* The constant will have to be loaded from the GOT before it
1916 is used in an address. */
1917 if (mode != MAX_MACHINE_MODE)
1918 return 0;
1919
1920 /* Fall through. */
1921
1922 case SYMBOL_GOT_PAGE_OFST:
1923 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1924 the local/global classification is accurate. See override_options
1925 for details.
1926
1927 The worst cases are:
1928
1929 (1) For local symbols when generating o32 or o64 code. The assembler
1930 will use:
1931
1932 lw $at,%got(symbol)
1933 nop
1934
1935 ...and the final address will be $at + %lo(symbol).
1936
1937 (2) For global symbols when -mxgot. The assembler will use:
1938
1939 lui $at,%got_hi(symbol)
1940 (d)addu $at,$at,$gp
1941
1942 ...and the final address will be $at + %got_lo(symbol). */
1943 return 3;
1944
1945 case SYMBOL_GOTOFF_PAGE:
1946 case SYMBOL_GOTOFF_DISP:
1947 case SYMBOL_GOTOFF_CALL:
1948 case SYMBOL_GOTOFF_LOADGP:
1949 case SYMBOL_32_HIGH:
1950 case SYMBOL_64_HIGH:
1951 case SYMBOL_64_MID:
1952 case SYMBOL_64_LOW:
1953 case SYMBOL_TLSGD:
1954 case SYMBOL_TLSLDM:
1955 case SYMBOL_DTPREL:
1956 case SYMBOL_GOTTPREL:
1957 case SYMBOL_TPREL:
1958 case SYMBOL_HALF:
1959 /* A 16-bit constant formed by a single relocation, or a 32-bit
1960 constant formed from a high 16-bit relocation and a low 16-bit
1961 relocation. Use mips_split_p to determine which. */
1962 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1963
1964 case SYMBOL_TLS:
1965 /* We don't treat a bare TLS symbol as a constant. */
1966 return 0;
1967 }
1968 gcc_unreachable ();
1969 }
1970
1971 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1972 to load symbols of type TYPE into a register. Return 0 if the given
1973 type of symbol cannot be used as an immediate operand.
1974
1975 Otherwise, return the number of instructions needed to load or store
1976 values of mode MODE to or from addresses of type TYPE. Return 0 if
1977 the given type of symbol is not valid in addresses.
1978
1979 In both cases, treat extended MIPS16 instructions as two instructions. */
1980
1981 static int
1982 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1983 {
1984 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
1985 }
1986
1987 /* Return true if X is a legitimate $sp-based address for mode MODE. */
1988
1989 bool
1990 mips_stack_address_p (rtx x, enum machine_mode mode)
1991 {
1992 struct mips_address_info addr;
1993
1994 return (mips_classify_address (&addr, x, mode, false)
1995 && addr.type == ADDRESS_REG
1996 && addr.reg == stack_pointer_rtx);
1997 }
1998
1999 /* Return true if a value at OFFSET bytes from BASE can be accessed
2000 using an unextended mips16 instruction. MODE is the mode of the
2001 value.
2002
2003 Usually the offset in an unextended instruction is a 5-bit field.
2004 The offset is unsigned and shifted left once for HIs, twice
2005 for SIs, and so on. An exception is SImode accesses off the
2006 stack pointer, which have an 8-bit immediate field. */
2007
2008 static bool
2009 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
2010 {
2011 if (TARGET_MIPS16
2012 && GET_CODE (offset) == CONST_INT
2013 && INTVAL (offset) >= 0
2014 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
2015 {
2016 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
2017 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
2018 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
2019 }
2020 return false;
2021 }
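
/* Worked through, the test above allows unextended offsets of, for
example, 0..62 for HImode and 0..124 for SImode from a general base
register (32 slots scaled by the access size), and 0..1020 for SImode
accesses from the stack pointer (256 slots scaled by 4). */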
2022
2023
2024 /* Return the number of instructions needed to load or store a value
2025 of mode MODE at X. Return 0 if X isn't valid for MODE. Assume that
2026 multiword moves may need to be split into word moves if MIGHT_SPLIT_P,
2027 otherwise assume that a single load or store is enough.
2028
2029 For mips16 code, count extended instructions as two instructions. */
2030
2031 int
2032 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2033 {
2034 struct mips_address_info addr;
2035 int factor;
2036
2037 /* BLKmode is used for single unaligned loads and stores and should
2038 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2039 meaningless, so we have to single it out as a special case one way
2040 or the other.) */
2041 if (mode != BLKmode && might_split_p)
2042 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2043 else
2044 factor = 1;
2045
2046 if (mips_classify_address (&addr, x, mode, false))
2047 switch (addr.type)
2048 {
2049 case ADDRESS_REG:
2050 if (TARGET_MIPS16
2051 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
2052 return factor * 2;
2053 return factor;
2054
2055 case ADDRESS_LO_SUM:
2056 return (TARGET_MIPS16 ? factor * 2 : factor);
2057
2058 case ADDRESS_CONST_INT:
2059 return factor;
2060
2061 case ADDRESS_SYMBOLIC:
2062 return factor * mips_symbol_insns (addr.symbol_type, mode);
2063 }
2064 return 0;
2065 }
2066
2067
2068 /* Likewise for constant X. */
2069
2070 int
2071 mips_const_insns (rtx x)
2072 {
2073 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2074 enum mips_symbol_type symbol_type;
2075 rtx offset;
2076
2077 switch (GET_CODE (x))
2078 {
2079 case HIGH:
2080 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2081 &symbol_type)
2082 || !mips_split_p[symbol_type])
2083 return 0;
2084
2085 /* This is simply an lui for normal mode. It is an extended
2086 "li" followed by an extended "sll" for MIPS16. */
2087 return TARGET_MIPS16 ? 4 : 1;
2088
2089 case CONST_INT:
2090 if (TARGET_MIPS16)
2091 /* Unsigned 8-bit constants can be loaded using an unextended
2092 LI instruction. Unsigned 16-bit constants can be loaded
2093 using an extended LI. Negative constants must be loaded
2094 using LI and then negated. */
2095 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
2096 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2097 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
2098 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2099 : 0);
2100
2101 return mips_build_integer (codes, INTVAL (x));
2102
2103 case CONST_DOUBLE:
2104 case CONST_VECTOR:
2105 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2106
2107 case CONST:
2108 if (CONST_GP_P (x))
2109 return 1;
2110
2111 /* See if we can refer to X directly. */
2112 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2113 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2114
2115 /* Otherwise try splitting the constant into a base and offset.
2116 16-bit offsets can be added using an extra addiu. Larger offsets
2117 must be calculated separately and then added to the base. */
2118 split_const (x, &x, &offset);
2119 if (offset != 0)
2120 {
2121 int n = mips_const_insns (x);
2122 if (n != 0)
2123 {
2124 if (SMALL_INT (offset))
2125 return n + 1;
2126 else
2127 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2128 }
2129 }
2130 return 0;
2131
2132 case SYMBOL_REF:
2133 case LABEL_REF:
2134 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2135 MAX_MACHINE_MODE);
2136
2137 default:
2138 return 0;
2139 }
2140 }
2141
2142
2143 /* Return the number of instructions needed to implement INSN,
2144 given that it loads from or stores to MEM. Count extended
2145 mips16 instructions as two instructions. */
2146
2147 int
2148 mips_load_store_insns (rtx mem, rtx insn)
2149 {
2150 enum machine_mode mode;
2151 bool might_split_p;
2152 rtx set;
2153
2154 gcc_assert (MEM_P (mem));
2155 mode = GET_MODE (mem);
2156
2157 /* Try to prove that INSN does not need to be split. */
2158 might_split_p = true;
2159 if (GET_MODE_BITSIZE (mode) == 64)
2160 {
2161 set = single_set (insn);
2162 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2163 might_split_p = false;
2164 }
2165
2166 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2167 }
2168
2169
2170 /* Return the number of instructions needed for an integer division. */
2171
2172 int
2173 mips_idiv_insns (void)
2174 {
2175 int count;
2176
2177 count = 1;
2178 if (TARGET_CHECK_ZERO_DIV)
2179 {
2180 if (GENERATE_DIVIDE_TRAPS)
2181 count++;
2182 else
2183 count += 2;
2184 }
2185
2186 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2187 count++;
2188 return count;
2189 }
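
/* For example, with -mcheck-zero-division and GENERATE_DIVIDE_TRAPS
a division is counted as two instructions: the divide itself plus a
conditional trap (something like "teq <divisor>,$0,7"); without trap
support, the zero check costs a branch and a break instead. */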
2190 \f
2191 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
2192 returns a nonzero value if X is a legitimate address for a memory
2193 operand of the indicated MODE. STRICT is nonzero if this function
2194 is called during reload. */
2195
2196 bool
2197 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
2198 {
2199 struct mips_address_info addr;
2200
2201 return mips_classify_address (&addr, x, mode, strict);
2202 }
2203
2204 /* Emit a move from SRC to DEST. Assume that the move expanders can
2205 handle all moves if !can_create_pseudo_p (). The distinction is
2206 important because, unlike emit_move_insn, the move expanders know
2207 how to force Pmode objects into the constant pool even when the
2208 constant pool address is not itself legitimate. */
2209
2210 rtx
2211 mips_emit_move (rtx dest, rtx src)
2212 {
2213 return (can_create_pseudo_p ()
2214 ? emit_move_insn (dest, src)
2215 : emit_move_insn_1 (dest, src));
2216 }
2217
2218 /* Copy VALUE to a register and return that register. If new pseudos
2219 are allowed, copy it into a new register, otherwise use DEST. */
2220
2221 static rtx
2222 mips_force_temporary (rtx dest, rtx value)
2223 {
2224 if (can_create_pseudo_p ())
2225 return force_reg (Pmode, value);
2226 else
2227 {
2228 mips_emit_move (copy_rtx (dest), value);
2229 return dest;
2230 }
2231 }
2232
2233
2234 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2235 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2236 constant in that context and can be split into a high part and a LO_SUM.
2237 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2238 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2239
2240 TEMP is as for mips_force_temporary and is used to load the high
2241 part into a register. */
2242
2243 bool
2244 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2245 {
2246 enum mips_symbol_context context;
2247 enum mips_symbol_type symbol_type;
2248 rtx high;
2249
2250 context = (mode == MAX_MACHINE_MODE
2251 ? SYMBOL_CONTEXT_LEA
2252 : SYMBOL_CONTEXT_MEM);
2253 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2254 || mips_symbol_insns (symbol_type, mode) == 0
2255 || !mips_split_p[symbol_type])
2256 return false;
2257
2258 if (lo_sum_out)
2259 {
2260 if (symbol_type == SYMBOL_GP_RELATIVE)
2261 {
2262 if (!can_create_pseudo_p ())
2263 {
2264 emit_insn (gen_load_const_gp (copy_rtx (temp)));
2265 high = temp;
2266 }
2267 else
2268 high = mips16_gp_pseudo_reg ();
2269 }
2270 else
2271 {
2272 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2273 high = mips_force_temporary (temp, high);
2274 }
2275 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2276 }
2277 return true;
2278 }
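
/* For a SYMBOL_ABSOLUTE address, for instance, the split typically
emits something like "lui <temp>,%hi(symbol)" and returns
(lo_sum <temp> symbol), which can later be printed as
%lo(symbol)(<temp>); for SYMBOL_GP_RELATIVE the high part is simply a
copy of $gp. (<temp> stands for whatever temporary is chosen.) */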
2279
2280
2281 /* Wrap symbol or label BASE in an unspec address of type SYMBOL_TYPE
2282 and add CONST_INT OFFSET to the result. */
2283
2284 static rtx
2285 mips_unspec_address_offset (rtx base, rtx offset,
2286 enum mips_symbol_type symbol_type)
2287 {
2288 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2289 UNSPEC_ADDRESS_FIRST + symbol_type);
2290 if (offset != const0_rtx)
2291 base = gen_rtx_PLUS (Pmode, base, offset);
2292 return gen_rtx_CONST (Pmode, base);
2293 }
2294
2295 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2296 type SYMBOL_TYPE. */
2297
2298 rtx
2299 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2300 {
2301 rtx base, offset;
2302
2303 split_const (address, &base, &offset);
2304 return mips_unspec_address_offset (base, offset, symbol_type);
2305 }
2306
2307
2308 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2309 high part to BASE and return the result. Just return BASE otherwise.
2310 TEMP is available as a temporary register if needed.
2311
2312 The returned expression can be used as the first operand to a LO_SUM. */
2313
2314 static rtx
2315 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2316 enum mips_symbol_type symbol_type)
2317 {
2318 if (mips_split_p[symbol_type])
2319 {
2320 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2321 addr = mips_force_temporary (temp, addr);
2322 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2323 }
2324 return base;
2325 }
2326
2327
2328 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2329 mips_force_temporary; it is only needed when OFFSET is not a
2330 SMALL_OPERAND. */
2331
2332 static rtx
2333 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2334 {
2335 if (!SMALL_OPERAND (offset))
2336 {
2337 rtx high;
2338 if (TARGET_MIPS16)
2339 {
2340 /* Load the full offset into a register so that we can use
2341 an unextended instruction for the address itself. */
2342 high = GEN_INT (offset);
2343 offset = 0;
2344 }
2345 else
2346 {
2347 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2348 high = GEN_INT (CONST_HIGH_PART (offset));
2349 offset = CONST_LOW_PART (offset);
2350 }
2351 high = mips_force_temporary (temp, high);
2352 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2353 }
2354 return plus_constant (reg, offset);
2355 }
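
/* For example, for REG + 0x12345 in non-MIPS16 code the excess is
peeled off first, roughly:

lui <temp>,0x1
addu <temp>,<temp>,REG

leaving the 16-bit remainder 0x2345 to be added by plus_constant.
(<temp> again stands for whatever temporary register is used.) */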
2356
2357 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2358 referencing, and TYPE is the symbol type to use (either global
2359 dynamic or local dynamic). V0 is an RTX for the return value
2360 location. The entire insn sequence is returned. */
2361
2362 static GTY(()) rtx mips_tls_symbol;
2363
2364 static rtx
2365 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2366 {
2367 rtx insn, loc, tga, a0;
2368
2369 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2370
2371 if (!mips_tls_symbol)
2372 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2373
2374 loc = mips_unspec_address (sym, type);
2375
2376 start_sequence ();
2377
2378 emit_insn (gen_rtx_SET (Pmode, a0,
2379 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2380 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2381 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2382 CONST_OR_PURE_CALL_P (insn) = 1;
2383 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2384 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2385 insn = get_insns ();
2386
2387 end_sequence ();
2388
2389 return insn;
2390 }
2391
2392 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2393 return value will be a valid address and move_operand (either a REG
2394 or a LO_SUM). */
2395
2396 static rtx
2397 mips_legitimize_tls_address (rtx loc)
2398 {
2399 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2400 enum tls_model model;
2401
2402 if (TARGET_MIPS16)
2403 {
2404 sorry ("MIPS16 TLS");
2405 return gen_reg_rtx (Pmode);
2406 }
2407
2408 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2409 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2410
2411 model = SYMBOL_REF_TLS_MODEL (loc);
2412 /* Only TARGET_ABICALLS code can have more than one module; other
2413 code must be static and should not use a GOT. All TLS models
2414 reduce to local exec in this situation. */
2415 if (!TARGET_ABICALLS)
2416 model = TLS_MODEL_LOCAL_EXEC;
2417
2418 switch (model)
2419 {
2420 case TLS_MODEL_GLOBAL_DYNAMIC:
2421 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2422 dest = gen_reg_rtx (Pmode);
2423 emit_libcall_block (insn, dest, v0, loc);
2424 break;
2425
2426 case TLS_MODEL_LOCAL_DYNAMIC:
2427 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2428 tmp1 = gen_reg_rtx (Pmode);
2429
2430 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2431 share the LDM result with other LD model accesses. */
2432 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2433 UNSPEC_TLS_LDM);
2434 emit_libcall_block (insn, tmp1, v0, eqv);
2435
2436 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2437 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2438 mips_unspec_address (loc, SYMBOL_DTPREL));
2439 break;
2440
2441 case TLS_MODEL_INITIAL_EXEC:
2442 tmp1 = gen_reg_rtx (Pmode);
2443 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2444 if (Pmode == DImode)
2445 {
2446 emit_insn (gen_tls_get_tp_di (v1));
2447 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2448 }
2449 else
2450 {
2451 emit_insn (gen_tls_get_tp_si (v1));
2452 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2453 }
2454 dest = gen_reg_rtx (Pmode);
2455 emit_insn (gen_add3_insn (dest, tmp1, v1));
2456 break;
2457
2458 case TLS_MODEL_LOCAL_EXEC:
2459 if (Pmode == DImode)
2460 emit_insn (gen_tls_get_tp_di (v1));
2461 else
2462 emit_insn (gen_tls_get_tp_si (v1));
2463
2464 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2465 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2466 mips_unspec_address (loc, SYMBOL_TPREL));
2467 break;
2468
2469 default:
2470 gcc_unreachable ();
2471 }
2472
2473 return dest;
2474 }
2475
2476 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2477 be legitimized in a way that the generic machinery might not expect,
2478 put the new address in *XLOC and return true. MODE is the mode of
2479 the memory being accessed. */
2480
2481 bool
2482 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2483 {
2484 if (mips_tls_operand_p (*xloc))
2485 {
2486 *xloc = mips_legitimize_tls_address (*xloc);
2487 return true;
2488 }
2489
2490 /* See if the address can split into a high part and a LO_SUM. */
2491 if (mips_split_symbol (NULL, *xloc, mode, xloc))
2492 return true;
2493
2494 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2495 {
2496 /* Handle REG + CONSTANT using mips_add_offset. */
2497 rtx reg;
2498
2499 reg = XEXP (*xloc, 0);
2500 if (!mips_valid_base_register_p (reg, mode, 0))
2501 reg = copy_to_mode_reg (Pmode, reg);
2502 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2503 return true;
2504 }
2505
2506 return false;
2507 }
2508
2509
2510 /* Subroutine of mips_build_integer (with the same interface).
2511 Assume that the final action in the sequence should be a left shift. */
2512
2513 static unsigned int
2514 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2515 {
2516 unsigned int i, shift;
2517
2518 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2519 since signed numbers are easier to load than unsigned ones. */
2520 shift = 0;
2521 while ((value & 1) == 0)
2522 value /= 2, shift++;
2523
2524 i = mips_build_integer (codes, value);
2525 codes[i].code = ASHIFT;
2526 codes[i].value = shift;
2527 return i + 1;
2528 }
2529
2530
2531 /* As for mips_build_shift, but assume that the final action will be
2532 an IOR or PLUS operation. */
2533
2534 static unsigned int
2535 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2536 {
2537 unsigned HOST_WIDE_INT high;
2538 unsigned int i;
2539
2540 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2541 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2542 {
2543 /* The constant is too complex to load with a simple lui/ori pair
2544 so our goal is to clear as many trailing zeros as possible.
2545 In this case, we know bit 16 is set and that the low 16 bits
2546 form a negative number. If we subtract that number from VALUE,
2547 we will clear at least the lowest 17 bits, maybe more. */
2548 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2549 codes[i].code = PLUS;
2550 codes[i].value = CONST_LOW_PART (value);
2551 }
2552 else
2553 {
2554 i = mips_build_integer (codes, high);
2555 codes[i].code = IOR;
2556 codes[i].value = value & 0xffff;
2557 }
2558 return i + 1;
2559 }
2560
2561
2562 /* Fill CODES with a sequence of rtl operations to load VALUE.
2563 Return the number of operations needed. */
2564
2565 static unsigned int
2566 mips_build_integer (struct mips_integer_op *codes,
2567 unsigned HOST_WIDE_INT value)
2568 {
2569 if (SMALL_OPERAND (value)
2570 || SMALL_OPERAND_UNSIGNED (value)
2571 || LUI_OPERAND (value))
2572 {
2573 /* The value can be loaded with a single instruction. */
2574 codes[0].code = UNKNOWN;
2575 codes[0].value = value;
2576 return 1;
2577 }
2578 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2579 {
2580 /* Either the constant is a simple LUI/ORI combination or its
2581 lowest bit is set. We don't want to shift in this case. */
2582 return mips_build_lower (codes, value);
2583 }
2584 else if ((value & 0xffff) == 0)
2585 {
2586 /* The constant will need at least three actions. The lowest
2587 16 bits are clear, so the final action will be a shift. */
2588 return mips_build_shift (codes, value);
2589 }
2590 else
2591 {
2592 /* The final action could be a shift, add or inclusive OR.
2593 Rather than use a complex condition to select the best
2594 approach, try both mips_build_shift and mips_build_lower
2595 and pick the one that gives the shortest sequence.
2596 Note that this case is only used once per constant. */
2597 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2598 unsigned int cost, alt_cost;
2599
2600 cost = mips_build_shift (codes, value);
2601 alt_cost = mips_build_lower (alt_codes, value);
2602 if (alt_cost < cost)
2603 {
2604 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2605 cost = alt_cost;
2606 }
2607 return cost;
2608 }
2609 }
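
/* As an example of this decomposition, the constant 0x12345678 takes
two operations (a LUI_OPERAND load of 0x12340000 followed by an IOR
with 0x5678), whereas 0x12340000 on its own is a single LUI_OPERAND
and 0x1234 is a single SMALL_OPERAND load. */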
2610
2611
2612 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2613
2614 void
2615 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2616 {
2617 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2618 enum machine_mode mode;
2619 unsigned int i, cost;
2620 rtx x;
2621
2622 mode = GET_MODE (dest);
2623 cost = mips_build_integer (codes, value);
2624
2625 /* Apply each binary operation to X. Invariant: X is a legitimate
2626 source operand for a SET pattern. */
2627 x = GEN_INT (codes[0].value);
2628 for (i = 1; i < cost; i++)
2629 {
2630 if (!can_create_pseudo_p ())
2631 {
2632 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2633 x = temp;
2634 }
2635 else
2636 x = force_reg (mode, x);
2637 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2638 }
2639
2640 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2641 }
2642
2643
2644 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2645 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2646 move_operand. */
2647
2648 static void
2649 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2650 {
2651 rtx base, offset;
2652
2653 /* Split moves of big integers into smaller pieces. */
2654 if (splittable_const_int_operand (src, mode))
2655 {
2656 mips_move_integer (dest, dest, INTVAL (src));
2657 return;
2658 }
2659
2660 /* Split moves of symbolic constants into high/low pairs. */
2661 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2662 {
2663 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2664 return;
2665 }
2666
2667 if (mips_tls_operand_p (src))
2668 {
2669 mips_emit_move (dest, mips_legitimize_tls_address (src));
2670 return;
2671 }
2672
2673 /* If we have (const (plus symbol offset)), and that expression cannot
2674 be forced into memory, load the symbol first and add in the offset.
2675 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2676 forced into memory, as it usually produces better code. */
2677 split_const (src, &base, &offset);
2678 if (offset != const0_rtx
2679 && (targetm.cannot_force_const_mem (src)
2680 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2681 {
2682 base = mips_force_temporary (dest, base);
2683 mips_emit_move (dest, mips_add_offset (0, base, INTVAL (offset)));
2684 return;
2685 }
2686
2687 src = force_const_mem (mode, src);
2688
2689 /* When using explicit relocs, constant pool references are sometimes
2690 not legitimate addresses. */
2691 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2692 mips_emit_move (dest, src);
2693 }
2694
2695
2696 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2697 sequence that is valid. */
2698
2699 bool
2700 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2701 {
2702 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2703 {
2704 mips_emit_move (dest, force_reg (mode, src));
2705 return true;
2706 }
2707
2708 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2709 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2710 && REG_P (src) && MD_REG_P (REGNO (src))
2711 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2712 {
2713 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2714 if (GET_MODE_SIZE (mode) <= 4)
2715 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2716 gen_rtx_REG (SImode, REGNO (src)),
2717 gen_rtx_REG (SImode, other_regno)));
2718 else
2719 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2720 gen_rtx_REG (DImode, REGNO (src)),
2721 gen_rtx_REG (DImode, other_regno)));
2722 return true;
2723 }
2724
2725 /* We need to deal with constants that would be legitimate
2726 immediate_operands but not legitimate move_operands. */
2727 if (CONSTANT_P (src) && !move_operand (src, mode))
2728 {
2729 mips_legitimize_const_move (mode, dest, src);
2730 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2731 return true;
2732 }
2733 return false;
2734 }
2735 \f
2736 /* We need a lot of little routines to check constant values on the
2737 mips16. These are used to figure out how long the instruction will
2738 be. It would be much better to do this using constraints, but
2739 there aren't nearly enough letters available. */
2740
2741 static int
2742 m16_check_op (rtx op, int low, int high, int mask)
2743 {
2744 return (GET_CODE (op) == CONST_INT
2745 && INTVAL (op) >= low
2746 && INTVAL (op) <= high
2747 && (INTVAL (op) & mask) == 0);
2748 }
2749
2750 int
2751 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2752 {
2753 return m16_check_op (op, 0x1, 0x8, 0);
2754 }
2755
2756 int
2757 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2758 {
2759 return m16_check_op (op, - 0x8, 0x7, 0);
2760 }
2761
2762 int
2763 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2764 {
2765 return m16_check_op (op, - 0x7, 0x8, 0);
2766 }
2767
2768 int
2769 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2770 {
2771 return m16_check_op (op, - 0x10, 0xf, 0);
2772 }
2773
2774 int
2775 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2776 {
2777 return m16_check_op (op, - 0xf, 0x10, 0);
2778 }
2779
2780 int
2781 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2782 {
2783 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2784 }
2785
2786 int
2787 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2788 {
2789 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2790 }
2791
2792 int
2793 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2794 {
2795 return m16_check_op (op, - 0x80, 0x7f, 0);
2796 }
2797
2798 int
2799 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2800 {
2801 return m16_check_op (op, - 0x7f, 0x80, 0);
2802 }
2803
2804 int
2805 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2806 {
2807 return m16_check_op (op, 0x0, 0xff, 0);
2808 }
2809
2810 int
2811 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2812 {
2813 return m16_check_op (op, - 0xff, 0x0, 0);
2814 }
2815
2816 int
2817 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2818 {
2819 return m16_check_op (op, - 0x1, 0xfe, 0);
2820 }
2821
2822 int
2823 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2824 {
2825 return m16_check_op (op, 0x0, 0xff << 2, 3);
2826 }
2827
2828 int
2829 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2830 {
2831 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2832 }
2833
2834 int
2835 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2836 {
2837 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2838 }
2839
2840 int
2841 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2842 {
2843 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2844 }
2845 \f
2846 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
2847 address instruction. */
2848
2849 static bool
2850 mips_lwxs_address_p (rtx addr)
2851 {
2852 if (ISA_HAS_LWXS
2853 && GET_CODE (addr) == PLUS
2854 && REG_P (XEXP (addr, 1)))
2855 {
2856 rtx offset = XEXP (addr, 0);
2857 if (GET_CODE (offset) == MULT
2858 && REG_P (XEXP (offset, 0))
2859 && GET_CODE (XEXP (offset, 1)) == CONST_INT
2860 && INTVAL (XEXP (offset, 1)) == 4)
2861 return true;
2862 }
2863 return false;
2864 }
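
/* In other words, an address of the form
(plus (mult (reg) (const_int 4)) (reg)) can be handled by a single
lwxs instruction when ISA_HAS_LWXS. */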
2865
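/* Implement TARGET_RTX_COSTS. Estimate the cost of expression X,
returning true if *TOTAL gives the complete cost. */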
2866 static bool
2867 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2868 {
2869 enum machine_mode mode = GET_MODE (x);
2870 bool float_mode_p = FLOAT_MODE_P (mode);
2871
2872 switch (code)
2873 {
2874 case CONST_INT:
2875 if (TARGET_MIPS16)
2876 {
2877 /* A number between 1 and 8 inclusive is efficient for a shift.
2878 Otherwise, we will need an extended instruction. */
2879 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2880 || (outer_code) == LSHIFTRT)
2881 {
2882 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2883 *total = 0;
2884 else
2885 *total = COSTS_N_INSNS (1);
2886 return true;
2887 }
2888
2889 /* We can use cmpi for an xor with an unsigned 16-bit value. */
2890 if ((outer_code) == XOR
2891 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2892 {
2893 *total = 0;
2894 return true;
2895 }
2896
2897 /* We may be able to use slt or sltu for a comparison with a
2898 signed 16-bit value. (The boundary conditions aren't quite
2899 right, but this is just a heuristic anyhow.) */
2900 if (((outer_code) == LT || (outer_code) == LE
2901 || (outer_code) == GE || (outer_code) == GT
2902 || (outer_code) == LTU || (outer_code) == LEU
2903 || (outer_code) == GEU || (outer_code) == GTU)
2904 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2905 {
2906 *total = 0;
2907 return true;
2908 }
2909
2910 /* Equality comparisons with 0 are cheap. */
2911 if (((outer_code) == EQ || (outer_code) == NE)
2912 && INTVAL (x) == 0)
2913 {
2914 *total = 0;
2915 return true;
2916 }
2917
2918 /* Constants in the range 0...255 can be loaded with an unextended
2919 instruction. They are therefore as cheap as a register move.
2920
2921 Given the choice between "li R1,0...255" and "move R1,R2"
2922 (where R2 is a known constant), it is usually better to use "li",
2923 since we do not want to unnecessarily extend the lifetime
2924 of R2. */
2925 if (outer_code == SET
2926 && INTVAL (x) >= 0
2927 && INTVAL (x) < 256)
2928 {
2929 *total = 0;
2930 return true;
2931 }
2932 }
2933 else
2934 {
2935 /* These can be used anywhere. */
2936 *total = 0;
2937 return true;
2938 }
2939
2940 /* Otherwise fall through to the handling below because
2941 we'll need to construct the constant. */
2942
2943 case CONST:
2944 case SYMBOL_REF:
2945 case LABEL_REF:
2946 case CONST_DOUBLE:
2947 if (LEGITIMATE_CONSTANT_P (x))
2948 {
2949 *total = COSTS_N_INSNS (1);
2950 return true;
2951 }
2952 else
2953 {
2954 /* The value will need to be fetched from the constant pool. */
2955 *total = CONSTANT_POOL_COST;
2956 return true;
2957 }
2958
2959 case MEM:
2960 {
2961 /* If the address is legitimate, return the number of
2962 instructions it needs. */
2963 rtx addr = XEXP (x, 0);
2964 int n = mips_address_insns (addr, GET_MODE (x), true);
2965 if (n > 0)
2966 {
2967 *total = COSTS_N_INSNS (n + 1);
2968 return true;
2969 }
2970 /* Check for scaled indexed address. */
2971 if (mips_lwxs_address_p (addr))
2972 {
2973 *total = COSTS_N_INSNS (2);
2974 return true;
2975 }
2976 /* Otherwise use the default handling. */
2977 return false;
2978 }
2979
2980 case FFS:
2981 *total = COSTS_N_INSNS (6);
2982 return true;
2983
2984 case NOT:
2985 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
2986 return true;
2987
2988 case AND:
2989 case IOR:
2990 case XOR:
2991 if (mode == DImode && !TARGET_64BIT)
2992 {
2993 *total = COSTS_N_INSNS (2);
2994 return true;
2995 }
2996 return false;
2997
2998 case ASHIFT:
2999 case ASHIFTRT:
3000 case LSHIFTRT:
3001 if (mode == DImode && !TARGET_64BIT)
3002 {
3003 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
3004 ? 4 : 12);
3005 return true;
3006 }
3007 return false;
3008
3009 case ABS:
3010 if (float_mode_p)
3011 *total = COSTS_N_INSNS (1);
3012 else
3013 *total = COSTS_N_INSNS (4);
3014 return true;
3015
3016 case LO_SUM:
3017 *total = COSTS_N_INSNS (1);
3018 return true;
3019
3020 case PLUS:
3021 case MINUS:
3022 if (float_mode_p)
3023 {
3024 *total = mips_cost->fp_add;
3025 return true;
3026 }
3027
3028 else if (mode == DImode && !TARGET_64BIT)
3029 {
3030 *total = COSTS_N_INSNS (4);
3031 return true;
3032 }
3033 return false;
3034
3035 case NEG:
3036 if (mode == DImode && !TARGET_64BIT)
3037 {
3038 *total = COSTS_N_INSNS (4);
3039 return true;
3040 }
3041 return false;
3042
3043 case MULT:
3044 if (mode == SFmode)
3045 *total = mips_cost->fp_mult_sf;
3046
3047 else if (mode == DFmode)
3048 *total = mips_cost->fp_mult_df;
3049
3050 else if (mode == SImode)
3051 *total = mips_cost->int_mult_si;
3052
3053 else
3054 *total = mips_cost->int_mult_di;
3055
3056 return true;
3057
3058 case DIV:
3059 case MOD:
3060 if (float_mode_p)
3061 {
3062 if (mode == SFmode)
3063 *total = mips_cost->fp_div_sf;
3064 else
3065 *total = mips_cost->fp_div_df;
3066
3067 return true;
3068 }
3069 /* Fall through. */
3070
3071 case UDIV:
3072 case UMOD:
3073 if (mode == DImode)
3074 *total = mips_cost->int_div_di;
3075 else
3076 *total = mips_cost->int_div_si;
3077
3078 return true;
3079
3080 case SIGN_EXTEND:
3081 /* A sign extend from SImode to DImode in 64-bit mode often needs
3082 no instructions at all, because the result can often be used
3083 directly by another instruction; we'll call it one. */
3084 if (TARGET_64BIT && mode == DImode
3085 && GET_MODE (XEXP (x, 0)) == SImode)
3086 *total = COSTS_N_INSNS (1);
3087 else
3088 *total = COSTS_N_INSNS (2);
3089 return true;
3090
3091 case ZERO_EXTEND:
3092 if (TARGET_64BIT && mode == DImode
3093 && GET_MODE (XEXP (x, 0)) == SImode)
3094 *total = COSTS_N_INSNS (2);
3095 else
3096 *total = COSTS_N_INSNS (1);
3097 return true;
3098
3099 case FLOAT:
3100 case UNSIGNED_FLOAT:
3101 case FIX:
3102 case FLOAT_EXTEND:
3103 case FLOAT_TRUNCATE:
3104 case SQRT:
3105 *total = mips_cost->fp_add;
3106 return true;
3107
3108 default:
3109 return false;
3110 }
3111 }
3112
3113 /* Provide the costs of an addressing mode that contains ADDR.
3114 If ADDR is not a valid address, its cost is irrelevant. */
3115
3116 static int
3117 mips_address_cost (rtx addr)
3118 {
3119 return mips_address_insns (addr, SImode, false);
3120 }
3121 \f
3122 /* Return one word of double-word value OP, taking into account the fixed
3123 endianness of certain registers. HIGH_P is true to select the high part,
3124 false to select the low part. */
3125
3126 rtx
3127 mips_subword (rtx op, int high_p)
3128 {
3129 unsigned int byte;
3130 enum machine_mode mode;
3131
3132 mode = GET_MODE (op);
3133 if (mode == VOIDmode)
3134 mode = DImode;
3135
3136 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3137 byte = UNITS_PER_WORD;
3138 else
3139 byte = 0;
3140
3141 if (FP_REG_RTX_P (op))
3142 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
3143
3144 if (MEM_P (op))
3145 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3146
3147 return simplify_gen_subreg (word_mode, op, mode, byte);
3148 }
3149
3150
3151 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3152
3153 bool
3154 mips_split_64bit_move_p (rtx dest, rtx src)
3155 {
3156 if (TARGET_64BIT)
3157 return false;
3158
3159 /* FP->FP moves can be done in a single instruction. */
3160 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3161 return false;
3162
3163 /* Check for floating-point loads and stores. They can be done using
3164 ldc1 and sdc1 on MIPS II and above. */
3165 if (mips_isa > 1)
3166 {
3167 if (FP_REG_RTX_P (dest) && MEM_P (src))
3168 return false;
3169 if (FP_REG_RTX_P (src) && MEM_P (dest))
3170 return false;
3171 }
3172 return true;
3173 }
3174
3175
3176 /* Split a 64-bit move from SRC to DEST assuming that
3177 mips_split_64bit_move_p holds.
3178
3179 Moves into and out of FPRs cause some difficulty here. Such moves
3180 will always be DFmode, since paired FPRs are not allowed to store
3181 DImode values. The most natural representation would be two separate
3182 32-bit moves, such as:
3183
3184 (set (reg:SI $f0) (mem:SI ...))
3185 (set (reg:SI $f1) (mem:SI ...))
3186
3187 However, the second insn is invalid because odd-numbered FPRs are
3188 not allowed to store independent values. Use the patterns load_df_low,
3189 load_df_high and store_df_high instead. */
3190
3191 void
3192 mips_split_64bit_move (rtx dest, rtx src)
3193 {
3194 if (FP_REG_RTX_P (dest))
3195 {
3196 /* Loading an FPR from memory or from GPRs. */
3197 if (ISA_HAS_MXHC1)
3198 {
3199 dest = gen_lowpart (DFmode, dest);
3200 emit_insn (gen_load_df_low (dest, mips_subword (src, 0)));
3201 emit_insn (gen_mthc1 (dest, mips_subword (src, 1),
3202 copy_rtx (dest)));
3203 }
3204 else
3205 {
3206 emit_insn (gen_load_df_low (copy_rtx (dest),
3207 mips_subword (src, 0)));
3208 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
3209 copy_rtx (dest)));
3210 }
3211 }
3212 else if (FP_REG_RTX_P (src))
3213 {
3214 /* Storing an FPR into memory or GPRs. */
3215 if (ISA_HAS_MXHC1)
3216 {
3217 src = gen_lowpart (DFmode, src);
3218 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3219 emit_insn (gen_mfhc1 (mips_subword (dest, 1), src));
3220 }
3221 else
3222 {
3223 mips_emit_move (mips_subword (dest, 0), mips_subword (src, 0));
3224 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
3225 }
3226 }
3227 else
3228 {
3229 /* The operation can be split into two normal moves. Decide in
3230 which order to do them. */
3231 rtx low_dest;
3232
3233 low_dest = mips_subword (dest, 0);
3234 if (REG_P (low_dest)
3235 && reg_overlap_mentioned_p (low_dest, src))
3236 {
3237 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3238 mips_emit_move (low_dest, mips_subword (src, 0));
3239 }
3240 else
3241 {
3242 mips_emit_move (low_dest, mips_subword (src, 0));
3243 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3244 }
3245 }
3246 }
3247 \f
3248 /* Return the appropriate instructions to move SRC into DEST. Assume
3249 that SRC is operand 1 and DEST is operand 0. */
3250
3251 const char *
3252 mips_output_move (rtx dest, rtx src)
3253 {
3254 enum rtx_code dest_code, src_code;
3255 enum mips_symbol_type symbol_type;
3256 bool dbl_p;
3257
3258 dest_code = GET_CODE (dest);
3259 src_code = GET_CODE (src);
3260 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
3261
3262 if (dbl_p && mips_split_64bit_move_p (dest, src))
3263 return "#";
3264
3265 if ((src_code == REG && GP_REG_P (REGNO (src)))
3266 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3267 {
3268 if (dest_code == REG)
3269 {
3270 if (GP_REG_P (REGNO (dest)))
3271 return "move\t%0,%z1";
3272
3273 if (MD_REG_P (REGNO (dest)))
3274 return "mt%0\t%z1";
3275
3276 if (DSP_ACC_REG_P (REGNO (dest)))
3277 {
3278 static char retval[] = "mt__\t%z1,%q0";
3279 retval[2] = reg_names[REGNO (dest)][4];
3280 retval[3] = reg_names[REGNO (dest)][5];
3281 return retval;
3282 }
3283
3284 if (FP_REG_P (REGNO (dest)))
3285 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3286
3287 if (ALL_COP_REG_P (REGNO (dest)))
3288 {
3289 static char retval[] = "dmtc_\t%z1,%0";
3290
3291 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3292 return (dbl_p ? retval : retval + 1);
3293 }
3294 }
3295 if (dest_code == MEM)
3296 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
3297 }
3298 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3299 {
3300 if (src_code == REG)
3301 {
3302 if (DSP_ACC_REG_P (REGNO (src)))
3303 {
3304 static char retval[] = "mf__\t%0,%q1";
3305 retval[2] = reg_names[REGNO (src)][4];
3306 retval[3] = reg_names[REGNO (src)][5];
3307 return retval;
3308 }
3309
3310 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3311 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3312
3313 if (FP_REG_P (REGNO (src)))
3314 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3315
3316 if (ALL_COP_REG_P (REGNO (src)))
3317 {
3318 static char retval[] = "dmfc_\t%0,%1";
3319
3320 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3321 return (dbl_p ? retval : retval + 1);
3322 }
3323 }
3324
3325 if (src_code == MEM)
3326 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3327
3328 if (src_code == CONST_INT)
3329 {
3330 /* Don't use the X format, because that will give out of
3331 range numbers for 64-bit hosts and 32-bit targets. */
3332 if (!TARGET_MIPS16)
3333 return "li\t%0,%1\t\t\t# %X1";
3334
3335 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3336 return "li\t%0,%1";
3337
3338 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3339 return "#";
3340 }
3341
3342 if (src_code == HIGH)
3343 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3344
3345 if (CONST_GP_P (src))
3346 return "move\t%0,%1";
3347
3348 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3349 && mips_lo_relocs[symbol_type] != 0)
3350 {
3351 /* A signed 16-bit constant formed by applying a relocation
3352 operator to a symbolic address. */
3353 gcc_assert (!mips_split_p[symbol_type]);
3354 return "li\t%0,%R1";
3355 }
3356
3357 if (symbolic_operand (src, VOIDmode))
3358 {
3359 gcc_assert (TARGET_MIPS16
3360 ? TARGET_MIPS16_TEXT_LOADS
3361 : !TARGET_EXPLICIT_RELOCS);
3362 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
3363 }
3364 }
3365 if (src_code == REG && FP_REG_P (REGNO (src)))
3366 {
3367 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3368 {
3369 if (GET_MODE (dest) == V2SFmode)
3370 return "mov.ps\t%0,%1";
3371 else
3372 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3373 }
3374
3375 if (dest_code == MEM)
3376 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
3377 }
3378 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3379 {
3380 if (src_code == MEM)
3381 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
3382 }
3383 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3384 {
3385 static char retval[] = "l_c_\t%0,%1";
3386
3387 retval[1] = (dbl_p ? 'd' : 'w');
3388 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3389 return retval;
3390 }
3391 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3392 {
3393 static char retval[] = "s_c_\t%1,%0";
3394
3395 retval[1] = (dbl_p ? 'd' : 'w');
3396 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3397 return retval;
3398 }
3399 gcc_unreachable ();
3400 }
3401 \f
3402 /* Restore $gp from its save slot. Valid only when using o32 or
3403 o64 abicalls. */
3404
3405 void
3406 mips_restore_gp (void)
3407 {
3408 rtx address, slot;
3409
3410 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
3411
3412 address = mips_add_offset (pic_offset_table_rtx,
3413 frame_pointer_needed
3414 ? hard_frame_pointer_rtx
3415 : stack_pointer_rtx,
3416 current_function_outgoing_args_size);
3417 slot = gen_rtx_MEM (Pmode, address);
3418
3419 mips_emit_move (pic_offset_table_rtx, slot);
3420 if (!TARGET_EXPLICIT_RELOCS)
3421 emit_insn (gen_blockage ());
3422 }
3423 \f
3424 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3425
3426 static void
3427 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
3428 {
3429 emit_insn (gen_rtx_SET (VOIDmode, target,
3430 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3431 }
3432
3433 /* Return true if CMP1 is a suitable second operand for relational
3434 operator CODE. See also the *sCC patterns in mips.md. */
3435
3436 static bool
3437 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3438 {
3439 switch (code)
3440 {
3441 case GT:
3442 case GTU:
3443 return reg_or_0_operand (cmp1, VOIDmode);
3444
3445 case GE:
3446 case GEU:
3447 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3448
3449 case LT:
3450 case LTU:
3451 return arith_operand (cmp1, VOIDmode);
3452
3453 case LE:
3454 return sle_operand (cmp1, VOIDmode);
3455
3456 case LEU:
3457 return sleu_operand (cmp1, VOIDmode);
3458
3459 default:
3460 gcc_unreachable ();
3461 }
3462 }
3463
3464 /* Canonicalize LE or LEU comparisons into LT or LTU comparisons when
3465 possible, to avoid extra instructions or inverting the
3466 comparison. */
3467
3468 static bool
3469 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3470 enum machine_mode mode)
3471 {
3472 HOST_WIDE_INT original, plus_one;
3473
3474 if (GET_CODE (*cmp1) != CONST_INT)
3475 return false;
3476
3477 original = INTVAL (*cmp1);
3478 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3479
3480 switch (*code)
3481 {
3482 case LE:
3483 if (original < plus_one)
3484 {
3485 *code = LT;
3486 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3487 return true;
3488 }
3489 break;
3490
3491 case LEU:
3492 if (plus_one != 0)
3493 {
3494 *code = LTU;
3495 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3496 return true;
3497 }
3498 break;
3499
3500 default:
3501 return false;
3502 }
3503
3504 return false;
3505
3506 }
3507
3508 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3509 result in TARGET. CMP0 and TARGET are register_operands that have
3510 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3511 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3512
3513 static void
3514 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3515 rtx target, rtx cmp0, rtx cmp1)
3516 {
3517 /* First see if there is a MIPS instruction that can do this operation
3518 with CMP1 in its current form. If not, try to canonicalize the
3519 comparison to LT. If that fails, try doing the same for the
3520 inverse operation. If that also fails, force CMP1 into a register
3521 and try again. */
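/* For example, there is no "sge" instruction, so (ge x y) is normally
handled through the inverse comparison: compute (lt x y) and either
flip *INVERT_PTR or invert the result with "xori t,t,1".  (Sketch of
the usual expansion; the exact code depends on the operands.)  */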
3522 if (mips_relational_operand_ok_p (code, cmp1))
3523 mips_emit_binary (code, target, cmp0, cmp1);
3524 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3525 mips_emit_binary (code, target, cmp0, cmp1);
3526 else
3527 {
3528 enum rtx_code inv_code = reverse_condition (code);
3529 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3530 {
3531 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3532 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3533 }
3534 else if (invert_ptr == 0)
3535 {
3536 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3537 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3538 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3539 }
3540 else
3541 {
3542 *invert_ptr = !*invert_ptr;
3543 mips_emit_binary (inv_code, target, cmp0, cmp1);
3544 }
3545 }
3546 }
3547
3548 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3549 The register will have the same mode as CMP0. */
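/* For example, comparing X with 5 typically yields "xori t,x,5", while
comparing X with another register Y yields "subu t,x,y"; in both cases
T is zero exactly when the operands are equal.  (Illustrative
instruction choices; the optab expansion picks the final form.)  */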
3550
3551 static rtx
3552 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3553 {
3554 if (cmp1 == const0_rtx)
3555 return cmp0;
3556
3557 if (uns_arith_operand (cmp1, VOIDmode))
3558 return expand_binop (GET_MODE (cmp0), xor_optab,
3559 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3560
3561 return expand_binop (GET_MODE (cmp0), sub_optab,
3562 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3563 }
3564
3565 /* Convert *CODE into a code that can be used in a floating-point
3566 scc instruction (c.<cond>.<fmt>). Return true if the values of
3567 the condition code registers will be inverted, with 0 indicating
3568 that the condition holds. */
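/* For example, there is no c.ne.<fmt> instruction, so (ne x y) is
handled by emitting c.eq.<fmt> and having the consumer test the
condition code for 0 rather than 1.  (Informal example.)  */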
3569
3570 static bool
3571 mips_reverse_fp_cond_p (enum rtx_code *code)
3572 {
3573 switch (*code)
3574 {
3575 case NE:
3576 case LTGT:
3577 case ORDERED:
3578 *code = reverse_condition_maybe_unordered (*code);
3579 return true;
3580
3581 default:
3582 return false;
3583 }
3584 }
3585
3586 /* Convert a comparison into something that can be used in a branch or
3587 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3588 being compared and *CODE is the code used to compare them.
3589
3590 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3591 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3592 otherwise any standard branch condition can be used. The standard branch
3593 conditions are:
3594
3595 - EQ/NE between two registers.
3596 - any comparison between a register and zero. */
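/* For example, a MIPS16 branch on "a < b" (NEED_EQ_NE_P) cannot test
the relation directly, so an scc instruction computes the result into
a temporary and the branch then compares that temporary against zero.
(Informal example.)  */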
3597
3598 static void
3599 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3600 {
3601 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3602 {
3603 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3604 {
3605 *op0 = cmp_operands[0];
3606 *op1 = cmp_operands[1];
3607 }
3608 else if (*code == EQ || *code == NE)
3609 {
3610 if (need_eq_ne_p)
3611 {
3612 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3613 *op1 = const0_rtx;
3614 }
3615 else
3616 {
3617 *op0 = cmp_operands[0];
3618 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3619 }
3620 }
3621 else
3622 {
3623 /* The comparison needs a separate scc instruction. Store the
3624 result of the scc in *OP0 and compare it against zero. */
3625 bool invert = false;
3626 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3627 *op1 = const0_rtx;
3628 mips_emit_int_relational (*code, &invert, *op0,
3629 cmp_operands[0], cmp_operands[1]);
3630 *code = (invert ? EQ : NE);
3631 }
3632 }
3633 else
3634 {
3635 enum rtx_code cmp_code;
3636
3637 /* Floating-point tests use a separate c.cond.fmt comparison to
3638 set a condition code register. The branch or conditional move
3639 will then compare that register against zero.
3640
3641 Set CMP_CODE to the code of the comparison instruction and
3642 *CODE to the code that the branch or move should use. */
3643 cmp_code = *code;
3644 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
3645 *op0 = (ISA_HAS_8CC
3646 ? gen_reg_rtx (CCmode)
3647 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3648 *op1 = const0_rtx;
3649 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3650 }
3651 }
3652 \f
3653 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3654 Store the result in TARGET and return true if successful.
3655
3656 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3657
3658 bool
3659 mips_emit_scc (enum rtx_code code, rtx target)
3660 {
3661 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3662 return false;
3663
3664 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3665 if (code == EQ || code == NE)
3666 {
3667 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3668 mips_emit_binary (code, target, zie, const0_rtx);
3669 }
3670 else
3671 mips_emit_int_relational (code, 0, target,
3672 cmp_operands[0], cmp_operands[1]);
3673 return true;
3674 }
3675
3676 /* Emit the common code for doing conditional branches.
3677 operand[0] is the label to jump to.
3678 The comparison operands are saved away by cmp{si,di,sf,df}. */
3679
3680 void
3681 gen_conditional_branch (rtx *operands, enum rtx_code code)
3682 {
3683 rtx op0, op1, condition;
3684
3685 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3686 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3687 emit_jump_insn (gen_condjump (condition, operands[0]));
3688 }
3689
3690 /* Implement:
3691
3692 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3693 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3694
3695 void
3696 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3697 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
3698 {
3699 rtx cmp_result;
3700 bool reversed_p;
3701
3702 reversed_p = mips_reverse_fp_cond_p (&cond);
3703 cmp_result = gen_reg_rtx (CCV2mode);
3704 emit_insn (gen_scc_ps (cmp_result,
3705 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
3706 if (reversed_p)
3707 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
3708 cmp_result));
3709 else
3710 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3711 cmp_result));
3712 }
3713
3714 /* Emit the common code for conditional moves. OPERANDS is the array
3715 of operands passed to the conditional move define_expand. */
3716
3717 void
3718 gen_conditional_move (rtx *operands)
3719 {
3720 enum rtx_code code;
3721 rtx op0, op1;
3722
3723 code = GET_CODE (operands[1]);
3724 mips_emit_compare (&code, &op0, &op1, true);
3725 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3726 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3727 gen_rtx_fmt_ee (code,
3728 GET_MODE (op0),
3729 op0, op1),
3730 operands[2], operands[3])));
3731 }
3732
3733 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3734 the conditional_trap expander. */
3735
3736 void
3737 mips_gen_conditional_trap (rtx *operands)
3738 {
3739 rtx op0, op1;
3740 enum rtx_code cmp_code = GET_CODE (operands[0]);
3741 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3742
3743 /* MIPS conditional trap machine instructions don't have GT or LE
3744 flavors, so we must invert the comparison and convert to LT and
3745 GE, respectively. */
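/* For example, a GT trap on registers A and B is emitted roughly as
"tlt $b,$a", which traps exactly when A > B.  (Informal example.)  */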
3746 switch (cmp_code)
3747 {
3748 case GT: cmp_code = LT; break;
3749 case LE: cmp_code = GE; break;
3750 case GTU: cmp_code = LTU; break;
3751 case LEU: cmp_code = GEU; break;
3752 default: break;
3753 }
3754 if (cmp_code == GET_CODE (operands[0]))
3755 {
3756 op0 = cmp_operands[0];
3757 op1 = cmp_operands[1];
3758 }
3759 else
3760 {
3761 op0 = cmp_operands[1];
3762 op1 = cmp_operands[0];
3763 }
3764 op0 = force_reg (mode, op0);
3765 if (!arith_operand (op1, mode))
3766 op1 = force_reg (mode, op1);
3767
3768 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3769 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3770 operands[1]));
3771 }
3772 \f
3773 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
3774
3775 static bool
3776 mips_ok_for_lazy_binding_p (rtx x)
3777 {
3778 return (TARGET_USE_GOT
3779 && GET_CODE (x) == SYMBOL_REF
3780 && !mips_symbol_binds_local_p (x));
3781 }
3782
3783 /* Load function address ADDR into register DEST. SIBCALL_P is true
3784 if the address is needed for a sibling call. */
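/* With o32 abicalls and explicit relocs this usually amounts to the
classic PIC call sequence:

lw $25,%call16(foo)($28)
...
jalr $25

where the lazy-binding stub resolves FOO on its first call.
(Informal sketch; "foo" and the register numbers are illustrative.)  */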
3785
3786 static void
3787 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
3788 {
3789 /* If we're generating PIC, and this call is to a global function,
3790 try to allow its address to be resolved lazily. This isn't
3791 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
3792 to the stub would be our caller's gp, not ours. */
3793 if (TARGET_EXPLICIT_RELOCS
3794 && !(sibcall_p && TARGET_CALL_SAVED_GP)
3795 && mips_ok_for_lazy_binding_p (addr))
3796 {
3797 rtx high, lo_sum_symbol;
3798
3799 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
3800 addr, SYMBOL_GOTOFF_CALL);
3801 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
3802 if (Pmode == SImode)
3803 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
3804 else
3805 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
3806 }
3807 else
3808 mips_emit_move (dest, addr);
3809 }
3810
3811
3812 /* Expand a call or call_value instruction. RESULT is where the
3813 result will go (null for calls), ADDR is the address of the
3814 function, ARGS_SIZE is the size of the arguments and AUX is
3815 the value passed to us by mips_function_arg. SIBCALL_P is true
3816 if we are expanding a sibling call, false if we're expanding
3817 a normal call. */
3818
3819 void
3820 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
3821 {
3822 rtx orig_addr, pattern, insn;
3823
3824 orig_addr = addr;
3825 if (!call_insn_operand (addr, VOIDmode))
3826 {
3827 addr = gen_reg_rtx (Pmode);
3828 mips_load_call_address (addr, orig_addr, sibcall_p);
3829 }
3830
3831 if (TARGET_MIPS16
3832 && TARGET_HARD_FLOAT_ABI
3833 && build_mips16_call_stub (result, addr, args_size,
3834 aux == 0 ? 0 : (int) GET_MODE (aux)))
3835 return;
3836
3837 if (result == 0)
3838 pattern = (sibcall_p
3839 ? gen_sibcall_internal (addr, args_size)
3840 : gen_call_internal (addr, args_size));
3841 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
3842 {
3843 rtx reg1, reg2;
3844
3845 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
3846 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
3847 pattern =
3848 (sibcall_p
3849 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
3850 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
3851 }
3852 else
3853 pattern = (sibcall_p
3854 ? gen_sibcall_value_internal (result, addr, args_size)
3855 : gen_call_value_internal (result, addr, args_size));
3856
3857 insn = emit_call_insn (pattern);
3858
3859 /* Lazy-binding stubs require $gp to be valid on entry. */
3860 if (mips_ok_for_lazy_binding_p (orig_addr))
3861 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3862 }
3863
3864
3865 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
3866
3867 static bool
3868 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3869 {
3870 if (!TARGET_SIBCALLS)
3871 return false;
3872
3873 /* We can't do a sibcall if the called function is a MIPS16 function
3874 because there is no direct "jx" instruction equivalent to "jalx" to
3875 switch the ISA mode. */
3876 if (decl && SYMBOL_REF_MIPS16_FUNC_P (XEXP (DECL_RTL (decl), 0)))
3877 return false;
3878
3879 /* Otherwise OK. */
3880 return true;
3881 }
3882 \f
3883 /* Emit code to move general operand SRC into condition-code
3884 register DEST. SCRATCH is a scratch TFmode float register.
3885 The sequence is:
3886
3887 FP1 = SRC
3888 FP2 = 0.0f
3889 DEST = FP2 < FP1
3890
3891 where FP1 and FP2 are single-precision float registers
3892 taken from SCRATCH. */
3893
3894 void
3895 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3896 {
3897 rtx fp1, fp2;
3898
3899 /* Change the source to SFmode. */
3900 if (MEM_P (src))
3901 src = adjust_address (src, SFmode, 0);
3902 else if (REG_P (src) || GET_CODE (src) == SUBREG)
3903 src = gen_rtx_REG (SFmode, true_regnum (src));
3904
3905 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3906 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
3907
3908 mips_emit_move (copy_rtx (fp1), src);
3909 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
3910 emit_insn (gen_slt_sf (dest, fp2, fp1));
3911 }
3912 \f
3913 /* Emit code to change the current function's return address to
3914 ADDRESS. SCRATCH is available as a scratch register, if needed.
3915 ADDRESS and SCRATCH are both word-mode GPRs. */
3916
3917 void
3918 mips_set_return_address (rtx address, rtx scratch)
3919 {
3920 rtx slot_address;
3921
3922 compute_frame_size (get_frame_size ());
3923 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
3924 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3925 cfun->machine->frame.gp_sp_offset);
3926
3927 mips_emit_move (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3928 }
3929 \f
3930 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3931 Assume that the areas do not overlap. */
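/* For example, copying 9 word-aligned bytes on a 32-bit target loads
two SImode chunks into temporary registers, stores them, and hands the
final byte to move_by_pieces.  (Illustrative case.)  */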
3932
3933 static void
3934 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3935 {
3936 HOST_WIDE_INT offset, delta;
3937 unsigned HOST_WIDE_INT bits;
3938 int i;
3939 enum machine_mode mode;
3940 rtx *regs;
3941
3942 /* Work out how many bits to move at a time. If both operands have
3943 half-word alignment, it is usually better to move in half words.
3944 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3945 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3946 Otherwise move word-sized chunks. */
3947 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3948 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3949 bits = BITS_PER_WORD / 2;
3950 else
3951 bits = BITS_PER_WORD;
3952
3953 mode = mode_for_size (bits, MODE_INT, 0);
3954 delta = bits / BITS_PER_UNIT;
3955
3956 /* Allocate a buffer for the temporary registers. */
3957 regs = alloca (sizeof (rtx) * length / delta);
3958
3959 /* Load as many BITS-sized chunks as possible. Use a normal load if
3960 the source has enough alignment, otherwise use left/right pairs. */
3961 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3962 {
3963 regs[i] = gen_reg_rtx (mode);
3964 if (MEM_ALIGN (src) >= bits)
3965 mips_emit_move (regs[i], adjust_address (src, mode, offset));
3966 else
3967 {
3968 rtx part = adjust_address (src, BLKmode, offset);
3969 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3970 gcc_unreachable ();
3971 }
3972 }
3973
3974 /* Copy the chunks to the destination. */
3975 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3976 if (MEM_ALIGN (dest) >= bits)
3977 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
3978 else
3979 {
3980 rtx part = adjust_address (dest, BLKmode, offset);
3981 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3982 gcc_unreachable ();
3983 }
3984
3985 /* Mop up any left-over bytes. */
3986 if (offset < length)
3987 {
3988 src = adjust_address (src, BLKmode, offset);
3989 dest = adjust_address (dest, BLKmode, offset);
3990 move_by_pieces (dest, src, length - offset,
3991 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
3992 }
3993 }
3994 \f
3995 #define MAX_MOVE_REGS 4
3996 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
3997
3998
3999 /* Helper function for doing a loop-based block operation on memory
4000 reference MEM. Each iteration of the loop will operate on LENGTH
4001 bytes of MEM.
4002
4003 Create a new base register for use within the loop and point it to
4004 the start of MEM. Create a new memory reference that uses this
4005 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
4006
4007 static void
4008 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
4009 rtx *loop_reg, rtx *loop_mem)
4010 {
4011 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
4012
4013 /* Although the new mem does not refer to a known location,
4014 it does keep up to LENGTH bytes of alignment. */
4015 *loop_mem = change_address (mem, BLKmode, *loop_reg);
4016 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
4017 }
4018
4019
4020 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
4021 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
4022 memory regions do not overlap. */
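/* For example, with 4-byte words MAX_MOVE_BYTES is 16, so a 100-byte
copy runs the loop six times (96 bytes) and then copies the remaining
4 bytes straight-line.  (Illustrative figures.)  */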
4023
4024 static void
4025 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
4026 {
4027 rtx label, src_reg, dest_reg, final_src;
4028 HOST_WIDE_INT leftover;
4029
4030 leftover = length % MAX_MOVE_BYTES;
4031 length -= leftover;
4032
4033 /* Create registers and memory references for use within the loop. */
4034 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
4035 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
4036
4037 /* Calculate the value that SRC_REG should have after the last iteration
4038 of the loop. */
4039 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
4040 0, 0, OPTAB_WIDEN);
4041
4042 /* Emit the start of the loop. */
4043 label = gen_label_rtx ();
4044 emit_label (label);
4045
4046 /* Emit the loop body. */
4047 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
4048
4049 /* Move on to the next block. */
4050 mips_emit_move (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
4051 mips_emit_move (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
4052
4053 /* Emit the loop condition. */
4054 if (Pmode == DImode)
4055 emit_insn (gen_cmpdi (src_reg, final_src));
4056 else
4057 emit_insn (gen_cmpsi (src_reg, final_src));
4058 emit_jump_insn (gen_bne (label));
4059
4060 /* Mop up any left-over bytes. */
4061 if (leftover)
4062 mips_block_move_straight (dest, src, leftover);
4063 }
4064 \f
4065
4066 /* Expand a loop of synci insns for the address range [BEGIN, END). */
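/* The generated code is roughly:

rdhwr $inc,$1          # load the cache line size
1: synci 0($begin)
sltu $cmp,$end,$begin  # cmp = (begin > end)
addu $begin,$begin,$inc
beq $cmp,$0,1b

(Informal sketch; register names are illustrative.)  */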
4067
4068 void
4069 mips_expand_synci_loop (rtx begin, rtx end)
4070 {
4071 rtx inc, label, cmp, cmp_result;
4072
4073 /* Load INC with the cache line size (rdhwr INC,$1). */
4074 inc = gen_reg_rtx (SImode);
4075 emit_insn (gen_rdhwr (inc, const1_rtx));
4076
4077 /* Loop back to here. */
4078 label = gen_label_rtx ();
4079 emit_label (label);
4080
4081 emit_insn (gen_synci (begin));
4082
4083 cmp = gen_reg_rtx (Pmode);
4084 mips_emit_binary (GTU, cmp, begin, end);
4085
4086 mips_emit_binary (PLUS, begin, begin, inc);
4087
4088 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
4089 emit_jump_insn (gen_condjump (cmp_result, label));
4090 }
4091 \f
4092 /* Expand a movmemsi instruction. */
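/* On a 32-bit target (MAX_MOVE_BYTES == 16), a constant copy of up to
32 bytes is expanded inline, a larger constant copy uses the loop above
when optimizing, and everything else falls back to the generic
expansion (typically a memcpy call).  (Illustrative figures; they scale
with UNITS_PER_WORD.)  */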
4093
4094 bool
4095 mips_expand_block_move (rtx dest, rtx src, rtx length)
4096 {
4097 if (GET_CODE (length) == CONST_INT)
4098 {
4099 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
4100 {
4101 mips_block_move_straight (dest, src, INTVAL (length));
4102 return true;
4103 }
4104 else if (optimize)
4105 {
4106 mips_block_move_loop (dest, src, INTVAL (length));
4107 return true;
4108 }
4109 }
4110 return false;
4111 }
4112 \f
4113 /* Argument support functions. */
4114
4115 /* Initialize CUMULATIVE_ARGS for a function. */
4116
4117 void
4118 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
4119 rtx libname ATTRIBUTE_UNUSED)
4120 {
4121 static CUMULATIVE_ARGS zero_cum;
4122 tree param, next_param;
4123
4124 *cum = zero_cum;
4125 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
4126
4127 /* Determine whether this function takes variable arguments. The
4128 argument list ends in 'void_type_node' if it does not; when it does,
4129 the standard MIPS calling sequence passes all arguments in the
4130 general purpose registers. */
4131
4132 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
4133 param != 0; param = next_param)
4134 {
4135 next_param = TREE_CHAIN (param);
4136 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
4137 cum->gp_reg_found = 1;
4138 }
4139 }
4140
4141
4142 /* Fill INFO with information about a single argument. CUM is the
4143 cumulative state for earlier arguments. MODE is the mode of this
4144 argument and TYPE is its type (if known). NAMED is true if this
4145 is a named (fixed) argument rather than a variable one. */
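/* For example, under o32 with hard float, "f (double d, int i)" passes
D in $f12 and I in $6, because D occupies the first two argument words.
(Illustrative case; see the ABI_32 handling below.)  */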
4146
4147 static void
4148 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4149 tree type, int named, struct mips_arg_info *info)
4150 {
4151 bool doubleword_aligned_p;
4152 unsigned int num_bytes, num_words, max_regs;
4153
4154 /* Work out the size of the argument. */
4155 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4156 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4157
4158 /* Decide whether it should go in a floating-point register, assuming
4159 one is free. Later code checks for availability.
4160
4161 The checks against UNITS_PER_FPVALUE handle the soft-float and
4162 single-float cases. */
4163 switch (mips_abi)
4164 {
4165 case ABI_EABI:
4166 /* The EABI conventions have traditionally been defined in terms
4167 of TYPE_MODE, regardless of the actual type. */
4168 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4169 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4170 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4171 break;
4172
4173 case ABI_32:
4174 case ABI_O64:
4175 /* Only leading floating-point scalars are passed in
4176 floating-point registers. We also handle vector floats the same
4177 way, which is OK because they are not covered by the standard ABI. */
4178 info->fpr_p = (!cum->gp_reg_found
4179 && cum->arg_number < 2
4180 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4181 || VECTOR_FLOAT_TYPE_P (type))
4182 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4183 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4184 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4185 break;
4186
4187 case ABI_N32:
4188 case ABI_64:
4189 /* Scalar and complex floating-point types are passed in
4190 floating-point registers. */
4191 info->fpr_p = (named
4192 && (type == 0 || FLOAT_TYPE_P (type))
4193 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4194 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4195 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4196 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4197
4198 /* ??? According to the ABI documentation, the real and imaginary
4199 parts of complex floats should be passed in individual registers.
4200 The real and imaginary parts of stack arguments are supposed
4201 to be contiguous and there should be an extra word of padding
4202 at the end.
4203
4204 This has two problems. First, it makes it impossible to use a
4205 single "void *" va_list type, since register and stack arguments
4206 are passed differently. (At the time of writing, MIPSpro cannot
4207 handle complex float varargs correctly.) Second, it's unclear
4208 what should happen when there is only one register free.
4209
4210 For now, we assume that named complex floats should go into FPRs
4211 if there are two FPRs free, otherwise they should be passed in the
4212 same way as a struct containing two floats. */
4213 if (info->fpr_p
4214 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4215 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4216 {
4217 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4218 info->fpr_p = false;
4219 else
4220 num_words = 2;
4221 }
4222 break;
4223
4224 default:
4225 gcc_unreachable ();
4226 }
4227
4228 /* See whether the argument has doubleword alignment. */
4229 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4230
4231 /* Set REG_OFFSET to the register count we're interested in.
4232 The EABI allocates the floating-point registers separately,
4233 but the other ABIs allocate them like integer registers. */
4234 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4235 ? cum->num_fprs
4236 : cum->num_gprs);
4237
4238 /* Advance to an even register if the argument is doubleword-aligned. */
4239 if (doubleword_aligned_p)
4240 info->reg_offset += info->reg_offset & 1;
4241
4242 /* Work out the offset of a stack argument. */
4243 info->stack_offset = cum->stack_words;
4244 if (doubleword_aligned_p)
4245 info->stack_offset += info->stack_offset & 1;
4246
4247 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4248
4249 /* Partition the argument between registers and stack. */
4250 info->reg_words = MIN (num_words, max_regs);
4251 info->stack_words = num_words - info->reg_words;
4252 }
4253
4254
4255 /* INFO describes an argument that is passed in a single-register value.
4256 Return the register it uses, assuming that FPRs are available if
4257 HARD_FLOAT_P. */
4258
4259 static unsigned int
4260 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4261 {
4262 if (!info->fpr_p || !hard_float_p)
4263 return GP_ARG_FIRST + info->reg_offset;
4264 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4265 /* In o32, the second argument is always passed in $f14
4266 for TARGET_DOUBLE_FLOAT, regardless of whether the
4267 first argument was a word or doubleword. */
4268 return FP_ARG_FIRST + 2;
4269 else
4270 return FP_ARG_FIRST + info->reg_offset;
4271 }
4272
4273 /* Implement FUNCTION_ARG_ADVANCE. */
4274
4275 void
4276 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4277 tree type, int named)
4278 {
4279 struct mips_arg_info info;
4280
4281 mips_arg_info (cum, mode, type, named, &info);
4282
4283 if (!info.fpr_p)
4284 cum->gp_reg_found = true;
4285
4286 /* See the comment above the cumulative args structure in mips.h
4287 for an explanation of what this code does. It assumes the O32
4288 ABI, which passes at most 2 arguments in float registers. */
4289 if (cum->arg_number < 2 && info.fpr_p)
4290 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4291
4292 if (mips_abi != ABI_EABI || !info.fpr_p)
4293 cum->num_gprs = info.reg_offset + info.reg_words;
4294 else if (info.reg_words > 0)
4295 cum->num_fprs += MAX_FPRS_PER_FMT;
4296
4297 if (info.stack_words > 0)
4298 cum->stack_words = info.stack_offset + info.stack_words;
4299
4300 cum->arg_number++;
4301 }
4302
4303 /* Implement FUNCTION_ARG. */
4304
4305 struct rtx_def *
4306 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4307 tree type, int named)
4308 {
4309 struct mips_arg_info info;
4310
4311 /* We will be called with a mode of VOIDmode after the last argument
4312 has been seen. Whatever we return will be passed to the call
4313 insn. If we need a mips16 fp_code, return a REG with the code
4314 stored as the mode. */
4315 if (mode == VOIDmode)
4316 {
4317 if (TARGET_MIPS16 && cum->fp_code != 0)
4318 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4319
4320 else
4321 return 0;
4322 }
4323
4324 mips_arg_info (cum, mode, type, named, &info);
4325
4326 /* Return straight away if the whole argument is passed on the stack. */
4327 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4328 return 0;
4329
4330 if (type != 0
4331 && TREE_CODE (type) == RECORD_TYPE
4332 && TARGET_NEWABI
4333 && TYPE_SIZE_UNIT (type)
4334 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4335 && named)
4336 {
4337 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4338 structure contains a double in its entirety, then that 64-bit
4339 chunk is passed in a floating point register. */
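/* For example, under n64 a named argument of type
"struct { double d; int i; }" has its first doubleword passed in a
floating-point register and its second in a GPR.  (Illustrative case,
assuming hard float.)  */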
4340 tree field;
4341
4342 /* First check to see if there is any such field. */
4343 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4344 if (TREE_CODE (field) == FIELD_DECL
4345 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4346 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4347 && host_integerp (bit_position (field), 0)
4348 && int_bit_position (field) % BITS_PER_WORD == 0)
4349 break;
4350
4351 if (field != 0)
4352 {
4353 /* Now handle the special case by returning a PARALLEL
4354 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4355 chunks are passed in registers. */
4356 unsigned int i;
4357 HOST_WIDE_INT bitpos;
4358 rtx ret;
4359
4360 /* assign_parms checks the mode of ENTRY_PARM, so we must
4361 use the actual mode here. */
4362 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4363
4364 bitpos = 0;
4365 field = TYPE_FIELDS (type);
4366 for (i = 0; i < info.reg_words; i++)
4367 {
4368 rtx reg;
4369
4370 for (; field; field = TREE_CHAIN (field))
4371 if (TREE_CODE (field) == FIELD_DECL
4372 && int_bit_position (field) >= bitpos)
4373 break;
4374
4375 if (field
4376 && int_bit_position (field) == bitpos
4377 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4378 && !TARGET_SOFT_FLOAT
4379 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4380 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4381 else
4382 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4383
4384 XVECEXP (ret, 0, i)
4385 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4386 GEN_INT (bitpos / BITS_PER_UNIT));
4387
4388 bitpos += BITS_PER_WORD;
4389 }
4390 return ret;
4391 }
4392 }
4393
4394 /* Handle the n32/n64 conventions for passing complex floating-point
4395 arguments in FPR pairs. The real part goes in the lower register
4396 and the imaginary part goes in the upper register. */
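/* For example, a leading "_Complex double" argument under n64 puts its
real part in $f12 and its imaginary part in $f13.  (Illustrative
case.)  */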
4397 if (TARGET_NEWABI
4398 && info.fpr_p
4399 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4400 {
4401 rtx real, imag;
4402 enum machine_mode inner;
4403 int reg;
4404
4405 inner = GET_MODE_INNER (mode);
4406 reg = FP_ARG_FIRST + info.reg_offset;
4407 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4408 {
4409 /* Real part in registers, imaginary part on stack. */
4410 gcc_assert (info.stack_words == info.reg_words);
4411 return gen_rtx_REG (inner, reg);
4412 }
4413 else
4414 {
4415 gcc_assert (info.stack_words == 0);
4416 real = gen_rtx_EXPR_LIST (VOIDmode,
4417 gen_rtx_REG (inner, reg),
4418 const0_rtx);
4419 imag = gen_rtx_EXPR_LIST (VOIDmode,
4420 gen_rtx_REG (inner,
4421 reg + info.reg_words / 2),
4422 GEN_INT (GET_MODE_SIZE (inner)));
4423 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4424 }
4425 }
4426
4427 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4428 }
4429
4430
4431 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4432
4433 static int
4434 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4435 enum machine_mode mode, tree type, bool named)
4436 {
4437 struct mips_arg_info info;
4438
4439 mips_arg_info (cum, mode, type, named, &info);
4440 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4441 }
4442
4443
4444 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4445 PARM_BOUNDARY bits of alignment, but will be given anything up
4446 to STACK_BOUNDARY bits if the type requires it. */
4447
4448 int
4449 function_arg_boundary (enum machine_mode mode, tree type)
4450 {
4451 unsigned int alignment;
4452
4453 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4454 if (alignment < PARM_BOUNDARY)
4455 alignment = PARM_BOUNDARY;
4456 if (alignment > STACK_BOUNDARY)
4457 alignment = STACK_BOUNDARY;
4458 return alignment;
4459 }
4460
4461 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4462 upward rather than downward. In other words, return true if the
4463 first byte of the stack slot has useful data, false if the last
4464 byte does. */
4465
4466 bool
4467 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4468 {
4469 /* On little-endian targets, the first byte of every stack argument
4470 is passed in the first byte of the stack slot. */
4471 if (!BYTES_BIG_ENDIAN)
4472 return true;
4473
4474 /* Otherwise, integral types are padded downward: the last byte of a
4475 stack argument is passed in the last byte of the stack slot. */
4476 if (type != 0
4477 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
4478 : GET_MODE_CLASS (mode) == MODE_INT)
4479 return false;
4480
4481 /* Big-endian o64 pads floating-point arguments downward. */
4482 if (mips_abi == ABI_O64)
4483 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4484 return false;
4485
4486 /* Other types are padded upward for o32, o64, n32 and n64. */
4487 if (mips_abi != ABI_EABI)
4488 return true;
4489
4490 /* Arguments smaller than a stack slot are padded downward. */
4491 if (mode != BLKmode)
4492 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4493 else
4494 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4495 }
4496
4497
4498 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4499 if the least significant byte of the register has useful data. Return
4500 the opposite if the most significant byte does. */
4501
4502 bool
4503 mips_pad_reg_upward (enum machine_mode mode, tree type)
4504 {
4505 /* No shifting is required for floating-point arguments. */
4506 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4507 return !BYTES_BIG_ENDIAN;
4508
4509 /* Otherwise, apply the same padding to register arguments as we do
4510 to stack arguments. */
4511 return mips_pad_arg_upward (mode, type);
4512 }
4513 \f
4514 static void
4515 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4516 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4517 int no_rtl)
4518 {
4519 CUMULATIVE_ARGS local_cum;
4520 int gp_saved, fp_saved;
4521
4522 /* The caller has advanced CUM up to, but not beyond, the last named
4523 argument. Advance a local copy of CUM past the last "real" named
4524 argument, to find out how many registers are left over. */
4525
4526 local_cum = *cum;
4527 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4528
4529 /* Find out how many registers we need to save. */
4530 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4531 fp_saved = (EABI_FLOAT_VARARGS_P
4532 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4533 : 0);
4534
4535 if (!no_rtl)
4536 {
4537 if (gp_saved > 0)
4538 {
4539 rtx ptr, mem;
4540
4541 ptr = plus_constant (virtual_incoming_args_rtx,
4542 REG_PARM_STACK_SPACE (cfun->decl)
4543 - gp_saved * UNITS_PER_WORD);
4544 mem = gen_rtx_MEM (BLKmode, ptr);
4545 set_mem_alias_set (mem, get_varargs_alias_set ());
4546
4547 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4548 mem, gp_saved);
4549 }
4550 if (fp_saved > 0)
4551 {
4552 /* We can't use move_block_from_reg, because it will use
4553 the wrong mode. */
4554 enum machine_mode mode;
4555 int off, i;
4556
4557 /* Set OFF to the offset from virtual_incoming_args_rtx of
4558 the first float register. The FP save area lies below
4559 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
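/* Worked example (illustrative values only): with gp_saved == 5,
UNITS_PER_WORD == 4, fp_saved == 6, UNITS_PER_FPVALUE == 8 and
UNITS_PER_FPREG == 8, OFF starts at -20, is rounded down to -24,
and finishes at -72.  */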
4560 off = -gp_saved * UNITS_PER_WORD;
4561 off &= ~(UNITS_PER_FPVALUE - 1);
4562 off -= fp_saved * UNITS_PER_FPREG;
4563
4564 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4565
4566 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4567 i += MAX_FPRS_PER_FMT)
4568 {
4569 rtx ptr, mem;
4570
4571 ptr = plus_constant (virtual_incoming_args_rtx, off);
4572 mem = gen_rtx_MEM (mode, ptr);
4573 set_mem_alias_set (mem, get_varargs_alias_set ());
4574 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4575 off += UNITS_PER_HWFPVALUE;
4576 }
4577 }
4578 }
4579 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4580 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4581 + fp_saved * UNITS_PER_FPREG);
4582 }
4583
4584 /* Create the va_list data type.
4585 We keep 3 pointers, and two offsets.
4586 Two pointers are to the overflow area, which starts at the CFA.
4587 One of these is constant, for addressing into the GPR save area below it.
4588 The other is advanced up the stack through the overflow region.
4589 The third pointer is to the GPR save area. Since the FPR save area
4590 is just below it, we can address FPR slots off this pointer.
4591 We also keep two one-byte offsets, which are to be subtracted from the
4592 constant pointers to yield addresses in the GPR and FPR save areas.
4593 These are downcounted as float or non-float arguments are used,
4594 and when they get to zero, the argument must be obtained from the
4595 overflow region.
4596 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4597 pointer is enough. It's started at the GPR save area, and is
4598 advanced, period.
4599 Note that the GPR save area is not constant size, due to optimization
4600 in the prologue. Hence, we can't use a design with two pointers
4601 and two offsets, although we could have designed this with two pointers
4602 and three offsets. */
4603
4604 static tree
4605 mips_build_builtin_va_list (void)
4606 {
4607 if (EABI_FLOAT_VARARGS_P)
4608 {
4609 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4610 tree array, index;
4611
4612 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4613
4614 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4615 ptr_type_node);
4616 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4617 ptr_type_node);
4618 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4619 ptr_type_node);
4620 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4621 unsigned_char_type_node);
4622 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4623 unsigned_char_type_node);
4624 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4625 warn on every user file. */
4626 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4627 array = build_array_type (unsigned_char_type_node,
4628 build_index_type (index));
4629 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4630
4631 DECL_FIELD_CONTEXT (f_ovfl) = record;
4632 DECL_FIELD_CONTEXT (f_gtop) = record;
4633 DECL_FIELD_CONTEXT (f_ftop) = record;
4634 DECL_FIELD_CONTEXT (f_goff) = record;
4635 DECL_FIELD_CONTEXT (f_foff) = record;
4636 DECL_FIELD_CONTEXT (f_res) = record;
4637
4638 TYPE_FIELDS (record) = f_ovfl;
4639 TREE_CHAIN (f_ovfl) = f_gtop;
4640 TREE_CHAIN (f_gtop) = f_ftop;
4641 TREE_CHAIN (f_ftop) = f_goff;
4642 TREE_CHAIN (f_goff) = f_foff;
4643 TREE_CHAIN (f_foff) = f_res;
4644
4645 layout_type (record);
4646 return record;
4647 }
4648 else if (TARGET_IRIX && TARGET_IRIX6)
4649 /* On IRIX 6, this type is 'char *'. */
4650 return build_pointer_type (char_type_node);
4651 else
4652 /* Otherwise, we use 'void *'. */
4653 return ptr_type_node;
4654 }
4655
4656 /* Implement va_start. */
4657
4658 void
4659 mips_va_start (tree valist, rtx nextarg)
4660 {
4661 if (EABI_FLOAT_VARARGS_P)
4662 {
4663 const CUMULATIVE_ARGS *cum;
4664 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4665 tree ovfl, gtop, ftop, goff, foff;
4666 tree t;
4667 int gpr_save_area_size;
4668 int fpr_save_area_size;
4669 int fpr_offset;
4670
4671 cum = &current_function_args_info;
4672 gpr_save_area_size
4673 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4674 fpr_save_area_size
4675 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4676
4677 f_ovfl = TYPE_FIELDS (va_list_type_node);
4678 f_gtop = TREE_CHAIN (f_ovfl);
4679 f_ftop = TREE_CHAIN (f_gtop);
4680 f_goff = TREE_CHAIN (f_ftop);
4681 f_foff = TREE_CHAIN (f_goff);
4682
4683 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4684 NULL_TREE);
4685 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4686 NULL_TREE);
4687 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4688 NULL_TREE);
4689 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4690 NULL_TREE);
4691 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4692 NULL_TREE);
4693
4694 /* Emit code to initialize OVFL, which points to the next varargs
4695 stack argument. CUM->STACK_WORDS gives the number of stack
4696 words used by named arguments. */
4697 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4698 if (cum->stack_words > 0)
4699 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4700 size_int (cum->stack_words * UNITS_PER_WORD));
4701 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4702 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4703
4704 /* Emit code to initialize GTOP, the top of the GPR save area. */
4705 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4706 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
4707 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4708
4709 /* Emit code to initialize FTOP, the top of the FPR save area.
4710 This address is gpr_save_area_bytes below GTOP, rounded
4711 down to the next fp-aligned boundary. */
4712 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4713 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4714 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4715 if (fpr_offset)
4716 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4717 size_int (-fpr_offset));
4718 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
4719 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4720
4721 /* Emit code to initialize GOFF, the offset from GTOP of the
4722 next GPR argument. */
4723 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
4724 build_int_cst (NULL_TREE, gpr_save_area_size));
4725 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4726
4727 /* Likewise emit code to initialize FOFF, the offset from FTOP
4728 of the next FPR argument. */
4729 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
4730 build_int_cst (NULL_TREE, fpr_save_area_size));
4731 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4732 }
4733 else
4734 {
4735 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4736 std_expand_builtin_va_start (valist, nextarg);
4737 }
4738 }
4739 \f
4740 /* Implement va_arg. */
4741
4742 static tree
4743 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4744 {
4745 HOST_WIDE_INT size, rsize;
4746 tree addr;
4747 bool indirect;
4748
4749 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4750
4751 if (indirect)
4752 type = build_pointer_type (type);
4753
4754 size = int_size_in_bytes (type);
4755 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4756
4757 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4758 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4759 else
4760 {
4761 /* Not a simple merged stack. */
4762
4763 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4764 tree ovfl, top, off, align;
4765 HOST_WIDE_INT osize;
4766 tree t, u;
4767
4768 f_ovfl = TYPE_FIELDS (va_list_type_node);
4769 f_gtop = TREE_CHAIN (f_ovfl);
4770 f_ftop = TREE_CHAIN (f_gtop);
4771 f_goff = TREE_CHAIN (f_ftop);
4772 f_foff = TREE_CHAIN (f_goff);
4773
4774 /* We maintain separate pointers and offsets for floating-point
4775 and integer arguments, but we need similar code in both cases.
4776 Let:
4777
4778 TOP be the top of the register save area;
4779 OFF be the offset from TOP of the next register;
4780 ADDR_RTX be the address of the argument;
4781 RSIZE be the number of bytes used to store the argument
4782 when it's in the register save area;
4783 OSIZE be the number of bytes used to store it when it's
4784 in the stack overflow area; and
4785 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4786
4787 The code we want is:
4788
4789 1: off &= -rsize; // round down
4790 2: if (off != 0)
4791 3: {
4792 4: addr_rtx = top - off;
4793 5: off -= rsize;
4794 6: }
4795 7: else
4796 8: {
4797 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
4798 10: addr_rtx = ovfl + PADDING;
4799 11: ovfl += osize;
4800 14: }
4801
4802 [1] and [9] can sometimes be optimized away. */
4803
4804 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4805 NULL_TREE);
4806
4807 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4808 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4809 {
4810 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4811 NULL_TREE);
4812 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4813 NULL_TREE);
4814
4815 /* When floating-point registers are saved to the stack,
4816 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4817 of the float's precision. */
4818 rsize = UNITS_PER_HWFPVALUE;
4819
4820 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4821 (= PARM_BOUNDARY bits). This can be different from RSIZE
4822 in two cases:
4823
4824 (1) On 32-bit targets when TYPE is a structure such as:
4825
4826 struct s { float f; };
4827
4828 Such structures are passed in paired FPRs, so RSIZE
4829 will be 8 bytes. However, the structure only takes
4830 up 4 bytes of memory, so OSIZE will only be 4.
4831
4832 (2) In combinations such as -mgp64 -msingle-float
4833 -fshort-double. Doubles passed in registers
4834 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4835 but those passed on the stack take up
4836 UNITS_PER_WORD bytes. */
4837 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4838 }
4839 else
4840 {
4841 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4842 NULL_TREE);
4843 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4844 NULL_TREE);
4845 if (rsize > UNITS_PER_WORD)
4846 {
4847 /* [1] Emit code for: off &= -rsize. */
4848 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4849 build_int_cst (NULL_TREE, -rsize));
4850 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
4851 gimplify_and_add (t, pre_p);
4852 }
4853 osize = rsize;
4854 }
4855
4856 /* [2] Emit code to branch if off == 0. */
4857 t = build2 (NE_EXPR, boolean_type_node, off,
4858 build_int_cst (TREE_TYPE (off), 0));
4859 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4860
4861 /* [5] Emit code for: off -= rsize. We do this as a form of
4862 post-increment not available to C. Also widen for the
4863 coming pointer arithmetic. */
4864 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4865 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4866 t = fold_convert (sizetype, t);
4867 t = fold_build1 (NEGATE_EXPR, sizetype, t);
4868
4869 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4870 the argument has RSIZE - SIZE bytes of leading padding. */
4871 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
4872 if (BYTES_BIG_ENDIAN && rsize > size)
4873 {
4874 u = size_int (rsize - size);
4875 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4876 }
4877 COND_EXPR_THEN (addr) = t;
4878
4879 if (osize > UNITS_PER_WORD)
4880 {
4881 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
4882 u = size_int (osize - 1);
4883 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4884 t = fold_convert (sizetype, t);
4885 u = size_int (-osize);
4886 t = build2 (BIT_AND_EXPR, sizetype, t, u);
4887 t = fold_convert (TREE_TYPE (ovfl), t);
4888 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4889 }
4890 else
4891 align = NULL;
4892
4893 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4894 post-increment ovfl by osize. On big-endian machines,
4895 the argument has OSIZE - SIZE bytes of leading padding. */
4896 u = fold_convert (TREE_TYPE (ovfl),
4897 build_int_cst (NULL_TREE, osize));
4898 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4899 if (BYTES_BIG_ENDIAN && osize > size)
4900 {
4901 u = size_int (osize - size);
4902 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4903 }
4904
4905 /* String [9] and [10,11] together. */
4906 if (align)
4907 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4908 COND_EXPR_ELSE (addr) = t;
4909
4910 addr = fold_convert (build_pointer_type (type), addr);
4911 addr = build_va_arg_indirect_ref (addr);
4912 }
4913
4914 if (indirect)
4915 addr = build_va_arg_indirect_ref (addr);
4916
4917 return addr;
4918 }
4919 \f
4920 /* Return true if it is possible to use left/right accesses for a
4921 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4922 returning true, update *OP, *LEFT and *RIGHT as follows:
4923
4924 *OP is a BLKmode reference to the whole field.
4925
4926 *LEFT is a QImode reference to the first byte if big endian or
4927 the last byte if little endian. This address can be used in the
4928 left-side instructions (lwl, swl, ldl, sdl).
4929
4930 *RIGHT is a QImode reference to the opposite end of the field and
4931 can be used in the matching right-side instructions (lwr, swr, ldr, sdr). */
4932
4933 static bool
4934 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4935 rtx *left, rtx *right)
4936 {
4937 rtx first, last;
4938
4939 /* Check that the operand really is a MEM; not all the extv and
4940 extzv predicates enforce this. */
4941 if (!MEM_P (*op))
4942 return false;
4943
4944 /* Check that the size is valid. */
4945 if (width != 32 && (!TARGET_64BIT || width != 64))
4946 return false;
4947
4948 /* We can only access byte-aligned values. Since we are always passed
4949 a reference to the first byte of the field, it is not necessary to
4950 do anything with BITPOS after this check. */
4951 if (bitpos % BITS_PER_UNIT != 0)
4952 return false;
4953
4954 /* Reject aligned bitfields: we want to use a normal load or store
4955 instead of a left/right pair. */
4956 if (MEM_ALIGN (*op) >= width)
4957 return false;
4958
4959 /* Adjust *OP to refer to the whole field. This also has the effect
4960 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4961 *op = adjust_address (*op, BLKmode, 0);
4962 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4963
4964 /* Get references to both ends of the field. We deliberately don't
4965 use the original QImode *OP for FIRST since the new BLKmode one
4966 might have a simpler address. */
4967 first = adjust_address (*op, QImode, 0);
4968 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4969
4970 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4971 be the upper word and RIGHT the lower word. */
4972 if (TARGET_BIG_ENDIAN)
4973 *left = first, *right = last;
4974 else
4975 *left = last, *right = first;
4976
4977 return true;
4978 }
4979
4980
4981 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4982 Return true on success. We only handle cases where zero_extract is
4983 equivalent to sign_extract. */
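/* For a 32-bit big-endian load this typically expands to:

lwl $dest,0($addr)
lwr $dest,3($addr)

with the roles of the two offsets swapped on little-endian targets.
(Informal sketch; the exact references come from
mips_get_unaligned_mem.)  */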
4984
4985 bool
4986 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4987 {
4988 rtx left, right, temp;
4989
4990 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4991 paradoxical word_mode subreg. This is the only case in which
4992 we allow the destination to be larger than the source. */
4993 if (GET_CODE (dest) == SUBREG
4994 && GET_MODE (dest) == DImode
4995 && SUBREG_BYTE (dest) == 0
4996 && GET_MODE (SUBREG_REG (dest)) == SImode)
4997 dest = SUBREG_REG (dest);
4998
4999 /* After the above adjustment, the destination must be the same
5000 width as the source. */
5001 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
5002 return false;
5003
5004 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
5005 return false;
5006
5007 temp = gen_reg_rtx (GET_MODE (dest));
5008 if (GET_MODE (dest) == DImode)
5009 {
5010 emit_insn (gen_mov_ldl (temp, src, left));
5011 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
5012 }
5013 else
5014 {
5015 emit_insn (gen_mov_lwl (temp, src, left));
5016 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
5017 }
5018 return true;
5019 }
5020
5021
5022 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
5023 true on success. */
5024
5025 bool
5026 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
5027 {
5028 rtx left, right;
5029 enum machine_mode mode;
5030
5031 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
5032 return false;
5033
5034 mode = mode_for_size (width, MODE_INT, 0);
5035 src = gen_lowpart (mode, src);
5036
5037 if (mode == DImode)
5038 {
5039 emit_insn (gen_mov_sdl (dest, src, left));
5040 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
5041 }
5042 else
5043 {
5044 emit_insn (gen_mov_swl (dest, src, left));
5045 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
5046 }
5047 return true;
5048 }
5049
5050 /* Return true if X is a MEM with the same size as MODE. */
5051
5052 bool
5053 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
5054 {
5055 rtx size;
5056
5057 if (!MEM_P (x))
5058 return false;
5059
5060 size = MEM_SIZE (x);
5061 return size && INTVAL (size) == GET_MODE_SIZE (mode);
5062 }
5063
5064 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
5065 source of an "ext" instruction or the destination of an "ins"
5066 instruction. OP must be a register operand and the following
5067 conditions must hold:
5068
5069 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
5070 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5071 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
5072
5073 Also reject lengths equal to a word as they are better handled
5074 by the move patterns. */
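/* For example, (zero_extract:SI x 8 4) can be implemented as
"ext $t,$x,4,8", whereas a full-word extraction is rejected and left
to the move patterns.  (Illustrative operands.)  */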
5075
5076 bool
5077 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
5078 {
5079 HOST_WIDE_INT len, pos;
5080
5081 if (!ISA_HAS_EXT_INS
5082 || !register_operand (op, VOIDmode)
5083 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
5084 return false;
5085
5086 len = INTVAL (size);
5087 pos = INTVAL (position);
5088
5089 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
5090 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
5091 return false;
5092
5093 return true;
5094 }
5095
5096 /* Set up globals to generate code for the ISA or processor
5097 described by INFO. */
5098
5099 static void
5100 mips_set_architecture (const struct mips_cpu_info *info)
5101 {
5102 if (info != 0)
5103 {
5104 mips_arch_info = info;
5105 mips_arch = info->cpu;
5106 mips_isa = info->isa;
5107 }
5108 }
5109
5110
5111 /* Likewise for tuning. */
5112
5113 static void
5114 mips_set_tune (const struct mips_cpu_info *info)
5115 {
5116 if (info != 0)
5117 {
5118 mips_tune_info = info;
5119 mips_tune = info->cpu;
5120 }
5121 }
5122
5123 /* Initialize mips_split_addresses from the associated command-line
5124 settings.
5125
5126 mips_split_addresses is a half-way house between explicit
5127 relocations and the traditional assembler macros. It can
5128 split absolute 32-bit symbolic constants into a high/lo_sum
5129 pair but uses macros for other sorts of access.
5130
5131 Like explicit relocation support for REL targets, it relies
5132 on GNU extensions in the assembler and the linker.
5133
5134 Although this code should work for -O0, it has traditionally
5135 been treated as an optimization. */
5136
5137 static void
5138 mips_init_split_addresses (void)
5139 {
5140 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
5141 && optimize && !flag_pic
5142 && !ABI_HAS_64BIT_SYMBOLS)
5143 mips_split_addresses = 1;
5144 else
5145 mips_split_addresses = 0;
5146 }
5147
5148 /* (Re-)Initialize information about relocs. */
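/* For example, once mips_split_p[SYMBOL_ABSOLUTE] is set, an absolute
symbolic address is loaded as:

lui $t,%hi(sym)
addiu $t,$t,%lo(sym)

instead of the "la" macro, using the operators recorded below.
(Informal example.)  */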
5149
5150 static void
5151 mips_init_relocs (void)
5152 {
5153 memset (mips_split_p, '\0', sizeof (mips_split_p));
5154 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
5155 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
5156
5157 if (ABI_HAS_64BIT_SYMBOLS)
5158 {
5159 if (TARGET_EXPLICIT_RELOCS)
5160 {
5161 mips_split_p[SYMBOL_64_HIGH] = true;
5162 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5163 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5164
5165 mips_split_p[SYMBOL_64_MID] = true;
5166 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5167 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5168
5169 mips_split_p[SYMBOL_64_LOW] = true;
5170 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5171 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5172
5173 mips_split_p[SYMBOL_ABSOLUTE] = true;
5174 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5175 }
5176 }
5177 else
5178 {
5179 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses || TARGET_MIPS16)
5180 {
5181 mips_split_p[SYMBOL_ABSOLUTE] = true;
5182 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
5183 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5184
5185 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
5186 }
5187 }
5188
5189 if (TARGET_MIPS16)
5190 {
5191 /* The high part is provided by a pseudo copy of $gp. */
5192 mips_split_p[SYMBOL_GP_RELATIVE] = true;
5193 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
5194 }
5195
5196 if (TARGET_EXPLICIT_RELOCS)
5197 {
5198 /* Small data constants are kept whole until after reload,
5199 then lowered by mips_rewrite_small_data. */
5200 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
5201
5202 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
5203 if (TARGET_NEWABI)
5204 {
5205 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5206 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
5207 }
5208 else
5209 {
5210 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5211 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
5212 }
5213
5214 if (TARGET_XGOT)
5215 {
5216 /* The HIGH and LO_SUM are matched by special .md patterns. */
5217 mips_split_p[SYMBOL_GOT_DISP] = true;
5218
5219 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
5220 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
5221 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
5222
5223 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5224 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5225 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5226 }
5227 else
5228 {
5229 if (TARGET_NEWABI)
5230 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
5231 else
5232 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
5233 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5234 }
5235 }
5236
5237 if (TARGET_NEWABI)
5238 {
5239 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5240 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5241 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5242 }
5243
5244 /* Thread-local relocation operators. */
5245 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5246 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5247 mips_split_p[SYMBOL_DTPREL] = 1;
5248 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5249 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5250 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5251 mips_split_p[SYMBOL_TPREL] = 1;
5252 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5253 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5254
5255 mips_lo_relocs[SYMBOL_HALF] = "%half(";
5256 }
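/* Worked example of the tables above (instruction sequences are
   illustrative; the real code generation lives in mips.md and the
   address-legitimization routines): with explicit relocs on a NEWABI
   target, a page/offset GOT access to a local symbol "x" uses the
   %got_page/%got_ofst operators installed above, roughly

	lw	$2,%got_page(x)($28)
	addiu	$2,$2,%got_ofst(x)

   while the corresponding o32 sequence uses %got(x) and %lo(x).  */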
5257
5258 static GTY(()) int was_mips16_p = -1;
5259
5260 /* Set up the target-dependent global state so that it matches the
5261 current function's ISA mode. */
5262
5263 static void
5264 mips_set_mips16_mode (int mips16_p)
5265 {
5266 if (mips16_p == was_mips16_p)
5267 return;
5268
5269 /* Restore base settings of various flags. */
5270 target_flags = mips_base_target_flags;
5271 align_loops = mips_base_align_loops;
5272 align_jumps = mips_base_align_jumps;
5273 align_functions = mips_base_align_functions;
5274 flag_schedule_insns = mips_base_schedule_insns;
5275 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
5276 flag_delayed_branch = mips_flag_delayed_branch;
5277
5278 if (mips16_p)
5279 {
5280 /* Select mips16 instruction set. */
5281 target_flags |= MASK_MIPS16;
5282
5283 /* Don't run the scheduler before reload, since it tends to
5284 increase register pressure. */
5285 flag_schedule_insns = 0;
5286
5287 /* Don't do hot/cold partitioning. The constant layout code expects
5288 the whole function to be in a single section. */
5289 flag_reorder_blocks_and_partition = 0;
5290
5291 /* Silently disable -mexplicit-relocs since it doesn't apply
5292 to mips16 code. Even so, it would be overly pedantic to warn
5293 about "-mips16 -mexplicit-relocs", especially given that
5294 we use a %gprel() operator. */
5295 target_flags &= ~MASK_EXPLICIT_RELOCS;
5296
5297 /* Silently disable DSP extensions. */
5298 target_flags &= ~MASK_DSP;
5299 target_flags &= ~MASK_DSPR2;
5300 }
5301 else
5302 {
5303 /* Reset to select base non-mips16 ISA. */
5304 target_flags &= ~MASK_MIPS16;
5305
5306 /* When using explicit relocs, we call dbr_schedule from within
5307 mips_reorg. */
5308 if (TARGET_EXPLICIT_RELOCS)
5309 flag_delayed_branch = 0;
5310
5311 /* Provide default values for align_* for 64-bit targets. */
5312 if (TARGET_64BIT)
5313 {
5314 if (align_loops == 0)
5315 align_loops = 8;
5316 if (align_jumps == 0)
5317 align_jumps = 8;
5318 if (align_functions == 0)
5319 align_functions = 8;
5320 }
5321 }
5322
5323 /* (Re)initialize mips target internals for new ISA. */
5324 mips_init_split_addresses ();
5325 mips_init_relocs ();
5326
5327 if (was_mips16_p >= 0)
5328 /* Reinitialize target-dependent state. */
5329 target_reinit ();
5330
5331 was_mips16_p = TARGET_MIPS16;
5332 }
5333
5334 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
5335 function should use the MIPS16 ISA and switch modes accordingly. */
5336
5337 static void
5338 mips_set_current_function (tree fndecl)
5339 {
5340 int mips16p;
5341 if (errorcount || sorrycount)
5342 /* Avoid generating RTL when fndecl is possibly invalid. Best to fall
5343 back on non-MIPS16 mode to avoid any strange secondary errors about
5344 use of unsupported features in MIPS16 mode. */
5345 mips16p = false;
5346 else if (fndecl)
5347 mips16p = SYMBOL_REF_MIPS16_FUNC_P (XEXP (DECL_RTL (fndecl), 0));
5348 else
5349 mips16p = mips_base_mips16;
5350 mips_set_mips16_mode (mips16p);
5351 }
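/* Illustrative user-level example, assuming the "mips16" and
   "nomips16" function attributes that this file handles elsewhere:
   compiling the two definitions below would make
   mips_set_current_function flip the global MIPS16 state once for
   each function.  Kept under "#if 0"; it is not part of this file.  */
#if 0
int __attribute__ ((mips16)) small_helper (int x) { return x + 1; }
int __attribute__ ((nomips16)) fast_path (int x) { return x << 2; }
#endif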
5352
5353 /* Implement TARGET_HANDLE_OPTION. */
5354
5355 static bool
5356 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
5357 {
5358 switch (code)
5359 {
5360 case OPT_mabi_:
5361 if (strcmp (arg, "32") == 0)
5362 mips_abi = ABI_32;
5363 else if (strcmp (arg, "o64") == 0)
5364 mips_abi = ABI_O64;
5365 else if (strcmp (arg, "n32") == 0)
5366 mips_abi = ABI_N32;
5367 else if (strcmp (arg, "64") == 0)
5368 mips_abi = ABI_64;
5369 else if (strcmp (arg, "eabi") == 0)
5370 mips_abi = ABI_EABI;
5371 else
5372 return false;
5373 return true;
5374
5375 case OPT_march_:
5376 case OPT_mtune_:
5377 return mips_parse_cpu (arg) != 0;
5378
5379 case OPT_mips:
5380 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
5381 return mips_isa_info != 0;
5382
5383 case OPT_mno_flush_func:
5384 mips_cache_flush_func = NULL;
5385 return true;
5386
5387 case OPT_mcode_readable_:
5388 if (strcmp (arg, "yes") == 0)
5389 mips_code_readable = CODE_READABLE_YES;
5390 else if (strcmp (arg, "pcrel") == 0)
5391 mips_code_readable = CODE_READABLE_PCREL;
5392 else if (strcmp (arg, "no") == 0)
5393 mips_code_readable = CODE_READABLE_NO;
5394 else
5395 return false;
5396 return true;
5397
5398 default:
5399 return true;
5400 }
5401 }
5402
5403 /* Set up the threshold for data to go into the small data area, instead
5404 of the normal data area, and detect any conflicts in the switches. */
5405
5406 void
5407 override_options (void)
5408 {
5409 int i, start, regno;
5410 enum machine_mode mode;
5411
5412 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5413 SUBTARGET_OVERRIDE_OPTIONS;
5414 #endif
5415
5416 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
5417
5418 /* The following code determines the architecture and register size.
5419 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
5420 The GAS and GCC code should be kept in sync as much as possible. */
5421
5422 if (mips_arch_string != 0)
5423 mips_set_architecture (mips_parse_cpu (mips_arch_string));
5424
5425 if (mips_isa_info != 0)
5426 {
5427 if (mips_arch_info == 0)
5428 mips_set_architecture (mips_isa_info);
5429 else if (mips_arch_info->isa != mips_isa_info->isa)
5430 error ("-%s conflicts with the other architecture options, "
5431 "which specify a %s processor",
5432 mips_isa_info->name,
5433 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
5434 }
5435
5436 if (mips_arch_info == 0)
5437 {
5438 #ifdef MIPS_CPU_STRING_DEFAULT
5439 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
5440 #else
5441 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
5442 #endif
5443 }
5444
5445 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
5446 error ("-march=%s is not compatible with the selected ABI",
5447 mips_arch_info->name);
5448
5449 /* Optimize for mips_arch, unless -mtune selects a different processor. */
5450 if (mips_tune_string != 0)
5451 mips_set_tune (mips_parse_cpu (mips_tune_string));
5452
5453 if (mips_tune_info == 0)
5454 mips_set_tune (mips_arch_info);
5455
5456 /* Set cost structure for the processor. */
5457 if (optimize_size)
5458 mips_cost = &mips_rtx_cost_optimize_size;
5459 else
5460 mips_cost = &mips_rtx_cost_data[mips_tune];
5461
5462 /* If the user hasn't specified a branch cost, use the processor's
5463 default. */
5464 if (mips_branch_cost == 0)
5465 mips_branch_cost = mips_cost->branch_cost;
5466
5467 if ((target_flags_explicit & MASK_64BIT) != 0)
5468 {
5469 /* The user specified the size of the integer registers. Make sure
5470 it agrees with the ABI and ISA. */
5471 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
5472 error ("-mgp64 used with a 32-bit processor");
5473 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
5474 error ("-mgp32 used with a 64-bit ABI");
5475 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
5476 error ("-mgp64 used with a 32-bit ABI");
5477 }
5478 else
5479 {
5480 /* Infer the integer register size from the ABI and processor.
5481 Restrict ourselves to 32-bit registers if that's all the
5482 processor has, or if the ABI cannot handle 64-bit registers. */
5483 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
5484 target_flags &= ~MASK_64BIT;
5485 else
5486 target_flags |= MASK_64BIT;
5487 }
5488
5489 if ((target_flags_explicit & MASK_FLOAT64) != 0)
5490 {
5491 /* Really, -mfp32 and -mfp64 are ornamental options. There's
5492 only one right answer here. */
5493 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
5494 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
5495 else if (!TARGET_64BIT && TARGET_FLOAT64
5496 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
5497 error ("-mgp32 and -mfp64 can only be combined if the target"
5498 " supports the mfhc1 and mthc1 instructions");
5499 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
5500 error ("unsupported combination: %s", "-mfp64 -msingle-float");
5501 }
5502 else
5503 {
5504 /* -msingle-float selects 32-bit float registers. Otherwise the
5505 float registers should be the same size as the integer ones. */
5506 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
5507 target_flags |= MASK_FLOAT64;
5508 else
5509 target_flags &= ~MASK_FLOAT64;
5510 }
5511
5512 /* End of code shared with GAS. */
5513
5514 if ((target_flags_explicit & MASK_LONG64) == 0)
5515 {
5516 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
5517 target_flags |= MASK_LONG64;
5518 else
5519 target_flags &= ~MASK_LONG64;
5520 }
5521
5522 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
5523 && (target_flags_explicit & MASK_SOFT_FLOAT_ABI) == 0)
5524 {
5525 /* For some configurations, it is useful to have -march control
5526 the default setting of MASK_SOFT_FLOAT_ABI. */
5527 switch ((int) mips_arch)
5528 {
5529 case PROCESSOR_R4100:
5530 case PROCESSOR_R4111:
5531 case PROCESSOR_R4120:
5532 case PROCESSOR_R4130:
5533 target_flags |= MASK_SOFT_FLOAT_ABI;
5534 break;
5535
5536 default:
5537 target_flags &= ~MASK_SOFT_FLOAT_ABI;
5538 break;
5539 }
5540 }
5541
5542 if (!TARGET_OLDABI)
5543 flag_pcc_struct_return = 0;
5544
5545 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
5546 {
5547 /* If neither -mbranch-likely nor -mno-branch-likely was given
5548 on the command line, set MASK_BRANCHLIKELY based on the target
5549 architecture.
5550
5551 By default, we enable use of Branch Likely instructions on
5552 all architectures which support them with the following
5553 exceptions: when creating MIPS32 or MIPS64 code, and when
5554 tuning for architectures where their use tends to hurt
5555 performance.
5556
5557 The MIPS32 and MIPS64 architecture specifications say "Software
5558 is strongly encouraged to avoid use of Branch Likely
5559 instructions, as they will be removed from a future revision
5560 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
5561 issue those instructions unless instructed to do so by
5562 -mbranch-likely. */
5563 if (ISA_HAS_BRANCHLIKELY
5564 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
5565 && !(TUNE_MIPS5500 || TUNE_SB1))
5566 target_flags |= MASK_BRANCHLIKELY;
5567 else
5568 target_flags &= ~MASK_BRANCHLIKELY;
5569 }
5570 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
5571 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
5572
5573 /* The effect of -mabicalls isn't defined for the EABI. */
5574 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
5575 {
5576 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
5577 target_flags &= ~MASK_ABICALLS;
5578 }
5579
5580 /* MIPS16 cannot generate PIC yet. */
5581 if (TARGET_MIPS16 && (flag_pic || TARGET_ABICALLS))
5582 {
5583 sorry ("MIPS16 PIC");
5584 target_flags &= ~MASK_ABICALLS;
5585 flag_pic = flag_pie = flag_shlib = 0;
5586 }
5587
5588 if (TARGET_ABICALLS)
5589 /* We need to set flag_pic for executables as well as DSOs
5590 because we may reference symbols that are not defined in
5591 the final executable. (MIPS does not use things like
5592 copy relocs, for example.)
5593
5594 Also, there is a body of code that uses __PIC__ to distinguish
5595 between -mabicalls and -mno-abicalls code. */
5596 flag_pic = 1;
5597
5598 /* -mvr4130-align is a "speed over size" optimization: it usually produces
5599 faster code, but at the expense of more nops. Enable it at -O3 and
5600 above. */
5601 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
5602 target_flags |= MASK_VR4130_ALIGN;
5603
5604 /* Prefer a call to memcpy over inline code when optimizing for size,
5605 though see MOVE_RATIO in mips.h. */
5606 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
5607 target_flags |= MASK_MEMCPY;
5608
5609 /* If we have a nonzero small-data limit, check that the -mgpopt
5610 setting is consistent with the other target flags. */
5611 if (mips_section_threshold > 0)
5612 {
5613 if (!TARGET_GPOPT)
5614 {
5615 if (!TARGET_MIPS16 && !TARGET_EXPLICIT_RELOCS)
5616 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
5617
5618 TARGET_LOCAL_SDATA = false;
5619 TARGET_EXTERN_SDATA = false;
5620 }
5621 else
5622 {
5623 if (TARGET_VXWORKS_RTP)
5624 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
5625
5626 if (TARGET_ABICALLS)
5627 warning (0, "cannot use small-data accesses for %qs",
5628 "-mabicalls");
5629 }
5630 }
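/* Example option combinations for the checks above (illustrative,
   assuming a nonzero small-data limit such as -G8):
   "-G8 -mno-gpopt -mexplicit-relocs" is accepted but disables both
   local and extern small data; "-G8 -mno-gpopt" is rejected for
   non-MIPS16 code unless explicit relocs are enabled by default;
   "-G8 -mabicalls" is accepted with a warning about small-data
   accesses.  */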
5631
5632 #ifdef MIPS_TFMODE_FORMAT
5633 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
5634 #endif
5635
5636 /* Make sure that the user didn't turn off paired single support when
5637 MIPS-3D support is requested. */
5638 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
5639 && !TARGET_PAIRED_SINGLE_FLOAT)
5640 error ("-mips3d requires -mpaired-single");
5641
5642 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
5643 if (TARGET_MIPS3D)
5644 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
5645
5646 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
5647 and TARGET_HARD_FLOAT are both true. */
5648 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
5649 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
5650
5651 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
5652 enabled. */
5653 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
5654 error ("-mips3d/-mpaired-single must be used with -mips64");
5655
5656 /* If TARGET_DSPR2, enable MASK_DSP. */
5657 if (TARGET_DSPR2)
5658 target_flags |= MASK_DSP;
5659
5660 mips_print_operand_punct['?'] = 1;
5661 mips_print_operand_punct['#'] = 1;
5662 mips_print_operand_punct['/'] = 1;
5663 mips_print_operand_punct['&'] = 1;
5664 mips_print_operand_punct['!'] = 1;
5665 mips_print_operand_punct['*'] = 1;
5666 mips_print_operand_punct['@'] = 1;
5667 mips_print_operand_punct['.'] = 1;
5668 mips_print_operand_punct['('] = 1;
5669 mips_print_operand_punct[')'] = 1;
5670 mips_print_operand_punct['['] = 1;
5671 mips_print_operand_punct[']'] = 1;
5672 mips_print_operand_punct['<'] = 1;
5673 mips_print_operand_punct['>'] = 1;
5674 mips_print_operand_punct['{'] = 1;
5675 mips_print_operand_punct['}'] = 1;
5676 mips_print_operand_punct['^'] = 1;
5677 mips_print_operand_punct['$'] = 1;
5678 mips_print_operand_punct['+'] = 1;
5679 mips_print_operand_punct['~'] = 1;
5680
5681 /* Set up array to map GCC register number to debug register number.
5682 Ignore the special purpose register numbers. */
5683
5684 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5685 {
5686 mips_dbx_regno[i] = INVALID_REGNUM;
5687 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
5688 mips_dwarf_regno[i] = i;
5689 else
5690 mips_dwarf_regno[i] = INVALID_REGNUM;
5691 }
5692
5693 start = GP_DBX_FIRST - GP_REG_FIRST;
5694 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
5695 mips_dbx_regno[i] = i + start;
5696
5697 start = FP_DBX_FIRST - FP_REG_FIRST;
5698 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
5699 mips_dbx_regno[i] = i + start;
5700
5701 /* HI and LO debug registers use big-endian ordering. */
5702 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
5703 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
5704 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
5705 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
5706 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
5707 {
5708 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
5709 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
5710 }
5711
5712 /* Set up array giving whether a given register can hold a given mode. */
5713
5714 for (mode = VOIDmode;
5715 mode != MAX_MACHINE_MODE;
5716 mode = (enum machine_mode) ((int)mode + 1))
5717 {
5718 register int size = GET_MODE_SIZE (mode);
5719 register enum mode_class class = GET_MODE_CLASS (mode);
5720
5721 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5722 {
5723 register int temp;
5724
5725 if (mode == CCV2mode)
5726 temp = (ISA_HAS_8CC
5727 && ST_REG_P (regno)
5728 && (regno - ST_REG_FIRST) % 2 == 0);
5729
5730 else if (mode == CCV4mode)
5731 temp = (ISA_HAS_8CC
5732 && ST_REG_P (regno)
5733 && (regno - ST_REG_FIRST) % 4 == 0);
5734
5735 else if (mode == CCmode)
5736 {
5737 if (! ISA_HAS_8CC)
5738 temp = (regno == FPSW_REGNUM);
5739 else
5740 temp = (ST_REG_P (regno) || GP_REG_P (regno)
5741 || FP_REG_P (regno));
5742 }
5743
5744 else if (GP_REG_P (regno))
5745 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
5746
5747 else if (FP_REG_P (regno))
5748 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
5749 || (MIN_FPRS_PER_FMT == 1
5750 && size <= UNITS_PER_FPREG))
5751 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
5752 || class == MODE_VECTOR_FLOAT)
5753 && size <= UNITS_PER_FPVALUE)
5754 /* Allow integer modes that fit into a single
5755 register. We need to put integers into FPRs
5756 when using instructions like cvt and trunc.
5757 We can't allow sizes smaller than a word, since
5758 the FPU has no appropriate load/store
5759 instructions for those. */
5760 || (class == MODE_INT
5761 && size >= MIN_UNITS_PER_WORD
5762 && size <= UNITS_PER_FPREG)
5763 /* Allow TFmode for CCmode reloads. */
5764 || (ISA_HAS_8CC && mode == TFmode)));
5765
5766 else if (ACC_REG_P (regno))
5767 temp = (INTEGRAL_MODE_P (mode)
5768 && size <= UNITS_PER_WORD * 2
5769 && (size <= UNITS_PER_WORD
5770 || regno == MD_REG_FIRST
5771 || (DSP_ACC_REG_P (regno)
5772 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
5773
5774 else if (ALL_COP_REG_P (regno))
5775 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
5776 else
5777 temp = 0;
5778
5779 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
5780 }
5781 }
5782
5783 /* Save the GPRs in word_mode-sized chunks. word_mode hasn't been
5784 initialized yet, so we can't use that here. */
5785 gpr_mode = TARGET_64BIT ? DImode : SImode;
5786
5787 /* Function to allocate machine-dependent function status. */
5788 init_machine_status = &mips_init_machine_status;
5789
5790 /* Default to working around R4000 errata only if the processor
5791 was selected explicitly. */
5792 if ((target_flags_explicit & MASK_FIX_R4000) == 0
5793 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
5794 target_flags |= MASK_FIX_R4000;
5795
5796 /* Default to working around R4400 errata only if the processor
5797 was selected explicitly. */
5798 if ((target_flags_explicit & MASK_FIX_R4400) == 0
5799 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
5800 target_flags |= MASK_FIX_R4400;
5801
5802 /* Save base state of options. */
5803 mips_base_mips16 = TARGET_MIPS16;
5804 mips_base_target_flags = target_flags;
5805 mips_base_schedule_insns = flag_schedule_insns;
5806 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
5807 mips_base_align_loops = align_loops;
5808 mips_base_align_jumps = align_jumps;
5809 mips_base_align_functions = align_functions;
5810 mips_flag_delayed_branch = flag_delayed_branch;
5811
5812 /* Now select the mips16 or 32-bit instruction set, as requested. */
5813 mips_set_mips16_mode (mips_base_mips16);
5814 }
5815
5816 /* Swap the register information for registers I and I + 1, which
5817 currently have the wrong endianness. Note that the registers'
5818 fixedness and call-clobberedness might have been set on the
5819 command line. */
5820
5821 static void
5822 mips_swap_registers (unsigned int i)
5823 {
5824 int tmpi;
5825 const char *tmps;
5826
5827 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
5828 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
5829
5830 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
5831 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
5832 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
5833 SWAP_STRING (reg_names[i], reg_names[i + 1]);
5834
5835 #undef SWAP_STRING
5836 #undef SWAP_INT
5837 }
5838
5839 /* Implement CONDITIONAL_REGISTER_USAGE. */
5840
5841 void
5842 mips_conditional_register_usage (void)
5843 {
5844 if (!TARGET_DSP)
5845 {
5846 int regno;
5847
5848 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
5849 fixed_regs[regno] = call_used_regs[regno] = 1;
5850 }
5851 if (!TARGET_HARD_FLOAT)
5852 {
5853 int regno;
5854
5855 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5856 fixed_regs[regno] = call_used_regs[regno] = 1;
5857 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5858 fixed_regs[regno] = call_used_regs[regno] = 1;
5859 }
5860 else if (! ISA_HAS_8CC)
5861 {
5862 int regno;
5863
5864 /* We only have a single condition code register. We
5865 implement this by hiding all the condition code registers,
5866 and generating RTL that refers directly to ST_REG_FIRST. */
5867 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5868 fixed_regs[regno] = call_used_regs[regno] = 1;
5869 }
5870 /* In mips16 mode, we permit the $t temporary registers to be used
5871 for reload. We prohibit the unused $s registers, since they
5872 are call-saved, and saving them via a mips16 register would
5873 probably waste more time than just reloading the value. */
5874 if (TARGET_MIPS16)
5875 {
5876 fixed_regs[18] = call_used_regs[18] = 1;
5877 fixed_regs[19] = call_used_regs[19] = 1;
5878 fixed_regs[20] = call_used_regs[20] = 1;
5879 fixed_regs[21] = call_used_regs[21] = 1;
5880 fixed_regs[22] = call_used_regs[22] = 1;
5881 fixed_regs[23] = call_used_regs[23] = 1;
5882 fixed_regs[26] = call_used_regs[26] = 1;
5883 fixed_regs[27] = call_used_regs[27] = 1;
5884 fixed_regs[30] = call_used_regs[30] = 1;
5885 }
5886 /* fp20-23 are now caller saved. */
5887 if (mips_abi == ABI_64)
5888 {
5889 int regno;
5890 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
5891 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5892 }
5893 /* Odd registers from fp21 to fp31 are now caller saved. */
5894 if (mips_abi == ABI_N32)
5895 {
5896 int regno;
5897 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
5898 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5899 }
5900 /* Make sure that double-register accumulator values are correctly
5901 ordered for the current endianness. */
5902 if (TARGET_LITTLE_ENDIAN)
5903 {
5904 int regno;
5905 mips_swap_registers (MD_REG_FIRST);
5906 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
5907 mips_swap_registers (regno);
5908 }
5909 }
5910
5911 /* Allocate a chunk of memory for per-function machine-dependent data. */
5912 static struct machine_function *
5913 mips_init_machine_status (void)
5914 {
5915 return ((struct machine_function *)
5916 ggc_alloc_cleared (sizeof (struct machine_function)));
5917 }
5918
5919 /* On the mips16, we want to allocate $24 (T_REG) before other
5920 registers for instructions for which it is possible. This helps
5921 avoid shuffling registers around in order to set up for an xor,
5922 encouraging the compiler to use a cmp instead. */
5923
5924 void
5925 mips_order_regs_for_local_alloc (void)
5926 {
5927 register int i;
5928
5929 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5930 reg_alloc_order[i] = i;
5931
5932 if (TARGET_MIPS16)
5933 {
5934 /* It really doesn't matter where we put register 0, since it is
5935 a fixed register anyhow. */
5936 reg_alloc_order[0] = 24;
5937 reg_alloc_order[24] = 0;
5938 }
5939 }
5940
5941 \f
5942 /* The MIPS debug format wants all automatic variables and arguments
5943 to be in terms of the virtual frame pointer (stack pointer before
5944 any adjustment in the function), while the MIPS 3.0 linker wants
5945 the frame pointer to be the stack pointer after the initial
5946 adjustment. So, we do the adjustment here. The arg pointer (which
5947 is eliminated) points to the virtual frame pointer, while the frame
5948 pointer (which may be eliminated) points to the stack pointer after
5949 the initial adjustments. */
5950
5951 HOST_WIDE_INT
5952 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
5953 {
5954 rtx offset2 = const0_rtx;
5955 rtx reg = eliminate_constant_term (addr, &offset2);
5956
5957 if (offset == 0)
5958 offset = INTVAL (offset2);
5959
5960 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
5961 || reg == hard_frame_pointer_rtx)
5962 {
5963 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
5964 ? compute_frame_size (get_frame_size ())
5965 : cfun->machine->frame.total_size;
5966
5967 /* The MIPS16 frame is smaller. */
5968 if (frame_pointer_needed && TARGET_MIPS16)
5969 frame_size -= cfun->machine->frame.args_size;
5970
5971 offset = offset - frame_size;
5972 }
5973
5974 /* sdbout_parms does not want this to crash for unrecognized cases. */
5975 #if 0
5976 else if (reg != arg_pointer_rtx)
5977 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
5978 addr);
5979 #endif
5980
5981 return offset;
5982 }
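/* Worked example with hypothetical numbers, ignoring the MIPS16
   frame-pointer adjustment: for a 32-byte frame, a variable addressed
   as 8($sp) after the prologue adjustment is reported at offset
   8 - 32 = -24, i.e. relative to the virtual frame pointer (the value
   $sp had on entry to the function).  */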
5983 \f
5984 /* If OP is an UNSPEC address, return the address to which it refers,
5985 otherwise return OP itself. */
5986
5987 static rtx
5988 mips_strip_unspec_address (rtx op)
5989 {
5990 rtx base, offset;
5991
5992 split_const (op, &base, &offset);
5993 if (UNSPEC_ADDRESS_P (base))
5994 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
5995 return op;
5996 }
5997
5998 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
5999
6000 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
6001 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
6002 'h' OP is HIGH, prints %hi(X),
6003 'd' output integer constant in decimal,
6004 'z' if the operand is 0, use $0 instead of normal operand.
6005 'D' print second part of double-word register or memory operand.
6006 'L' print low-order register of double-word register operand.
6007 'M' print high-order register of double-word register operand.
6008 'C' print part of opcode for a branch condition.
6009 'F' print part of opcode for a floating-point branch condition.
6010 'N' print part of opcode for a branch condition, inverted.
6011 'W' print part of opcode for a floating-point branch condition, inverted.
6012 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
6013 'z' for (eq:?I ...), 'n' for (ne:?I ...).
6014 't' like 'T', but with the EQ/NE cases reversed
6015 'Y' for a CONST_INT X, print mips_fp_conditions[X]
6016 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
6017 'R' print the reloc associated with LO_SUM
6018 'q' print DSP accumulator registers
6019
6020 The punctuation characters are:
6021
6022 '(' Turn on .set noreorder
6023 ')' Turn on .set reorder
6024 '[' Turn on .set noat
6025 ']' Turn on .set at
6026 '<' Turn on .set nomacro
6027 '>' Turn on .set macro
6028 '{' Turn on .set volatile (not GAS)
6029 '}' Turn on .set novolatile (not GAS)
6030 '&' Turn on .set noreorder if filling delay slots
6031 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
6032 '!' Turn on .set nomacro if filling delay slots
6033 '#' Print nop if in a .set noreorder section.
6034 '/' Like '#', but does nothing within a delayed branch sequence
6035 '?' Print 'l' if we are to use a branch likely instead of normal branch.
6036 '@' Print the name of the assembler temporary register (at or $1).
6037 '.' Print the name of the register with a hard-wired zero (zero or $0).
6038 '^' Print the name of the pic call-through register (t9 or $25).
6039 '$' Print the name of the stack pointer register (sp or $29).
6040 '+' Print the name of the gp register (usually gp or $28).
6041 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
6042
6043 void
6044 print_operand (FILE *file, rtx op, int letter)
6045 {
6046 register enum rtx_code code;
6047
6048 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
6049 {
6050 switch (letter)
6051 {
6052 case '?':
6053 if (mips_branch_likely)
6054 putc ('l', file);
6055 break;
6056
6057 case '@':
6058 fputs (reg_names [GP_REG_FIRST + 1], file);
6059 break;
6060
6061 case '^':
6062 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
6063 break;
6064
6065 case '.':
6066 fputs (reg_names [GP_REG_FIRST + 0], file);
6067 break;
6068
6069 case '$':
6070 fputs (reg_names[STACK_POINTER_REGNUM], file);
6071 break;
6072
6073 case '+':
6074 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
6075 break;
6076
6077 case '&':
6078 if (final_sequence != 0 && set_noreorder++ == 0)
6079 fputs (".set\tnoreorder\n\t", file);
6080 break;
6081
6082 case '*':
6083 if (final_sequence != 0)
6084 {
6085 if (set_noreorder++ == 0)
6086 fputs (".set\tnoreorder\n\t", file);
6087
6088 if (set_nomacro++ == 0)
6089 fputs (".set\tnomacro\n\t", file);
6090 }
6091 break;
6092
6093 case '!':
6094 if (final_sequence != 0 && set_nomacro++ == 0)
6095 fputs ("\n\t.set\tnomacro", file);
6096 break;
6097
6098 case '#':
6099 if (set_noreorder != 0)
6100 fputs ("\n\tnop", file);
6101 break;
6102
6103 case '/':
6104 /* Print an extra newline so that the delayed insn is separated
6105 from the following ones. This looks neater and is consistent
6106 with non-nop delayed sequences. */
6107 if (set_noreorder != 0 && final_sequence == 0)
6108 fputs ("\n\tnop\n", file);
6109 break;
6110
6111 case '(':
6112 if (set_noreorder++ == 0)
6113 fputs (".set\tnoreorder\n\t", file);
6114 break;
6115
6116 case ')':
6117 if (set_noreorder == 0)
6118 error ("internal error: %%) found without a %%( in assembler pattern");
6119
6120 else if (--set_noreorder == 0)
6121 fputs ("\n\t.set\treorder", file);
6122
6123 break;
6124
6125 case '[':
6126 if (set_noat++ == 0)
6127 fputs (".set\tnoat\n\t", file);
6128 break;
6129
6130 case ']':
6131 if (set_noat == 0)
6132 error ("internal error: %%] found without a %%[ in assembler pattern");
6133 else if (--set_noat == 0)
6134 fputs ("\n\t.set\tat", file);
6135
6136 break;
6137
6138 case '<':
6139 if (set_nomacro++ == 0)
6140 fputs (".set\tnomacro\n\t", file);
6141 break;
6142
6143 case '>':
6144 if (set_nomacro == 0)
6145 error ("internal error: %%> found without a %%< in assembler pattern");
6146 else if (--set_nomacro == 0)
6147 fputs ("\n\t.set\tmacro", file);
6148
6149 break;
6150
6151 case '{':
6152 if (set_volatile++ == 0)
6153 fputs ("#.set\tvolatile\n\t", file);
6154 break;
6155
6156 case '}':
6157 if (set_volatile == 0)
6158 error ("internal error: %%} found without a %%{ in assembler pattern");
6159 else if (--set_volatile == 0)
6160 fputs ("\n\t#.set\tnovolatile", file);
6161
6162 break;
6163
6164 case '~':
6165 {
6166 if (align_labels_log > 0)
6167 ASM_OUTPUT_ALIGN (file, align_labels_log);
6168 }
6169 break;
6170
6171 default:
6172 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
6173 break;
6174 }
6175
6176 return;
6177 }
6178
6179 if (! op)
6180 {
6181 error ("PRINT_OPERAND null pointer");
6182 return;
6183 }
6184
6185 code = GET_CODE (op);
6186
6187 if (letter == 'C')
6188 switch (code)
6189 {
6190 case EQ: fputs ("eq", file); break;
6191 case NE: fputs ("ne", file); break;
6192 case GT: fputs ("gt", file); break;
6193 case GE: fputs ("ge", file); break;
6194 case LT: fputs ("lt", file); break;
6195 case LE: fputs ("le", file); break;
6196 case GTU: fputs ("gtu", file); break;
6197 case GEU: fputs ("geu", file); break;
6198 case LTU: fputs ("ltu", file); break;
6199 case LEU: fputs ("leu", file); break;
6200 default:
6201 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
6202 }
6203
6204 else if (letter == 'N')
6205 switch (code)
6206 {
6207 case EQ: fputs ("ne", file); break;
6208 case NE: fputs ("eq", file); break;
6209 case GT: fputs ("le", file); break;
6210 case GE: fputs ("lt", file); break;
6211 case LT: fputs ("ge", file); break;
6212 case LE: fputs ("gt", file); break;
6213 case GTU: fputs ("leu", file); break;
6214 case GEU: fputs ("ltu", file); break;
6215 case LTU: fputs ("geu", file); break;
6216 case LEU: fputs ("gtu", file); break;
6217 default:
6218 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
6219 }
6220
6221 else if (letter == 'F')
6222 switch (code)
6223 {
6224 case EQ: fputs ("c1f", file); break;
6225 case NE: fputs ("c1t", file); break;
6226 default:
6227 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
6228 }
6229
6230 else if (letter == 'W')
6231 switch (code)
6232 {
6233 case EQ: fputs ("c1t", file); break;
6234 case NE: fputs ("c1f", file); break;
6235 default:
6236 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
6237 }
6238
6239 else if (letter == 'h')
6240 {
6241 if (GET_CODE (op) == HIGH)
6242 op = XEXP (op, 0);
6243
6244 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
6245 }
6246
6247 else if (letter == 'R')
6248 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
6249
6250 else if (letter == 'Y')
6251 {
6252 if (GET_CODE (op) == CONST_INT
6253 && ((unsigned HOST_WIDE_INT) INTVAL (op)
6254 < ARRAY_SIZE (mips_fp_conditions)))
6255 fputs (mips_fp_conditions[INTVAL (op)], file);
6256 else
6257 output_operand_lossage ("invalid %%Y value");
6258 }
6259
6260 else if (letter == 'Z')
6261 {
6262 if (ISA_HAS_8CC)
6263 {
6264 print_operand (file, op, 0);
6265 fputc (',', file);
6266 }
6267 }
6268
6269 else if (letter == 'q')
6270 {
6271 int regnum;
6272
6273 if (code != REG)
6274 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6275
6276 regnum = REGNO (op);
6277 if (MD_REG_P (regnum))
6278 fprintf (file, "$ac0");
6279 else if (DSP_ACC_REG_P (regnum))
6280 fprintf (file, "$ac%c", reg_names[regnum][3]);
6281 else
6282 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6283 }
6284
6285 else if (code == REG || code == SUBREG)
6286 {
6287 register int regnum;
6288
6289 if (code == REG)
6290 regnum = REGNO (op);
6291 else
6292 regnum = true_regnum (op);
6293
6294 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
6295 || (letter == 'L' && WORDS_BIG_ENDIAN)
6296 || letter == 'D')
6297 regnum++;
6298
6299 fprintf (file, "%s", reg_names[regnum]);
6300 }
6301
6302 else if (code == MEM)
6303 {
6304 if (letter == 'D')
6305 output_address (plus_constant (XEXP (op, 0), 4));
6306 else
6307 output_address (XEXP (op, 0));
6308 }
6309
6310 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
6311 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
6312
6313 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
6314 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6315
6316 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
6317 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
6318
6319 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6320 fputs (reg_names[GP_REG_FIRST], file);
6321
6322 else if (letter == 'd' || letter == 'x' || letter == 'X')
6323 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
6324
6325 else if (letter == 'T' || letter == 't')
6326 {
6327 int truth = (code == NE) == (letter == 'T');
6328 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6329 }
6330
6331 else if (CONST_GP_P (op))
6332 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6333
6334 else
6335 output_addr_const (file, mips_strip_unspec_address (op));
6336 }
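/* Illustrative asm-template fragments (not taken from mips.md): with
   the codes above, "%z2" prints the hard-wired zero register when
   operand 2 is a zero constant, "%D0" prints the second word of a
   double-word register or memory operand 0, and wrapping a sequence
   in "%(" ... "%)" brackets it with ".set noreorder" and
   ".set reorder".  */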
6337
6338
6339 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6340 in context CONTEXT. RELOCS is the array of relocations to use. */
6341
6342 static void
6343 print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6344 const char **relocs)
6345 {
6346 enum mips_symbol_type symbol_type;
6347 const char *p;
6348
6349 symbol_type = mips_classify_symbolic_expression (op, context);
6350 if (relocs[symbol_type] == 0)
6351 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
6352
6353 fputs (relocs[symbol_type], file);
6354 output_addr_const (file, mips_strip_unspec_address (op));
6355 for (p = relocs[symbol_type]; *p != 0; p++)
6356 if (*p == '(')
6357 fputc (')', file);
6358 }
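/* For example, with RELOCS[SYMBOL_GOTOFF_LOADGP] equal to
   "%hi(%neg(%gp_rel(" (see mips_init_relocs) and a symbol "x", the
   loop over '(' characters above produces "%hi(%neg(%gp_rel(x)))".  */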
6359 \f
6360 /* Output address operand X to FILE. */
6361
6362 void
6363 print_operand_address (FILE *file, rtx x)
6364 {
6365 struct mips_address_info addr;
6366
6367 if (mips_classify_address (&addr, x, word_mode, true))
6368 switch (addr.type)
6369 {
6370 case ADDRESS_REG:
6371 print_operand (file, addr.offset, 0);
6372 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6373 return;
6374
6375 case ADDRESS_LO_SUM:
6376 print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6377 mips_lo_relocs);
6378 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6379 return;
6380
6381 case ADDRESS_CONST_INT:
6382 output_addr_const (file, x);
6383 fprintf (file, "(%s)", reg_names[0]);
6384 return;
6385
6386 case ADDRESS_SYMBOLIC:
6387 output_addr_const (file, mips_strip_unspec_address (x));
6388 return;
6389 }
6390 gcc_unreachable ();
6391 }
6392 \f
6393 /* When using assembler macros, keep track of all of small-data externs
6394 so that mips_file_end can emit the appropriate declarations for them.
6395
6396 In most cases it would be safe (though pointless) to emit .externs
6397 for other symbols too. One exception is when an object is within
6398 the -G limit but declared by the user to be in a section other
6399 than .sbss or .sdata. */
6400
6401 void
6402 mips_output_external (FILE *file, tree decl, const char *name)
6403 {
6404 default_elf_asm_output_external (file, decl, name);
6405
6406 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6407 set in order to avoid putting out names that are never really
6408 used. */
6409 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
6410 {
6411 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
6412 {
6413 fputs ("\t.extern\t", file);
6414 assemble_name (file, name);
6415 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6416 int_size_in_bytes (TREE_TYPE (decl)));
6417 }
6418 else if (TARGET_IRIX
6419 && mips_abi == ABI_32
6420 && TREE_CODE (decl) == FUNCTION_DECL)
6421 {
6422 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6423 `.global name .text' directive for every used but
6424 undefined function. If we don't, the linker may perform
6425 an optimization (skipping over the insns that set $gp)
6426 when it is unsafe. */
6427 fputs ("\t.globl ", file);
6428 assemble_name (file, name);
6429 fputs (" .text\n", file);
6430 }
6431 }
6432 }
6433 \f
6434 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6435 put out a MIPS ECOFF file and a stab. */
6436
6437 void
6438 mips_output_filename (FILE *stream, const char *name)
6439 {
6440
6441 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6442 directives. */
6443 if (write_symbols == DWARF2_DEBUG)
6444 return;
6445 else if (mips_output_filename_first_time)
6446 {
6447 mips_output_filename_first_time = 0;
6448 num_source_filenames += 1;
6449 current_function_file = name;
6450 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6451 output_quoted_string (stream, name);
6452 putc ('\n', stream);
6453 }
6454
6455 /* If we are emitting stabs, let dbxout.c handle this (except for
6456 the mips_output_filename_first_time case). */
6457 else if (write_symbols == DBX_DEBUG)
6458 return;
6459
6460 else if (name != current_function_file
6461 && strcmp (name, current_function_file) != 0)
6462 {
6463 num_source_filenames += 1;
6464 current_function_file = name;
6465 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6466 output_quoted_string (stream, name);
6467 putc ('\n', stream);
6468 }
6469 }
6470 \f
6471 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6472 that should be written before the opening quote, such as "\t.ascii\t"
6473 for real string data or "\t# " for a comment. */
6474
6475 void
6476 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
6477 const char *prefix)
6478 {
6479 size_t i;
6480 int cur_pos = 17;
6481 register const unsigned char *string =
6482 (const unsigned char *)string_param;
6483
6484 fprintf (stream, "%s\"", prefix);
6485 for (i = 0; i < len; i++)
6486 {
6487 register int c = string[i];
6488
6489 if (ISPRINT (c))
6490 {
6491 if (c == '\\' || c == '\"')
6492 {
6493 putc ('\\', stream);
6494 cur_pos++;
6495 }
6496 putc (c, stream);
6497 cur_pos++;
6498 }
6499 else
6500 {
6501 fprintf (stream, "\\%03o", c);
6502 cur_pos += 4;
6503 }
6504
6505 if (cur_pos > 72 && i+1 < len)
6506 {
6507 cur_pos = 17;
6508 fprintf (stream, "\"\n%s\"", prefix);
6509 }
6510 }
6511 fprintf (stream, "\"\n");
6512 }
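/* For example, mips_output_ascii (stream, "hi\n", 3, "\t.ascii\t")
   writes the single line

	.ascii	"hi\012"

   with the newline escaped in octal because it is not printable.  */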
6513 \f
6514 /* Implement TARGET_ASM_FILE_START. */
6515
6516 static void
6517 mips_file_start (void)
6518 {
6519 default_file_start ();
6520
6521 if (!TARGET_IRIX)
6522 {
6523 /* Generate a special section to describe the ABI switches used to
6524 produce the resultant binary. This used to be done by the assembler
6525 setting bits in the ELF header's flags field, but we have run out of
6526 bits. GDB needs this information in order to be able to correctly
6527 debug these binaries. See the function mips_gdbarch_init() in
6528 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
6529 causes unnecessary IRIX 6 ld warnings. */
6530 const char * abi_string = NULL;
6531
6532 switch (mips_abi)
6533 {
6534 case ABI_32: abi_string = "abi32"; break;
6535 case ABI_N32: abi_string = "abiN32"; break;
6536 case ABI_64: abi_string = "abi64"; break;
6537 case ABI_O64: abi_string = "abiO64"; break;
6538 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
6539 default:
6540 gcc_unreachable ();
6541 }
6542 /* Note - we use fprintf directly rather than calling switch_to_section
6543 because in this way we can avoid creating an allocated section. We
6544 do not want this section to take up any space in the running
6545 executable. */
6546 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
6547
6548 /* There is no ELF header flag to distinguish long32 forms of the
6549 EABI from long64 forms. Emit a special section to help tools
6550 such as GDB. Do the same for o64, which is sometimes used with
6551 -mlong64. */
6552 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
6553 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
6554 TARGET_LONG64 ? 64 : 32);
6555
6556 /* Restore the default section. */
6557 fprintf (asm_out_file, "\t.previous\n");
6558
6559 #ifdef HAVE_AS_GNU_ATTRIBUTE
6560 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
6561 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
6562 #endif
6563 }
6564
6565 /* Generate the pseudo ops that System V.4 wants. */
6566 if (TARGET_ABICALLS)
6567 fprintf (asm_out_file, "\t.abicalls\n");
6568
6569 if (flag_verbose_asm)
6570 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
6571 ASM_COMMENT_START,
6572 mips_section_threshold, mips_arch_info->name, mips_isa);
6573 }
6574
6575 #ifdef BSS_SECTION_ASM_OP
6576 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
6577 in the use of sbss. */
6578
6579 void
6580 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
6581 unsigned HOST_WIDE_INT size, int align)
6582 {
6583 extern tree last_assemble_variable_decl;
6584
6585 if (mips_in_small_data_p (decl))
6586 switch_to_section (get_named_section (NULL, ".sbss", 0));
6587 else
6588 switch_to_section (bss_section);
6589 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6590 last_assemble_variable_decl = decl;
6591 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
6592 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
6593 }
6594 #endif
6595 \f
6596 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
6597 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
6598
6599 void
6600 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
6601 unsigned HOST_WIDE_INT size,
6602 unsigned int align)
6603 {
6604 /* If the target wants uninitialized const declarations in
6605 .rdata then don't put them in .comm. */
6606 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
6607 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
6608 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
6609 {
6610 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
6611 targetm.asm_out.globalize_label (stream, name);
6612
6613 switch_to_section (readonly_data_section);
6614 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6615 mips_declare_object (stream, name, "",
6616 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
6617 size);
6618 }
6619 else
6620 mips_declare_common_object (stream, name, "\n\t.comm\t",
6621 size, align, true);
6622 }
6623
6624 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
6625 NAME is the name of the object and ALIGN is the required alignment
6626 in bits. TAKES_ALIGNMENT_P is true if the directive takes a third
6627 alignment argument. */
6628
6629 void
6630 mips_declare_common_object (FILE *stream, const char *name,
6631 const char *init_string,
6632 unsigned HOST_WIDE_INT size,
6633 unsigned int align, bool takes_alignment_p)
6634 {
6635 if (!takes_alignment_p)
6636 {
6637 size += (align / BITS_PER_UNIT) - 1;
6638 size -= size % (align / BITS_PER_UNIT);
6639 mips_declare_object (stream, name, init_string,
6640 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
6641 }
6642 else
6643 mips_declare_object (stream, name, init_string,
6644 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
6645 size, align / BITS_PER_UNIT);
6646 }
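/* For example (hypothetical symbol name): a 100-byte object with
   64-bit alignment, emitted with TAKES_ALIGNMENT_P true as in
   mips_output_aligned_decl_common, produces

	.comm	buf,100,8

   whereas with TAKES_ALIGNMENT_P false the alignment argument is
   dropped and the size is instead rounded up to a multiple of the
   alignment (104 bytes in this case).  */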
6647
6648 /* Emit either a label, .comm, or .lcomm directive. When using assembler
6649 macros, mark the symbol as written so that mips_file_end won't emit an
6650 .extern for it. STREAM is the output file, NAME is the name of the
6651 symbol, INIT_STRING is the string that should be written before the
6652 symbol and FINAL_STRING is the string that should be written after it.
6653 FINAL_STRING is a printf() format that consumes the remaining arguments. */
6654
6655 void
6656 mips_declare_object (FILE *stream, const char *name, const char *init_string,
6657 const char *final_string, ...)
6658 {
6659 va_list ap;
6660
6661 fputs (init_string, stream);
6662 assemble_name (stream, name);
6663 va_start (ap, final_string);
6664 vfprintf (stream, final_string, ap);
6665 va_end (ap);
6666
6667 if (!TARGET_EXPLICIT_RELOCS)
6668 {
6669 tree name_tree = get_identifier (name);
6670 TREE_ASM_WRITTEN (name_tree) = 1;
6671 }
6672 }
6673
6674 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
6675 extern int size_directive_output;
6676
6677 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
6678 definitions except that it uses mips_declare_object() to emit the label. */
6679
6680 void
6681 mips_declare_object_name (FILE *stream, const char *name,
6682 tree decl ATTRIBUTE_UNUSED)
6683 {
6684 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
6685 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
6686 #endif
6687
6688 size_directive_output = 0;
6689 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
6690 {
6691 HOST_WIDE_INT size;
6692
6693 size_directive_output = 1;
6694 size = int_size_in_bytes (TREE_TYPE (decl));
6695 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6696 }
6697
6698 mips_declare_object (stream, name, "", ":\n");
6699 }
6700
6701 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
6702
6703 void
6704 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
6705 {
6706 const char *name;
6707
6708 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
6709 if (!flag_inhibit_size_directive
6710 && DECL_SIZE (decl) != 0
6711 && !at_end && top_level
6712 && DECL_INITIAL (decl) == error_mark_node
6713 && !size_directive_output)
6714 {
6715 HOST_WIDE_INT size;
6716
6717 size_directive_output = 1;
6718 size = int_size_in_bytes (TREE_TYPE (decl));
6719 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6720 }
6721 }
6722 #endif
6723 \f
6724 /* Return true if X in context CONTEXT is a small data address that can
6725 be rewritten as a LO_SUM. */
6726
6727 static bool
6728 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
6729 {
6730 enum mips_symbol_type symbol_type;
6731
6732 return (TARGET_EXPLICIT_RELOCS
6733 && mips_symbolic_constant_p (x, context, &symbol_type)
6734 && symbol_type == SYMBOL_GP_RELATIVE);
6735 }
6736
6737
6738 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
6739 containing MEM, or null if none. */
6740
6741 static int
6742 mips_small_data_pattern_1 (rtx *loc, void *data)
6743 {
6744 enum mips_symbol_context context;
6745
6746 if (GET_CODE (*loc) == LO_SUM)
6747 return -1;
6748
6749 if (MEM_P (*loc))
6750 {
6751 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
6752 return 1;
6753 return -1;
6754 }
6755
6756 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
6757 return mips_rewrite_small_data_p (*loc, context);
6758 }
6759
6760 /* Return true if OP refers to small data symbols directly, not through
6761 a LO_SUM. */
6762
6763 bool
6764 mips_small_data_pattern_p (rtx op)
6765 {
6766 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
6767 }
6768 \f
6769 /* A for_each_rtx callback, used by mips_rewrite_small_data.
6770 DATA is the containing MEM, or null if none. */
6771
6772 static int
6773 mips_rewrite_small_data_1 (rtx *loc, void *data)
6774 {
6775 enum mips_symbol_context context;
6776
6777 if (MEM_P (*loc))
6778 {
6779 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
6780 return -1;
6781 }
6782
6783 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
6784 if (mips_rewrite_small_data_p (*loc, context))
6785 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
6786
6787 if (GET_CODE (*loc) == LO_SUM)
6788 return -1;
6789
6790 return 0;
6791 }
6792
6793 /* If possible, rewrite OP so that it refers to small data using
6794 explicit relocations. */
6795
6796 rtx
6797 mips_rewrite_small_data (rtx op)
6798 {
6799 op = copy_insn (op);
6800 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
6801 return op;
6802 }
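/* Worked example (register and symbol names are illustrative): if "x"
   is a small-data symbol, the rewrite above turns a direct reference
   to it into (lo_sum $gp (symbol_ref "x")), which later prints as
   something like "lw $2,%gp_rel(x)($28)".  */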
6803 \f
6804 /* Return true if the current function has an insn that implicitly
6805 refers to $gp. */
6806
6807 static bool
6808 mips_function_has_gp_insn (void)
6809 {
6810 /* Don't bother rechecking if we found one last time. */
6811 if (!cfun->machine->has_gp_insn_p)
6812 {
6813 rtx insn;
6814
6815 push_topmost_sequence ();
6816 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6817 if (INSN_P (insn)
6818 && GET_CODE (PATTERN (insn)) != USE
6819 && GET_CODE (PATTERN (insn)) != CLOBBER
6820 && (get_attr_got (insn) != GOT_UNSET
6821 || small_data_pattern (PATTERN (insn), VOIDmode)))
6822 break;
6823 pop_topmost_sequence ();
6824
6825 cfun->machine->has_gp_insn_p = (insn != 0);
6826 }
6827 return cfun->machine->has_gp_insn_p;
6828 }
6829
6830
6831 /* Return the register that should be used as the global pointer
6832 within this function. Return 0 if the function doesn't need
6833 a global pointer. */
6834
6835 static unsigned int
6836 mips_global_pointer (void)
6837 {
6838 unsigned int regno;
6839
6840 /* $gp is always available unless we're using a GOT. */
6841 if (!TARGET_USE_GOT)
6842 return GLOBAL_POINTER_REGNUM;
6843
6844 /* We must always provide $gp when it is used implicitly. */
6845 if (!TARGET_EXPLICIT_RELOCS)
6846 return GLOBAL_POINTER_REGNUM;
6847
6848 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
6849 a valid gp. */
6850 if (current_function_profile)
6851 return GLOBAL_POINTER_REGNUM;
6852
6853 /* If the function has a nonlocal goto, $gp must hold the correct
6854 global pointer for the target function. */
6855 if (current_function_has_nonlocal_goto)
6856 return GLOBAL_POINTER_REGNUM;
6857
6858 /* If the gp is never referenced, there's no need to initialize it.
6859 Note that reload can sometimes introduce constant pool references
6860 into a function that otherwise didn't need them. For example,
6861 suppose we have an instruction like:
6862
6863 (set (reg:DF R1) (float:DF (reg:SI R2)))
6864
6865 If R2 turns out to be a constant such as 1, the instruction may have a
6866 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
6867 using this constant if R2 doesn't get allocated to a register.
6868
6869 In cases like these, reload will have added the constant to the pool
6870 but no instruction will yet refer to it. */
6871 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
6872 && !current_function_uses_const_pool
6873 && !mips_function_has_gp_insn ())
6874 return 0;
6875
6876 /* We need a global pointer, but perhaps we can use a call-clobbered
6877 register instead of $gp. */
6878 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
6879 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6880 if (!df_regs_ever_live_p (regno)
6881 && call_used_regs[regno]
6882 && !fixed_regs[regno]
6883 && regno != PIC_FUNCTION_ADDR_REGNUM)
6884 return regno;
6885
6886 return GLOBAL_POINTER_REGNUM;
6887 }
6888
6889
6890 /* Return true if the function return value MODE will get returned in a
6891 floating-point register. */
6892
6893 static bool
6894 mips_return_mode_in_fpr_p (enum machine_mode mode)
6895 {
6896 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
6897 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
6898 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6899 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
6900 }
6901
6902 /* Return a two-character string representing a function floating-point
6903 return mode, used to name MIPS16 function stubs. */
6904
6905 static const char *
6906 mips16_call_stub_mode_suffix (enum machine_mode mode)
6907 {
6908 if (mode == SFmode)
6909 return "sf";
6910 else if (mode == DFmode)
6911 return "df";
6912 else if (mode == SCmode)
6913 return "sc";
6914 else if (mode == DCmode)
6915 return "dc";
6916 else if (mode == V2SFmode)
6917 return "df";
6918 else
6919 gcc_unreachable ();
6920 }
6921
6922 /* Return true if the current function returns its value in a floating-point
6923 register in MIPS16 mode. */
6924
6925 static bool
6926 mips16_cfun_returns_in_fpr_p (void)
6927 {
6928 tree return_type = DECL_RESULT (current_function_decl);
6929 return (TARGET_MIPS16
6930 && TARGET_HARD_FLOAT_ABI
6931 && !aggregate_value_p (return_type, current_function_decl)
6932 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
6933 }
6934
6935
6936 /* Return true if the current function must save REGNO. */
6937
6938 static bool
6939 mips_save_reg_p (unsigned int regno)
6940 {
6941 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
6942 if we have not chosen a call-clobbered substitute. */
6943 if (regno == GLOBAL_POINTER_REGNUM)
6944 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
6945
6946 /* Check call-saved registers. */
6947 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
6948 return true;
6949
6950 /* Save both registers in an FPR pair if either one is used. This is
6951 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
6952 register to be used without the even register. */
6953 if (FP_REG_P (regno)
6954 && MAX_FPRS_PER_FMT == 2
6955 && df_regs_ever_live_p (regno + 1)
6956 && !call_used_regs[regno + 1])
6957 return true;
6958
6959 /* We need to save the old frame pointer before setting up a new one. */
6960 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
6961 return true;
6962
6963 /* We need to save the incoming return address if it is ever clobbered
6964 within the function. */
6965 if (regno == GP_REG_FIRST + 31 && df_regs_ever_live_p (regno))
6966 return true;
6967
6968 if (TARGET_MIPS16)
6969 {
6970 /* $18 is a special case in mips16 code. It may be used to call
6971 a function which returns a floating point value, but it is
6972 marked in call_used_regs. */
6973 if (regno == GP_REG_FIRST + 18 && df_regs_ever_live_p (regno))
6974 return true;
6975
6976 /* $31 is also a special case. It will be used to copy a return
6977 value into the floating point registers if the return value is
6978 floating point. */
6979 if (regno == GP_REG_FIRST + 31
6980 && mips16_cfun_returns_in_fpr_p ())
6981 return true;
6982 }
6983
6984 return false;
6985 }
6986
6987 /* Return the index of the lowest X in the range [0, SIZE) for which
6988 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
6989
6990 static unsigned int
6991 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
6992 unsigned int size)
6993 {
6994 unsigned int i;
6995
6996 for (i = 0; i < size; i++)
6997 if (BITSET_P (mask, regs[i]))
6998 break;
6999
7000 return i;
7001 }
7002
7003 /* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
7004 is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
7005 for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
7006 the same is true for all indexes in the range (X, SIZE). */
7007
7008 static void
7009 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7010 unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
7011 {
7012 unsigned int i;
7013
7014 i = mips16e_find_first_register (*mask_ptr, regs, size);
7015 for (i++; i < size; i++)
7016 if (!BITSET_P (*mask_ptr, regs[i]))
7017 {
7018 *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
7019 *mask_ptr |= 1 << regs[i];
7020 }
7021 }
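
/* A minimal illustration of the two helpers above, using a made-up
   register list rather than mips16e_s2_s8_regs or mips16e_a0_a3_regs:

       unsigned char regs[] = { 7, 6, 5, 4 };
       unsigned int mask = 1 << 5;
       HOST_WIDE_INT size = 0;

       mips16e_mask_registers (&mask, regs, 4, &size);

   mips16e_find_first_register finds register 5 at index 2, so the loop
   then forces register 4 (index 3) into the mask as well, leaving
   mask == (1 << 5) | (1 << 4) and adding one GET_MODE_SIZE (gpr_mode)
   to size.  This mirrors the MIPS16e SAVE/RESTORE requirement that a
   register range be saved as a whole.  */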
7022
7023 /* Return the bytes needed to compute the frame pointer from the current
7024 stack pointer. SIZE is the size (in bytes) of the local variables.
7025
7026 MIPS stack frames look like:
7027
7028 Before call After call
7029 high +-----------------------+ +-----------------------+
7030 mem. | | | |
7031 | caller's temps. | | caller's temps. |
7032 | | | |
7033 +-----------------------+ +-----------------------+
7034 | | | |
7035 | arguments on stack. | | arguments on stack. |
7036 | | | |
7037 +-----------------------+ +-----------------------+
7038 | 4 words to save | | 4 words to save |
7039 | arguments passed | | arguments passed |
7040 | in registers, even | | in registers, even |
7041 | if not passed. | | if not passed. |
7042 SP->+-----------------------+ VFP->+-----------------------+
7043 (VFP = SP+fp_sp_offset) | |\
7044 | fp register save | | fp_reg_size
7045 | |/
7046 SP+gp_sp_offset->+-----------------------+
7047 /| |\
7048 | | gp register save | | gp_reg_size
7049 gp_reg_rounded | | |/
7050 | +-----------------------+
7051 \| alignment padding |
7052 +-----------------------+
7053 | |\
7054 | local variables | | var_size
7055 | |/
7056 +-----------------------+
7057 | |
7058 | alloca allocations |
7059 | |
7060 +-----------------------+
7061 /| |
7062 cprestore_size | | GP save for V.4 abi |
7063 \| |
7064 +-----------------------+
7065 | |\
7066 | arguments on stack | |
7067 | | |
7068 +-----------------------+ |
7069 | 4 words to save | | args_size
7070 | arguments passed | |
7071 | in registers, even | |
7072 | if not passed. | |
7073 low | (TARGET_OLDABI only) |/
7074 memory SP->+-----------------------+
7075
7076 */
7077
7078 HOST_WIDE_INT
7079 compute_frame_size (HOST_WIDE_INT size)
7080 {
7081 unsigned int regno;
7082 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
7083 HOST_WIDE_INT var_size; /* # bytes that variables take up */
7084 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
7085 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
7086 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
7087 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
7088 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
7089 unsigned int mask; /* mask of saved gp registers */
7090 unsigned int fmask; /* mask of saved fp registers */
7091
7092 cfun->machine->global_pointer = mips_global_pointer ();
7093
7094 gp_reg_size = 0;
7095 fp_reg_size = 0;
7096 mask = 0;
7097 fmask = 0;
7098 var_size = MIPS_STACK_ALIGN (size);
7099 args_size = current_function_outgoing_args_size;
7100 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
7101
7102 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
7103 functions. If the function has local variables, we're committed
7104 to allocating it anyway. Otherwise reclaim it here. */
7105 if (var_size == 0 && current_function_is_leaf)
7106 cprestore_size = args_size = 0;
7107
7108 /* The MIPS 3.0 linker does not like functions that dynamically
7109 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
7110 looks like we are trying to create a second frame pointer to the
7111 function, so allocate some stack space to make it happy. */
7112
7113 if (args_size == 0 && current_function_calls_alloca)
7114 args_size = 4 * UNITS_PER_WORD;
7115
7116 total_size = var_size + args_size + cprestore_size;
7117
7118 /* Calculate space needed for gp registers. */
7119 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7120 if (mips_save_reg_p (regno))
7121 {
7122 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7123 mask |= 1 << (regno - GP_REG_FIRST);
7124 }
7125
7126 /* We need to restore these for the handler. */
7127 if (current_function_calls_eh_return)
7128 {
7129 unsigned int i;
7130 for (i = 0; ; ++i)
7131 {
7132 regno = EH_RETURN_DATA_REGNO (i);
7133 if (regno == INVALID_REGNUM)
7134 break;
7135 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7136 mask |= 1 << (regno - GP_REG_FIRST);
7137 }
7138 }
7139
7140 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
7141 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
7142 save all later registers too. */
7143 if (GENERATE_MIPS16E_SAVE_RESTORE)
7144 {
7145 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7146 ARRAY_SIZE (mips16e_s2_s8_regs), &gp_reg_size);
7147 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7148 ARRAY_SIZE (mips16e_a0_a3_regs), &gp_reg_size);
7149 }
7150
7151 /* This loop must iterate over the same space as its companion in
7152 mips_for_each_saved_reg. */
7153 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7154 regno >= FP_REG_FIRST;
7155 regno -= MAX_FPRS_PER_FMT)
7156 {
7157 if (mips_save_reg_p (regno))
7158 {
7159 fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
7160 fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);
7161 }
7162 }
7163
7164 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
7165 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
7166
7167 /* Add in the space required for saving incoming register arguments. */
7168 total_size += current_function_pretend_args_size;
7169 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
7170
7171 /* Save other computed information. */
7172 cfun->machine->frame.total_size = total_size;
7173 cfun->machine->frame.var_size = var_size;
7174 cfun->machine->frame.args_size = args_size;
7175 cfun->machine->frame.cprestore_size = cprestore_size;
7176 cfun->machine->frame.gp_reg_size = gp_reg_size;
7177 cfun->machine->frame.fp_reg_size = fp_reg_size;
7178 cfun->machine->frame.mask = mask;
7179 cfun->machine->frame.fmask = fmask;
7180 cfun->machine->frame.initialized = reload_completed;
7181 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
7182 cfun->machine->frame.num_fp = (fp_reg_size
7183 / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));
7184
7185 if (mask)
7186 {
7187 HOST_WIDE_INT offset;
7188
7189 if (GENERATE_MIPS16E_SAVE_RESTORE)
7190 /* MIPS16e SAVE and RESTORE instructions require the GP save area
7191 to be aligned at the high end with any padding at the low end.
7192 It is only safe to use this calculation for o32, where we never
7193 have pretend arguments, and where any varargs will be saved in
7194 the caller-allocated area rather than at the top of the frame. */
7195 offset = (total_size - GET_MODE_SIZE (gpr_mode));
7196 else
7197 offset = (args_size + cprestore_size + var_size
7198 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
7199 cfun->machine->frame.gp_sp_offset = offset;
7200 cfun->machine->frame.gp_save_offset = offset - total_size;
7201 }
7202 else
7203 {
7204 cfun->machine->frame.gp_sp_offset = 0;
7205 cfun->machine->frame.gp_save_offset = 0;
7206 }
7207
7208 if (fmask)
7209 {
7210 HOST_WIDE_INT offset;
7211
7212 offset = (args_size + cprestore_size + var_size
7213 + gp_reg_rounded + fp_reg_size
7214 - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
7215 cfun->machine->frame.fp_sp_offset = offset;
7216 cfun->machine->frame.fp_save_offset = offset - total_size;
7217 }
7218 else
7219 {
7220 cfun->machine->frame.fp_sp_offset = 0;
7221 cfun->machine->frame.fp_save_offset = 0;
7222 }
7223
7224 /* Ok, we're done. */
7225 return total_size;
7226 }
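
/* A worked example of the layout above, with illustrative numbers only:
   assume o32 with 4-byte GPR save slots, args_size == 16,
   cprestore_size == 0, var_size == 16, no FPR saves, no varargs or
   pretend arguments, and two saved GPRs ($16 and $31), so
   gp_reg_size == 8.  If MIPS_STACK_ALIGN leaves 8 unchanged, then
   gp_reg_rounded == 8 and total_size == 16 + 0 + 16 + 8 == 40, giving:

       gp_sp_offset   = 16 + 0 + 16 + 8 - 4 = 36
       gp_save_offset = 36 - 40             = -4

   so mips_for_each_saved_reg stores $31 at sp + 36 and $16 at sp + 32,
   just below the virtual frame pointer shown in the diagram.  */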
7227 \f
7228 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
7229 pointer or argument pointer. TO is either the stack pointer or
7230 hard frame pointer. */
7231
7232 HOST_WIDE_INT
7233 mips_initial_elimination_offset (int from, int to)
7234 {
7235 HOST_WIDE_INT offset;
7236
7237 compute_frame_size (get_frame_size ());
7238
7239 /* Set OFFSET to the offset from the stack pointer. */
7240 switch (from)
7241 {
7242 case FRAME_POINTER_REGNUM:
7243 offset = 0;
7244 break;
7245
7246 case ARG_POINTER_REGNUM:
7247 offset = (cfun->machine->frame.total_size
7248 - current_function_pretend_args_size);
7249 break;
7250
7251 default:
7252 gcc_unreachable ();
7253 }
7254
7255 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
7256 offset -= cfun->machine->frame.args_size;
7257
7258 return offset;
7259 }
7260 \f
7261 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
7262 back to a previous frame. */
7263 rtx
7264 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
7265 {
7266 if (count != 0)
7267 return const0_rtx;
7268
7269 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
7270 }
7271 \f
7272 /* Use FN to save or restore register REGNO. MODE is the register's
7273 mode and OFFSET is the offset of its save slot from the current
7274 stack pointer. */
7275
7276 static void
7277 mips_save_restore_reg (enum machine_mode mode, int regno,
7278 HOST_WIDE_INT offset, mips_save_restore_fn fn)
7279 {
7280 rtx mem;
7281
7282 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
7283
7284 fn (gen_rtx_REG (mode, regno), mem);
7285 }
7286
7287
7288 /* Call FN for each register that is saved by the current function.
7289 SP_OFFSET is the offset of the current stack pointer from the start
7290 of the frame. */
7291
7292 static void
7293 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
7294 {
7295 enum machine_mode fpr_mode;
7296 HOST_WIDE_INT offset;
7297 int regno;
7298
7299 /* Save registers starting from high to low. Debuggers prefer the return
7300 register to be stored at func+4, and this ordering also lets us avoid
7301 a nop in the epilogue when at least one register is reloaded in
7302 addition to the return address. */
7303 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
7304 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
7305 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
7306 {
7307 mips_save_restore_reg (gpr_mode, regno, offset, fn);
7308 offset -= GET_MODE_SIZE (gpr_mode);
7309 }
7310
7311 /* This loop must iterate over the same space as its companion in
7312 compute_frame_size. */
7313 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
7314 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
7315 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7316 regno >= FP_REG_FIRST;
7317 regno -= MAX_FPRS_PER_FMT)
7318 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
7319 {
7320 mips_save_restore_reg (fpr_mode, regno, offset, fn);
7321 offset -= GET_MODE_SIZE (fpr_mode);
7322 }
7323 }
7324 \f
7325 /* If we're generating n32 or n64 abicalls, and the current function
7326 does not use $28 as its global pointer, emit a cplocal directive.
7327 Use pic_offset_table_rtx as the argument to the directive. */
7328
7329 static void
7330 mips_output_cplocal (void)
7331 {
7332 if (!TARGET_EXPLICIT_RELOCS
7333 && cfun->machine->global_pointer > 0
7334 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
7335 output_asm_insn (".cplocal %+", 0);
7336 }
7337
7338 /* Return the style of GP load sequence that is being used for the
7339 current function. */
7340
7341 enum mips_loadgp_style
7342 mips_current_loadgp_style (void)
7343 {
7344 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
7345 return LOADGP_NONE;
7346
7347 if (TARGET_RTP_PIC)
7348 return LOADGP_RTP;
7349
7350 if (TARGET_ABSOLUTE_ABICALLS)
7351 return LOADGP_ABSOLUTE;
7352
7353 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
7354 }
7355
7356 /* The __gnu_local_gp symbol. */
7357
7358 static GTY(()) rtx mips_gnu_local_gp;
7359
7360 /* If we're generating n32 or n64 abicalls, emit instructions
7361 to set up the global pointer. */
7362
7363 static void
7364 mips_emit_loadgp (void)
7365 {
7366 rtx addr, offset, incoming_address, base, index;
7367
7368 switch (mips_current_loadgp_style ())
7369 {
7370 case LOADGP_ABSOLUTE:
7371 if (mips_gnu_local_gp == NULL)
7372 {
7373 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
7374 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
7375 }
7376 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
7377 break;
7378
7379 case LOADGP_NEWABI:
7380 addr = XEXP (DECL_RTL (current_function_decl), 0);
7381 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
7382 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7383 emit_insn (gen_loadgp_newabi (offset, incoming_address));
7384 if (!TARGET_EXPLICIT_RELOCS)
7385 emit_insn (gen_loadgp_blockage ());
7386 break;
7387
7388 case LOADGP_RTP:
7389 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
7390 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
7391 emit_insn (gen_loadgp_rtp (base, index));
7392 if (!TARGET_EXPLICIT_RELOCS)
7393 emit_insn (gen_loadgp_blockage ());
7394 break;
7395
7396 default:
7397 break;
7398 }
7399 }
7400
7401 /* Set up the stack and frame (if desired) for the function. */
7402
7403 static void
7404 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7405 {
7406 const char *fnname;
7407 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
7408
7409 #ifdef SDB_DEBUGGING_INFO
7410 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
7411 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
7412 #endif
7413
7414 /* In mips16 mode, we may need to generate a 32-bit stub to handle
7415 floating point arguments. The linker will arrange for any 32-bit
7416 functions to call this stub, which will then jump to the 16-bit
7417 function proper. */
7418 if (TARGET_MIPS16
7419 && TARGET_HARD_FLOAT_ABI
7420 && current_function_args_info.fp_code != 0)
7421 build_mips16_function_stub (file);
7422
7423 /* Select the mips16 mode for this function. */
7424 if (TARGET_MIPS16)
7425 fprintf (file, "\t.set\tmips16\n");
7426 else
7427 fprintf (file, "\t.set\tnomips16\n");
7428
7429 if (!FUNCTION_NAME_ALREADY_DECLARED)
7430 {
7431 /* Get the function name the same way that toplev.c does before calling
7432 assemble_start_function. This is needed so that the name used here
7433 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7434 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7435
7436 if (!flag_inhibit_size_directive)
7437 {
7438 fputs ("\t.ent\t", file);
7439 assemble_name (file, fnname);
7440 fputs ("\n", file);
7441 }
7442
7443 assemble_name (file, fnname);
7444 fputs (":\n", file);
7445 }
7446
7447 /* Stop mips_file_end from treating this function as external. */
7448 if (TARGET_IRIX && mips_abi == ABI_32)
7449 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
7450
7451 if (!flag_inhibit_size_directive)
7452 {
7453 /* .frame FRAMEREG, FRAMESIZE, RETREG */
7454 fprintf (file,
7455 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
7456 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
7457 ", args= " HOST_WIDE_INT_PRINT_DEC
7458 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
7459 (reg_names[(frame_pointer_needed)
7460 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
7461 ((frame_pointer_needed && TARGET_MIPS16)
7462 ? tsize - cfun->machine->frame.args_size
7463 : tsize),
7464 reg_names[GP_REG_FIRST + 31],
7465 cfun->machine->frame.var_size,
7466 cfun->machine->frame.num_gp,
7467 cfun->machine->frame.num_fp,
7468 cfun->machine->frame.args_size,
7469 cfun->machine->frame.cprestore_size);
7470
7471 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
7472 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7473 cfun->machine->frame.mask,
7474 cfun->machine->frame.gp_save_offset);
7475 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7476 cfun->machine->frame.fmask,
7477 cfun->machine->frame.fp_save_offset);
7478
7479 /* Require:
7480 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
7481 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
7482 }
7483
7484 if (mips_current_loadgp_style () == LOADGP_OLDABI)
7485 {
7486 /* Handle the initialization of $gp for SVR4 PIC. */
7487 if (!cfun->machine->all_noreorder_p)
7488 output_asm_insn ("%(.cpload\t%^%)", 0);
7489 else
7490 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
7491 }
7492 else if (cfun->machine->all_noreorder_p)
7493 output_asm_insn ("%(%<", 0);
7494
7495 /* Tell the assembler which register we're using as the global
7496 pointer. This is needed for thunks, since they can use either
7497 explicit relocs or assembler macros. */
7498 mips_output_cplocal ();
7499 }
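
/* As an illustration of the directives emitted above, a function with a
   40-byte frame, no frame pointer, 16 bytes of outgoing arguments,
   16 bytes of locals and saved $16/$31 would produce something like:

       .frame  $sp,40,$31              # vars= 16, regs= 2/0, args= 16, gp= 0
       .mask   0x80010000,-4
       .fmask  0x00000000,0

   assuming the default numeric register names.  The exact text follows
   the fprintf formats above; the mask bits correspond to $16 and $31 and
   the offsets are the gp/fp save offsets computed by compute_frame_size.  */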
7500 \f
7501 /* Make the last instruction frame related and note that it performs
7502 the operation described by FRAME_PATTERN. */
7503
7504 static void
7505 mips_set_frame_expr (rtx frame_pattern)
7506 {
7507 rtx insn;
7508
7509 insn = get_last_insn ();
7510 RTX_FRAME_RELATED_P (insn) = 1;
7511 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7512 frame_pattern,
7513 REG_NOTES (insn));
7514 }
7515
7516
7517 /* Return a frame-related rtx that stores REG at MEM.
7518 REG must be a single register. */
7519
7520 static rtx
7521 mips_frame_set (rtx mem, rtx reg)
7522 {
7523 rtx set;
7524
7525 /* If we're saving the return address register and the dwarf return
7526 address column differs from the hard register number, adjust the
7527 note reg to refer to the DWARF return address column. */
7528 if (REGNO (reg) == GP_REG_FIRST + 31
7529 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7530 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7531
7532 set = gen_rtx_SET (VOIDmode, mem, reg);
7533 RTX_FRAME_RELATED_P (set) = 1;
7534
7535 return set;
7536 }
7537
7538
7539 /* Save register REG to MEM. Make the instruction frame-related. */
7540
7541 static void
7542 mips_save_reg (rtx reg, rtx mem)
7543 {
7544 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
7545 {
7546 rtx x1, x2;
7547
7548 if (mips_split_64bit_move_p (mem, reg))
7549 mips_split_64bit_move (mem, reg);
7550 else
7551 mips_emit_move (mem, reg);
7552
7553 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
7554 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
7555 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
7556 }
7557 else
7558 {
7559 if (TARGET_MIPS16
7560 && REGNO (reg) != GP_REG_FIRST + 31
7561 && !M16_REG_P (REGNO (reg)))
7562 {
7563 /* Save a non-mips16 register by moving it through a temporary.
7564 We don't need to do this for $31 since there's a special
7565 instruction for it. */
7566 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
7567 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
7568 }
7569 else
7570 mips_emit_move (mem, reg);
7571
7572 mips_set_frame_expr (mips_frame_set (mem, reg));
7573 }
7574 }
7575
7576 /* Return a move between register REGNO and memory location SP + OFFSET.
7577 Make the move a load if RESTORE_P, otherwise make it a frame-related
7578 store. */
7579
7580 static rtx
7581 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
7582 unsigned int regno)
7583 {
7584 rtx reg, mem;
7585
7586 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7587 reg = gen_rtx_REG (SImode, regno);
7588 return (restore_p
7589 ? gen_rtx_SET (VOIDmode, reg, mem)
7590 : mips_frame_set (mem, reg));
7591 }
7592
7593 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7594 The instruction must:
7595
7596 - Allocate or deallocate SIZE bytes in total; SIZE is known
7597 to be nonzero.
7598
7599 - Save or restore as many registers in *MASK_PTR as possible.
7600 The instruction saves the first registers at the top of the
7601 allocated area, with the other registers below it.
7602
7603 - Save NARGS argument registers above the allocated area.
7604
7605 (NARGS is always zero if RESTORE_P.)
7606
7607 The SAVE and RESTORE instructions cannot save and restore all general
7608 registers, so there may be some registers left over for the caller to
7609 handle. Destructively modify *MASK_PTR so that it contains the registers
7610 that still need to be saved or restored. The caller can save these
7611 registers in the memory immediately below *OFFSET_PTR, which is a
7612 byte offset from the bottom of the allocated stack area. */
7613
7614 static rtx
7615 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7616 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7617 HOST_WIDE_INT size)
7618 {
7619 rtx pattern, set;
7620 HOST_WIDE_INT offset, top_offset;
7621 unsigned int i, regno;
7622 int n;
7623
7624 gcc_assert (cfun->machine->frame.fp_reg_size == 0);
7625
7626 /* Calculate the number of elements in the PARALLEL. We need one element
7627 for the stack adjustment, one for each argument register save, and one
7628 for each additional register move. */
7629 n = 1 + nargs;
7630 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7631 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7632 n++;
7633
7634 /* Create the final PARALLEL. */
7635 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7636 n = 0;
7637
7638 /* Add the stack pointer adjustment. */
7639 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7640 plus_constant (stack_pointer_rtx,
7641 restore_p ? size : -size));
7642 RTX_FRAME_RELATED_P (set) = 1;
7643 XVECEXP (pattern, 0, n++) = set;
7644
7645 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7646 top_offset = restore_p ? size : 0;
7647
7648 /* Save the arguments. */
7649 for (i = 0; i < nargs; i++)
7650 {
7651 offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
7652 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7653 XVECEXP (pattern, 0, n++) = set;
7654 }
7655
7656 /* Then fill in the other register moves. */
7657 offset = top_offset;
7658 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7659 {
7660 regno = mips16e_save_restore_regs[i];
7661 if (BITSET_P (*mask_ptr, regno))
7662 {
7663 offset -= UNITS_PER_WORD;
7664 set = mips16e_save_restore_reg (restore_p, offset, regno);
7665 XVECEXP (pattern, 0, n++) = set;
7666 *mask_ptr &= ~(1 << regno);
7667 }
7668 }
7669
7670 /* Tell the caller what offset it should use for the remaining registers. */
7671 *offset_ptr = size + (offset - top_offset);
7672
7673 gcc_assert (n == XVECLEN (pattern, 0));
7674
7675 return pattern;
7676 }
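
/* As an example of the *OFFSET_PTR contract, consider a SAVE with
   size == 32 that handles three registers itself.  With 4-byte words,
   OFFSET finishes the loop at top_offset - 12, so *OFFSET_PTR is
   32 - 12 == 20; the caller (see mips_expand_prologue) then stores each
   leftover register at successively lower offsets below that point in
   the newly allocated area.  The numbers are illustrative only.  */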
7677
7678 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7679 pointer. Return true if PATTERN matches the kind of instruction
7680 generated by mips16e_build_save_restore. If INFO is nonnull,
7681 initialize it when returning true. */
7682
7683 bool
7684 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7685 struct mips16e_save_restore_info *info)
7686 {
7687 unsigned int i, nargs, mask;
7688 HOST_WIDE_INT top_offset, save_offset, offset, extra;
7689 rtx set, reg, mem, base;
7690 int n;
7691
7692 if (!GENERATE_MIPS16E_SAVE_RESTORE)
7693 return false;
7694
7695 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7696 top_offset = adjust > 0 ? adjust : 0;
7697
7698 /* Interpret all other members of the PARALLEL. */
7699 save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
7700 mask = 0;
7701 nargs = 0;
7702 i = 0;
7703 for (n = 1; n < XVECLEN (pattern, 0); n++)
7704 {
7705 /* Check that we have a SET. */
7706 set = XVECEXP (pattern, 0, n);
7707 if (GET_CODE (set) != SET)
7708 return false;
7709
7710 /* Check that the SET is a load (if restoring) or a store
7711 (if saving). */
7712 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
7713 if (!MEM_P (mem))
7714 return false;
7715
7716 /* Check that the address is the sum of the stack pointer and a
7717 possibly-zero constant offset. */
7718 mips_split_plus (XEXP (mem, 0), &base, &offset);
7719 if (base != stack_pointer_rtx)
7720 return false;
7721
7722 /* Check that SET's other operand is a register. */
7723 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
7724 if (!REG_P (reg))
7725 return false;
7726
7727 /* Check for argument saves. */
7728 if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
7729 && REGNO (reg) == GP_ARG_FIRST + nargs)
7730 nargs++;
7731 else if (offset == save_offset)
7732 {
7733 while (mips16e_save_restore_regs[i++] != REGNO (reg))
7734 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
7735 return false;
7736
7737 mask |= 1 << REGNO (reg);
7738 save_offset -= GET_MODE_SIZE (gpr_mode);
7739 }
7740 else
7741 return false;
7742 }
7743
7744 /* Check that the restrictions on register ranges are met. */
7745 extra = 0;
7746 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7747 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
7748 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7749 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
7750 if (extra != 0)
7751 return false;
7752
7753 /* Make sure that the topmost argument register is not saved twice.
7754 The checks above ensure that the same is then true for the other
7755 argument registers. */
7756 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
7757 return false;
7758
7759 /* Pass back information, if requested. */
7760 if (info)
7761 {
7762 info->nargs = nargs;
7763 info->mask = mask;
7764 info->size = (adjust > 0 ? adjust : -adjust);
7765 }
7766
7767 return true;
7768 }
7769
7770 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
7771 for the register range [MIN_REG, MAX_REG]. Return a pointer to
7772 the null terminator. */
7773
7774 static char *
7775 mips16e_add_register_range (char *s, unsigned int min_reg,
7776 unsigned int max_reg)
7777 {
7778 if (min_reg != max_reg)
7779 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
7780 else
7781 s += sprintf (s, ",%s", reg_names[min_reg]);
7782 return s;
7783 }
7784
7785 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
7786 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
7787
7788 const char *
7789 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
7790 {
7791 static char buffer[300];
7792
7793 struct mips16e_save_restore_info info;
7794 unsigned int i, end;
7795 char *s;
7796
7797 /* Parse the pattern. */
7798 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
7799 gcc_unreachable ();
7800
7801 /* Add the mnemonic. */
7802 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
7803 s += strlen (s);
7804
7805 /* Save the arguments. */
7806 if (info.nargs > 1)
7807 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
7808 reg_names[GP_ARG_FIRST + info.nargs - 1]);
7809 else if (info.nargs == 1)
7810 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
7811
7812 /* Emit the amount of stack space to allocate or deallocate. */
7813 s += sprintf (s, "%d", (int) info.size);
7814
7815 /* Save or restore $16. */
7816 if (BITSET_P (info.mask, 16))
7817 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
7818
7819 /* Save or restore $17. */
7820 if (BITSET_P (info.mask, 17))
7821 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
7822
7823 /* Save or restore registers in the range $s2...$s8, which
7824 mips16e_s2_s8_regs lists in decreasing order. Note that this
7825 is a software register range; the hardware registers are not
7826 numbered consecutively. */
7827 end = ARRAY_SIZE (mips16e_s2_s8_regs);
7828 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
7829 if (i < end)
7830 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
7831 mips16e_s2_s8_regs[i]);
7832
7833 /* Save or restore registers in the range $a0...$a3. */
7834 end = ARRAY_SIZE (mips16e_a0_a3_regs);
7835 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
7836 if (i < end)
7837 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
7838 mips16e_a0_a3_regs[end - 1]);
7839
7840 /* Save or restore $31. */
7841 if (BITSET_P (info.mask, 31))
7842 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
7843
7844 return buffer;
7845 }
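
/* For instance, a SAVE pattern with two argument saves, a 32-byte
   adjustment and a mask containing $16, $17 and $31 would be printed,
   assuming the default numeric register names, roughly as:

       save    $4-$5,32,$16,$17,$31

   that is: store the first two argument registers above the frame,
   allocate 32 bytes, and save $16, $17 and the return address inside
   it.  A RESTORE pattern prints the same operands after "restore".  */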
7846
7847 /* Return a simplified form of X using the register values in REG_VALUES.
7848 REG_VALUES[R] is the last value assigned to hard register R, or null
7849 if R has not been modified.
7850
7851 This function is rather limited, but is good enough for our purposes. */
7852
7853 static rtx
7854 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7855 {
7856 rtx x0, x1;
7857
7858 x = avoid_constant_pool_reference (x);
7859
7860 if (UNARY_P (x))
7861 {
7862 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7863 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7864 x0, GET_MODE (XEXP (x, 0)));
7865 }
7866
7867 if (ARITHMETIC_P (x))
7868 {
7869 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7870 x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7871 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
7872 }
7873
7874 if (REG_P (x)
7875 && reg_values[REGNO (x)]
7876 && !rtx_unstable_p (reg_values[REGNO (x)]))
7877 return reg_values[REGNO (x)];
7878
7879 return x;
7880 }
7881
7882 /* Return true if (set DEST SRC) stores an argument register into its
7883 caller-allocated save slot, storing the number of that argument
7884 register in *REGNO_PTR if so. REG_VALUES is as for
7885 mips16e_collect_propagate_value. */
7886
7887 static bool
7888 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7889 unsigned int *regno_ptr)
7890 {
7891 unsigned int argno, regno;
7892 HOST_WIDE_INT offset, required_offset;
7893 rtx addr, base;
7894
7895 /* Check that this is a word-mode store. */
7896 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7897 return false;
7898
7899 /* Check that the register being saved is an unmodified argument
7900 register. */
7901 regno = REGNO (src);
7902 if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
7903 return false;
7904 argno = regno - GP_ARG_FIRST;
7905
7906 /* Check whether the address is an appropriate stack pointer or
7907 frame pointer access. The frame pointer is offset from the
7908 stack pointer by the size of the outgoing arguments. */
7909 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7910 mips_split_plus (addr, &base, &offset);
7911 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7912 if (base == hard_frame_pointer_rtx)
7913 required_offset -= cfun->machine->frame.args_size;
7914 else if (base != stack_pointer_rtx)
7915 return false;
7916 if (offset != required_offset)
7917 return false;
7918
7919 *regno_ptr = regno;
7920 return true;
7921 }
7922
7923 /* A subroutine of mips_expand_prologue, called only when generating
7924 MIPS16e SAVE instructions. Search the start of the function for any
7925 instructions that save argument registers into their caller-allocated
7926 save slots. Delete such instructions and return a value N such that
7927 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7928 instructions redundant. */
7929
7930 static unsigned int
7931 mips16e_collect_argument_saves (void)
7932 {
7933 rtx reg_values[FIRST_PSEUDO_REGISTER];
7934 rtx insn, next, set, dest, src;
7935 unsigned int nargs, regno;
7936
7937 push_topmost_sequence ();
7938 nargs = 0;
7939 memset (reg_values, 0, sizeof (reg_values));
7940 for (insn = get_insns (); insn; insn = next)
7941 {
7942 next = NEXT_INSN (insn);
7943 if (NOTE_P (insn))
7944 continue;
7945
7946 if (!INSN_P (insn))
7947 break;
7948
7949 set = PATTERN (insn);
7950 if (GET_CODE (set) != SET)
7951 break;
7952
7953 dest = SET_DEST (set);
7954 src = SET_SRC (set);
7955 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
7956 {
7957 if (!BITSET_P (cfun->machine->frame.mask, regno))
7958 {
7959 delete_insn (insn);
7960 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
7961 }
7962 }
7963 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7964 reg_values[REGNO (dest)]
7965 = mips16e_collect_propagate_value (src, reg_values);
7966 else
7967 break;
7968 }
7969 pop_topmost_sequence ();
7970
7971 return nargs;
7972 }
7973
7974 /* Expand the prologue into a bunch of separate insns. */
7975
7976 void
7977 mips_expand_prologue (void)
7978 {
7979 HOST_WIDE_INT size;
7980 unsigned int nargs;
7981 rtx insn;
7982
7983 if (cfun->machine->global_pointer > 0)
7984 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
7985
7986 size = compute_frame_size (get_frame_size ());
7987
7988 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
7989 bytes beforehand; this is enough to cover the register save area
7990 without going out of range. */
7991 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
7992 {
7993 HOST_WIDE_INT step1;
7994
7995 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
7996
7997 if (GENERATE_MIPS16E_SAVE_RESTORE)
7998 {
7999 HOST_WIDE_INT offset;
8000 unsigned int mask, regno;
8001
8002 /* Try to merge argument stores into the save instruction. */
8003 nargs = mips16e_collect_argument_saves ();
8004
8005 /* Build the save instruction. */
8006 mask = cfun->machine->frame.mask;
8007 insn = mips16e_build_save_restore (false, &mask, &offset,
8008 nargs, step1);
8009 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8010 size -= step1;
8011
8012 /* Check if we need to save other registers. */
8013 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8014 if (BITSET_P (mask, regno - GP_REG_FIRST))
8015 {
8016 offset -= GET_MODE_SIZE (gpr_mode);
8017 mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
8018 }
8019 }
8020 else
8021 {
8022 insn = gen_add3_insn (stack_pointer_rtx,
8023 stack_pointer_rtx,
8024 GEN_INT (-step1));
8025 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8026 size -= step1;
8027 mips_for_each_saved_reg (size, mips_save_reg);
8028 }
8029 }
8030
8031 /* Allocate the rest of the frame. */
8032 if (size > 0)
8033 {
8034 if (SMALL_OPERAND (-size))
8035 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
8036 stack_pointer_rtx,
8037 GEN_INT (-size)))) = 1;
8038 else
8039 {
8040 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
8041 if (TARGET_MIPS16)
8042 {
8043 /* There are no instructions to add or subtract registers
8044 from the stack pointer, so use the frame pointer as a
8045 temporary. We should always be using a frame pointer
8046 in this case anyway. */
8047 gcc_assert (frame_pointer_needed);
8048 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8049 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
8050 hard_frame_pointer_rtx,
8051 MIPS_PROLOGUE_TEMP (Pmode)));
8052 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
8053 }
8054 else
8055 emit_insn (gen_sub3_insn (stack_pointer_rtx,
8056 stack_pointer_rtx,
8057 MIPS_PROLOGUE_TEMP (Pmode)));
8058
8059 /* Describe the combined effect of the previous instructions. */
8060 mips_set_frame_expr
8061 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8062 plus_constant (stack_pointer_rtx, -size)));
8063 }
8064 }
8065
8066 /* Set up the frame pointer, if we're using one. In mips16 code,
8067 we point the frame pointer ahead of the outgoing argument area.
8068 This should allow more variables & incoming arguments to be
8069 accessed with unextended instructions. */
8070 if (frame_pointer_needed)
8071 {
8072 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
8073 {
8074 rtx offset = GEN_INT (cfun->machine->frame.args_size);
8075 if (SMALL_OPERAND (cfun->machine->frame.args_size))
8076 RTX_FRAME_RELATED_P
8077 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8078 stack_pointer_rtx,
8079 offset))) = 1;
8080 else
8081 {
8082 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), offset);
8083 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8084 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8085 hard_frame_pointer_rtx,
8086 MIPS_PROLOGUE_TEMP (Pmode)));
8087 mips_set_frame_expr
8088 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
8089 plus_constant (stack_pointer_rtx,
8090 cfun->machine->frame.args_size)));
8091 }
8092 }
8093 else
8094 RTX_FRAME_RELATED_P (mips_emit_move (hard_frame_pointer_rtx,
8095 stack_pointer_rtx)) = 1;
8096 }
8097
8098 mips_emit_loadgp ();
8099
8100 /* If generating o32/o64 abicalls, save $gp on the stack. */
8101 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
8102 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
8103
8104 /* If we are profiling, make sure no instructions are scheduled before
8105 the call to mcount. */
8106
8107 if (current_function_profile)
8108 emit_insn (gen_blockage ());
8109 }
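
/* To summarize the two-step allocation above with a sketch: when
   total_size exceeds MIPS_MAX_FIRST_STACK_STEP, the prologue first drops
   $sp by step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP) and saves the
   registers relative to that partial frame; the remaining size - step1
   bytes are then allocated either with a single add of a small negative
   constant or, when the remainder is not a SMALL_OPERAND, by loading it
   into MIPS_PROLOGUE_TEMP and subtracting.  MIPS16 code must route that
   subtraction through the frame pointer, as the comment in the code
   above explains.  */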
8110 \f
8111 /* Do any necessary cleanup after a function to restore stack, frame,
8112 and regs. */
8113
8114 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
8115
8116 static void
8117 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8118 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8119 {
8120 /* Reinstate the normal $gp. */
8121 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
8122 mips_output_cplocal ();
8123
8124 if (cfun->machine->all_noreorder_p)
8125 {
8126 /* Avoid using %>%) since it adds excess whitespace. */
8127 output_asm_insn (".set\tmacro", 0);
8128 output_asm_insn (".set\treorder", 0);
8129 set_noreorder = set_nomacro = 0;
8130 }
8131
8132 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
8133 {
8134 const char *fnname;
8135
8136 /* Get the function name the same way that toplev.c does before calling
8137 assemble_start_function. This is needed so that the name used here
8138 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8139 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8140 fputs ("\t.end\t", file);
8141 assemble_name (file, fnname);
8142 fputs ("\n", file);
8143 }
8144 }
8145 \f
8146 /* Emit instructions to restore register REG from slot MEM. */
8147
8148 static void
8149 mips_restore_reg (rtx reg, rtx mem)
8150 {
8151 /* There's no mips16 instruction to load $31 directly. Load into
8152 $7 instead and adjust the return insn appropriately. */
8153 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
8154 reg = gen_rtx_REG (GET_MODE (reg), 7);
8155
8156 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
8157 {
8158 /* Can't restore directly; move through a temporary. */
8159 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
8160 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
8161 }
8162 else
8163 mips_emit_move (reg, mem);
8164 }
8165
8166
8167 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
8168 if this epilogue precedes a sibling call, false if it is for a normal
8169 "epilogue" pattern. */
8170
8171 void
8172 mips_expand_epilogue (int sibcall_p)
8173 {
8174 HOST_WIDE_INT step1, step2;
8175 rtx base, target;
8176
8177 if (!sibcall_p && mips_can_use_return_insn ())
8178 {
8179 emit_jump_insn (gen_return ());
8180 return;
8181 }
8182
8183 /* In mips16 mode, if the return value should go into a floating-point
8184 register, we need to call a helper routine to copy it over. */
8185 if (mips16_cfun_returns_in_fpr_p ())
8186 {
8187 char *name;
8188 rtx func;
8189 rtx insn;
8190 rtx retval;
8191 rtx call;
8192 tree id;
8193 tree return_type;
8194 enum machine_mode return_mode;
8195
8196 return_type = DECL_RESULT (current_function_decl);
8197 return_mode = DECL_MODE (return_type);
8198
8199 name = ACONCAT (("__mips16_ret_",
8200 mips16_call_stub_mode_suffix (return_mode),
8201 NULL));
8202 id = get_identifier (name);
8203 func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8204 retval = gen_rtx_REG (return_mode, GP_RETURN);
8205 call = gen_call_value_internal (retval, func, const0_rtx);
8206 insn = emit_call_insn (call);
8207 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
8208 }
8209
8210 /* Split the frame into two. STEP1 is the amount of stack we should
8211 deallocate before restoring the registers. STEP2 is the amount we
8212 should deallocate afterwards.
8213
8214 Start off by assuming that no registers need to be restored. */
8215 step1 = cfun->machine->frame.total_size;
8216 step2 = 0;
8217
8218 /* Work out which register holds the frame address. Account for the
8219 frame pointer offset used by mips16 code. */
8220 if (!frame_pointer_needed)
8221 base = stack_pointer_rtx;
8222 else
8223 {
8224 base = hard_frame_pointer_rtx;
8225 if (TARGET_MIPS16)
8226 step1 -= cfun->machine->frame.args_size;
8227 }
8228
8229 /* If we need to restore registers, deallocate as much stack as
8230 possible in the second step without going out of range. */
8231 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8232 {
8233 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8234 step1 -= step2;
8235 }
8236
8237 /* Set TARGET to BASE + STEP1. */
8238 target = base;
8239 if (step1 > 0)
8240 {
8241 rtx adjust;
8242
8243 /* Get an rtx for STEP1 that we can add to BASE. */
8244 adjust = GEN_INT (step1);
8245 if (!SMALL_OPERAND (step1))
8246 {
8247 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8248 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8249 }
8250
8251 /* Normal mode code can copy the result straight into $sp. */
8252 if (!TARGET_MIPS16)
8253 target = stack_pointer_rtx;
8254
8255 emit_insn (gen_add3_insn (target, base, adjust));
8256 }
8257
8258 /* Copy TARGET into the stack pointer. */
8259 if (target != stack_pointer_rtx)
8260 mips_emit_move (stack_pointer_rtx, target);
8261
8262 /* If we're using addressing macros, $gp is implicitly used by all
8263 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8264 from the stack. */
8265 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8266 emit_insn (gen_blockage ());
8267
8268 if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
8269 {
8270 unsigned int regno, mask;
8271 HOST_WIDE_INT offset;
8272 rtx restore;
8273
8274 /* Generate the restore instruction. */
8275 mask = cfun->machine->frame.mask;
8276 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8277
8278 /* Restore any other registers manually. */
8279 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8280 if (BITSET_P (mask, regno - GP_REG_FIRST))
8281 {
8282 offset -= GET_MODE_SIZE (gpr_mode);
8283 mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
8284 }
8285
8286 /* Restore the remaining registers and deallocate the final bit
8287 of the frame. */
8288 emit_insn (restore);
8289 }
8290 else
8291 {
8292 /* Restore the registers. */
8293 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
8294 mips_restore_reg);
8295
8296 /* Deallocate the final bit of the frame. */
8297 if (step2 > 0)
8298 emit_insn (gen_add3_insn (stack_pointer_rtx,
8299 stack_pointer_rtx,
8300 GEN_INT (step2)));
8301 }
8302
8303 /* Add in the __builtin_eh_return stack adjustment. We need to
8304 use a temporary in mips16 code. */
8305 if (current_function_calls_eh_return)
8306 {
8307 if (TARGET_MIPS16)
8308 {
8309 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8310 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8311 MIPS_EPILOGUE_TEMP (Pmode),
8312 EH_RETURN_STACKADJ_RTX));
8313 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8314 }
8315 else
8316 emit_insn (gen_add3_insn (stack_pointer_rtx,
8317 stack_pointer_rtx,
8318 EH_RETURN_STACKADJ_RTX));
8319 }
8320
8321 if (!sibcall_p)
8322 {
8323 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8324 path will restore the return address into $7 rather than $31. */
8325 if (TARGET_MIPS16
8326 && !GENERATE_MIPS16E_SAVE_RESTORE
8327 && (cfun->machine->frame.mask & RA_MASK) != 0)
8328 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8329 GP_REG_FIRST + 7)));
8330 else
8331 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8332 GP_REG_FIRST + 31)));
8333 }
8334 }
8335 \f
8336 /* Return nonzero if this function is known to have a null epilogue.
8337 This allows the optimizer to omit jumps to jumps if no stack
8338 was created. */
8339
8340 int
8341 mips_can_use_return_insn (void)
8342 {
8343 if (! reload_completed)
8344 return 0;
8345
8346 if (df_regs_ever_live_p (31) || current_function_profile)
8347 return 0;
8348
8349 /* In mips16 mode, a function that returns a floating point value
8350 needs to arrange to copy the return value into the floating point
8351 registers. */
8352 if (mips16_cfun_returns_in_fpr_p ())
8353 return 0;
8354
8355 if (cfun->machine->frame.initialized)
8356 return cfun->machine->frame.total_size == 0;
8357
8358 return compute_frame_size (get_frame_size ()) == 0;
8359 }
8360 \f
8361 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
8362 in order to avoid duplicating too much logic from elsewhere. */
8363
8364 static void
8365 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8366 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8367 tree function)
8368 {
8369 rtx this, temp1, temp2, insn, fnaddr;
8370
8371 /* Pretend to be a post-reload pass while generating rtl. */
8372 reload_completed = 1;
8373
8374 /* Mark the end of the (empty) prologue. */
8375 emit_note (NOTE_INSN_PROLOGUE_END);
8376
8377 /* Pick a global pointer. Use a call-clobbered register if
8378 TARGET_CALL_SAVED_GP, so that we can use a sibcall. */
8379 if (TARGET_USE_GOT)
8380 {
8381 cfun->machine->global_pointer =
8382 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
8383
8384 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8385 }
8386
8387 /* Set up the global pointer for n32 or n64 abicalls. If
8388 LOADGP_ABSOLUTE then the thunk does not use the gp and there is
8389 no need to load it.  */
8390 if (mips_current_loadgp_style () != LOADGP_ABSOLUTE
8391 || !targetm.binds_local_p (function))
8392 mips_emit_loadgp ();
8393
8394 /* We need two temporary registers in some cases. */
8395 temp1 = gen_rtx_REG (Pmode, 2);
8396 temp2 = gen_rtx_REG (Pmode, 3);
8397
8398 /* Find out which register contains the "this" pointer. */
8399 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8400 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
8401 else
8402 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
8403
8404 /* Add DELTA to THIS. */
8405 if (delta != 0)
8406 {
8407 rtx offset = GEN_INT (delta);
8408 if (!SMALL_OPERAND (delta))
8409 {
8410 mips_emit_move (temp1, offset);
8411 offset = temp1;
8412 }
8413 emit_insn (gen_add3_insn (this, this, offset));
8414 }
8415
8416 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
8417 if (vcall_offset != 0)
8418 {
8419 rtx addr;
8420
8421 /* Set TEMP1 to *THIS. */
8422 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
8423
8424 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
8425 addr = mips_add_offset (temp2, temp1, vcall_offset);
8426
8427 /* Load the offset and add it to THIS. */
8428 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
8429 emit_insn (gen_add3_insn (this, this, temp1));
8430 }
8431
8432 /* Jump to the target function. Use a sibcall if direct jumps are
8433 allowed, otherwise load the address into a register first. */
8434 fnaddr = XEXP (DECL_RTL (function), 0);
8435 if (TARGET_MIPS16 || TARGET_USE_GOT || SYMBOL_REF_LONG_CALL_P (fnaddr)
8436 || SYMBOL_REF_MIPS16_FUNC_P (fnaddr))
8437 {
8438 /* This is messy. gas treats "la $25,foo" as part of a call
8439 sequence and may allow a global "foo" to be lazily bound.
8440 The general move patterns therefore reject this combination.
8441
8442 In this context, lazy binding would actually be OK
8443 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
8444 TARGET_CALL_SAVED_GP; see mips_load_call_address.
8445 We must therefore load the address via a temporary
8446 register if mips_dangerous_for_la25_p.
8447
8448 If we jump to the temporary register rather than $25, the assembler
8449 can use the move insn to fill the jump's delay slot. */
8450 if (TARGET_USE_PIC_FN_ADDR_REG
8451 && !mips_dangerous_for_la25_p (fnaddr))
8452 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8453 mips_load_call_address (temp1, fnaddr, true);
8454
8455 if (TARGET_USE_PIC_FN_ADDR_REG
8456 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
8457 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
8458 emit_jump_insn (gen_indirect_jump (temp1));
8459 }
8460 else
8461 {
8462 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
8463 SIBLING_CALL_P (insn) = 1;
8464 }
8465
8466 /* Run just enough of rest_of_compilation. This sequence was
8467 "borrowed" from alpha.c. */
8468 insn = get_insns ();
8469 insn_locators_alloc ();
8470 split_all_insns_noflow ();
8471 mips16_lay_out_constants ();
8472 shorten_branches (insn);
8473 final_start_function (insn, file, 1);
8474 final (insn, file, 1);
8475 final_end_function ();
8476
8477 /* Clean up the vars set above. Note that final_end_function resets
8478 the global pointer for us. */
8479 reload_completed = 0;
8480 }
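
/* In C-like terms, and ignoring the PIC and long-call handling above,
   the emitted thunk behaves as the following sketch (THIS normally
   arrives in $4, or in $5 when the result is returned via an invisible
   reference):

       p = (char *) this + delta;
       if (vcall_offset != 0)
         p += *(ptrdiff_t *) (*(char **) p + vcall_offset);
       tail-call FUNCTION, passing p as the "this" pointer;

   This is only a sketch of the semantics; the real adjustment is
   generated as RTL above so that it can be split, scheduled and output
   like ordinary insns.  */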
8481 \f
8482 /* Implement TARGET_SELECT_RTX_SECTION. */
8483
8484 static section *
8485 mips_select_rtx_section (enum machine_mode mode, rtx x,
8486 unsigned HOST_WIDE_INT align)
8487 {
8488 /* ??? Consider using mergeable small data sections. */
8489 if (mips_rtx_constant_in_small_data_p (mode))
8490 return get_named_section (NULL, ".sdata", 0);
8491
8492 return default_elf_select_rtx_section (mode, x, align);
8493 }
8494
8495 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
8496
8497 The complication here is that, with the combination TARGET_ABICALLS
8498 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
8499 therefore not be included in the read-only part of a DSO. Handle such
8500 cases by selecting a normal data section instead of a read-only one.
8501 The logic apes that in default_function_rodata_section. */
8502
8503 static section *
8504 mips_function_rodata_section (tree decl)
8505 {
8506 if (!TARGET_ABICALLS || TARGET_GPWORD)
8507 return default_function_rodata_section (decl);
8508
8509 if (decl && DECL_SECTION_NAME (decl))
8510 {
8511 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8512 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
8513 {
8514 char *rname = ASTRDUP (name);
8515 rname[14] = 'd';
8516 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
8517 }
8518 else if (flag_function_sections && flag_data_sections
8519 && strncmp (name, ".text.", 6) == 0)
8520 {
8521 char *rname = ASTRDUP (name);
8522 memcpy (rname + 1, "data", 4);
8523 return get_section (rname, SECTION_WRITE, decl);
8524 }
8525 }
8526 return data_section;
8527 }
8528
8529 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
8530 locally-defined objects go in a small data section. It also controls
8531 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
8532 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
8533
8534 static bool
8535 mips_in_small_data_p (const_tree decl)
8536 {
8537 HOST_WIDE_INT size;
8538
8539 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
8540 return false;
8541
8542 /* We don't yet generate small-data references for -mabicalls or
8543 VxWorks RTP code. See the related -G handling in override_options. */
8544 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
8545 return false;
8546
8547 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
8548 {
8549 const char *name;
8550
8551 /* Reject anything that isn't in a known small-data section. */
8552 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8553 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
8554 return false;
8555
8556 /* If a symbol is defined externally, the assembler will use the
8557 usual -G rules when deciding how to implement macros. */
8558 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
8559 return true;
8560 }
8561 else if (TARGET_EMBEDDED_DATA)
8562 {
8563 /* Don't put constants into the small data section: we want them
8564 to be in ROM rather than RAM. */
8565 if (TREE_CODE (decl) != VAR_DECL)
8566 return false;
8567
8568 if (TREE_READONLY (decl)
8569 && !TREE_SIDE_EFFECTS (decl)
8570 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
8571 return false;
8572 }
8573
8574 /* Enforce -mlocal-sdata. */
8575 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
8576 return false;
8577
8578 /* Enforce -mextern-sdata. */
8579 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
8580 {
8581 if (DECL_EXTERNAL (decl))
8582 return false;
8583 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
8584 return false;
8585 }
8586
8587 size = int_size_in_bytes (TREE_TYPE (decl));
8588 return (size > 0 && size <= mips_section_threshold);
8589 }
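
/* As a rough illustration of the rules above: without -mabicalls or
   VxWorks RTP, a small definition such as

       int counter;

   whose size is no larger than mips_section_threshold is placed in
   .sdata/.sbss and can then be reached as %gp_rel(counter)($gp), whereas
   an object bigger than the -G threshold, or one given an explicit
   __attribute__ ((section (".data"))), fails the checks above and uses
   normal absolute addressing.  The variable name is hypothetical.  */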
8590
8591 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
8592 anchors for small data: the GP register acts as an anchor in that
8593 case. We also don't want to use them for PC-relative accesses,
8594 where the PC acts as an anchor. */
8595
8596 static bool
8597 mips_use_anchors_for_symbol_p (const_rtx symbol)
8598 {
8599 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
8600 {
8601 case SYMBOL_PC_RELATIVE:
8602 case SYMBOL_GP_RELATIVE:
8603 return false;
8604
8605 default:
8606 return true;
8607 }
8608 }
8609 \f
8610 /* See whether VALTYPE is a record whose fields should be returned in
8611 floating-point registers. If so, return the number of fields and
8612 list them in FIELDS (which should have two elements). Return 0
8613 otherwise.
8614
8615 For n32 & n64, a structure with one or two fields is returned in
8616 floating-point registers as long as every field has a floating-point
8617 type. */
8618
8619 static int
8620 mips_fpr_return_fields (const_tree valtype, tree *fields)
8621 {
8622 tree field;
8623 int i;
8624
8625 if (!TARGET_NEWABI)
8626 return 0;
8627
8628 if (TREE_CODE (valtype) != RECORD_TYPE)
8629 return 0;
8630
8631 i = 0;
8632 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
8633 {
8634 if (TREE_CODE (field) != FIELD_DECL)
8635 continue;
8636
8637 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
8638 return 0;
8639
8640 if (i == 2)
8641 return 0;
8642
8643 fields[i++] = field;
8644 }
8645 return i;
8646 }
8647
8648
8649 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
8650 a value in the most significant part of $2/$3 if:
8651
8652 - the target is big-endian;
8653
8654 - the value has a structure or union type (we generalize this to
8655 cover aggregates from other languages too); and
8656
8657 - the structure is not returned in floating-point registers. */
8658
8659 static bool
8660 mips_return_in_msb (const_tree valtype)
8661 {
8662 tree fields[2];
8663
8664 return (TARGET_NEWABI
8665 && TARGET_BIG_ENDIAN
8666 && AGGREGATE_TYPE_P (valtype)
8667 && mips_fpr_return_fields (valtype, fields) == 0);
8668 }
8669
8670
8671 /* Return a composite value in a pair of floating-point registers.
8672 MODE1 and OFFSET1 are the mode and byte offset for the first value,
8673 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
8674 complete value.
8675
8676 For n32 & n64, $f0 always holds the first value and $f2 the second.
8677 Otherwise the values are packed together as closely as possible. */
8678
8679 static rtx
8680 mips_return_fpr_pair (enum machine_mode mode,
8681 enum machine_mode mode1, HOST_WIDE_INT offset1,
8682 enum machine_mode mode2, HOST_WIDE_INT offset2)
8683 {
8684 int inc;
8685
8686 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
8687 return gen_rtx_PARALLEL
8688 (mode,
8689 gen_rtvec (2,
8690 gen_rtx_EXPR_LIST (VOIDmode,
8691 gen_rtx_REG (mode1, FP_RETURN),
8692 GEN_INT (offset1)),
8693 gen_rtx_EXPR_LIST (VOIDmode,
8694 gen_rtx_REG (mode2, FP_RETURN + inc),
8695 GEN_INT (offset2))));
8696
8697 }
8698
8699
8700 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
8701 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
8702 VALTYPE is null and MODE is the mode of the return value. */
8703
8704 rtx
8705 mips_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
8706 enum machine_mode mode)
8707 {
8708 if (valtype)
8709 {
8710 tree fields[2];
8711 int unsignedp;
8712
8713 mode = TYPE_MODE (valtype);
8714 unsignedp = TYPE_UNSIGNED (valtype);
8715
8716 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
8717 true, we must promote the mode just as PROMOTE_MODE does. */
8718 mode = promote_mode (valtype, mode, &unsignedp, 1);
8719
8720 /* Handle structures whose fields are returned in $f0/$f2. */
8721 switch (mips_fpr_return_fields (valtype, fields))
8722 {
8723 case 1:
8724 return gen_rtx_REG (mode, FP_RETURN);
8725
8726 case 2:
8727 return mips_return_fpr_pair (mode,
8728 TYPE_MODE (TREE_TYPE (fields[0])),
8729 int_byte_position (fields[0]),
8730 TYPE_MODE (TREE_TYPE (fields[1])),
8731 int_byte_position (fields[1]));
8732 }
8733
8734 /* If a value is passed in the most significant part of a register, see
8735 whether we have to round the mode up to a whole number of words. */
8736 if (mips_return_in_msb (valtype))
8737 {
8738 HOST_WIDE_INT size = int_size_in_bytes (valtype);
8739 if (size % UNITS_PER_WORD != 0)
8740 {
8741 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
8742 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
8743 }
8744 }
8745
8746 /* For EABI, the class of return register depends entirely on MODE.
8747 For example, "struct { some_type x; }" and "union { some_type x; }"
8748 are returned in the same way as a bare "some_type" would be.
8749 Other ABIs only use FPRs for scalar, complex or vector types. */
8750 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
8751 return gen_rtx_REG (mode, GP_RETURN);
8752 }
8753
8754 if (!TARGET_MIPS16)
8755 {
8756 /* Handle long doubles for n32 & n64. */
8757 if (mode == TFmode)
8758 return mips_return_fpr_pair (mode,
8759 DImode, 0,
8760 DImode, GET_MODE_SIZE (mode) / 2);
8761
8762 if (mips_return_mode_in_fpr_p (mode))
8763 {
8764 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
8765 return mips_return_fpr_pair (mode,
8766 GET_MODE_INNER (mode), 0,
8767 GET_MODE_INNER (mode),
8768 GET_MODE_SIZE (mode) / 2);
8769 else
8770 return gen_rtx_REG (mode, FP_RETURN);
8771 }
8772 }
8773
8774 return gen_rtx_REG (mode, GP_RETURN);
8775 }
8776
8777 /* Return true when an argument must be passed by reference. */
8778
8779 static bool
8780 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8781 enum machine_mode mode, const_tree type,
8782 bool named ATTRIBUTE_UNUSED)
8783 {
8784 if (mips_abi == ABI_EABI)
8785 {
8786 int size;
8787
8788 /* ??? How should SCmode be handled? */
8789 if (mode == DImode || mode == DFmode)
8790 return 0;
8791
8792 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
8793 return size == -1 || size > UNITS_PER_WORD;
8794 }
8795 else
8796 {
8797 /* If we have a variable-sized parameter, we have no choice. */
8798 return targetm.calls.must_pass_in_stack (mode, type);
8799 }
8800 }
8801
8802 static bool
8803 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8804 enum machine_mode mode ATTRIBUTE_UNUSED,
8805 const_tree type ATTRIBUTE_UNUSED, bool named)
8806 {
8807 return mips_abi == ABI_EABI && named;
8808 }
8809
8810 /* Return true if registers of class CLASS cannot change from mode FROM
8811 to mode TO. */
8812
8813 bool
8814 mips_cannot_change_mode_class (enum machine_mode from,
8815 enum machine_mode to, enum reg_class class)
8816 {
8817 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
8818 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
8819 {
8820 if (TARGET_BIG_ENDIAN)
8821 {
8822 /* When a multi-word value is stored in paired floating-point
8823 registers, the first register always holds the low word.
8824 We therefore can't allow FPRs to change between single-word
8825 and multi-word modes. */
8826 if (MAX_FPRS_PER_FMT > 1 && reg_classes_intersect_p (FP_REGS, class))
8827 return true;
8828 }
8829 }
8830
8831 /* gcc assumes that each word of a multiword register can be accessed
8832 individually using SUBREGs. This is not true for floating-point
8833 registers if they are bigger than a word. */
8834 if (UNITS_PER_FPREG > UNITS_PER_WORD
8835 && GET_MODE_SIZE (from) > UNITS_PER_WORD
8836 && GET_MODE_SIZE (to) < UNITS_PER_FPREG
8837 && reg_classes_intersect_p (FP_REGS, class))
8838 return true;
8839
8840 /* Loading a 32-bit value into a 64-bit floating-point register
8841 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
8842 We can't allow 64-bit float registers to change from SImode to
8843 a wider mode. */
8844 if (TARGET_64BIT
8845 && TARGET_FLOAT64
8846 && from == SImode
8847 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
8848 && reg_classes_intersect_p (FP_REGS, class))
8849 return true;
8850
8851 return false;
8852 }
8853
8854 /* Return true if X should not be moved directly into register $25.
8855 We need this because many versions of GAS will treat "la $25,foo" as
8856 part of a call sequence and so allow a global "foo" to be lazily bound. */
8857
8858 bool
8859 mips_dangerous_for_la25_p (rtx x)
8860 {
8861 return (!TARGET_EXPLICIT_RELOCS
8862 && TARGET_USE_GOT
8863 && GET_CODE (x) == SYMBOL_REF
8864 && mips_global_symbol_p (x));
8865 }
8866
8867 /* Implement PREFERRED_RELOAD_CLASS. */
8868
8869 enum reg_class
8870 mips_preferred_reload_class (rtx x, enum reg_class class)
8871 {
8872 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
8873 return LEA_REGS;
8874
8875 if (TARGET_HARD_FLOAT
8876 && FLOAT_MODE_P (GET_MODE (x))
8877 && reg_class_subset_p (FP_REGS, class))
8878 return FP_REGS;
8879
8880 if (reg_class_subset_p (GR_REGS, class))
8881 class = GR_REGS;
8882
8883 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
8884 class = M16_REGS;
8885
8886 return class;
8887 }
8888
8889 /* This function returns the register class required for a secondary
8890 register when copying between one of the registers in CLASS and X,
8891 using MODE. If IN_P is nonzero, the copy is going from X to the
8892 register, otherwise the register is the source. A return value of
8893 NO_REGS means that no secondary register is required. */
8894
8895 enum reg_class
8896 mips_secondary_reload_class (enum reg_class class,
8897 enum machine_mode mode, rtx x, int in_p)
8898 {
8899 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
8900 int regno = -1;
8901 int gp_reg_p;
8902
8903 if (REG_P (x) || GET_CODE (x) == SUBREG)
8904 regno = true_regnum (x);
8905
8906 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
8907
8908 if (mips_dangerous_for_la25_p (x))
8909 {
8910 gr_regs = LEA_REGS;
8911 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
8912 return gr_regs;
8913 }
8914
8915 /* Copying from HI or LO to anywhere other than a general register
8916 requires a general register.
8917 This rule applies to both the original HI/LO pair and the new
8918 DSP accumulators. */
8919 if (reg_class_subset_p (class, ACC_REGS))
8920 {
8921 if (TARGET_MIPS16 && in_p)
8922 {
8923 /* We can't really copy to HI or LO at all in mips16 mode. */
8924 return M16_REGS;
8925 }
8926 return gp_reg_p ? NO_REGS : gr_regs;
8927 }
8928 if (ACC_REG_P (regno))
8929 {
8930 if (TARGET_MIPS16 && ! in_p)
8931 {
8932 /* We can't really copy to HI or LO at all in mips16 mode. */
8933 return M16_REGS;
8934 }
8935 return class == gr_regs ? NO_REGS : gr_regs;
8936 }
8937
8938 /* We can only copy a value to a condition code register from a
8939 floating point register, and even then we require a scratch
8940 floating point register. We can only copy a value out of a
8941 condition code register into a general register. */
8942 if (class == ST_REGS)
8943 {
8944 if (in_p)
8945 return FP_REGS;
8946 return gp_reg_p ? NO_REGS : gr_regs;
8947 }
8948 if (ST_REG_P (regno))
8949 {
8950 if (! in_p)
8951 return FP_REGS;
8952 return class == gr_regs ? NO_REGS : gr_regs;
8953 }
8954
8955 if (class == FP_REGS)
8956 {
8957 if (MEM_P (x))
8958 {
8959 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
8960 return NO_REGS;
8961 }
8962 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
8963 {
8964 /* We can use the l.s and l.d macros to load floating-point
8965 constants. ??? For l.s, we could probably get better
8966 code by returning GR_REGS here. */
8967 return NO_REGS;
8968 }
8969 else if (gp_reg_p || x == CONST0_RTX (mode))
8970 {
8971 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
8972 return NO_REGS;
8973 }
8974 else if (FP_REG_P (regno))
8975 {
8976 /* In this case we can use mov.s or mov.d. */
8977 return NO_REGS;
8978 }
8979 else
8980 {
8981 /* Otherwise, we need to reload through an integer register. */
8982 return gr_regs;
8983 }
8984 }
8985
8986 /* In mips16 mode, going between memory and anything but M16_REGS
8987 requires an M16_REG. */
8988 if (TARGET_MIPS16)
8989 {
8990 if (class != M16_REGS && class != M16_NA_REGS)
8991 {
8992 if (gp_reg_p)
8993 return NO_REGS;
8994 return M16_REGS;
8995 }
8996 if (! gp_reg_p)
8997 {
8998 if (class == M16_REGS || class == M16_NA_REGS)
8999 return NO_REGS;
9000 return M16_REGS;
9001 }
9002 }
9003
9004 return NO_REGS;
9005 }
9006
9007 /* Implement CLASS_MAX_NREGS.
9008
9009 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
9010
9011 - ST_REGS always hold CCmode values, and CCmode values are
9012 considered to be 4 bytes wide.
9013
9014 All other register classes are covered by UNITS_PER_WORD. Note that
9015 this is true even for unions of integer and float registers when the
9016 latter are smaller than the former. The only supported combination
9017 in which this occurs is -mgp64 -msingle-float, which has 64-bit
9018 words but 32-bit float registers. A word-based calculation is correct
9019 in that case since -msingle-float disallows multi-FPR values. */
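/* As a quick example: with 64-bit FPRs (UNITS_PER_FPREG == 8) a DFmode
   value counts as one FP_REGS register, while the same value needs two
   registers in a 32-bit GPR class; a CCmode value always counts as a
   single ST_REGS register.  */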
9020
9021 int
9022 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
9023 enum machine_mode mode)
9024 {
9025 if (class == ST_REGS)
9026 return (GET_MODE_SIZE (mode) + 3) / 4;
9027 else if (class == FP_REGS)
9028 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
9029 else
9030 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
9031 }
9032
9033 static bool
9034 mips_valid_pointer_mode (enum machine_mode mode)
9035 {
9036 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9037 }
9038
9039 /* Target hook for vector_mode_supported_p. */
9040
9041 static bool
9042 mips_vector_mode_supported_p (enum machine_mode mode)
9043 {
9044 switch (mode)
9045 {
9046 case V2SFmode:
9047 return TARGET_PAIRED_SINGLE_FLOAT;
9048
9049 case V2HImode:
9050 case V4QImode:
9051 return TARGET_DSP;
9052
9053 default:
9054 return false;
9055 }
9056 }
9057 \f
9058 /* If we can access small data directly (using gp-relative relocation
9059 operators) return the small data pointer, otherwise return null.
9060
9061 For each mips16 function which refers to GP relative symbols, we
9062 use a pseudo register, initialized at the start of the function, to
9063 hold the $gp value. */
9064
9065 static rtx
9066 mips16_gp_pseudo_reg (void)
9067 {
9068 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
9069 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
9070
9071 /* Don't initialize the pseudo register if we are being called from
9072 the tree optimizers' cost-calculation routines. */
9073 if (!cfun->machine->initialized_mips16_gp_pseudo_p
9074 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
9075 {
9076 rtx insn, scan;
9077
9078 /* We want to initialize this to a value which gcc will believe
9079 is constant. */
9080 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
9081
9082 push_topmost_sequence ();
9083 /* We need to emit the initialization after the FUNCTION_BEG
9084 note, so that it will be integrated. */
9085 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
9086 if (NOTE_P (scan)
9087 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
9088 break;
9089 if (scan == NULL_RTX)
9090 scan = get_insns ();
9091 insn = emit_insn_after (insn, scan);
9092 pop_topmost_sequence ();
9093
9094 cfun->machine->initialized_mips16_gp_pseudo_p = true;
9095 }
9096
9097 return cfun->machine->mips16_gp_pseudo_rtx;
9098 }
9099
9100 /* Write out code to move floating point arguments in or out of
9101 general registers. Output the instructions to FILE. FP_CODE is
9102 the code describing which arguments are present (see the comment at
9103 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
9104 we are copying from the floating point registers. */
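/* Going by the loop below, FP_CODE packs two bits per floating-point
   argument, lowest bits first: 1 for an SFmode argument, 2 for DFmode.
   For example, an (SFmode, DFmode) argument list would be encoded as
   (2 << 2) | 1 == 9.  */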
9105
9106 static void
9107 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
9108 {
9109 const char *s;
9110 int gparg, fparg;
9111 unsigned int f;
9112 CUMULATIVE_ARGS cum;
9113
9114 /* This code only works for the original 32-bit ABI and the O64 ABI. */
9115 gcc_assert (TARGET_OLDABI);
9116
9117 if (from_fp_p)
9118 s = "mfc1";
9119 else
9120 s = "mtc1";
9121
9122 init_cumulative_args (&cum, NULL, NULL);
9123
9124 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9125 {
9126 enum machine_mode mode;
9127 struct mips_arg_info info;
9128
9129 if ((f & 3) == 1)
9130 mode = SFmode;
9131 else if ((f & 3) == 2)
9132 mode = DFmode;
9133 else
9134 gcc_unreachable ();
9135
9136 mips_arg_info (&cum, mode, NULL, true, &info);
9137 gparg = mips_arg_regno (&info, false);
9138 fparg = mips_arg_regno (&info, true);
9139
9140 if (mode == SFmode)
9141 fprintf (file, "\t%s\t%s,%s\n", s,
9142 reg_names[gparg], reg_names[fparg]);
9143 else if (TARGET_64BIT)
9144 fprintf (file, "\td%s\t%s,%s\n", s,
9145 reg_names[gparg], reg_names[fparg]);
9146 else if (ISA_HAS_MXHC1)
9147 /* -mips32r2 -mfp64 */
9148 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
9149 s,
9150 reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
9151 reg_names[fparg],
9152 from_fp_p ? "mfhc1" : "mthc1",
9153 reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
9154 reg_names[fparg]);
9155 else if (TARGET_BIG_ENDIAN)
9156 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
9157 reg_names[gparg], reg_names[fparg + 1], s,
9158 reg_names[gparg + 1], reg_names[fparg]);
9159 else
9160 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
9161 reg_names[gparg], reg_names[fparg], s,
9162 reg_names[gparg + 1], reg_names[fparg + 1]);
9163
9164 function_arg_advance (&cum, mode, NULL, true);
9165 }
9166 }
9167
9168 /* Build a mips16 function stub. This is used for functions which
9169 take arguments in the floating point registers. It is 32-bit code
9170 that moves the floating point args into the general registers, and
9171 then jumps to the 16-bit code. */
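/* As a rough sketch (assuming o32, where the first float argument is
   passed in $f12 by 32-bit callers and expected in $4 by mips16 code),
   the stub emitted below for "float foo (float)" looks something like:

	__fn_stub_foo:
		.set	noreorder
		mfc1	$4,$f12
		.set	noat
		la	$1,foo
		jr	$1
		.set	at
		nop
		.set	reorder

   placed in its own .mips16.fn.foo section.  */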
9172
9173 static void
9174 build_mips16_function_stub (FILE *file)
9175 {
9176 const char *fnname;
9177 char *secname, *stubname;
9178 tree stubid, stubdecl;
9179 int need_comma;
9180 unsigned int f;
9181
9182 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
9183 secname = (char *) alloca (strlen (fnname) + 20);
9184 sprintf (secname, ".mips16.fn.%s", fnname);
9185 stubname = (char *) alloca (strlen (fnname) + 20);
9186 sprintf (stubname, "__fn_stub_%s", fnname);
9187 stubid = get_identifier (stubname);
9188 stubdecl = build_decl (FUNCTION_DECL, stubid,
9189 build_function_type (void_type_node, NULL_TREE));
9190 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9191 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9192
9193 fprintf (file, "\t# Stub function for %s (", current_function_name ());
9194 need_comma = 0;
9195 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
9196 {
9197 fprintf (file, "%s%s",
9198 need_comma ? ", " : "",
9199 (f & 3) == 1 ? "float" : "double");
9200 need_comma = 1;
9201 }
9202 fprintf (file, ")\n");
9203
9204 fprintf (file, "\t.set\tnomips16\n");
9205 switch_to_section (function_section (stubdecl));
9206 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
9207
9208 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
9209 within a .ent, and we cannot emit another .ent. */
9210 if (!FUNCTION_NAME_ALREADY_DECLARED)
9211 {
9212 fputs ("\t.ent\t", file);
9213 assemble_name (file, stubname);
9214 fputs ("\n", file);
9215 }
9216
9217 assemble_name (file, stubname);
9218 fputs (":\n", file);
9219
9220 /* We don't want the assembler to insert any nops here. */
9221 fprintf (file, "\t.set\tnoreorder\n");
9222
9223 mips16_fp_args (file, current_function_args_info.fp_code, 1);
9224
9225 fprintf (asm_out_file, "\t.set\tnoat\n");
9226 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
9227 assemble_name (file, fnname);
9228 fprintf (file, "\n");
9229 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9230 fprintf (asm_out_file, "\t.set\tat\n");
9231
9232 /* Unfortunately, we can't fill the jump delay slot. We can't fill
9233 it with one of the mfc1 instructions, because the result is not
9234 available for one instruction, so if the very first instruction
9235 in the function refers to the register, it will see the wrong
9236 value. */
9237 fprintf (file, "\tnop\n");
9238
9239 fprintf (file, "\t.set\treorder\n");
9240
9241 if (!FUNCTION_NAME_ALREADY_DECLARED)
9242 {
9243 fputs ("\t.end\t", file);
9244 assemble_name (file, stubname);
9245 fputs ("\n", file);
9246 }
9247
9248 switch_to_section (function_section (current_function_decl));
9249 }
9250
9251 /* We keep a list of functions for which we have already built stubs
9252 in build_mips16_call_stub. */
9253
9254 struct mips16_stub
9255 {
9256 struct mips16_stub *next;
9257 char *name;
9258 int fpret;
9259 };
9260
9261 static struct mips16_stub *mips16_stubs;
9262
9263 /* Emit code to return a double value from a mips16 stub. GPREG is the
9264 first GP reg to use, FPREG is the first FP reg to use. */
9265
9266 static void
9267 mips16_fpret_double (int gpreg, int fpreg)
9268 {
9269 if (TARGET_64BIT)
9270 fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
9271 reg_names[gpreg], reg_names[fpreg]);
9272 else if (TARGET_FLOAT64)
9273 {
9274 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9275 reg_names[gpreg + WORDS_BIG_ENDIAN],
9276 reg_names[fpreg]);
9277 fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
9278 reg_names[gpreg + !WORDS_BIG_ENDIAN],
9279 reg_names[fpreg]);
9280 }
9281 else
9282 {
9283 if (TARGET_BIG_ENDIAN)
9284 {
9285 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9286 reg_names[gpreg + 0],
9287 reg_names[fpreg + 1]);
9288 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9289 reg_names[gpreg + 1],
9290 reg_names[fpreg + 0]);
9291 }
9292 else
9293 {
9294 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9295 reg_names[gpreg + 0],
9296 reg_names[fpreg + 0]);
9297 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9298 reg_names[gpreg + 1],
9299 reg_names[fpreg + 1]);
9300 }
9301 }
9302 }
9303
9304 /* Build a call stub for a mips16 call. A stub is needed if we are
9305 passing any floating point values which should go into the floating
9306 point registers. If we are, and the call turns out to be to a
9307 32-bit function, the stub will be used to move the values into the
9308 floating point registers before calling the 32-bit function. The
9309 linker will magically adjust the function call to either the 16-bit
9310 function or the 32-bit stub, depending upon where the function call
9311 is actually defined.
9312
9313 Similarly, we need a stub if the return value might come back in a
9314 floating point register.
9315
9316 RETVAL is the location of the return value, or null if this is
9317 a call rather than a call_value. FN is the address of the
9318 function and ARG_SIZE is the size of the arguments. FP_CODE
9319 is the code built by function_arg. This function returns a nonzero
9320 value if it builds the call instruction itself. */
9321
9322 int
9323 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
9324 {
9325 int fpret = 0;
9326 const char *fnname;
9327 char *secname, *stubname;
9328 struct mips16_stub *l;
9329 tree stubid, stubdecl;
9330 int need_comma;
9331 unsigned int f;
9332
9333 /* We don't need to do anything if we aren't in mips16 mode, or if
9334 we were invoked with the -msoft-float option. */
9335 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
9336 return 0;
9337
9338 /* Figure out whether the value might come back in a floating point
9339 register. */
9340 if (retval)
9341 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
9342
9343 /* We don't need to do anything if there were no floating point
9344 arguments and the value will not be returned in a floating point
9345 register. */
9346 if (fp_code == 0 && ! fpret)
9347 return 0;
9348
9349 /* We don't need to do anything if this is a call to a special
9350 mips16 support function. */
9351 if (GET_CODE (fn) == SYMBOL_REF
9352 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
9353 return 0;
9354
9355 /* This code will only work for the o32 and o64 ABIs. The other ABIs
9356 require more sophisticated support. */
9357 gcc_assert (TARGET_OLDABI);
9358
9359 /* If we're calling via a function pointer, then we must always call
9360 via a stub. There are magic stubs provided in libgcc.a for each
9361 of the required cases. Each of them expects the function address
9362 to arrive in register $2. */
9363
9364 if (GET_CODE (fn) != SYMBOL_REF)
9365 {
9366 char buf[30];
9367 tree id;
9368 rtx stub_fn, insn;
9369
9370 /* ??? If this code is modified to support other ABIs, we need
9371 to handle PARALLEL return values here. */
9372
9373 if (fpret)
9374 sprintf (buf, "__mips16_call_stub_%s_%d",
9375 mips16_call_stub_mode_suffix (GET_MODE (retval)),
9376 fp_code);
9377 else
9378 sprintf (buf, "__mips16_call_stub_%d",
9379 fp_code);
9380
9381 id = get_identifier (buf);
9382 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
9383
9384 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
9385
9386 if (retval == NULL_RTX)
9387 insn = gen_call_internal (stub_fn, arg_size);
9388 else
9389 insn = gen_call_value_internal (retval, stub_fn, arg_size);
9390 insn = emit_call_insn (insn);
9391
9392 /* Put the register usage information on the CALL. */
9393 CALL_INSN_FUNCTION_USAGE (insn) =
9394 gen_rtx_EXPR_LIST (VOIDmode,
9395 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
9396 CALL_INSN_FUNCTION_USAGE (insn));
9397
9398 /* If we are handling a floating point return value, we need to
9399 save $18 in the function prologue. Putting a note on the
9400 call will mean that df_regs_ever_live_p ($18) will be true if the
9401 call is not eliminated, and we can check that in the prologue
9402 code. */
9403 if (fpret)
9404 CALL_INSN_FUNCTION_USAGE (insn) =
9405 gen_rtx_EXPR_LIST (VOIDmode,
9406 gen_rtx_USE (VOIDmode,
9407 gen_rtx_REG (word_mode, 18)),
9408 CALL_INSN_FUNCTION_USAGE (insn));
9409
9410 /* Return 1 to tell the caller that we've generated the call
9411 insn. */
9412 return 1;
9413 }
9414
9415 /* We know the function we are going to call. If we have already
9416 built a stub, we don't need to do anything further. */
9417
9418 fnname = XSTR (fn, 0);
9419 for (l = mips16_stubs; l != NULL; l = l->next)
9420 if (strcmp (l->name, fnname) == 0)
9421 break;
9422
9423 if (l == NULL)
9424 {
9425 /* Build a special purpose stub. When the linker sees a
9426 function call in mips16 code, it will check where the target
9427 is defined. If the target is 32-bit code, the linker will
9428 search for the section defined here. It can tell which
9429 symbol this section is associated with by looking at the
9430 relocation information (the name is unreliable, since this
9431 might be a static function). If such a section is found, the
9432 linker will redirect the call to the start of the magic
9433 section.
9434
9435 If the function does not return a floating point value, the
9436 special stub section is named
9437 .mips16.call.FNNAME
9438
9439 If the function does return a floating point value, the stub
9440 section is named
9441 .mips16.call.fp.FNNAME
9442 */
9443
9444 secname = (char *) alloca (strlen (fnname) + 40);
9445 sprintf (secname, ".mips16.call.%s%s",
9446 fpret ? "fp." : "",
9447 fnname);
9448 stubname = (char *) alloca (strlen (fnname) + 20);
9449 sprintf (stubname, "__call_stub_%s%s",
9450 fpret ? "fp_" : "",
9451 fnname);
9452 stubid = get_identifier (stubname);
9453 stubdecl = build_decl (FUNCTION_DECL, stubid,
9454 build_function_type (void_type_node, NULL_TREE));
9455 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9456 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9457
9458 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
9459 (fpret
9460 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
9461 : ""),
9462 fnname);
9463 need_comma = 0;
9464 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9465 {
9466 fprintf (asm_out_file, "%s%s",
9467 need_comma ? ", " : "",
9468 (f & 3) == 1 ? "float" : "double");
9469 need_comma = 1;
9470 }
9471 fprintf (asm_out_file, ")\n");
9472
9473 fprintf (asm_out_file, "\t.set\tnomips16\n");
9474 assemble_start_function (stubdecl, stubname);
9475
9476 if (!FUNCTION_NAME_ALREADY_DECLARED)
9477 {
9478 fputs ("\t.ent\t", asm_out_file);
9479 assemble_name (asm_out_file, stubname);
9480 fputs ("\n", asm_out_file);
9481
9482 assemble_name (asm_out_file, stubname);
9483 fputs (":\n", asm_out_file);
9484 }
9485
9486 /* We build the stub code by hand. That's the only way we can
9487 do it, since we can't generate 32-bit code during a 16-bit
9488 compilation. */
9489
9490 /* We don't want the assembler to insert any nops here. */
9491 fprintf (asm_out_file, "\t.set\tnoreorder\n");
9492
9493 mips16_fp_args (asm_out_file, fp_code, 0);
9494
9495 if (! fpret)
9496 {
9497 fprintf (asm_out_file, "\t.set\tnoat\n");
9498 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
9499 fnname);
9500 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9501 fprintf (asm_out_file, "\t.set\tat\n");
9502 /* Unfortunately, we can't fill the jump delay slot. We
9503 can't fill it with one of the mtc1 instructions, because the
9504 result is not available for one instruction, so if the
9505 very first instruction in the function refers to the
9506 register, it will see the wrong value. */
9507 fprintf (asm_out_file, "\tnop\n");
9508 }
9509 else
9510 {
9511 fprintf (asm_out_file, "\tmove\t%s,%s\n",
9512 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
9513 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
9514 /* As above, we can't fill the delay slot. */
9515 fprintf (asm_out_file, "\tnop\n");
9516 if (GET_MODE (retval) == SFmode)
9517 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9518 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
9519 else if (GET_MODE (retval) == SCmode)
9520 {
9521 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9522 reg_names[GP_REG_FIRST + 2],
9523 reg_names[FP_REG_FIRST + 0]);
9524 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9525 reg_names[GP_REG_FIRST + 3],
9526 reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
9527 }
9528 else if (GET_MODE (retval) == DFmode
9529 || GET_MODE (retval) == V2SFmode)
9530 {
9531 mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
9532 }
9533 else if (GET_MODE (retval) == DCmode)
9534 {
9535 mips16_fpret_double (GP_REG_FIRST + 2,
9536 FP_REG_FIRST + 0);
9537 mips16_fpret_double (GP_REG_FIRST + 4,
9538 FP_REG_FIRST + MAX_FPRS_PER_FMT);
9539 }
9540 else
9541 {
9542 if (TARGET_BIG_ENDIAN)
9543 {
9544 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9545 reg_names[GP_REG_FIRST + 2],
9546 reg_names[FP_REG_FIRST + 1]);
9547 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9548 reg_names[GP_REG_FIRST + 3],
9549 reg_names[FP_REG_FIRST + 0]);
9550 }
9551 else
9552 {
9553 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9554 reg_names[GP_REG_FIRST + 2],
9555 reg_names[FP_REG_FIRST + 0]);
9556 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9557 reg_names[GP_REG_FIRST + 3],
9558 reg_names[FP_REG_FIRST + 1]);
9559 }
9560 }
9561 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
9562 /* As above, we can't fill the delay slot. */
9563 fprintf (asm_out_file, "\tnop\n");
9564 }
9565
9566 fprintf (asm_out_file, "\t.set\treorder\n");
9567
9568 #ifdef ASM_DECLARE_FUNCTION_SIZE
9569 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
9570 #endif
9571
9572 if (!FUNCTION_NAME_ALREADY_DECLARED)
9573 {
9574 fputs ("\t.end\t", asm_out_file);
9575 assemble_name (asm_out_file, stubname);
9576 fputs ("\n", asm_out_file);
9577 }
9578
9579 /* Record this stub. */
9580 l = (struct mips16_stub *) xmalloc (sizeof *l);
9581 l->name = xstrdup (fnname);
9582 l->fpret = fpret;
9583 l->next = mips16_stubs;
9584 mips16_stubs = l;
9585 }
9586
9587 /* If we expect a floating point return value, but we've built a
9588 stub which does not expect one, then we're in trouble. We can't
9589 use the existing stub, because it won't handle the floating point
9590 value. We can't build a new stub, because the linker won't know
9591 which stub to use for the various calls in this object file.
9592 Fortunately, this case is illegal, since it means that a function
9593 was declared in two different ways in a single compilation. */
9594 if (fpret && ! l->fpret)
9595 error ("cannot handle inconsistent calls to %qs", fnname);
9596
9597 /* If we are calling a stub which handles a floating point return
9598 value, we need to arrange to save $18 in the prologue. We do
9599 this by marking the function call as using the register. The
9600 prologue will later see that it is used, and emit code to save
9601 it. */
9602
9603 if (l->fpret)
9604 {
9605 rtx insn;
9606
9607 if (retval == NULL_RTX)
9608 insn = gen_call_internal (fn, arg_size);
9609 else
9610 insn = gen_call_value_internal (retval, fn, arg_size);
9611 insn = emit_call_insn (insn);
9612
9613 CALL_INSN_FUNCTION_USAGE (insn) =
9614 gen_rtx_EXPR_LIST (VOIDmode,
9615 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
9616 CALL_INSN_FUNCTION_USAGE (insn));
9617
9618 /* Return 1 to tell the caller that we've generated the call
9619 insn. */
9620 return 1;
9621 }
9622
9623 /* Return 0 to let the caller generate the call insn. */
9624 return 0;
9625 }
9626
9627 /* An entry in the mips16 constant pool. VALUE is the pool constant,
9628 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
9629
9630 struct mips16_constant {
9631 struct mips16_constant *next;
9632 rtx value;
9633 rtx label;
9634 enum machine_mode mode;
9635 };
9636
9637 /* Information about an incomplete mips16 constant pool. FIRST is the
9638 first constant, HIGHEST_ADDRESS is the highest address that the first
9639 byte of the pool can have, and INSN_ADDRESS is the current instruction
9640 address. */
9641
9642 struct mips16_constant_pool {
9643 struct mips16_constant *first;
9644 int highest_address;
9645 int insn_address;
9646 };
9647
9648 /* Add constant VALUE to POOL and return its label. MODE is the
9649 value's mode (used for CONST_INTs, etc.). */
9650
9651 static rtx
9652 add_constant (struct mips16_constant_pool *pool,
9653 rtx value, enum machine_mode mode)
9654 {
9655 struct mips16_constant **p, *c;
9656 bool first_of_size_p;
9657
9658 /* See whether the constant is already in the pool. If so, return the
9659 existing label, otherwise leave P pointing to the place where the
9660 constant should be added.
9661
9662 Keep the pool sorted in increasing order of mode size so that we can
9663 reduce the number of alignments needed. */
9664 first_of_size_p = true;
9665 for (p = &pool->first; *p != 0; p = &(*p)->next)
9666 {
9667 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
9668 return (*p)->label;
9669 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
9670 break;
9671 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
9672 first_of_size_p = false;
9673 }
9674
9675 /* In the worst case, the constant needed by the earliest instruction
9676 will end up at the end of the pool. The entire pool must then be
9677 accessible from that instruction.
9678
9679 When adding the first constant, set the pool's highest address to
9680 the address of the first out-of-range byte. Adjust this address
9681 downwards each time a new constant is added. */
9682 if (pool->first == 0)
9683 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
9684 is the address of the instruction with the lowest two bits clear.
9685 The base PC value for ld has the lowest three bits clear. Assume
9686 the worst case here. */
9687 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
9688 pool->highest_address -= GET_MODE_SIZE (mode);
9689 if (first_of_size_p)
9690 /* Take into account the worst possible padding due to alignment. */
9691 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
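/* For instance, assuming UNITS_PER_WORD == 4 and a first SImode constant
   added when insn_address == 0x100: highest_address starts at
   0x100 - 2 + 0x8000 == 0x80fe, drops to 0x80fa for the constant itself,
   and to 0x80f7 for the worst-case alignment padding.  */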
9692
9693 /* Create a new entry. */
9694 c = (struct mips16_constant *) xmalloc (sizeof *c);
9695 c->value = value;
9696 c->mode = mode;
9697 c->label = gen_label_rtx ();
9698 c->next = *p;
9699 *p = c;
9700
9701 return c->label;
9702 }
9703
9704 /* Output constant VALUE after instruction INSN and return the last
9705 instruction emitted. MODE is the mode of the constant. */
9706
9707 static rtx
9708 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
9709 {
9710 switch (GET_MODE_CLASS (mode))
9711 {
9712 case MODE_INT:
9713 {
9714 rtx size = GEN_INT (GET_MODE_SIZE (mode));
9715 return emit_insn_after (gen_consttable_int (value, size), insn);
9716 }
9717
9718 case MODE_FLOAT:
9719 return emit_insn_after (gen_consttable_float (value), insn);
9720
9721 case MODE_VECTOR_FLOAT:
9722 case MODE_VECTOR_INT:
9723 {
9724 int i;
9725 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
9726 insn = dump_constants_1 (GET_MODE_INNER (mode),
9727 CONST_VECTOR_ELT (value, i), insn);
9728 return insn;
9729 }
9730
9731 default:
9732 gcc_unreachable ();
9733 }
9734 }
9735
9736
9737 /* Dump out the constants in CONSTANTS after INSN. */
9738
9739 static void
9740 dump_constants (struct mips16_constant *constants, rtx insn)
9741 {
9742 struct mips16_constant *c, *next;
9743 int align;
9744
9745 align = 0;
9746 for (c = constants; c != NULL; c = next)
9747 {
9748 /* If necessary, increase the alignment of PC. */
9749 if (align < GET_MODE_SIZE (c->mode))
9750 {
9751 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
9752 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
9753 }
9754 align = GET_MODE_SIZE (c->mode);
9755
9756 insn = emit_label_after (c->label, insn);
9757 insn = dump_constants_1 (c->mode, c->value, insn);
9758
9759 next = c->next;
9760 free (c);
9761 }
9762
9763 emit_barrier_after (insn);
9764 }
9765
9766 /* Return the length of instruction INSN. */
9767
9768 static int
9769 mips16_insn_length (rtx insn)
9770 {
9771 if (JUMP_P (insn))
9772 {
9773 rtx body = PATTERN (insn);
9774 if (GET_CODE (body) == ADDR_VEC)
9775 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
9776 if (GET_CODE (body) == ADDR_DIFF_VEC)
9777 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
9778 }
9779 return get_attr_length (insn);
9780 }
9781
9782 /* Rewrite *X so that constant pool references refer to the constant's
9783 label instead. DATA points to the constant pool structure. */
9784
9785 static int
9786 mips16_rewrite_pool_refs (rtx *x, void *data)
9787 {
9788 struct mips16_constant_pool *pool = data;
9789 rtx base, offset, label;
9790
9791 if (MEM_P (*x))
9792 x = &XEXP (*x, 0);
9793 else if (!TARGET_MIPS16_TEXT_LOADS)
9794 return 0;
9795
9796 split_const (*x, &base, &offset);
9797 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
9798 {
9799 label = add_constant (pool, get_pool_constant (base),
9800 get_pool_mode (base));
9801 base = gen_rtx_LABEL_REF (Pmode, label);
9802 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
9803 return -1;
9804 }
9805 return GET_CODE (*x) == CONST ? -1 : 0;
9806 }
9807
9808 /* Build MIPS16 constant pools. */
9809
9810 static void
9811 mips16_lay_out_constants (void)
9812 {
9813 struct mips16_constant_pool pool;
9814 rtx insn, barrier;
9815
9816 if (!TARGET_MIPS16_PCREL_LOADS)
9817 return;
9818
9819 barrier = 0;
9820 memset (&pool, 0, sizeof (pool));
9821 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9822 {
9823 /* Rewrite constant pool references in INSN. */
9824 if (INSN_P (insn))
9825 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
9826
9827 pool.insn_address += mips16_insn_length (insn);
9828
9829 if (pool.first != NULL)
9830 {
9831 /* If there are no natural barriers between the first user of
9832 the pool and the highest acceptable address, we'll need to
9833 create a new instruction to jump around the constant pool.
9834 In the worst case, this instruction will be 4 bytes long.
9835
9836 If it's too late to do this transformation after INSN,
9837 do it immediately before INSN. */
9838 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
9839 {
9840 rtx label, jump;
9841
9842 label = gen_label_rtx ();
9843
9844 jump = emit_jump_insn_before (gen_jump (label), insn);
9845 JUMP_LABEL (jump) = label;
9846 LABEL_NUSES (label) = 1;
9847 barrier = emit_barrier_after (jump);
9848
9849 emit_label_after (label, barrier);
9850 pool.insn_address += 4;
9851 }
9852
9853 /* See whether the constant pool is now out of range of the first
9854 user. If so, output the constants after the previous barrier.
9855 Note that any instructions between BARRIER and INSN (inclusive)
9856 will use negative offsets to refer to the pool. */
9857 if (pool.insn_address > pool.highest_address)
9858 {
9859 dump_constants (pool.first, barrier);
9860 pool.first = NULL;
9861 barrier = 0;
9862 }
9863 else if (BARRIER_P (insn))
9864 barrier = insn;
9865 }
9866 }
9867 dump_constants (pool.first, get_last_insn ());
9868 }
9869 \f
9870 /* A temporary variable used by for_each_rtx callbacks, etc. */
9871 static rtx mips_sim_insn;
9872
9873 /* A structure representing the state of the processor pipeline.
9874 Used by the mips_sim_* family of functions. */
9875 struct mips_sim {
9876 /* The maximum number of instructions that can be issued in a cycle.
9877 (Caches mips_issue_rate.) */
9878 unsigned int issue_rate;
9879
9880 /* The current simulation time. */
9881 unsigned int time;
9882
9883 /* How many more instructions can be issued in the current cycle. */
9884 unsigned int insns_left;
9885
9886 /* LAST_SET[X].INSN is the last instruction to set register X.
9887 LAST_SET[X].TIME is the time at which that instruction was issued.
9888 INSN is null if no instruction has yet set register X. */
9889 struct {
9890 rtx insn;
9891 unsigned int time;
9892 } last_set[FIRST_PSEUDO_REGISTER];
9893
9894 /* The pipeline's current DFA state. */
9895 state_t dfa_state;
9896 };
9897
9898 /* Reset STATE to the initial simulation state. */
9899
9900 static void
9901 mips_sim_reset (struct mips_sim *state)
9902 {
9903 state->time = 0;
9904 state->insns_left = state->issue_rate;
9905 memset (&state->last_set, 0, sizeof (state->last_set));
9906 state_reset (state->dfa_state);
9907 }
9908
9909 /* Initialize STATE before its first use. DFA_STATE points to an
9910 allocated but uninitialized DFA state. */
9911
9912 static void
9913 mips_sim_init (struct mips_sim *state, state_t dfa_state)
9914 {
9915 state->issue_rate = mips_issue_rate ();
9916 state->dfa_state = dfa_state;
9917 mips_sim_reset (state);
9918 }
9919
9920 /* Advance STATE by one clock cycle. */
9921
9922 static void
9923 mips_sim_next_cycle (struct mips_sim *state)
9924 {
9925 state->time++;
9926 state->insns_left = state->issue_rate;
9927 state_transition (state->dfa_state, 0);
9928 }
9929
9930 /* Advance simulation state STATE until instruction INSN can read
9931 register REG. */
9932
9933 static void
9934 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
9935 {
9936 unsigned int i;
9937
9938 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
9939 if (state->last_set[REGNO (reg) + i].insn != 0)
9940 {
9941 unsigned int t;
9942
9943 t = state->last_set[REGNO (reg) + i].time;
9944 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
9945 while (state->time < t)
9946 mips_sim_next_cycle (state);
9947 }
9948 }
9949
9950 /* A for_each_rtx callback. If *X is a register, advance simulation state
9951 DATA until mips_sim_insn can read the register's value. */
9952
9953 static int
9954 mips_sim_wait_regs_2 (rtx *x, void *data)
9955 {
9956 if (REG_P (*x))
9957 mips_sim_wait_reg (data, mips_sim_insn, *x);
9958 return 0;
9959 }
9960
9961 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
9962
9963 static void
9964 mips_sim_wait_regs_1 (rtx *x, void *data)
9965 {
9966 for_each_rtx (x, mips_sim_wait_regs_2, data);
9967 }
9968
9969 /* Advance simulation state STATE until all of INSN's register
9970 dependencies are satisfied. */
9971
9972 static void
9973 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
9974 {
9975 mips_sim_insn = insn;
9976 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
9977 }
9978
9979 /* Advance simulation state STATE until the units required by
9980 instruction INSN are available. */
9981
9982 static void
9983 mips_sim_wait_units (struct mips_sim *state, rtx insn)
9984 {
9985 state_t tmp_state;
9986
9987 tmp_state = alloca (state_size ());
9988 while (state->insns_left == 0
9989 || (memcpy (tmp_state, state->dfa_state, state_size ()),
9990 state_transition (tmp_state, insn) >= 0))
9991 mips_sim_next_cycle (state);
9992 }
9993
9994 /* Advance simulation state STATE until INSN is ready to issue. */
9995
9996 static void
9997 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
9998 {
9999 mips_sim_wait_regs (state, insn);
10000 mips_sim_wait_units (state, insn);
10001 }
10002
10003 /* mips_sim_insn has just set X. Update the LAST_SET array
10004 in simulation state DATA. */
10005
10006 static void
10007 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
10008 {
10009 struct mips_sim *state;
10010 unsigned int i;
10011
10012 state = data;
10013 if (REG_P (x))
10014 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
10015 {
10016 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
10017 state->last_set[REGNO (x) + i].time = state->time;
10018 }
10019 }
10020
10021 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
10022 can issue immediately (i.e., that mips_sim_wait_insn has already
10023 been called). */
10024
10025 static void
10026 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
10027 {
10028 state_transition (state->dfa_state, insn);
10029 state->insns_left--;
10030
10031 mips_sim_insn = insn;
10032 note_stores (PATTERN (insn), mips_sim_record_set, state);
10033 }
10034
10035 /* Simulate issuing a NOP in state STATE. */
10036
10037 static void
10038 mips_sim_issue_nop (struct mips_sim *state)
10039 {
10040 if (state->insns_left == 0)
10041 mips_sim_next_cycle (state);
10042 state->insns_left--;
10043 }
10044
10045 /* Update simulation state STATE so that it's ready to accept the instruction
10046 after INSN. INSN should be part of the main rtl chain, not a member of a
10047 SEQUENCE. */
10048
10049 static void
10050 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
10051 {
10052 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
10053 if (JUMP_P (insn))
10054 mips_sim_issue_nop (state);
10055
10056 switch (GET_CODE (SEQ_BEGIN (insn)))
10057 {
10058 case CODE_LABEL:
10059 case CALL_INSN:
10060 /* We can't predict the processor state after a call or label. */
10061 mips_sim_reset (state);
10062 break;
10063
10064 case JUMP_INSN:
10065 /* The delay slots of branch likely instructions are only executed
10066 when the branch is taken. Therefore, if the caller has simulated
10067 the delay slot instruction, STATE does not really reflect the state
10068 of the pipeline for the instruction after the delay slot. Also,
10069 branch likely instructions tend to incur a penalty when not taken,
10070 so there will probably be an extra delay between the branch and
10071 the instruction after the delay slot. */
10072 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
10073 mips_sim_reset (state);
10074 break;
10075
10076 default:
10077 break;
10078 }
10079 }
10080 \f
10081 /* The VR4130 pipeline issues aligned pairs of instructions together,
10082 but it stalls the second instruction if it depends on the first.
10083 In order to cut down the amount of logic required, this dependence
10084 check is not based on a full instruction decode. Instead, any non-SPECIAL
10085 instruction is assumed to modify the register specified by bits 20-16
10086 (which is usually the "rt" field).
10087
10088 In beq, beql, bne and bnel instructions, the rt field is actually an
10089 input, so we can end up with a false dependence between the branch
10090 and its delay slot. If this situation occurs in instruction INSN,
10091 try to avoid it by swapping rs and rt. */
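/* For example, if the branch is "beq $4,$5,label" and the delay-slot
   instruction reads $5 but not $4, the comparison operands are swapped
   to give "beq $5,$4,label", which removes the false rt dependence
   (the comparison itself is symmetric, so the swap is safe).  */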
10092
10093 static void
10094 vr4130_avoid_branch_rt_conflict (rtx insn)
10095 {
10096 rtx first, second;
10097
10098 first = SEQ_BEGIN (insn);
10099 second = SEQ_END (insn);
10100 if (JUMP_P (first)
10101 && NONJUMP_INSN_P (second)
10102 && GET_CODE (PATTERN (first)) == SET
10103 && GET_CODE (SET_DEST (PATTERN (first))) == PC
10104 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
10105 {
10106 /* Check for the right kind of condition. */
10107 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
10108 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
10109 && REG_P (XEXP (cond, 0))
10110 && REG_P (XEXP (cond, 1))
10111 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
10112 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
10113 {
10114 /* SECOND mentions the rt register but not the rs register. */
10115 rtx tmp = XEXP (cond, 0);
10116 XEXP (cond, 0) = XEXP (cond, 1);
10117 XEXP (cond, 1) = tmp;
10118 }
10119 }
10120 }
10121
10122 /* Implement -mvr4130-align. Go through each basic block and simulate the
10123 processor pipeline. If we find that a pair of instructions could execute
10124 in parallel, and the first of those instructions is not 8-byte aligned,
10125 insert a nop to make it aligned. */
10126
10127 static void
10128 vr4130_align_insns (void)
10129 {
10130 struct mips_sim state;
10131 rtx insn, subinsn, last, last2, next;
10132 bool aligned_p;
10133
10134 dfa_start ();
10135
10136 /* LAST is the last instruction before INSN to have a nonzero length.
10137 LAST2 is the last such instruction before LAST. */
10138 last = 0;
10139 last2 = 0;
10140
10141 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
10142 aligned_p = true;
10143
10144 mips_sim_init (&state, alloca (state_size ()));
10145 for (insn = get_insns (); insn != 0; insn = next)
10146 {
10147 unsigned int length;
10148
10149 next = NEXT_INSN (insn);
10150
10151 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
10152 This isn't really related to the alignment pass, but we do it on
10153 the fly to avoid a separate instruction walk. */
10154 vr4130_avoid_branch_rt_conflict (insn);
10155
10156 if (USEFUL_INSN_P (insn))
10157 FOR_EACH_SUBINSN (subinsn, insn)
10158 {
10159 mips_sim_wait_insn (&state, subinsn);
10160
10161 /* If we want this instruction to issue in parallel with the
10162 previous one, make sure that the previous instruction is
10163 aligned. There are several reasons why this isn't worthwhile
10164 when the second instruction is a call:
10165
10166 - Calls are less likely to be performance critical,
10167 - There's a good chance that the delay slot can execute
10168 in parallel with the call.
10169 - The return address would then be unaligned.
10170
10171 In general, if we're going to insert a nop between instructions
10172 X and Y, it's better to insert it immediately after X. That
10173 way, if the nop makes Y aligned, it will also align any labels
10174 between X and Y. */
10175 if (state.insns_left != state.issue_rate
10176 && !CALL_P (subinsn))
10177 {
10178 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
10179 {
10180 /* SUBINSN is the first instruction in INSN and INSN is
10181 aligned. We want to align the previous instruction
10182 instead, so insert a nop between LAST2 and LAST.
10183
10184 Note that LAST could be either a single instruction
10185 or a branch with a delay slot. In the latter case,
10186 LAST, like INSN, is already aligned, but the delay
10187 slot must have some extra delay that stops it from
10188 issuing at the same time as the branch. We therefore
10189 insert a nop before the branch in order to align its
10190 delay slot. */
10191 emit_insn_after (gen_nop (), last2);
10192 aligned_p = false;
10193 }
10194 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
10195 {
10196 /* SUBINSN is the delay slot of INSN, but INSN is
10197 currently unaligned. Insert a nop between
10198 LAST and INSN to align it. */
10199 emit_insn_after (gen_nop (), last);
10200 aligned_p = true;
10201 }
10202 }
10203 mips_sim_issue_insn (&state, subinsn);
10204 }
10205 mips_sim_finish_insn (&state, insn);
10206
10207 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
10208 length = get_attr_length (insn);
10209 if (length > 0)
10210 {
10211 /* If the instruction is an asm statement or multi-instruction
10212 mips.md pattern, the length is only an estimate. Insert an
10213 8-byte alignment after it so that the following instructions
10214 can be handled correctly. */
10215 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
10216 && (recog_memoized (insn) < 0 || length >= 8))
10217 {
10218 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
10219 next = NEXT_INSN (next);
10220 mips_sim_next_cycle (&state);
10221 aligned_p = true;
10222 }
10223 else if (length & 4)
10224 aligned_p = !aligned_p;
10225 last2 = last;
10226 last = insn;
10227 }
10228
10229 /* See whether INSN is an aligned label. */
10230 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
10231 aligned_p = true;
10232 }
10233 dfa_finish ();
10234 }
10235 \f
10236 /* Subroutine of mips_reorg. If there is a hazard between INSN
10237 and a previous instruction, avoid it by inserting nops after
10238 instruction AFTER.
10239
10240 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
10241 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
10242 before using the value of that register. *HILO_DELAY counts the
10243 number of instructions since the last hilo hazard (that is,
10244 the number of instructions since the last mflo or mfhi).
10245
10246 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
10247 for the next instruction.
10248
10249 LO_REG is an rtx for the LO register, used in dependence checking. */
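/* For example, if the previous instruction was an mfhi or mflo
   (*HILO_DELAY == 0) and INSN writes LO, two hazard nops are inserted
   after AFTER; with one unrelated instruction in between
   (*HILO_DELAY == 1), a single nop is enough.  */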
10250
10251 static void
10252 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
10253 rtx *delayed_reg, rtx lo_reg)
10254 {
10255 rtx pattern, set;
10256 int nops, ninsns;
10257
10258 if (!INSN_P (insn))
10259 return;
10260
10261 pattern = PATTERN (insn);
10262
10263 /* Do not put the whole function in .set noreorder if it contains
10264 an asm statement. We don't know whether there will be hazards
10265 between the asm statement and the gcc-generated code. */
10266 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
10267 cfun->machine->all_noreorder_p = false;
10268
10269 /* Ignore zero-length instructions (barriers and the like). */
10270 ninsns = get_attr_length (insn) / 4;
10271 if (ninsns == 0)
10272 return;
10273
10274 /* Work out how many nops are needed. Note that we only care about
10275 registers that are explicitly mentioned in the instruction's pattern.
10276 It doesn't matter that calls use the argument registers or that they
10277 clobber hi and lo. */
10278 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
10279 nops = 2 - *hilo_delay;
10280 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
10281 nops = 1;
10282 else
10283 nops = 0;
10284
10285 /* Insert the nops between this instruction and the previous one.
10286 Each new nop takes us further from the last hilo hazard. */
10287 *hilo_delay += nops;
10288 while (nops-- > 0)
10289 emit_insn_after (gen_hazard_nop (), after);
10290
10291 /* Set up the state for the next instruction. */
10292 *hilo_delay += ninsns;
10293 *delayed_reg = 0;
10294 if (INSN_CODE (insn) >= 0)
10295 switch (get_attr_hazard (insn))
10296 {
10297 case HAZARD_NONE:
10298 break;
10299
10300 case HAZARD_HILO:
10301 *hilo_delay = 0;
10302 break;
10303
10304 case HAZARD_DELAY:
10305 set = single_set (insn);
10306 gcc_assert (set != 0);
10307 *delayed_reg = SET_DEST (set);
10308 break;
10309 }
10310 }
10311
10312
10313 /* Go through the instruction stream and insert nops where necessary.
10314 See if the whole function can then be put into .set noreorder &
10315 .set nomacro. */
10316
10317 static void
10318 mips_avoid_hazards (void)
10319 {
10320 rtx insn, last_insn, lo_reg, delayed_reg;
10321 int hilo_delay, i;
10322
10323 /* Force all instructions to be split into their final form. */
10324 split_all_insns_noflow ();
10325
10326 /* Recalculate instruction lengths without taking nops into account. */
10327 cfun->machine->ignore_hazard_length_p = true;
10328 shorten_branches (get_insns ());
10329
10330 cfun->machine->all_noreorder_p = true;
10331
10332 /* Profiled functions can't be all noreorder because the profiler
10333 support uses assembler macros. */
10334 if (current_function_profile)
10335 cfun->machine->all_noreorder_p = false;
10336
10337 /* Code compiled with -mfix-vr4120 can't be all noreorder because
10338 we rely on the assembler to work around some errata. */
10339 if (TARGET_FIX_VR4120)
10340 cfun->machine->all_noreorder_p = false;
10341
10342 /* The same is true for -mfix-vr4130 if we might generate mflo or
10343 mfhi instructions. Note that we avoid using mflo and mfhi if
10344 the VR4130 macc and dmacc instructions are available instead;
10345 see the *mfhilo_{si,di}_macc patterns. */
10346 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
10347 cfun->machine->all_noreorder_p = false;
10348
10349 last_insn = 0;
10350 hilo_delay = 2;
10351 delayed_reg = 0;
10352 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
10353
10354 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
10355 if (INSN_P (insn))
10356 {
10357 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10358 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10359 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
10360 &hilo_delay, &delayed_reg, lo_reg);
10361 else
10362 mips_avoid_hazard (last_insn, insn, &hilo_delay,
10363 &delayed_reg, lo_reg);
10364
10365 last_insn = insn;
10366 }
10367 }
10368
10369
10370 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
10371
10372 static void
10373 mips_reorg (void)
10374 {
10375 mips16_lay_out_constants ();
10376 if (TARGET_EXPLICIT_RELOCS)
10377 {
10378 if (mips_flag_delayed_branch)
10379 dbr_schedule (get_insns ());
10380 mips_avoid_hazards ();
10381 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
10382 vr4130_align_insns ();
10383 }
10384 }
10385
10386 /* This function does three things:
10387
10388 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
10389 - Register the mips16 hardware floating point stubs.
10390 - Register the gofast functions if selected using --enable-gofast. */
10391
10392 #include "config/gofast.h"
10393
10394 static void
10395 mips_init_libfuncs (void)
10396 {
10397 if (TARGET_FIX_VR4120)
10398 {
10399 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
10400 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
10401 }
10402
10403 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
10404 {
10405 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
10406 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
10407 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
10408 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
10409
10410 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
10411 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
10412 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
10413 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
10414 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
10415 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
10416 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
10417
10418 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
10419 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
10420 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
10421
10422 if (TARGET_DOUBLE_FLOAT)
10423 {
10424 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
10425 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
10426 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
10427 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
10428
10429 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
10430 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
10431 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
10432 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10433 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10434 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10435 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
10436
10437 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
10438 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
10439
10440 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
10441 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
10442 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__mips16_floatunsidf");
10443 }
10444 }
10445 else
10446 gofast_maybe_init_libfuncs ();
10447 }
10448
10449 /* Return a number assessing the cost of moving a register in class
10450 FROM to class TO. The classes are expressed using the enumeration
10451 values such as `GENERAL_REGS'. A value of 2 is the default; other
10452 values are interpreted relative to that.
10453
10454 It is not required that the cost always equal 2 when FROM is the
10455 same as TO; on some machines it is expensive to move between
10456 registers if they are not general registers.
10457
10458 If reload sees an insn consisting of a single `set' between two
10459 hard registers, and if `REGISTER_MOVE_COST' applied to their
10460 classes returns a value of 2, reload does not check to ensure that
10461 the constraints of the insn are met. Setting a cost of other than
10462 2 will allow reload to verify that the constraints are met. You
10463 should do this if the `movM' pattern's constraints do not allow
10464 such copying.
10465
10466 ??? For TARGET_MIPS16 we make the cost of moving from HI/LO into
10467 general registers the same as the cost of moving general registers
10468 to HI/LO, in order to prevent allocating a pseudo to HI/LO. This
10469 might hurt optimizations, though it isn't clear whether it is wise,
10470 and it might not work in all cases. We
10471 could solve the DImode LO reg problem by using a multiply, just
10472 like reload_{in,out}si. We could solve the SImode/HImode HI reg
10473 problem by using divide instructions. divu puts the remainder in
10474 the HI reg, so doing a divide by -1 will move the value in the HI
10475 reg for all values except -1. We could handle that case by using a
10476 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
10477 a compare/branch to test the input value to see which instruction
10478 we need to use. This gets pretty messy, but it is feasible. */
10479
10480 int
10481 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10482 enum reg_class to, enum reg_class from)
10483 {
10484 if (from == M16_REGS && reg_class_subset_p (to, GENERAL_REGS))
10485 return 2;
10486 else if (from == M16_NA_REGS && reg_class_subset_p (to, GENERAL_REGS))
10487 return 2;
10488 else if (reg_class_subset_p (from, GENERAL_REGS))
10489 {
10490 if (to == M16_REGS)
10491 return 2;
10492 else if (to == M16_NA_REGS)
10493 return 2;
10494 else if (reg_class_subset_p (to, GENERAL_REGS))
10495 {
10496 if (TARGET_MIPS16)
10497 return 4;
10498 else
10499 return 2;
10500 }
10501 else if (to == FP_REGS)
10502 return 4;
10503 else if (reg_class_subset_p (to, ACC_REGS))
10504 {
10505 if (TARGET_MIPS16)
10506 return 12;
10507 else
10508 return 6;
10509 }
10510 else if (reg_class_subset_p (to, ALL_COP_REGS))
10511 {
10512 return 5;
10513 }
10514 }
10515 else if (from == FP_REGS)
10516 {
10517 if (reg_class_subset_p (to, GENERAL_REGS))
10518 return 4;
10519 else if (to == FP_REGS)
10520 return 2;
10521 else if (to == ST_REGS)
10522 return 8;
10523 }
10524 else if (reg_class_subset_p (from, ACC_REGS))
10525 {
10526 if (reg_class_subset_p (to, GENERAL_REGS))
10527 {
10528 if (TARGET_MIPS16)
10529 return 12;
10530 else
10531 return 6;
10532 }
10533 }
10534 else if (from == ST_REGS && reg_class_subset_p (to, GENERAL_REGS))
10535 return 4;
10536 else if (reg_class_subset_p (from, ALL_COP_REGS))
10537 {
10538 return 5;
10539 }
10540
10541 /* Fall through.
10542 ??? What cases are these? Shouldn't we return 2 here? */
10543
10544 return 12;
10545 }
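
/* For illustration, the cost rules above work out roughly as follows
   (a sketch derived from the code, not an exhaustive list):

     GENERAL_REGS <-> M16_REGS or M16_NA_REGS   2
     GENERAL_REGS <-> GENERAL_REGS              2 (4 for MIPS16)
     GENERAL_REGS <-> FP_REGS                   4
     GENERAL_REGS <-> ACC_REGS                  6 (12 for MIPS16)
     GENERAL_REGS <-> ALL_COP_REGS              5
     FP_REGS      <-> FP_REGS                   2
     FP_REGS       -> ST_REGS                   8
     ST_REGS       -> GENERAL_REGS              4

   Anything that falls through the chain of tests gets the conservative
   default of 12.  */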
10546
10547 /* Return the length of INSN. LENGTH is the initial length computed by
10548 attributes in the machine-description file. */
10549
10550 int
10551 mips_adjust_insn_length (rtx insn, int length)
10552 {
10553 /* An unconditional jump has an unfilled delay slot if it is not part
10554 of a sequence. A conditional jump normally has a delay slot, but
10555 does not on MIPS16. */
10556 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
10557 length += 4;
10558
10559 /* See how many nops might be needed to avoid hardware hazards. */
10560 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
10561 switch (get_attr_hazard (insn))
10562 {
10563 case HAZARD_NONE:
10564 break;
10565
10566 case HAZARD_DELAY:
10567 length += 4;
10568 break;
10569
10570 case HAZARD_HILO:
10571 length += 8;
10572 break;
10573 }
10574
10575 /* All MIPS16 instructions are a measly two bytes. */
10576 if (TARGET_MIPS16)
10577 length /= 2;
10578
10579 return length;
10580 }
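
/* For example, under the adjustments above an instruction whose .md length
   is 4 and whose hazard attribute is HILO is counted as 4 + 8 = 12 bytes
   (worst case: two nops inserted after it), while a call or branch gets an
   extra 4 bytes for its as-yet-unfilled delay slot.  */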
10581
10582
10583 /* Return an asm sequence to start a noat block and load the address
10584 of a label into $1. */
10585
10586 const char *
10587 mips_output_load_label (void)
10588 {
10589 if (TARGET_EXPLICIT_RELOCS)
10590 switch (mips_abi)
10591 {
10592 case ABI_N32:
10593 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
10594
10595 case ABI_64:
10596 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
10597
10598 default:
10599 if (ISA_HAS_LOAD_DELAY)
10600 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
10601 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
10602 }
10603 else
10604 {
10605 if (Pmode == DImode)
10606 return "%[dla\t%@,%0";
10607 else
10608 return "%[la\t%@,%0";
10609 }
10610 }
10611
10612 /* Return the assembly code for INSN, which has the operands given by
10613 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
10614 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
10615 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
10616 version of BRANCH_IF_TRUE. */
10617
10618 const char *
10619 mips_output_conditional_branch (rtx insn, rtx *operands,
10620 const char *branch_if_true,
10621 const char *branch_if_false)
10622 {
10623 unsigned int length;
10624 rtx taken, not_taken;
10625
10626 length = get_attr_length (insn);
10627 if (length <= 8)
10628 {
10629 /* Just a simple conditional branch. */
10630 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
10631 return branch_if_true;
10632 }
10633
10634 /* Generate a reversed branch around a direct jump. This fallback does
10635 not use branch-likely instructions. */
10636 mips_branch_likely = false;
10637 not_taken = gen_label_rtx ();
10638 taken = operands[1];
10639
10640 /* Generate the reversed branch to NOT_TAKEN. */
10641 operands[1] = not_taken;
10642 output_asm_insn (branch_if_false, operands);
10643
10644 /* If INSN has a delay slot, we must provide delay slots for both the
10645 branch to NOT_TAKEN and the conditional jump. We must also ensure
10646 that INSN's delay slot is executed in the appropriate cases. */
10647 if (final_sequence)
10648 {
10649 /* This first delay slot will always be executed, so use INSN's
10650 delay slot if it is not annulled. */
10651 if (!INSN_ANNULLED_BRANCH_P (insn))
10652 {
10653 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10654 asm_out_file, optimize, 1, NULL);
10655 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10656 }
10657 else
10658 output_asm_insn ("nop", 0);
10659 fprintf (asm_out_file, "\n");
10660 }
10661
10662 /* Output the unconditional branch to TAKEN. */
10663 if (length <= 16)
10664 output_asm_insn ("j\t%0%/", &taken);
10665 else
10666 {
10667 output_asm_insn (mips_output_load_label (), &taken);
10668 output_asm_insn ("jr\t%@%]%/", 0);
10669 }
10670
10671 /* Now deal with its delay slot; see above. */
10672 if (final_sequence)
10673 {
10674 /* This delay slot will only be executed if the branch is taken.
10675 Use INSN's delay slot if it is annulled. */
10676 if (INSN_ANNULLED_BRANCH_P (insn))
10677 {
10678 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10679 asm_out_file, optimize, 1, NULL);
10680 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10681 }
10682 else
10683 output_asm_insn ("nop", 0);
10684 fprintf (asm_out_file, "\n");
10685 }
10686
10687 /* Output NOT_TAKEN. */
10688 (*targetm.asm_out.internal_label) (asm_out_file, "L",
10689 CODE_LABEL_NUMBER (not_taken));
10690 return "";
10691 }
10692
10693 /* Return the assembly code for INSN, which branches to OPERANDS[1]
10694 if some ordered condition is true. The condition is given by
10695 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
10696 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
10697 its second is always zero. */
10698
10699 const char *
10700 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
10701 {
10702 const char *branch[2];
10703
10704 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
10705 Make BRANCH[0] branch on the inverse condition. */
10706 switch (GET_CODE (operands[0]))
10707 {
10708 /* These cases are equivalent to comparisons against zero. */
10709 case LEU:
10710 inverted_p = !inverted_p;
10711 /* Fall through. */
10712 case GTU:
10713 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
10714 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
10715 break;
10716
10717 /* These cases are always true or always false. */
10718 case LTU:
10719 inverted_p = !inverted_p;
10720 /* Fall through. */
10721 case GEU:
10722 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
10723 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
10724 break;
10725
10726 default:
10727 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
10728 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
10729 break;
10730 }
10731 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
10732 }
10733 \f
10734 /* Used to output div or ddiv instruction DIVISION, which has the operands
10735 given by OPERANDS. Add in a divide-by-zero check if needed.
10736
10737 When working around R4000 and R4400 errata, we need to make sure that
10738 the division is not immediately followed by a shift[1][2]. We also
10739 need to stop the division from being put into a branch delay slot[3].
10740 The easiest way to avoid both problems is to add a nop after the
10741 division. When a divide-by-zero check is needed, this nop can be
10742 used to fill the branch delay slot.
10743
10744 [1] If a double-word or a variable shift executes immediately
10745 after starting an integer division, the shift may give an
10746 incorrect result. See quotations of errata #16 and #28 from
10747 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10748 in mips.md for details.
10749
10750 [2] A similar bug to [1] exists for all revisions of the
10751 R4000 and the R4400 when run in an MC configuration.
10752 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
10753
10754 "19. In this following sequence:
10755
10756 ddiv (or ddivu or div or divu)
10757 dsll32 (or dsrl32, dsra32)
10758
10759 if an MPT stall occurs, while the divide is slipping the cpu
10760 pipeline, then the following double shift would end up with an
10761 incorrect result.
10762
10763 Workaround: The compiler needs to avoid generating any
10764 sequence with divide followed by extended double shift."
10765
10766 This erratum is also present in "MIPS R4400MC Errata, Processor
10767 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
10768 & 3.0" as errata #10 and #4, respectively.
10769
10770 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10771 (also valid for MIPS R4000MC processors):
10772
10773 "52. R4000SC: This bug does not apply for the R4000PC.
10774
10775 There are two flavors of this bug:
10776
10777 1) If the instruction just after divide takes an RF exception
10778 (tlb-refill, tlb-invalid) and gets an instruction cache
10779 miss (both primary and secondary) and the line which is
10780 currently in secondary cache at this index had the first
10781 data word, where the bits 5..2 are set, then R4000 would
10782 get a wrong result for the div.
10783
10784 ##1
10785 nop
10786 div r8, r9
10787 ------------------- # end-of page. -tlb-refill
10788 nop
10789 ##2
10790 nop
10791 div r8, r9
10792 ------------------- # end-of page. -tlb-invalid
10793 nop
10794
10795 2) If the divide is in the taken branch delay slot, where the
10796 target takes RF exception and gets an I-cache miss for the
10797 exception vector or where I-cache miss occurs for the
10798 target address, under the above mentioned scenarios, the
10799 div would get wrong results.
10800
10801 ##1
10802 j r2 # to next page mapped or unmapped
10803 div r8,r9 # this bug would be there as long
10804 # as there is an ICache miss and
10805 nop # the "data pattern" is present
10806
10807 ##2
10808 beq r0, r0, NextPage # to Next page
10809 div r8,r9
10810 nop
10811
10812 This bug is present for div, divu, ddiv, and ddivu
10813 instructions.
10814
10815 Workaround: For item 1), OS could make sure that the next page
10816 after the divide instruction is also mapped. For item 2), the
10817 compiler could make sure that the divide instruction is not in
10818 the branch delay slot."
10819
10820 These processors have PRId values of 0x00004220 and 0x00004300 for
10821 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
10822
10823 const char *
10824 mips_output_division (const char *division, rtx *operands)
10825 {
10826 const char *s;
10827
10828 s = division;
10829 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
10830 {
10831 output_asm_insn (s, operands);
10832 s = "nop";
10833 }
10834 if (TARGET_CHECK_ZERO_DIV)
10835 {
10836 if (TARGET_MIPS16)
10837 {
10838 output_asm_insn (s, operands);
10839 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
10840 }
10841 else if (GENERATE_DIVIDE_TRAPS)
10842 {
10843 output_asm_insn (s, operands);
10844 s = "teq\t%2,%.,7";
10845 }
10846 else
10847 {
10848 output_asm_insn ("%(bne\t%2,%.,1f", operands);
10849 output_asm_insn (s, operands);
10850 s = "break\t7%)\n1:";
10851 }
10852 }
10853 return s;
10854 }
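
/* To illustrate the scheme above: for non-MIPS16 code with divide-by-zero
   checks enabled (TARGET_CHECK_ZERO_DIV) but without conditional traps,
   the emitted sequence has roughly this shape (a sketch, not the literal
   template):

	bne	<divisor>,$0,1f
	<division>
	break	7
   1:

   The division itself ends up in the delay slot of the branch, so it
   executes unconditionally, and the "break 7" is skipped whenever the
   divisor is nonzero.  */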
10855 \f
10856 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
10857 with a final "000" replaced by "k". Ignore case.
10858
10859 Note: this function is shared between GCC and GAS. */
10860
10861 static bool
10862 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
10863 {
10864 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
10865 given++, canonical++;
10866
10867 return ((*given == 0 && *canonical == 0)
10868 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
10869 }
10870
10871
10872 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
10873 CPU name. We've traditionally allowed a lot of variation here.
10874
10875 Note: this function is shared between GCC and GAS. */
10876
10877 static bool
10878 mips_matching_cpu_name_p (const char *canonical, const char *given)
10879 {
10880 /* First see if the name matches exactly, or with a final "000"
10881 turned into "k". */
10882 if (mips_strict_matching_cpu_name_p (canonical, given))
10883 return true;
10884
10885 /* If not, try comparing based on numerical designation alone.
10886 See if GIVEN is an unadorned number, or 'r' followed by a number. */
10887 if (TOLOWER (*given) == 'r')
10888 given++;
10889 if (!ISDIGIT (*given))
10890 return false;
10891
10892 /* Skip over some well-known prefixes in the canonical name,
10893 hoping to find a number there too. */
10894 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
10895 canonical += 2;
10896 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
10897 canonical += 2;
10898 else if (TOLOWER (canonical[0]) == 'r')
10899 canonical += 1;
10900
10901 return mips_strict_matching_cpu_name_p (canonical, given);
10902 }
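
/* For example, under the rules above the canonical name "vr4100" is
   matched by "vr4100", "r4100" and "4100", while "r4000" is additionally
   matched by "r4k" and "4k" thanks to the trailing "000" -> "k" rule.  */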
10903
10904
10905 /* Return the mips_cpu_info entry for the processor or ISA given
10906 by CPU_STRING. Return null if the string isn't recognized.
10907
10908 A similar function exists in GAS. */
10909
10910 static const struct mips_cpu_info *
10911 mips_parse_cpu (const char *cpu_string)
10912 {
10913 const struct mips_cpu_info *p;
10914 const char *s;
10915
10916 /* In the past, we allowed upper-case CPU names, but it doesn't
10917 work well with the multilib machinery. */
10918 for (s = cpu_string; *s != 0; s++)
10919 if (ISUPPER (*s))
10920 {
10921 warning (0, "the cpu name must be lower case");
10922 break;
10923 }
10924
10925 /* 'from-abi' selects the most compatible architecture for the given
10926 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
10927 EABIs, we have to decide whether we're using the 32-bit or 64-bit
10928 version. Look first at the -mgp options, if given, otherwise base
10929 the choice on MASK_64BIT in TARGET_DEFAULT. */
10930 if (strcasecmp (cpu_string, "from-abi") == 0)
10931 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
10932 : ABI_NEEDS_64BIT_REGS ? 3
10933 : (TARGET_64BIT ? 3 : 1));
10934
10935 /* 'default' has traditionally been a no-op. Probably not very useful. */
10936 if (strcasecmp (cpu_string, "default") == 0)
10937 return 0;
10938
10939 for (p = mips_cpu_info_table; p->name != 0; p++)
10940 if (mips_matching_cpu_name_p (p->name, cpu_string))
10941 return p;
10942
10943 return 0;
10944 }
10945
10946
10947 /* Return the processor associated with the given ISA level, or null
10948 if the ISA isn't valid. */
10949
10950 static const struct mips_cpu_info *
10951 mips_cpu_info_from_isa (int isa)
10952 {
10953 const struct mips_cpu_info *p;
10954
10955 for (p = mips_cpu_info_table; p->name != 0; p++)
10956 if (p->isa == isa)
10957 return p;
10958
10959 return 0;
10960 }
10961 \f
10962 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
10963 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
10964 they only hold condition code modes, and CCmode is always considered to
10965 be 4 bytes wide. All other registers are word sized. */
10966
10967 unsigned int
10968 mips_hard_regno_nregs (int regno, enum machine_mode mode)
10969 {
10970 if (ST_REG_P (regno))
10971 return ((GET_MODE_SIZE (mode) + 3) / 4);
10972 else if (! FP_REG_P (regno))
10973 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
10974 else
10975 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
10976 }
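
/* For example, with 32-bit FP registers (UNITS_PER_FPREG == 4) a DFmode
   value needs two FP registers, while with 64-bit FP registers
   (UNITS_PER_FPREG == 8) it needs only one; a CCmode value always
   occupies a single FP status register.  */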
10977
10978 /* Implement TARGET_RETURN_IN_MEMORY. Under the old ABIs (o32 and o64),
10979 all BLKmode objects are returned in memory. Under the new ABIs (n32
10980 and n64), small structures are returned in a register.
10981 Objects with varying size must still be returned in memory, of
10982 course. */
10983
10984 static bool
10985 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
10986 {
10987 if (TARGET_OLDABI)
10988 return (TYPE_MODE (type) == BLKmode);
10989 else
10990 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
10991 || (int_size_in_bytes (type) == -1));
10992 }
10993
10994 static bool
10995 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
10996 {
10997 return !TARGET_OLDABI;
10998 }
10999 \f
11000 /* Return true if INSN is a multiply-add or multiply-subtract
11001 instruction and PREV assigns to the accumulator operand. */
11002
11003 bool
11004 mips_linked_madd_p (rtx prev, rtx insn)
11005 {
11006 rtx x;
11007
11008 x = single_set (insn);
11009 if (x == 0)
11010 return false;
11011
11012 x = SET_SRC (x);
11013
11014 if (GET_CODE (x) == PLUS
11015 && GET_CODE (XEXP (x, 0)) == MULT
11016 && reg_set_p (XEXP (x, 1), prev))
11017 return true;
11018
11019 if (GET_CODE (x) == MINUS
11020 && GET_CODE (XEXP (x, 1)) == MULT
11021 && reg_set_p (XEXP (x, 0), prev))
11022 return true;
11023
11024 return false;
11025 }
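
/* In RTL terms, the test above accepts sets whose source has a shape like

     (plus (mult (reg A) (reg B)) (reg ACC))

   (or the analogous minus form) where PREV sets the accumulator register
   ACC.  This is only an illustrative sketch; the real madd/msub patterns
   are defined in the machine description.  */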
11026 \f
11027 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
11028 that may clobber hi or lo. */
11029
11030 static rtx mips_macc_chains_last_hilo;
11031
11032 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
11033 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
11034
11035 static void
11036 mips_macc_chains_record (rtx insn)
11037 {
11038 if (get_attr_may_clobber_hilo (insn))
11039 mips_macc_chains_last_hilo = insn;
11040 }
11041
11042 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
11043 has NREADY elements, looking for a multiply-add or multiply-subtract
11044 instruction that is cumulative with mips_macc_chains_last_hilo.
11045 If there is one, promote it ahead of anything else that might
11046 clobber hi or lo. */
11047
11048 static void
11049 mips_macc_chains_reorder (rtx *ready, int nready)
11050 {
11051 int i, j;
11052
11053 if (mips_macc_chains_last_hilo != 0)
11054 for (i = nready - 1; i >= 0; i--)
11055 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
11056 {
11057 for (j = nready - 1; j > i; j--)
11058 if (recog_memoized (ready[j]) >= 0
11059 && get_attr_may_clobber_hilo (ready[j]))
11060 {
11061 mips_promote_ready (ready, i, j);
11062 break;
11063 }
11064 break;
11065 }
11066 }
11067 \f
11068 /* The last instruction to be scheduled. */
11069
11070 static rtx vr4130_last_insn;
11071
11072 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
11073 points to an rtx that is initially an instruction. Nullify the rtx
11074 if the instruction uses the value of register X. */
11075
11076 static void
11077 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
11078 {
11079 rtx *insn_ptr = data;
11080 if (REG_P (x)
11081 && *insn_ptr != 0
11082 && reg_referenced_p (x, PATTERN (*insn_ptr)))
11083 *insn_ptr = 0;
11084 }
11085
11086 /* Return true if there is true register dependence between vr4130_last_insn
11087 and INSN. */
11088
11089 static bool
11090 vr4130_true_reg_dependence_p (rtx insn)
11091 {
11092 note_stores (PATTERN (vr4130_last_insn),
11093 vr4130_true_reg_dependence_p_1, &insn);
11094 return insn == 0;
11095 }
11096
11097 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
11098 the ready queue and that INSN2 is the instruction after it, return
11099 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
11100 in which INSN1 and INSN2 can probably issue in parallel, but for
11101 which (INSN2, INSN1) should be less sensitive to instruction
11102 alignment than (INSN1, INSN2). See 4130.md for more details. */
11103
11104 static bool
11105 vr4130_swap_insns_p (rtx insn1, rtx insn2)
11106 {
11107 sd_iterator_def sd_it;
11108 dep_t dep;
11109
11110 /* Check for the following case:
11111
11112 1) there is some other instruction X with an anti dependence on INSN1;
11113 2) X has a higher priority than INSN2; and
11114 3) X is an arithmetic instruction (and thus has no unit restrictions).
11115
11116 If INSN1 is the last instruction blocking X, it would be better to
11117 choose (INSN1, X) over (INSN2, INSN1). */
11118 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
11119 if (DEP_TYPE (dep) == REG_DEP_ANTI
11120 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
11121 && recog_memoized (DEP_CON (dep)) >= 0
11122 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
11123 return false;
11124
11125 if (vr4130_last_insn != 0
11126 && recog_memoized (insn1) >= 0
11127 && recog_memoized (insn2) >= 0)
11128 {
11129 /* See whether INSN1 and INSN2 use different execution units,
11130 or if they are both ALU-type instructions. If so, they can
11131 probably execute in parallel. */
11132 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
11133 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
11134 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
11135 {
11136 /* If only one of the instructions has a dependence on
11137 vr4130_last_insn, prefer to schedule the other one first. */
11138 bool dep1 = vr4130_true_reg_dependence_p (insn1);
11139 bool dep2 = vr4130_true_reg_dependence_p (insn2);
11140 if (dep1 != dep2)
11141 return dep1;
11142
11143 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
11144 is not an ALU-type instruction and if INSN1 uses the same
11145 execution unit. (Note that if this condition holds, we already
11146 know that INSN2 uses a different execution unit.) */
11147 if (class1 != VR4130_CLASS_ALU
11148 && recog_memoized (vr4130_last_insn) >= 0
11149 && class1 == get_attr_vr4130_class (vr4130_last_insn))
11150 return true;
11151 }
11152 }
11153 return false;
11154 }
11155
11156 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
11157 queue with at least two instructions. Swap the first two if
11158 vr4130_swap_insns_p says that it could be worthwhile. */
11159
11160 static void
11161 vr4130_reorder (rtx *ready, int nready)
11162 {
11163 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
11164 mips_promote_ready (ready, nready - 2, nready - 1);
11165 }
11166 \f
11167 /* Remove the instruction at index LOWER from ready queue READY and
11168 reinsert it in front of the instruction at index HIGHER. LOWER must
11169 be <= HIGHER. */
11170
11171 static void
11172 mips_promote_ready (rtx *ready, int lower, int higher)
11173 {
11174 rtx new_head;
11175 int i;
11176
11177 new_head = ready[lower];
11178 for (i = lower; i < higher; i++)
11179 ready[i] = ready[i + 1];
11180 ready[i] = new_head;
11181 }
11182
11183 /* If the priority of the instruction at POS2 in the ready queue READY
11184 is within LIMIT units of that of the instruction at POS1, swap the
11185 instructions if POS2 is not already less than POS1. */
11186
11187 static void
11188 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
11189 {
11190 if (pos1 < pos2
11191 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
11192 {
11193 rtx temp;
11194 temp = ready[pos1];
11195 ready[pos1] = ready[pos2];
11196 ready[pos2] = temp;
11197 }
11198 }
11199
11200 /* Record whether the last 74k AGEN instruction was a load or a store. */
11201
11202 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
11203
11204 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
11205 resets the state to TYPE_UNKNOWN. */
11206
11207 static void
11208 mips_74k_agen_init (rtx insn)
11209 {
11210 if (!insn || !NONJUMP_INSN_P (insn))
11211 mips_last_74k_agen_insn = TYPE_UNKNOWN;
11212 else if (USEFUL_INSN_P (insn))
11213 {
11214 enum attr_type type = get_attr_type (insn);
11215 if (type == TYPE_LOAD || type == TYPE_STORE)
11216 mips_last_74k_agen_insn = type;
11217 }
11218 }
11219
11220 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
11221 loads to be grouped together, and multiple stores to be grouped
11222 together. Swap things around in the ready queue to make this happen. */
11223
11224 static void
11225 mips_74k_agen_reorder (rtx *ready, int nready)
11226 {
11227 int i;
11228 int store_pos, load_pos;
11229
11230 store_pos = -1;
11231 load_pos = -1;
11232
11233 for (i = nready - 1; i >= 0; i--)
11234 {
11235 rtx insn = ready[i];
11236 if (USEFUL_INSN_P (insn))
11237 switch (get_attr_type (insn))
11238 {
11239 case TYPE_STORE:
11240 if (store_pos == -1)
11241 store_pos = i;
11242 break;
11243
11244 case TYPE_LOAD:
11245 if (load_pos == -1)
11246 load_pos = i;
11247 break;
11248
11249 default:
11250 break;
11251 }
11252 }
11253
11254 if (load_pos == -1 || store_pos == -1)
11255 return;
11256
11257 switch (mips_last_74k_agen_insn)
11258 {
11259 case TYPE_UNKNOWN:
11260 /* Prefer to schedule loads since they have a higher latency. */
11261 case TYPE_LOAD:
11262 /* Swap loads to the front of the queue. */
11263 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
11264 break;
11265 case TYPE_STORE:
11266 /* Swap stores to the front of the queue. */
11267 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
11268 break;
11269 default:
11270 break;
11271 }
11272 }
11273
11274 /* Implement TARGET_SCHED_INIT. */
11275
11276 static void
11277 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11278 int max_ready ATTRIBUTE_UNUSED)
11279 {
11280 mips_macc_chains_last_hilo = 0;
11281 vr4130_last_insn = 0;
11282 mips_74k_agen_init (NULL_RTX);
11283 }
11284
11285 /* Implement TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
11286
11287 static int
11288 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11289 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
11290 {
11291 if (!reload_completed
11292 && TUNE_MACC_CHAINS
11293 && *nreadyp > 0)
11294 mips_macc_chains_reorder (ready, *nreadyp);
11295 if (reload_completed
11296 && TUNE_MIPS4130
11297 && !TARGET_VR4130_ALIGN
11298 && *nreadyp > 1)
11299 vr4130_reorder (ready, *nreadyp);
11300 if (TUNE_74K)
11301 mips_74k_agen_reorder (ready, *nreadyp);
11302 return mips_issue_rate ();
11303 }
11304
11305 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
11306
11307 static int
11308 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
11309 rtx insn, int more)
11310 {
11311 if (TUNE_74K)
11312 mips_74k_agen_init (insn);
11313 switch (GET_CODE (PATTERN (insn)))
11314 {
11315 case USE:
11316 case CLOBBER:
11317 /* Don't count USEs and CLOBBERs against the issue rate. */
11318 break;
11319
11320 default:
11321 more--;
11322 if (!reload_completed && TUNE_MACC_CHAINS)
11323 mips_macc_chains_record (insn);
11324 vr4130_last_insn = insn;
11325 break;
11326 }
11327 return more;
11328 }
11329 \f
11330 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
11331 dependencies have no cost, except on the 20Kc, where an output dependence
11332 is treated like an input dependence. */
11333
11334 static int
11335 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
11336 rtx dep ATTRIBUTE_UNUSED, int cost)
11337 {
11338 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
11339 && TUNE_20KC)
11340 return cost;
11341 if (REG_NOTE_KIND (link) != 0)
11342 return 0;
11343 return cost;
11344 }
11345
11346 /* Return the number of instructions that can be issued per cycle. */
11347
11348 static int
11349 mips_issue_rate (void)
11350 {
11351 switch (mips_tune)
11352 {
11353 case PROCESSOR_74KC:
11354 case PROCESSOR_74KF2_1:
11355 case PROCESSOR_74KF1_1:
11356 case PROCESSOR_74KF3_2:
11357 /* The 74k is not strictly a quad-issue cpu, but it can be seen as one
11358 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
11359 but in reality only a maximum of 3 insns can be issued as the
11360 floating point load/stores also require a slot in the AGEN pipe. */
11361 return 4;
11362
11363 case PROCESSOR_20KC:
11364 case PROCESSOR_R4130:
11365 case PROCESSOR_R5400:
11366 case PROCESSOR_R5500:
11367 case PROCESSOR_R7000:
11368 case PROCESSOR_R9000:
11369 return 2;
11370
11371 case PROCESSOR_SB1:
11372 case PROCESSOR_SB1A:
11373 /* This is actually 4, but we get better performance if we claim 3.
11374 This is partly because of unwanted speculative code motion with the
11375 larger number, and partly because in most common cases we can't
11376 reach the theoretical max of 4. */
11377 return 3;
11378
11379 default:
11380 return 1;
11381 }
11382 }
11383
11384 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
11385 be as wide as the scheduling freedom in the DFA. */
11386
11387 static int
11388 mips_multipass_dfa_lookahead (void)
11389 {
11390 /* Can schedule up to 4 of the 6 function units in any one cycle. */
11391 if (TUNE_SB1)
11392 return 4;
11393
11394 return 0;
11395 }
11396
11397 /* Implements a store data bypass check. We need this because the cprestore
11398 pattern has type "store" but is defined using an UNSPEC, which causes the
11399 default routine to abort. We just return false for that case. */
11400 /* ??? Should try to give a better result here than assuming false. */
11401
11402 int
11403 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
11404 {
11405 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
11406 return false;
11407
11408 return ! store_data_bypass_p (out_insn, in_insn);
11409 }
11410 \f
11411 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
11412 return the first operand of the associated "pref" or "prefx" insn. */
11413
11414 rtx
11415 mips_prefetch_cookie (rtx write, rtx locality)
11416 {
11417 /* store_streamed / load_streamed. */
11418 if (INTVAL (locality) <= 0)
11419 return GEN_INT (INTVAL (write) + 4);
11420
11421 /* store / load. */
11422 if (INTVAL (locality) <= 2)
11423 return write;
11424
11425 /* store_retained / load_retained. */
11426 return GEN_INT (INTVAL (write) + 6);
11427 }
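
/* The resulting cookie follows the MIPS "pref" hint encoding: 0/1 for a
   plain load/store, 4/5 for load_streamed/store_streamed (locality 0) and
   6/7 for load_retained/store_retained (locality 3).  For example,
   __builtin_prefetch (p, 1, 0) ends up with hint 5 (store_streamed).  */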
11428 \f
11429 /* MIPS builtin function support. */
11430
11431 struct builtin_description
11432 {
11433 /* The code of the main .md file instruction. See mips_builtin_type
11434 for more information. */
11435 enum insn_code icode;
11436
11437 /* The floating-point comparison code to use with ICODE, if any. */
11438 enum mips_fp_condition cond;
11439
11440 /* The name of the builtin function. */
11441 const char *name;
11442
11443 /* Specifies how the function should be expanded. */
11444 enum mips_builtin_type builtin_type;
11445
11446 /* The function's prototype. */
11447 enum mips_function_type function_type;
11448
11449 /* The target flags required for this function. */
11450 int target_flags;
11451 };
11452
11453 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
11454 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
11455 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11456 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11457 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
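
/* As an example of the expansion, DIRECT_BUILTIN (abs_ps,
   MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT) produces the entry:

     { CODE_FOR_mips_abs_ps, 0, "__builtin_mips_abs_ps",
       MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT }  */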
11458
11459 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
11460 TARGET_FLAGS. */
11461 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
11462 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
11463 "__builtin_mips_" #INSN "_" #COND "_s", \
11464 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
11465 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
11466 "__builtin_mips_" #INSN "_" #COND "_d", \
11467 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
11468
11469 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
11470 The lower and upper forms require TARGET_FLAGS while the any and all
11471 forms require MASK_MIPS3D. */
11472 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
11473 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11474 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
11475 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11476 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11477 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
11478 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11479 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11480 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
11481 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
11482 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11483 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
11484 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
11485
11486 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
11487 require MASK_MIPS3D. */
11488 #define CMP_4S_BUILTINS(INSN, COND) \
11489 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11490 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
11491 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11492 MASK_MIPS3D }, \
11493 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11494 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
11495 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11496 MASK_MIPS3D }
11497
11498 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
11499 instruction requires TARGET_FLAGS. */
11500 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
11501 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11502 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
11503 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11504 TARGET_FLAGS }, \
11505 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11506 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
11507 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11508 TARGET_FLAGS }
11509
11510 /* Define all the builtins related to c.cond.fmt condition COND. */
11511 #define CMP_BUILTINS(COND) \
11512 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11513 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
11514 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
11515 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11516 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
11517 CMP_4S_BUILTINS (c, COND), \
11518 CMP_4S_BUILTINS (cabs, COND)
11519
11520 static const struct builtin_description mips_bdesc[] =
11521 {
11522 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11523 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11524 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11525 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11526 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
11527 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11528 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11529 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11530
11531 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
11532 MASK_PAIRED_SINGLE_FLOAT),
11533 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11534 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11535 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11536 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11537
11538 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11539 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11540 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11541 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11542 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11543 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11544
11545 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11546 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11547 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11548 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11549 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11550 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11551
11552 MIPS_FP_CONDITIONS (CMP_BUILTINS)
11553 };
11554
11555 /* Builtin functions for the SB-1 processor. */
11556
11557 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
11558
11559 static const struct builtin_description sb1_bdesc[] =
11560 {
11561 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
11562 };
11563
11564 /* Builtin functions for DSP ASE. */
11565
11566 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
11567 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
11568 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
11569 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
11570 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
11571
11572 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
11573 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
11574 builtin_description fields. */
11575 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11576 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11577 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
11578
11579 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
11580 branch instruction. TARGET_FLAGS is a builtin_description field. */
11581 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
11582 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
11583 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
11584
11585 static const struct builtin_description dsp_bdesc[] =
11586 {
11587 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11588 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11589 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11590 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11591 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11592 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11593 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11594 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11595 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11596 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11597 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11598 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11599 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11600 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
11601 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
11602 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
11603 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11604 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11605 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11606 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11607 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11608 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11609 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11610 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11611 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11612 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11613 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11614 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11615 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11616 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11617 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11618 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11619 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11620 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11621 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11622 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11623 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11624 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11625 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11626 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11627 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11628 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11629 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11630 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
11631 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11632 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
11633 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
11634 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11635 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11636 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11637 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11638 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11639 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11640 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11641 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11642 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11643 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11644 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11645 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11646 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
11647 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
11648 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11649 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11650 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11651 BPOSGE_BUILTIN (32, MASK_DSP),
11652
11653 /* The following are for the MIPS DSP ASE REV 2. */
11654 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
11655 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11656 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11657 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11658 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11659 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11660 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11661 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11662 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11663 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11664 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11665 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11666 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11667 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11668 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11669 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11670 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11671 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11672 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11673 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11674 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11675 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
11676 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11677 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11678 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11679 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11680 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11681 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11682 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11683 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11684 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11685 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11686 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11687 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
11688 };
11689
11690 static const struct builtin_description dsp_32only_bdesc[] =
11691 {
11692 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11693 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11694 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11695 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11696 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11697 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11698 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11699 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11700 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11701 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11702 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11703 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11704 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11705 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11706 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11707 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11708 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11709 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11710 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11711 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11712 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11713
11714 /* The following are for the MIPS DSP ASE REV 2. */
11715 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11716 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11717 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11718 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11719 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11720 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11721 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11722 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
11723 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
11724 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11725 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11726 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11727 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11728 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11729 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
11730 };
11731
11732 /* This helps provide a mapping from builtin function codes to bdesc
11733 arrays. */
11734
11735 struct bdesc_map
11736 {
11737 /* The builtin function table that this entry describes. */
11738 const struct builtin_description *bdesc;
11739
11740 /* The number of entries in the builtin function table. */
11741 unsigned int size;
11742
11743 /* The target processor that supports these builtin functions.
11744 PROCESSOR_MAX means we enable them for all processors. */
11745 enum processor_type proc;
11746
11747 /* If the target has these flags, this builtin function table
11748 will not be supported. */
11749 int unsupported_target_flags;
11750 };
11751
11752 static const struct bdesc_map bdesc_arrays[] =
11753 {
11754 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
11755 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
11756 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
11757 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
11758 MASK_64BIT }
11759 };
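
/* Builtin function codes index the concatenation of these tables, in the
   order given above: codes 0 .. ARRAY_SIZE (mips_bdesc) - 1 refer to
   mips_bdesc, the next ARRAY_SIZE (sb1_bdesc) codes to sb1_bdesc, and so
   on; see the lookup loop in mips_expand_builtin below.  */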
11760
11761 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
11762 suitable for input operand OP of instruction ICODE. Return the value. */
11763
11764 static rtx
11765 mips_prepare_builtin_arg (enum insn_code icode,
11766 unsigned int op, tree exp, unsigned int argnum)
11767 {
11768 rtx value;
11769 enum machine_mode mode;
11770
11771 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
11772 mode = insn_data[icode].operand[op].mode;
11773 if (!insn_data[icode].operand[op].predicate (value, mode))
11774 {
11775 value = copy_to_mode_reg (mode, value);
11776 /* Check the predicate again. */
11777 if (!insn_data[icode].operand[op].predicate (value, mode))
11778 {
11779 error ("invalid argument to builtin function");
11780 return const0_rtx;
11781 }
11782 }
11783
11784 return value;
11785 }
11786
11787 /* Return an rtx suitable for output operand OP of instruction ICODE.
11788 If TARGET is non-null, try to use it where possible. */
11789
11790 static rtx
11791 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
11792 {
11793 enum machine_mode mode;
11794
11795 mode = insn_data[icode].operand[op].mode;
11796 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
11797 target = gen_reg_rtx (mode);
11798
11799 return target;
11800 }
11801
11802 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
11803
11804 rtx
11805 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11806 enum machine_mode mode ATTRIBUTE_UNUSED,
11807 int ignore ATTRIBUTE_UNUSED)
11808 {
11809 enum insn_code icode;
11810 enum mips_builtin_type type;
11811 tree fndecl;
11812 unsigned int fcode;
11813 const struct builtin_description *bdesc;
11814 const struct bdesc_map *m;
11815
11816 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11817 fcode = DECL_FUNCTION_CODE (fndecl);
11818
11819 if (TARGET_MIPS16)
11820 {
11821 error ("built-in function %qs not supported for MIPS16",
11822 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
11823 return const0_rtx;
11824 }
11825
11826 bdesc = NULL;
11827 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
11828 {
11829 if (fcode < m->size)
11830 {
11831 bdesc = m->bdesc;
11832 icode = bdesc[fcode].icode;
11833 type = bdesc[fcode].builtin_type;
11834 break;
11835 }
11836 fcode -= m->size;
11837 }
11838 if (bdesc == NULL)
11839 return 0;
11840
11841 switch (type)
11842 {
11843 case MIPS_BUILTIN_DIRECT:
11844 return mips_expand_builtin_direct (icode, target, exp, true);
11845
11846 case MIPS_BUILTIN_DIRECT_NO_TARGET:
11847 return mips_expand_builtin_direct (icode, target, exp, false);
11848
11849 case MIPS_BUILTIN_MOVT:
11850 case MIPS_BUILTIN_MOVF:
11851 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
11852 target, exp);
11853
11854 case MIPS_BUILTIN_CMP_ANY:
11855 case MIPS_BUILTIN_CMP_ALL:
11856 case MIPS_BUILTIN_CMP_UPPER:
11857 case MIPS_BUILTIN_CMP_LOWER:
11858 case MIPS_BUILTIN_CMP_SINGLE:
11859 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
11860 target, exp);
11861
11862 case MIPS_BUILTIN_BPOSGE32:
11863 return mips_expand_builtin_bposge (type, target);
11864
11865 default:
11866 return 0;
11867 }
11868 }
11869
11870 /* Initialize the builtin functions. This is called via TARGET_INIT_BUILTINS. */
11871
11872 void
11873 mips_init_builtins (void)
11874 {
11875 const struct builtin_description *d;
11876 const struct bdesc_map *m;
11877 tree types[(int) MIPS_MAX_FTYPE_MAX];
11878 tree V2SF_type_node;
11879 tree V2HI_type_node;
11880 tree V4QI_type_node;
11881 unsigned int offset;
11882
11883 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
11884 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
11885 return;
11886
11887 if (TARGET_PAIRED_SINGLE_FLOAT)
11888 {
11889 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
11890
11891 types[MIPS_V2SF_FTYPE_V2SF]
11892 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
11893
11894 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
11895 = build_function_type_list (V2SF_type_node,
11896 V2SF_type_node, V2SF_type_node, NULL_TREE);
11897
11898 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
11899 = build_function_type_list (V2SF_type_node,
11900 V2SF_type_node, V2SF_type_node,
11901 integer_type_node, NULL_TREE);
11902
11903 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
11904 = build_function_type_list (V2SF_type_node,
11905 V2SF_type_node, V2SF_type_node,
11906 V2SF_type_node, V2SF_type_node, NULL_TREE);
11907
11908 types[MIPS_V2SF_FTYPE_SF_SF]
11909 = build_function_type_list (V2SF_type_node,
11910 float_type_node, float_type_node, NULL_TREE);
11911
11912 types[MIPS_INT_FTYPE_V2SF_V2SF]
11913 = build_function_type_list (integer_type_node,
11914 V2SF_type_node, V2SF_type_node, NULL_TREE);
11915
11916 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
11917 = build_function_type_list (integer_type_node,
11918 V2SF_type_node, V2SF_type_node,
11919 V2SF_type_node, V2SF_type_node, NULL_TREE);
11920
11921 types[MIPS_INT_FTYPE_SF_SF]
11922 = build_function_type_list (integer_type_node,
11923 float_type_node, float_type_node, NULL_TREE);
11924
11925 types[MIPS_INT_FTYPE_DF_DF]
11926 = build_function_type_list (integer_type_node,
11927 double_type_node, double_type_node, NULL_TREE);
11928
11929 types[MIPS_SF_FTYPE_V2SF]
11930 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
11931
11932 types[MIPS_SF_FTYPE_SF]
11933 = build_function_type_list (float_type_node,
11934 float_type_node, NULL_TREE);
11935
11936 types[MIPS_SF_FTYPE_SF_SF]
11937 = build_function_type_list (float_type_node,
11938 float_type_node, float_type_node, NULL_TREE);
11939
11940 types[MIPS_DF_FTYPE_DF]
11941 = build_function_type_list (double_type_node,
11942 double_type_node, NULL_TREE);
11943
11944 types[MIPS_DF_FTYPE_DF_DF]
11945 = build_function_type_list (double_type_node,
11946 double_type_node, double_type_node, NULL_TREE);
11947 }
11948
11949 if (TARGET_DSP)
11950 {
11951 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
11952 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
11953
11954 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
11955 = build_function_type_list (V2HI_type_node,
11956 V2HI_type_node, V2HI_type_node,
11957 NULL_TREE);
11958
11959 types[MIPS_SI_FTYPE_SI_SI]
11960 = build_function_type_list (intSI_type_node,
11961 intSI_type_node, intSI_type_node,
11962 NULL_TREE);
11963
11964 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
11965 = build_function_type_list (V4QI_type_node,
11966 V4QI_type_node, V4QI_type_node,
11967 NULL_TREE);
11968
11969 types[MIPS_SI_FTYPE_V4QI]
11970 = build_function_type_list (intSI_type_node,
11971 V4QI_type_node,
11972 NULL_TREE);
11973
11974 types[MIPS_V2HI_FTYPE_V2HI]
11975 = build_function_type_list (V2HI_type_node,
11976 V2HI_type_node,
11977 NULL_TREE);
11978
11979 types[MIPS_SI_FTYPE_SI]
11980 = build_function_type_list (intSI_type_node,
11981 intSI_type_node,
11982 NULL_TREE);
11983
11984 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
11985 = build_function_type_list (V4QI_type_node,
11986 V2HI_type_node, V2HI_type_node,
11987 NULL_TREE);
11988
11989 types[MIPS_V2HI_FTYPE_SI_SI]
11990 = build_function_type_list (V2HI_type_node,
11991 intSI_type_node, intSI_type_node,
11992 NULL_TREE);
11993
11994 types[MIPS_SI_FTYPE_V2HI]
11995 = build_function_type_list (intSI_type_node,
11996 V2HI_type_node,
11997 NULL_TREE);
11998
11999 types[MIPS_V2HI_FTYPE_V4QI]
12000 = build_function_type_list (V2HI_type_node,
12001 V4QI_type_node,
12002 NULL_TREE);
12003
12004 types[MIPS_V4QI_FTYPE_V4QI_SI]
12005 = build_function_type_list (V4QI_type_node,
12006 V4QI_type_node, intSI_type_node,
12007 NULL_TREE);
12008
12009 types[MIPS_V2HI_FTYPE_V2HI_SI]
12010 = build_function_type_list (V2HI_type_node,
12011 V2HI_type_node, intSI_type_node,
12012 NULL_TREE);
12013
12014 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
12015 = build_function_type_list (V2HI_type_node,
12016 V4QI_type_node, V2HI_type_node,
12017 NULL_TREE);
12018
12019 types[MIPS_SI_FTYPE_V2HI_V2HI]
12020 = build_function_type_list (intSI_type_node,
12021 V2HI_type_node, V2HI_type_node,
12022 NULL_TREE);
12023
12024 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
12025 = build_function_type_list (intDI_type_node,
12026 intDI_type_node, V4QI_type_node, V4QI_type_node,
12027 NULL_TREE);
12028
12029 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
12030 = build_function_type_list (intDI_type_node,
12031 intDI_type_node, V2HI_type_node, V2HI_type_node,
12032 NULL_TREE);
12033
12034 types[MIPS_DI_FTYPE_DI_SI_SI]
12035 = build_function_type_list (intDI_type_node,
12036 intDI_type_node, intSI_type_node, intSI_type_node,
12037 NULL_TREE);
12038
12039 types[MIPS_V4QI_FTYPE_SI]
12040 = build_function_type_list (V4QI_type_node,
12041 intSI_type_node,
12042 NULL_TREE);
12043
12044 types[MIPS_V2HI_FTYPE_SI]
12045 = build_function_type_list (V2HI_type_node,
12046 intSI_type_node,
12047 NULL_TREE);
12048
12049 types[MIPS_VOID_FTYPE_V4QI_V4QI]
12050 = build_function_type_list (void_type_node,
12051 V4QI_type_node, V4QI_type_node,
12052 NULL_TREE);
12053
12054 types[MIPS_SI_FTYPE_V4QI_V4QI]
12055 = build_function_type_list (intSI_type_node,
12056 V4QI_type_node, V4QI_type_node,
12057 NULL_TREE);
12058
12059 types[MIPS_VOID_FTYPE_V2HI_V2HI]
12060 = build_function_type_list (void_type_node,
12061 V2HI_type_node, V2HI_type_node,
12062 NULL_TREE);
12063
12064 types[MIPS_SI_FTYPE_DI_SI]
12065 = build_function_type_list (intSI_type_node,
12066 intDI_type_node, intSI_type_node,
12067 NULL_TREE);
12068
12069 types[MIPS_DI_FTYPE_DI_SI]
12070 = build_function_type_list (intDI_type_node,
12071 intDI_type_node, intSI_type_node,
12072 NULL_TREE);
12073
12074 types[MIPS_VOID_FTYPE_SI_SI]
12075 = build_function_type_list (void_type_node,
12076 intSI_type_node, intSI_type_node,
12077 NULL_TREE);
12078
12079 types[MIPS_SI_FTYPE_PTR_SI]
12080 = build_function_type_list (intSI_type_node,
12081 ptr_type_node, intSI_type_node,
12082 NULL_TREE);
12083
12084 types[MIPS_SI_FTYPE_VOID]
12085 = build_function_type (intSI_type_node, void_list_node);
12086
12087 if (TARGET_DSPR2)
12088 {
12089 types[MIPS_V4QI_FTYPE_V4QI]
12090 = build_function_type_list (V4QI_type_node,
12091 V4QI_type_node,
12092 NULL_TREE);
12093
12094 types[MIPS_SI_FTYPE_SI_SI_SI]
12095 = build_function_type_list (intSI_type_node,
12096 intSI_type_node, intSI_type_node,
12097 intSI_type_node, NULL_TREE);
12098
12099 types[MIPS_DI_FTYPE_DI_USI_USI]
12100 = build_function_type_list (intDI_type_node,
12101 intDI_type_node,
12102 unsigned_intSI_type_node,
12103 unsigned_intSI_type_node, NULL_TREE);
12104
12105 types[MIPS_DI_FTYPE_SI_SI]
12106 = build_function_type_list (intDI_type_node,
12107 intSI_type_node, intSI_type_node,
12108 NULL_TREE);
12109
12110 types[MIPS_DI_FTYPE_USI_USI]
12111 = build_function_type_list (intDI_type_node,
12112 unsigned_intSI_type_node,
12113 unsigned_intSI_type_node, NULL_TREE);
12114
12115 types[MIPS_V2HI_FTYPE_SI_SI_SI]
12116 = build_function_type_list (V2HI_type_node,
12117 intSI_type_node, intSI_type_node,
12118 intSI_type_node, NULL_TREE);
12119
12120 }
12121 }
12122
12123 /* Iterate through all of the bdesc arrays, initializing all of the
12124 builtin functions. */
12125
12126 offset = 0;
12127 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
12128 {
12129 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
12130 && (m->unsupported_target_flags & target_flags) == 0)
12131 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
12132 if ((d->target_flags & target_flags) == d->target_flags)
12133 add_builtin_function (d->name, types[d->function_type],
12134 d - m->bdesc + offset,
12135 BUILT_IN_MD, NULL, NULL);
12136 offset += m->size;
12137 }
12138 }
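/* Illustrative sketch only (not compiled as part of this file): user-level
   code exercising the V2SF signatures built above.  The builtin name is
   assumed to be one of the paired-single entries in the description tables;
   such code is compiled with -mhard-float -mpaired-single.  */
#if 0
typedef float v2sf __attribute__ ((vector_size (8)));

v2sf
pair_lower_halves (v2sf a, v2sf b)
{
  /* Matches the MIPS_V2SF_FTYPE_V2SF_V2SF signature registered above.  */
  return __builtin_mips_pll_ps (a, b);
}
#endif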
12139
12140 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
12141 .md pattern and EXP is the function call expression with its arguments.
12142 TARGET, if nonnull, suggests a good place to put the result.
12143 HAS_TARGET is true if the built-in function returns a value. */
12144
12145 static rtx
12146 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
12147 bool has_target)
12148 {
12149 rtx ops[MAX_RECOG_OPERANDS];
12150 int i = 0;
12151 int j = 0;
12152
12153 if (has_target)
12154 {
12155 /* Save the target in ops[0]. */
12156 ops[0] = mips_prepare_builtin_target (icode, 0, target);
12157 i = 1;
12158 }
12159
12160 /* Fetch an argument for each remaining operand, but do not read past the
12161 end of the argument list; some instructions have extra clobber operands. */
12162 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
12163 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
12164
12165 switch (i)
12166 {
12167 case 2:
12168 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
12169 break;
12170
12171 case 3:
12172 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
12173 break;
12174
12175 case 4:
12176 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
12177 break;
12178
12179 default:
12180 gcc_unreachable ();
12181 }
12182 return target;
12183 }
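/* Illustrative sketch only (not compiled as part of this file): the two
   flavours handled above, shown with DSP builtins that are assumed to be
   registered as MIPS_BUILTIN_DIRECT and MIPS_BUILTIN_DIRECT_NO_TARGET
   respectively; such code is compiled with -mdsp.  */
#if 0
typedef short v2q15 __attribute__ ((vector_size (4)));

v2q15
saturating_add (v2q15 a, v2q15 b)
{
  /* DIRECT: operand 0 of the pattern receives the result.  */
  return __builtin_mips_addq_s_ph (a, b);
}

void
write_dsp_control (int value)
{
  /* DIRECT_NO_TARGET: the pattern has no result operand.  */
  __builtin_mips_wrdsp (value, 63);
}
#endif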
12184
12185 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
12186 function (TYPE says which). EXP is the tree for the function
12187 call, ICODE is the instruction that should be used to compare
12188 the first two arguments, and COND is the condition it should test.
12189 TARGET, if nonnull, suggests a good place to put the result. */
12190
12191 static rtx
12192 mips_expand_builtin_movtf (enum mips_builtin_type type,
12193 enum insn_code icode, enum mips_fp_condition cond,
12194 rtx target, tree exp)
12195 {
12196 rtx cmp_result, op0, op1;
12197
12198 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12199 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
12200 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
12201 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
12202
12203 icode = CODE_FOR_mips_cond_move_tf_ps;
12204 target = mips_prepare_builtin_target (icode, 0, target);
12205 if (type == MIPS_BUILTIN_MOVT)
12206 {
12207 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
12208 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
12209 }
12210 else
12211 {
12212 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
12213 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
12214 }
12215 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
12216 return target;
12217 }
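/* Illustrative sketch only (not compiled as part of this file): a call to
   one of the builtins expanded above.  The c_eq suffix is an assumption;
   the real names take their condition suffix from the description tables.
   The call compares A with B and then selects, per half, between the last
   two arguments according to the resulting condition codes.  */
#if 0
typedef float v2sf __attribute__ ((vector_size (8)));

v2sf
select_on_equal (v2sf a, v2sf b, v2sf c, v2sf d)
{
  return __builtin_mips_movt_c_eq_ps (a, b, c, d);
}
#endif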
12218
12219 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
12220 into TARGET otherwise. Return TARGET. */
12221
12222 static rtx
12223 mips_builtin_branch_and_move (rtx condition, rtx target,
12224 rtx value_if_true, rtx value_if_false)
12225 {
12226 rtx true_label, done_label;
12227
12228 true_label = gen_label_rtx ();
12229 done_label = gen_label_rtx ();
12230
12231 /* First assume that CONDITION is false. */
12232 mips_emit_move (target, value_if_false);
12233
12234 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
12235 emit_jump_insn (gen_condjump (condition, true_label));
12236 emit_jump_insn (gen_jump (done_label));
12237 emit_barrier ();
12238
12239 /* Fix TARGET if CONDITION is true. */
12240 emit_label (true_label);
12241 mips_emit_move (target, value_if_true);
12242
12243 emit_label (done_label);
12244 return target;
12245 }
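/* The sequence emitted above has the following shape in pseudo-code:

       target = value_if_false;
       if (condition) goto true_label;
       goto done_label;
     true_label:
       target = value_if_true;
     done_label:
       ...  */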
12246
12247 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
12248 of the comparison instruction and COND is the condition it should test.
12249 EXP is the function call and arguments and TARGET, if nonnull,
12250 suggests a good place to put the boolean result. */
12251
12252 static rtx
12253 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
12254 enum insn_code icode, enum mips_fp_condition cond,
12255 rtx target, tree exp)
12256 {
12257 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
12258 int i;
12259 int j = 0;
12260
12261 if (target == 0 || GET_MODE (target) != SImode)
12262 target = gen_reg_rtx (SImode);
12263
12264 /* Prepare the operands to the comparison. */
12265 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
12266 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
12267 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
12268
12269 switch (insn_data[icode].n_operands)
12270 {
12271 case 4:
12272 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
12273 break;
12274
12275 case 6:
12276 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
12277 ops[3], ops[4], GEN_INT (cond)));
12278 break;
12279
12280 default:
12281 gcc_unreachable ();
12282 }
12283
12284 /* If the comparison sets more than one register, we define the result
12285 to be 0 if all registers are false and -1 if all registers are true.
12286 The value of the complete result is indeterminate otherwise. */
12287 switch (builtin_type)
12288 {
12289 case MIPS_BUILTIN_CMP_ALL:
12290 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
12291 return mips_builtin_branch_and_move (condition, target,
12292 const0_rtx, const1_rtx);
12293
12294 case MIPS_BUILTIN_CMP_UPPER:
12295 case MIPS_BUILTIN_CMP_LOWER:
12296 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
12297 condition = gen_single_cc (cmp_result, offset);
12298 return mips_builtin_branch_and_move (condition, target,
12299 const1_rtx, const0_rtx);
12300
12301 default:
12302 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
12303 return mips_builtin_branch_and_move (condition, target,
12304 const1_rtx, const0_rtx);
12305 }
12306 }
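/* Illustrative sketch only (not compiled as part of this file): user-level
   calls to the comparison builtins expanded above.  The any_/all_ prefixes
   and the c_eq condition suffix are assumed to match the entries in the
   description tables.  */
#if 0
typedef float v2sf __attribute__ ((vector_size (8)));

int
any_equal (v2sf a, v2sf b)
{
  /* MIPS_BUILTIN_CMP_ANY: nonzero if either half compares equal.  */
  return __builtin_mips_any_c_eq_ps (a, b);
}

int
all_equal (v2sf a, v2sf b)
{
  /* MIPS_BUILTIN_CMP_ALL: nonzero only if both halves compare equal.  */
  return __builtin_mips_all_c_eq_ps (a, b);
}
#endif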
12307
12308 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
12309 suggests a good place to put the boolean result. */
12310
12311 static rtx
12312 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
12313 {
12314 rtx condition, cmp_result;
12315 int cmp_value;
12316
12317 if (target == 0 || GET_MODE (target) != SImode)
12318 target = gen_reg_rtx (SImode);
12319
12320 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
12321
12322 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
12323 cmp_value = 32;
12324 else
12325 gcc_assert (0);
12326
12327 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
12328 return mips_builtin_branch_and_move (condition, target,
12329 const1_rtx, const0_rtx);
12330 }
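/* Illustrative sketch only (not compiled as part of this file): the DSP
   builtin handled above tests whether the POS field of the DSP control
   register is at least 32, which is the CCDSP_PO condition read above.  */
#if 0
int
at_least_32_bits_left (void)
{
  return __builtin_mips_bposge32 ();
}
#endif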
12331 \f
12332 /* Return true if we should force MIPS16 mode for the function named by
12333 the SYMBOL_REF SYMBOL, which belongs to DECL and has type TYPE.
12334 FIRST is true if this is the first time handling this decl. */
12335
12336 static bool
12337 mips_use_mips16_mode_p (rtx symbol, tree decl, int first, tree type)
12338 {
12339 tree parent;
12340
12341 /* Explicit function attributes take precedence. */
12342 if (mips_mips16_type_p (type))
12343 return true;
12344 if (mips_nomips16_type_p (type))
12345 return false;
12346
12347 /* A nested function should inherit the MIPS16 setting from its parent. */
12348 parent = decl_function_context (decl);
12349 if (parent)
12350 return SYMBOL_REF_MIPS16_FUNC_P (XEXP (DECL_RTL (parent), 0));
12351
12352 /* Handle -mflip-mips16. */
12353 if (TARGET_FLIP_MIPS16
12354 && !DECL_BUILT_IN (decl)
12355 && !DECL_ARTIFICIAL (decl))
12356 {
12357 if (!first)
12358 /* Use the setting we picked first time around. */
12359 return SYMBOL_REF_MIPS16_FUNC_P (symbol);
12360
12361 mips16_flipper = !mips16_flipper;
12362 if (mips16_flipper)
12363 return !mips_base_mips16;
12364 }
12365
12366 return mips_base_mips16;
12367 }
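/* Illustrative sketch only (not compiled as part of this file): the explicit
   function attributes that take precedence above.  They override the
   -mips16/-mflip-mips16 defaults tested at the end of the function.  */
#if 0
/* Always use the MIPS16 encoding for this function.  */
int compact_helper (int x) __attribute__ ((mips16));

/* Always use the normal 32-bit encoding for this function.  */
int time_critical_loop (int x) __attribute__ ((nomips16));
#endif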
12368
12369 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
12370 FIRST is true if this is the first time handling this decl. */
12371
12372 static void
12373 mips_encode_section_info (tree decl, rtx rtl, int first)
12374 {
12375 default_encode_section_info (decl, rtl, first);
12376
12377 if (TREE_CODE (decl) == FUNCTION_DECL)
12378 {
12379 rtx symbol = XEXP (rtl, 0);
12380 tree type = TREE_TYPE (decl);
12381
12382 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
12383 || mips_far_type_p (type))
12384 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
12385
12386 if (mips_use_mips16_mode_p (symbol, decl, first, type))
12387 {
12388 if (flag_pic || TARGET_ABICALLS)
12389 sorry ("MIPS16 PIC");
12390 else
12391 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_MIPS16_FUNC;
12392 }
12393 }
12394 }
12395
12396 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
12397 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
12398
12399 static void
12400 mips_extra_live_on_entry (bitmap regs)
12401 {
12402 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
12403 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
12404 }
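/* For illustration: PIC_FUNCTION_ADDR_REGNUM is $25, the register that
   abicalls code uses to pass the callee's own address, and the usual o32
   prologue rebuilds $gp from it, e.g.

       lui   $gp, %hi(_gp_disp)
       addiu $gp, $gp, %lo(_gp_disp)
       addu  $gp, $gp, $25

   which is why the register must be treated as live on entry here.  */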
12405
12406 /* Implement TARGET_MODE_REP_EXTENDED.  SImode values are represented as sign-extended to DImode. */
12407
12408 int
12409 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
12410 {
12411 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
12412 return SIGN_EXTEND;
12413
12414 return UNKNOWN;
12415 }
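/* For illustration: on a 64-bit target the SImode value 0x80000000 occupies
   a 64-bit register as 0xffffffff80000000, i.e. it is always kept
   sign-extended to the full DImode width.  */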
12416 \f
12417 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
12418
12419 static void
12420 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
12421 {
12422 switch (size)
12423 {
12424 case 4:
12425 fputs ("\t.dtprelword\t", file);
12426 break;
12427
12428 case 8:
12429 fputs ("\t.dtpreldword\t", file);
12430 break;
12431
12432 default:
12433 gcc_unreachable ();
12434 }
12435 output_addr_const (file, x);
12436 fputs ("+0x8000", file);
12437 }
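/* For illustration: for a 4-byte request on a thread-local symbol `tls_var'
   (name invented here), the function above emits

       .dtprelword     tls_var+0x8000  */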
12438 \f
12439 #include "gt-mips.h"