/* Subroutines used for MIPS code generation.
   Copyright (C) 1989-2013 Free Software Foundation, Inc.
   Contributed by A. Lichnewsky, lich@inria.inria.fr.
   Changes by Michael Meissner, meissner@osf.org.
   64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
   Brendan Eich, brendan@microunity.com.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "recog.h"
#include "output.h"
#include "tree.h"
#include "varasm.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "calls.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "libfuncs.h"
#include "flags.h"
#include "reload.h"
#include "tm_p.h"
#include "ggc.h"
#include "gstab.h"
#include "hash-table.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "sched-int.h"
#include "pointer-set.h"
#include "vec.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "bitmap.h"
#include "diagnostic.h"
#include "target-globals.h"
#include "opts.h"
#include "tree-pass.h"
#include "context.h"

/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF.  */
#define UNSPEC_ADDRESS_P(X)					\
  (GET_CODE (X) == UNSPEC					\
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)

/* Extract the symbol or label from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS(X) \
  XVECEXP (X, 0, 0)

/* Extract the symbol type from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
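
/* For example (illustrative, not from the original source), a GP-relative
   reference to "foo" can be wrapped as

       (unspec [(symbol_ref "foo")] UNSPEC_ADDRESS_FIRST + SYMBOL_GP_RELATIVE)

   for which UNSPEC_ADDRESS returns the SYMBOL_REF and UNSPEC_ADDRESS_TYPE
   returns SYMBOL_GP_RELATIVE.  */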

/* The maximum distance between the top of the stack frame and the
   value $sp has when we save and restore registers.

   The value for normal-mode code must be a SMALL_OPERAND and must
   preserve the maximum stack alignment.  We therefore use a value
   of 0x7ff0 in this case.

   microMIPS LWM and SWM support 12-bit offsets (from -0x800 to 0x7ff),
   so we use a maximum of 0x7f8 for TARGET_MICROMIPS, the largest
   8-byte-aligned value in that range.

   MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
   up to 0x7f8 bytes and can usually save or restore all the registers
   that we need to save or restore.  (Note that we can only use these
   instructions for o32, for which the stack alignment is 8 bytes.)

   We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
   RESTORE are not available.  We can then use unextended instructions
   to save and restore registers, and to allocate and deallocate the top
   part of the frame.  */
#define MIPS_MAX_FIRST_STACK_STEP					\
  (!TARGET_COMPRESSION ? 0x7ff0						\
   : TARGET_MICROMIPS || GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8		\
   : TARGET_64BIT ? 0x100 : 0x400)
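
/* Illustrative example (an assumption for exposition, not from the
   original source): with normal-mode code and a 0x9000-byte frame, the
   prologue would first drop $sp by at most 0x7ff0 so that every register
   save slot is within reach of a 16-bit offset, then allocate the
   remaining 0x1010 bytes with a second adjustment.  */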

/* True if INSN is a mips.md pattern or asm statement.  */
/* ??? This test exists throughout the compiler; perhaps it should be
   moved to rtl.h.  */
#define USEFUL_INSN_P(INSN)					\
  (NONDEBUG_INSN_P (INSN)					\
   && GET_CODE (PATTERN (INSN)) != USE				\
   && GET_CODE (PATTERN (INSN)) != CLOBBER)

/* If INSN is a delayed branch sequence, return the first instruction
   in the sequence, otherwise return INSN itself.  */
#define SEQ_BEGIN(INSN)							\
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE		\
   ? XVECEXP (PATTERN (INSN), 0, 0)					\
   : (INSN))

/* Likewise for the last instruction in a delayed branch sequence.  */
#define SEQ_END(INSN)							\
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE		\
   ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1)	\
   : (INSN))

/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN)					\
  for ((SUBINSN) = SEQ_BEGIN (INSN);					\
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN));				\
       (SUBINSN) = NEXT_INSN (SUBINSN))

/* True if bit BIT is set in VALUE.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)

/* Return the opcode for a ptr_mode load of the form:

       l[wd] DEST, OFFSET(BASE).  */
#define MIPS_LOAD_PTR(DEST, OFFSET, BASE)	\
  (((ptr_mode == DImode ? 0x37 : 0x23) << 26)	\
   | ((BASE) << 21)				\
   | ((DEST) << 16)				\
   | (OFFSET))

/* Return the opcode to move register SRC into register DEST.  */
#define MIPS_MOVE(DEST, SRC)	\
  ((TARGET_64BIT ? 0x2d : 0x21)	\
   | ((DEST) << 11)		\
   | ((SRC) << 21))
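
/* For instance (illustrative, not from the original source),
   MIPS_MOVE (25, 31) encodes "move $25, $31" as an ADDU with $0 in the
   unused rt field: 0x21 | (25 << 11) | (31 << 21) == 0x03e0c821 on
   32-bit targets, i.e. "addu $25, $31, $0".  */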

/* Return the opcode for:

       lui DEST, VALUE.  */
#define MIPS_LUI(DEST, VALUE) \
  ((0xf << 26) | ((DEST) << 16) | (VALUE))

/* Return the opcode to jump to register DEST.  */
#define MIPS_JR(DEST) \
  (((DEST) << 21) | 0x8)

/* Return the opcode for:

       bal . + (1 + OFFSET) * 4.  */
#define MIPS_BAL(OFFSET) \
  ((0x1 << 26) | (0x11 << 16) | (OFFSET))

/* Return the usual opcode for a nop.  */
#define MIPS_NOP 0

/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       mips_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 16-bit constant address.

   ADDRESS_SYMBOLIC
       A constant symbolic address.  */
enum mips_address_type {
  ADDRESS_REG,
  ADDRESS_LO_SUM,
  ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};
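
/* Illustrative examples of each class (not from the original source):

   ADDRESS_REG        16($sp)
   ADDRESS_LO_SUM     %lo(foo)($1), where $1 holds the high part of foo
   ADDRESS_CONST_INT  a bare address such as 0x7ff0
   ADDRESS_SYMBOLIC   foo, left for the assembler macros to expand.  */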

/* Macros to create an enumeration identifier for a function prototype.  */
#define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
#define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
#define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
#define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E
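
/* For example, MIPS_FTYPE_NAME2 (V2SF, V2SF, V2SF) expands to
   MIPS_V2SF_FTYPE_V2SF_V2SF, the enumerator for built-in functions that
   take two V2SF arguments and return a V2SF result.  */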

/* Classifies the prototype of a built-in function.  */
enum mips_function_type {
#define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
#include "config/mips/mips-ftypes.def"
#undef DEF_MIPS_FTYPE
  MIPS_MAX_FTYPE_MAX
};

/* Specifies how a built-in function should be converted into rtl.  */
enum mips_builtin_type {
  /* The function corresponds directly to an .md pattern.  The return
     value is mapped to operand 0 and the arguments are mapped to
     operands 1 and above.  */
  MIPS_BUILTIN_DIRECT,

  /* The function corresponds directly to an .md pattern.  There is no return
     value and the arguments are mapped to operands 0 and above.  */
  MIPS_BUILTIN_DIRECT_NO_TARGET,

  /* The function corresponds to a comparison instruction followed by
     a mips_cond_move_tf_ps pattern.  The first two arguments are the
     values to compare and the second two arguments are the vector
     operands for the movt.ps or movf.ps instruction (in assembly order).  */
  MIPS_BUILTIN_MOVF,
  MIPS_BUILTIN_MOVT,

  /* The function corresponds to a V2SF comparison instruction.  Operand 0
     of this instruction is the result of the comparison, which has mode
     CCV2 or CCV4.  The function arguments are mapped to operands 1 and
     above.  The function's return value is an SImode boolean that is
     true under the following conditions:

     MIPS_BUILTIN_CMP_ANY: one of the registers is true
     MIPS_BUILTIN_CMP_ALL: all of the registers are true
     MIPS_BUILTIN_CMP_LOWER: the first register is true
     MIPS_BUILTIN_CMP_UPPER: the second register is true.  */
  MIPS_BUILTIN_CMP_ANY,
  MIPS_BUILTIN_CMP_ALL,
  MIPS_BUILTIN_CMP_UPPER,
  MIPS_BUILTIN_CMP_LOWER,

  /* As above, but the instruction only sets a single $fcc register.  */
  MIPS_BUILTIN_CMP_SINGLE,

  /* For generating bposge32 branch instructions in MIPS32 DSP ASE.  */
  MIPS_BUILTIN_BPOSGE32
};

/* Invoke MACRO (COND) for each C.cond.fmt condition.  */
#define MIPS_FP_CONDITIONS(MACRO) \
  MACRO (f),	\
  MACRO (un),	\
  MACRO (eq),	\
  MACRO (ueq),	\
  MACRO (olt),	\
  MACRO (ult),	\
  MACRO (ole),	\
  MACRO (ule),	\
  MACRO (sf),	\
  MACRO (ngle),	\
  MACRO (seq),	\
  MACRO (ngl),	\
  MACRO (lt),	\
  MACRO (nge),	\
  MACRO (le),	\
  MACRO (ngt)

/* Enumerates the codes above as MIPS_FP_COND_<X>.  */
#define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
enum mips_fp_condition {
  MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
};

/* Index X provides the string representation of MIPS_FP_COND_<X>.  */
#define STRINGIFY(X) #X
static const char *const mips_fp_conditions[] = {
  MIPS_FP_CONDITIONS (STRINGIFY)
};
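
/* So, for example, mips_fp_conditions[MIPS_FP_COND_ueq] is "ueq", the
   suffix used when printing comparisons such as "c.ueq.s" (illustrative,
   not from the original source).  */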

/* Tuning information that is automatically derived from other sources
   (such as the scheduler).  */
static struct {
  /* The architecture and tuning settings that this structure describes.  */
  enum processor arch;
  enum processor tune;

  /* True if this structure describes MIPS16 settings.  */
  bool mips16_p;

  /* True if the structure has been initialized.  */
  bool initialized_p;

  /* True if "MULT $0, $0" is preferable to "MTLO $0; MTHI $0"
     when optimizing for speed.  */
  bool fast_mult_zero_zero_p;
} mips_tuning_info;

/* Information about a function's frame layout.  */
struct GTY(()) mips_frame_info {
  /* The size of the frame in bytes.  */
  HOST_WIDE_INT total_size;

  /* The number of bytes allocated to variables.  */
  HOST_WIDE_INT var_size;

  /* The number of bytes allocated to outgoing function arguments.  */
  HOST_WIDE_INT args_size;

  /* The number of bytes allocated to the .cprestore slot, or 0 if there
     is no such slot.  */
  HOST_WIDE_INT cprestore_size;

  /* Bit X is set if the function saves or restores GPR X.  */
  unsigned int mask;

  /* Likewise FPR X.  */
  unsigned int fmask;

  /* Likewise doubleword accumulator X ($acX).  */
  unsigned int acc_mask;

  /* The number of GPRs, FPRs, doubleword accumulators and COP0
     registers saved.  */
  unsigned int num_gp;
  unsigned int num_fp;
  unsigned int num_acc;
  unsigned int num_cop0_regs;

  /* The offset of the topmost GPR, FPR, accumulator and COP0-register
     save slots from the top of the frame, or zero if no such slots are
     needed.  */
  HOST_WIDE_INT gp_save_offset;
  HOST_WIDE_INT fp_save_offset;
  HOST_WIDE_INT acc_save_offset;
  HOST_WIDE_INT cop0_save_offset;

  /* Likewise, but giving offsets from the bottom of the frame.  */
  HOST_WIDE_INT gp_sp_offset;
  HOST_WIDE_INT fp_sp_offset;
  HOST_WIDE_INT acc_sp_offset;
  HOST_WIDE_INT cop0_sp_offset;

  /* Similar, but the value passed to _mcount.  */
  HOST_WIDE_INT ra_fp_offset;

  /* The offset of arg_pointer_rtx from the bottom of the frame.  */
  HOST_WIDE_INT arg_pointer_offset;

  /* The offset of hard_frame_pointer_rtx from the bottom of the frame.  */
  HOST_WIDE_INT hard_frame_pointer_offset;
};

struct GTY(()) machine_function {
  /* The next floating-point condition-code register to allocate
     for ISA_HAS_8CC targets, relative to ST_REG_FIRST.  */
  unsigned int next_fcc;

  /* The register returned by mips16_gp_pseudo_reg; see there for details.  */
  rtx mips16_gp_pseudo_rtx;

  /* The number of extra stack bytes taken up by register varargs.
     This area is allocated by the callee at the very top of the frame.  */
  int varargs_size;

  /* The current frame information, calculated by mips_compute_frame_info.  */
  struct mips_frame_info frame;

  /* The register to use as the function's global pointer, or INVALID_REGNUM
     if the function doesn't need one.  */
  unsigned int global_pointer;

  /* How many instructions it takes to load a label into $AT, or 0 if
     this property hasn't yet been calculated.  */
  unsigned int load_label_num_insns;

  /* True if mips_adjust_insn_length should ignore an instruction's
     hazard attribute.  */
  bool ignore_hazard_length_p;

  /* True if the whole function is suitable for .set noreorder and
     .set nomacro.  */
  bool all_noreorder_p;

  /* True if the function has "inflexible" and "flexible" references
     to the global pointer.  See mips_cfun_has_inflexible_gp_ref_p
     and mips_cfun_has_flexible_gp_ref_p for details.  */
  bool has_inflexible_gp_insn_p;
  bool has_flexible_gp_insn_p;

  /* True if the function's prologue must load the global pointer
     value into pic_offset_table_rtx and store the same value in
     the function's cprestore slot (if any).  Even if this value
     is currently false, we may decide to set it to true later;
     see mips_must_initialize_gp_p () for details.  */
  bool must_initialize_gp_p;

  /* True if the current function must restore $gp after any potential
     clobber.  This value is only meaningful during the first post-epilogue
     split_insns pass; see mips_must_initialize_gp_p () for details.  */
  bool must_restore_gp_when_clobbered_p;

  /* True if this is an interrupt handler.  */
  bool interrupt_handler_p;

  /* True if this is an interrupt handler that uses shadow registers.  */
  bool use_shadow_register_set_p;

  /* True if this is an interrupt handler that should keep interrupts
     masked.  */
  bool keep_interrupts_masked_p;

  /* True if this is an interrupt handler that should use DERET
     instead of ERET.  */
  bool use_debug_exception_return_p;
};

/* Information about a single argument.  */
struct mips_arg_info {
  /* True if the argument is passed in a floating-point register, or
     would have been if we hadn't run out of registers.  */
  bool fpr_p;

  /* The number of words passed in registers, rounded up.  */
  unsigned int reg_words;

  /* For EABI, the offset of the first register from GP_ARG_FIRST or
     FP_ARG_FIRST.  For other ABIs, the offset of the first register from
     the start of the ABI's argument structure (see the CUMULATIVE_ARGS
     comment for details).

     The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
     on the stack.  */
  unsigned int reg_offset;

  /* The number of words that must be passed on the stack, rounded up.  */
  unsigned int stack_words;

  /* The offset from the start of the stack overflow area of the argument's
     first stack word.  Only meaningful when STACK_WORDS is nonzero.  */
  unsigned int stack_offset;
};

/* Information about an address described by mips_address_type.

   ADDRESS_CONST_INT
       No fields are used.

   ADDRESS_REG
       REG is the base register and OFFSET is the constant offset.

   ADDRESS_LO_SUM
       REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
       is the type of symbol it references.

   ADDRESS_SYMBOLIC
       SYMBOL_TYPE is the type of symbol that the address references.  */
struct mips_address_info {
  enum mips_address_type type;
  rtx reg;
  rtx offset;
  enum mips_symbol_type symbol_type;
};

/* One stage in a constant building sequence.  These sequences have
   the form:

       A = VALUE[0]
       A = A CODE[1] VALUE[1]
       A = A CODE[2] VALUE[2]
       ...

   where A is an accumulator, each CODE[i] is a binary rtl operation
   and each VALUE[i] is a constant integer.  CODE[0] is undefined.  */
struct mips_integer_op {
  enum rtx_code code;
  unsigned HOST_WIDE_INT value;
};

/* The largest number of operations needed to load an integer constant.
   The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
   When the lowest bit is clear, we can try, but reject a sequence with
   an extra SLL at the end.  */
#define MIPS_MAX_INTEGER_OPS 7
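
/* Illustrative worst case (not from the original source): on 64-bit
   targets, 0x0123456789abcdef can be built with the six-operation
   LUI,ORI,SLL,ORI,SLL,ORI pattern mentioned above:

       lui   reg, 0x0123
       ori   reg, reg, 0x4567
       dsll  reg, reg, 16
       ori   reg, reg, 0x89ab
       dsll  reg, reg, 16
       ori   reg, reg, 0xcdef  */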

/* Information about a MIPS16e SAVE or RESTORE instruction.  */
struct mips16e_save_restore_info {
  /* The number of argument registers saved by a SAVE instruction.
     0 for RESTORE instructions.  */
  unsigned int nargs;

  /* Bit X is set if the instruction saves or restores GPR X.  */
  unsigned int mask;

  /* The total number of bytes to allocate.  */
  HOST_WIDE_INT size;
};

/* Costs of various operations on the different architectures.  */

struct mips_rtx_cost_data
{
  unsigned short fp_add;
  unsigned short fp_mult_sf;
  unsigned short fp_mult_df;
  unsigned short fp_div_sf;
  unsigned short fp_div_df;
  unsigned short int_mult_si;
  unsigned short int_mult_di;
  unsigned short int_div_si;
  unsigned short int_div_di;
  unsigned short branch_cost;
  unsigned short memory_latency;
};

/* Global variables for machine-dependent things.  */

/* The -G setting, or the configuration's default small-data limit if
   no -G option is given.  */
static unsigned int mips_small_data_threshold;

/* The number of file directives written by mips_output_filename.  */
int num_source_filenames;

/* The name that appeared in the last .file directive written by
   mips_output_filename, or "" if mips_output_filename hasn't
   written anything yet.  */
const char *current_function_file = "";

/* Arrays that map GCC register numbers to debugger register numbers.  */
int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];

/* Information about the current function's epilogue, used only while
   expanding it.  */
static struct {
  /* A list of queued REG_CFA_RESTORE notes.  */
  rtx cfa_restores;

  /* The CFA is currently defined as CFA_REG + CFA_OFFSET.  */
  rtx cfa_reg;
  HOST_WIDE_INT cfa_offset;

  /* The offset of the CFA from the stack pointer while restoring
     registers.  */
  HOST_WIDE_INT cfa_restore_sp_offset;
} mips_epilogue;

/* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs.  */
struct mips_asm_switch mips_noreorder = { "reorder", 0 };
struct mips_asm_switch mips_nomacro = { "macro", 0 };
struct mips_asm_switch mips_noat = { "at", 0 };

/* True if we're writing out a branch-likely instruction rather than a
   normal branch.  */
static bool mips_branch_likely;

/* The current instruction-set architecture.  */
enum processor mips_arch;
const struct mips_cpu_info *mips_arch_info;

/* The processor that we should tune the code for.  */
enum processor mips_tune;
const struct mips_cpu_info *mips_tune_info;

/* The ISA level associated with mips_arch.  */
int mips_isa;

/* The architecture selected by -mipsN, or null if -mipsN wasn't used.  */
static const struct mips_cpu_info *mips_isa_option_info;

/* Which cost information to use.  */
static const struct mips_rtx_cost_data *mips_cost;

/* The ambient target flags, excluding MASK_MIPS16.  */
static int mips_base_target_flags;

/* The default compression mode.  */
unsigned int mips_base_compression_flags;

/* The ambient values of other global variables.  */
static int mips_base_schedule_insns; /* flag_schedule_insns */
static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
static int mips_base_align_loops; /* align_loops */
static int mips_base_align_jumps; /* align_jumps */
static int mips_base_align_functions; /* align_functions */

/* Index [M][R] is true if register R is allowed to hold a value of mode M.  */
bool mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
/* Index C is true if character C is a valid PRINT_OPERAND punctuation
   character.  */
static bool mips_print_operand_punct[256];

static GTY (()) int mips_output_filename_first_time = 1;

/* mips_split_p[X] is true if symbols of type X can be split by
   mips_split_symbol.  */
bool mips_split_p[NUM_SYMBOL_TYPES];

/* mips_split_hi_p[X] is true if the high parts of symbols of type X
   can be split by mips_split_symbol.  */
bool mips_split_hi_p[NUM_SYMBOL_TYPES];

/* mips_use_pcrel_pool_p[X] is true if symbols of type X should be
   forced into a PC-relative constant pool.  */
bool mips_use_pcrel_pool_p[NUM_SYMBOL_TYPES];

/* mips_lo_relocs[X] is the relocation to use when a symbol of type X
   appears in a LO_SUM.  It can be null if such LO_SUMs aren't valid or
   if they are matched by a special .md file pattern.  */
const char *mips_lo_relocs[NUM_SYMBOL_TYPES];

/* Likewise for HIGHs.  */
const char *mips_hi_relocs[NUM_SYMBOL_TYPES];

/* Target state for MIPS16.  */
struct target_globals *mips16_globals;

/* Cached value of can_issue_more.  This is cached by the
   mips_variable_issue hook and returned from mips_sched_reorder2.  */
static int cached_can_issue_more;

/* True if the output uses __mips16_rdhwr.  */
static bool mips_need_mips16_rdhwr_p;

/* Index R is the smallest register class that contains register R.  */
const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = {
  LEA_REGS,      LEA_REGS,      M16_REGS,      V1_REG,
  M16_REGS,      M16_REGS,      M16_REGS,      M16_REGS,
  LEA_REGS,      LEA_REGS,      LEA_REGS,      LEA_REGS,
  LEA_REGS,      LEA_REGS,      LEA_REGS,      LEA_REGS,
  M16_REGS,      M16_REGS,      LEA_REGS,      LEA_REGS,
  LEA_REGS,      LEA_REGS,      LEA_REGS,      LEA_REGS,
  T_REG,         PIC_FN_ADDR_REG, LEA_REGS,    LEA_REGS,
  LEA_REGS,      LEA_REGS,      LEA_REGS,      LEA_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  FP_REGS,       FP_REGS,       FP_REGS,       FP_REGS,
  MD0_REG,       MD1_REG,       NO_REGS,       ST_REGS,
  ST_REGS,       ST_REGS,       ST_REGS,       ST_REGS,
  ST_REGS,       ST_REGS,       ST_REGS,       NO_REGS,
  NO_REGS,       FRAME_REGS,    FRAME_REGS,    NO_REGS,
  COP0_REGS,     COP0_REGS,     COP0_REGS,     COP0_REGS,
  COP0_REGS,     COP0_REGS,     COP0_REGS,     COP0_REGS,
  COP0_REGS,     COP0_REGS,     COP0_REGS,     COP0_REGS,
  COP0_REGS,     COP0_REGS,     COP0_REGS,     COP0_REGS,
  COP0_REGS,     COP0_REGS,     COP0_REGS,     COP0_REGS,
  COP0_REGS,     COP0_REGS,     COP0_REGS,     COP0_REGS,
  COP0_REGS,     COP0_REGS,     COP0_REGS,     COP0_REGS,
  COP0_REGS,     COP0_REGS,     COP0_REGS,     COP0_REGS,
  COP2_REGS,     COP2_REGS,     COP2_REGS,     COP2_REGS,
  COP2_REGS,     COP2_REGS,     COP2_REGS,     COP2_REGS,
  COP2_REGS,     COP2_REGS,     COP2_REGS,     COP2_REGS,
  COP2_REGS,     COP2_REGS,     COP2_REGS,     COP2_REGS,
  COP2_REGS,     COP2_REGS,     COP2_REGS,     COP2_REGS,
  COP2_REGS,     COP2_REGS,     COP2_REGS,     COP2_REGS,
  COP2_REGS,     COP2_REGS,     COP2_REGS,     COP2_REGS,
  COP2_REGS,     COP2_REGS,     COP2_REGS,     COP2_REGS,
  COP3_REGS,     COP3_REGS,     COP3_REGS,     COP3_REGS,
  COP3_REGS,     COP3_REGS,     COP3_REGS,     COP3_REGS,
  COP3_REGS,     COP3_REGS,     COP3_REGS,     COP3_REGS,
  COP3_REGS,     COP3_REGS,     COP3_REGS,     COP3_REGS,
  COP3_REGS,     COP3_REGS,     COP3_REGS,     COP3_REGS,
  COP3_REGS,     COP3_REGS,     COP3_REGS,     COP3_REGS,
  COP3_REGS,     COP3_REGS,     COP3_REGS,     COP3_REGS,
  COP3_REGS,     COP3_REGS,     COP3_REGS,     COP3_REGS,
  DSP_ACC_REGS,  DSP_ACC_REGS,  DSP_ACC_REGS,  DSP_ACC_REGS,
  DSP_ACC_REGS,  DSP_ACC_REGS,  ALL_REGS,      ALL_REGS,
  ALL_REGS,      ALL_REGS,      ALL_REGS,      ALL_REGS
};

/* The value of TARGET_ATTRIBUTE_TABLE.  */
static const struct attribute_spec mips_attribute_table[] = {
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       om_diagnostic } */
  { "long_call", 0, 0, false, true, true, NULL, false },
  { "far", 0, 0, false, true, true, NULL, false },
  { "near", 0, 0, false, true, true, NULL, false },
  /* We would really like to treat "mips16" and "nomips16" as type
     attributes, but GCC doesn't provide the hooks we need to support
     the right conversion rules.  As declaration attributes, they affect
     code generation but don't carry other semantics.  */
  { "mips16", 0, 0, true, false, false, NULL, false },
  { "nomips16", 0, 0, true, false, false, NULL, false },
  { "micromips", 0, 0, true, false, false, NULL, false },
  { "nomicromips", 0, 0, true, false, false, NULL, false },
  { "nocompression", 0, 0, true, false, false, NULL, false },
  /* Allow functions to be specified as interrupt handlers.  */
  { "interrupt", 0, 0, false, true, true, NULL, false },
  { "use_shadow_register_set", 0, 0, false, true, true, NULL, false },
  { "keep_interrupts_masked", 0, 0, false, true, true, NULL, false },
  { "use_debug_exception_return", 0, 0, false, true, true, NULL, false },
  { NULL, 0, 0, false, false, false, NULL, false }
};
\f
/* A table describing all the processors GCC knows about; see
   mips-cpus.def for details.  */
static const struct mips_cpu_info mips_cpu_info_table[] = {
#define MIPS_CPU(NAME, CPU, ISA, FLAGS) \
  { NAME, CPU, ISA, FLAGS },
#include "mips-cpus.def"
#undef MIPS_CPU
};

/* Default costs.  If these are used for a processor we should look
   up the actual costs.  */
#define DEFAULT_COSTS COSTS_N_INSNS (6),  /* fp_add */		\
		      COSTS_N_INSNS (7),  /* fp_mult_sf */	\
		      COSTS_N_INSNS (8),  /* fp_mult_df */	\
		      COSTS_N_INSNS (23), /* fp_div_sf */	\
		      COSTS_N_INSNS (36), /* fp_div_df */	\
		      COSTS_N_INSNS (10), /* int_mult_si */	\
		      COSTS_N_INSNS (10), /* int_mult_di */	\
		      COSTS_N_INSNS (69), /* int_div_si */	\
		      COSTS_N_INSNS (69), /* int_div_di */	\
		      2,		  /* branch_cost */	\
		      4			  /* memory_latency */

/* Floating-point costs for processors without an FPU.  Just assume that
   all floating-point libcalls are very expensive.  */
#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */		\
		      COSTS_N_INSNS (256), /* fp_mult_sf */	\
		      COSTS_N_INSNS (256), /* fp_mult_df */	\
		      COSTS_N_INSNS (256), /* fp_div_sf */	\
		      COSTS_N_INSNS (256)  /* fp_div_df */

/* Costs to use when optimizing for size.  */
static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = {
  COSTS_N_INSNS (1),            /* fp_add */
  COSTS_N_INSNS (1),            /* fp_mult_sf */
  COSTS_N_INSNS (1),            /* fp_mult_df */
  COSTS_N_INSNS (1),            /* fp_div_sf */
  COSTS_N_INSNS (1),            /* fp_div_df */
  COSTS_N_INSNS (1),            /* int_mult_si */
  COSTS_N_INSNS (1),            /* int_mult_di */
  COSTS_N_INSNS (1),            /* int_div_si */
  COSTS_N_INSNS (1),            /* int_div_di */
  2,                            /* branch_cost */
  4                             /* memory_latency */
};

/* Costs to use when optimizing for speed, indexed by processor.  */
static const struct mips_rtx_cost_data
mips_rtx_cost_data[NUM_PROCESSOR_VALUES] = {
  { /* R3000 */
    COSTS_N_INSNS (2),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (5),            /* fp_mult_df */
    COSTS_N_INSNS (12),           /* fp_div_sf */
    COSTS_N_INSNS (19),           /* fp_div_df */
    COSTS_N_INSNS (12),           /* int_mult_si */
    COSTS_N_INSNS (12),           /* int_mult_di */
    COSTS_N_INSNS (35),           /* int_div_si */
    COSTS_N_INSNS (35),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* 4KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (6),            /* int_mult_si */
    COSTS_N_INSNS (6),            /* int_mult_di */
    COSTS_N_INSNS (36),           /* int_div_si */
    COSTS_N_INSNS (36),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* 4KP */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (36),           /* int_mult_si */
    COSTS_N_INSNS (36),           /* int_mult_di */
    COSTS_N_INSNS (37),           /* int_div_si */
    COSTS_N_INSNS (37),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* 5KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (4),            /* int_mult_si */
    COSTS_N_INSNS (11),           /* int_mult_di */
    COSTS_N_INSNS (36),           /* int_div_si */
    COSTS_N_INSNS (68),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* 5KF */
    COSTS_N_INSNS (4),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (5),            /* fp_mult_df */
    COSTS_N_INSNS (17),           /* fp_div_sf */
    COSTS_N_INSNS (32),           /* fp_div_df */
    COSTS_N_INSNS (4),            /* int_mult_si */
    COSTS_N_INSNS (11),           /* int_mult_di */
    COSTS_N_INSNS (36),           /* int_div_si */
    COSTS_N_INSNS (68),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* 20KC */
    COSTS_N_INSNS (4),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (5),            /* fp_mult_df */
    COSTS_N_INSNS (17),           /* fp_div_sf */
    COSTS_N_INSNS (32),           /* fp_div_df */
    COSTS_N_INSNS (4),            /* int_mult_si */
    COSTS_N_INSNS (7),            /* int_mult_di */
    COSTS_N_INSNS (42),           /* int_div_si */
    COSTS_N_INSNS (72),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* 24KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (41),           /* int_div_si */
    COSTS_N_INSNS (41),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* 24KF2_1 */
    COSTS_N_INSNS (8),            /* fp_add */
    COSTS_N_INSNS (8),            /* fp_mult_sf */
    COSTS_N_INSNS (10),           /* fp_mult_df */
    COSTS_N_INSNS (34),           /* fp_div_sf */
    COSTS_N_INSNS (64),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (41),           /* int_div_si */
    COSTS_N_INSNS (41),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* 24KF1_1 */
    COSTS_N_INSNS (4),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (5),            /* fp_mult_df */
    COSTS_N_INSNS (17),           /* fp_div_sf */
    COSTS_N_INSNS (32),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (41),           /* int_div_si */
    COSTS_N_INSNS (41),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* 74KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (41),           /* int_div_si */
    COSTS_N_INSNS (41),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* 74KF2_1 */
    COSTS_N_INSNS (8),            /* fp_add */
    COSTS_N_INSNS (8),            /* fp_mult_sf */
    COSTS_N_INSNS (10),           /* fp_mult_df */
    COSTS_N_INSNS (34),           /* fp_div_sf */
    COSTS_N_INSNS (64),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (41),           /* int_div_si */
    COSTS_N_INSNS (41),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* 74KF1_1 */
    COSTS_N_INSNS (4),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (5),            /* fp_mult_df */
    COSTS_N_INSNS (17),           /* fp_div_sf */
    COSTS_N_INSNS (32),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (41),           /* int_div_si */
    COSTS_N_INSNS (41),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* 74KF3_2 */
    COSTS_N_INSNS (6),            /* fp_add */
    COSTS_N_INSNS (6),            /* fp_mult_sf */
    COSTS_N_INSNS (7),            /* fp_mult_df */
    COSTS_N_INSNS (25),           /* fp_div_sf */
    COSTS_N_INSNS (48),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (41),           /* int_div_si */
    COSTS_N_INSNS (41),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* Loongson-2E */
    DEFAULT_COSTS
  },
  { /* Loongson-2F */
    DEFAULT_COSTS
  },
  { /* Loongson-3A */
    DEFAULT_COSTS
  },
  { /* M4k */
    DEFAULT_COSTS
  },
    /* Octeon */
  {
    SOFT_FP_COSTS,
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (72),           /* int_div_si */
    COSTS_N_INSNS (72),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
    /* Octeon II */
  {
    SOFT_FP_COSTS,
    COSTS_N_INSNS (6),            /* int_mult_si */
    COSTS_N_INSNS (6),            /* int_mult_di */
    COSTS_N_INSNS (18),           /* int_div_si */
    COSTS_N_INSNS (35),           /* int_div_di */
    4,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* R3900 */
    COSTS_N_INSNS (2),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (5),            /* fp_mult_df */
    COSTS_N_INSNS (12),           /* fp_div_sf */
    COSTS_N_INSNS (19),           /* fp_div_df */
    COSTS_N_INSNS (2),            /* int_mult_si */
    COSTS_N_INSNS (2),            /* int_mult_di */
    COSTS_N_INSNS (35),           /* int_div_si */
    COSTS_N_INSNS (35),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* R6000 */
    COSTS_N_INSNS (3),            /* fp_add */
    COSTS_N_INSNS (5),            /* fp_mult_sf */
    COSTS_N_INSNS (6),            /* fp_mult_df */
    COSTS_N_INSNS (15),           /* fp_div_sf */
    COSTS_N_INSNS (16),           /* fp_div_df */
    COSTS_N_INSNS (17),           /* int_mult_si */
    COSTS_N_INSNS (17),           /* int_mult_di */
    COSTS_N_INSNS (38),           /* int_div_si */
    COSTS_N_INSNS (38),           /* int_div_di */
    2,                            /* branch_cost */
    6                             /* memory_latency */
  },
  { /* R4000 */
    COSTS_N_INSNS (6),            /* fp_add */
    COSTS_N_INSNS (7),            /* fp_mult_sf */
    COSTS_N_INSNS (8),            /* fp_mult_df */
    COSTS_N_INSNS (23),           /* fp_div_sf */
    COSTS_N_INSNS (36),           /* fp_div_df */
    COSTS_N_INSNS (10),           /* int_mult_si */
    COSTS_N_INSNS (10),           /* int_mult_di */
    COSTS_N_INSNS (69),           /* int_div_si */
    COSTS_N_INSNS (69),           /* int_div_di */
    2,                            /* branch_cost */
    6                             /* memory_latency */
  },
  { /* R4100 */
    DEFAULT_COSTS
  },
  { /* R4111 */
    DEFAULT_COSTS
  },
  { /* R4120 */
    DEFAULT_COSTS
  },
  { /* R4130 */
    /* The only costs that appear to be updated here are
       integer multiplication.  */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (4),            /* int_mult_si */
    COSTS_N_INSNS (6),            /* int_mult_di */
    COSTS_N_INSNS (69),           /* int_div_si */
    COSTS_N_INSNS (69),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* R4300 */
    DEFAULT_COSTS
  },
  { /* R4600 */
    DEFAULT_COSTS
  },
  { /* R4650 */
    DEFAULT_COSTS
  },
  { /* R4700 */
    DEFAULT_COSTS
  },
  { /* R5000 */
    COSTS_N_INSNS (6),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (5),            /* fp_mult_df */
    COSTS_N_INSNS (23),           /* fp_div_sf */
    COSTS_N_INSNS (36),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (5),            /* int_mult_di */
    COSTS_N_INSNS (36),           /* int_div_si */
    COSTS_N_INSNS (36),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* R5400 */
    COSTS_N_INSNS (6),            /* fp_add */
    COSTS_N_INSNS (5),            /* fp_mult_sf */
    COSTS_N_INSNS (6),            /* fp_mult_df */
    COSTS_N_INSNS (30),           /* fp_div_sf */
    COSTS_N_INSNS (59),           /* fp_div_df */
    COSTS_N_INSNS (3),            /* int_mult_si */
    COSTS_N_INSNS (4),            /* int_mult_di */
    COSTS_N_INSNS (42),           /* int_div_si */
    COSTS_N_INSNS (74),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* R5500 */
    COSTS_N_INSNS (6),            /* fp_add */
    COSTS_N_INSNS (5),            /* fp_mult_sf */
    COSTS_N_INSNS (6),            /* fp_mult_df */
    COSTS_N_INSNS (30),           /* fp_div_sf */
    COSTS_N_INSNS (59),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (9),            /* int_mult_di */
    COSTS_N_INSNS (42),           /* int_div_si */
    COSTS_N_INSNS (74),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* R5900 */
    COSTS_N_INSNS (4),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (256),          /* fp_mult_df */
    COSTS_N_INSNS (8),            /* fp_div_sf */
    COSTS_N_INSNS (256),          /* fp_div_df */
    COSTS_N_INSNS (4),            /* int_mult_si */
    COSTS_N_INSNS (256),          /* int_mult_di */
    COSTS_N_INSNS (37),           /* int_div_si */
    COSTS_N_INSNS (256),          /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* R7000 */
    /* The only costs that are changed here are
       integer multiplication.  */
    COSTS_N_INSNS (6),            /* fp_add */
    COSTS_N_INSNS (7),            /* fp_mult_sf */
    COSTS_N_INSNS (8),            /* fp_mult_df */
    COSTS_N_INSNS (23),           /* fp_div_sf */
    COSTS_N_INSNS (36),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (9),            /* int_mult_di */
    COSTS_N_INSNS (69),           /* int_div_si */
    COSTS_N_INSNS (69),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* R8000 */
    DEFAULT_COSTS
  },
  { /* R9000 */
    /* The only costs that are changed here are
       integer multiplication.  */
    COSTS_N_INSNS (6),            /* fp_add */
    COSTS_N_INSNS (7),            /* fp_mult_sf */
    COSTS_N_INSNS (8),            /* fp_mult_df */
    COSTS_N_INSNS (23),           /* fp_div_sf */
    COSTS_N_INSNS (36),           /* fp_div_df */
    COSTS_N_INSNS (3),            /* int_mult_si */
    COSTS_N_INSNS (8),            /* int_mult_di */
    COSTS_N_INSNS (69),           /* int_div_si */
    COSTS_N_INSNS (69),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* R1x000 */
    COSTS_N_INSNS (2),            /* fp_add */
    COSTS_N_INSNS (2),            /* fp_mult_sf */
    COSTS_N_INSNS (2),            /* fp_mult_df */
    COSTS_N_INSNS (12),           /* fp_div_sf */
    COSTS_N_INSNS (19),           /* fp_div_df */
    COSTS_N_INSNS (5),            /* int_mult_si */
    COSTS_N_INSNS (9),            /* int_mult_di */
    COSTS_N_INSNS (34),           /* int_div_si */
    COSTS_N_INSNS (66),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* SB1 */
    /* These costs are the same as the SB-1A below.  */
    COSTS_N_INSNS (4),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (4),            /* fp_mult_df */
    COSTS_N_INSNS (24),           /* fp_div_sf */
    COSTS_N_INSNS (32),           /* fp_div_df */
    COSTS_N_INSNS (3),            /* int_mult_si */
    COSTS_N_INSNS (4),            /* int_mult_di */
    COSTS_N_INSNS (36),           /* int_div_si */
    COSTS_N_INSNS (68),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* SB1-A */
    /* These costs are the same as the SB-1 above.  */
    COSTS_N_INSNS (4),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (4),            /* fp_mult_df */
    COSTS_N_INSNS (24),           /* fp_div_sf */
    COSTS_N_INSNS (32),           /* fp_div_df */
    COSTS_N_INSNS (3),            /* int_mult_si */
    COSTS_N_INSNS (4),            /* int_mult_di */
    COSTS_N_INSNS (36),           /* int_div_si */
    COSTS_N_INSNS (68),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* SR71000 */
    DEFAULT_COSTS
  },
  { /* XLR */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (8),            /* int_mult_si */
    COSTS_N_INSNS (8),            /* int_mult_di */
    COSTS_N_INSNS (72),           /* int_div_si */
    COSTS_N_INSNS (72),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  },
  { /* XLP */
    /* These costs are the same as 5KF above.  */
    COSTS_N_INSNS (4),            /* fp_add */
    COSTS_N_INSNS (4),            /* fp_mult_sf */
    COSTS_N_INSNS (5),            /* fp_mult_df */
    COSTS_N_INSNS (17),           /* fp_div_sf */
    COSTS_N_INSNS (32),           /* fp_div_df */
    COSTS_N_INSNS (4),            /* int_mult_si */
    COSTS_N_INSNS (11),           /* int_mult_di */
    COSTS_N_INSNS (36),           /* int_div_si */
    COSTS_N_INSNS (68),           /* int_div_di */
    1,                            /* branch_cost */
    4                             /* memory_latency */
  }
};
\f
static rtx mips_find_pic_call_symbol (rtx, rtx, bool);
static int mips_register_move_cost (enum machine_mode, reg_class_t,
				    reg_class_t);
static unsigned int mips_function_arg_boundary (enum machine_mode, const_tree);
\f
/* This hash table keeps track of implicit "mips16" and "nomips16" attributes
   for -mflip-mips16.  It maps decl names onto a boolean mode setting.  */
struct GTY (()) mflip_mips16_entry {
  const char *name;
  bool mips16_p;
};
static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;

/* Hash table callbacks for mflip_mips16_htab.  */

static hashval_t
mflip_mips16_htab_hash (const void *entry)
{
  return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
}

static int
mflip_mips16_htab_eq (const void *entry, const void *name)
{
  return strcmp (((const struct mflip_mips16_entry *) entry)->name,
		 (const char *) name) == 0;
}

/* True if -mflip-mips16 should next add an attribute for the default MIPS16
   mode, false if it should next add an attribute for the opposite mode.  */
static GTY(()) bool mips16_flipper;

/* DECL is a function that needs a default "mips16" or "nomips16" attribute
   for -mflip-mips16.  Return true if it should use "mips16" and false if
   it should use "nomips16".  */

static bool
mflip_mips16_use_mips16_p (tree decl)
{
  struct mflip_mips16_entry *entry;
  const char *name;
  hashval_t hash;
  void **slot;
  bool base_is_mips16 = (mips_base_compression_flags & MASK_MIPS16) != 0;

  /* Use the opposite of the command-line setting for anonymous decls.  */
  if (!DECL_NAME (decl))
    return !base_is_mips16;

  if (!mflip_mips16_htab)
    mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
					 mflip_mips16_htab_eq, NULL);

  name = IDENTIFIER_POINTER (DECL_NAME (decl));
  hash = htab_hash_string (name);
  slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
  entry = (struct mflip_mips16_entry *) *slot;
  if (!entry)
    {
      mips16_flipper = !mips16_flipper;
      entry = ggc_alloc_mflip_mips16_entry ();
      entry->name = name;
      entry->mips16_p = mips16_flipper ? !base_is_mips16 : base_is_mips16;
      *slot = entry;
    }
  return entry->mips16_p;
}
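
/* So with -mflip-mips16 and a nomips16 command line, the first function
   name seen is compiled as MIPS16, the second as non-MIPS16, and so on,
   while duplicate declarations of the same name reuse the recorded
   setting (an illustrative summary, not from the original source).  */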
\f
/* Predicates to test for presence of "near" and "far"/"long_call"
   attributes on the given TYPE.  */

static bool
mips_near_type_p (const_tree type)
{
  return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
}

static bool
mips_far_type_p (const_tree type)
{
  return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
	  || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
}

/* Check if the interrupt attribute is set for a function.  */

static bool
mips_interrupt_type_p (tree type)
{
  return lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type)) != NULL;
}

/* Check if the attribute to use shadow register set is set for a function.  */

static bool
mips_use_shadow_register_set_p (tree type)
{
  return lookup_attribute ("use_shadow_register_set",
			   TYPE_ATTRIBUTES (type)) != NULL;
}

/* Check if the attribute to keep interrupts masked is set for a function.  */

static bool
mips_keep_interrupts_masked_p (tree type)
{
  return lookup_attribute ("keep_interrupts_masked",
			   TYPE_ATTRIBUTES (type)) != NULL;
}

/* Check if the attribute to use debug exception return is set for
   a function.  */

static bool
mips_use_debug_exception_return_p (tree type)
{
  return lookup_attribute ("use_debug_exception_return",
			   TYPE_ATTRIBUTES (type)) != NULL;
}

/* Return the set of compression modes that are explicitly required
   by the attributes in ATTRIBUTES.  */

static unsigned int
mips_get_compress_on_flags (tree attributes)
{
  unsigned int flags = 0;

  if (lookup_attribute ("mips16", attributes) != NULL)
    flags |= MASK_MIPS16;

  if (lookup_attribute ("micromips", attributes) != NULL)
    flags |= MASK_MICROMIPS;

  return flags;
}

/* Return the set of compression modes that are explicitly forbidden
   by the attributes in ATTRIBUTES.  */

static unsigned int
mips_get_compress_off_flags (tree attributes)
{
  unsigned int flags = 0;

  if (lookup_attribute ("nocompression", attributes) != NULL)
    flags |= MASK_MIPS16 | MASK_MICROMIPS;

  if (lookup_attribute ("nomips16", attributes) != NULL)
    flags |= MASK_MIPS16;

  if (lookup_attribute ("nomicromips", attributes) != NULL)
    flags |= MASK_MICROMIPS;

  return flags;
}

/* Return the compression mode that should be used for function DECL.
   Return the ambient setting if DECL is null.  */

static unsigned int
mips_get_compress_mode (tree decl)
{
  unsigned int flags, force_on;

  flags = mips_base_compression_flags;
  if (decl)
    {
      /* Nested functions must use the same frame pointer as their
	 parent and must therefore use the same ISA mode.  */
      tree parent = decl_function_context (decl);
      if (parent)
	decl = parent;
      force_on = mips_get_compress_on_flags (DECL_ATTRIBUTES (decl));
      if (force_on)
	return force_on;
      flags &= ~mips_get_compress_off_flags (DECL_ATTRIBUTES (decl));
    }
  return flags;
}

/* Return the attribute name associated with MASK_MIPS16 and MASK_MICROMIPS
   flags FLAGS.  */

static const char *
mips_get_compress_on_name (unsigned int flags)
{
  if (flags == MASK_MIPS16)
    return "mips16";
  return "micromips";
}

/* Return the attribute name that forbids MASK_MIPS16 and MASK_MICROMIPS
   flags FLAGS.  */

static const char *
mips_get_compress_off_name (unsigned int flags)
{
  if (flags == MASK_MIPS16)
    return "nomips16";
  if (flags == MASK_MICROMIPS)
    return "nomicromips";
  return "nocompression";
}

/* Implement TARGET_COMP_TYPE_ATTRIBUTES.  */

static int
mips_comp_type_attributes (const_tree type1, const_tree type2)
{
  /* Disallow mixed near/far attributes.  */
  if (mips_far_type_p (type1) && mips_near_type_p (type2))
    return 0;
  if (mips_near_type_p (type1) && mips_far_type_p (type2))
    return 0;
  return 1;
}

/* Implement TARGET_INSERT_ATTRIBUTES.  */

static void
mips_insert_attributes (tree decl, tree *attributes)
{
  const char *name;
  unsigned int compression_flags, nocompression_flags;

  /* Check for "mips16" and "nomips16" attributes.  */
  compression_flags = mips_get_compress_on_flags (*attributes);
  nocompression_flags = mips_get_compress_off_flags (*attributes);

  if (TREE_CODE (decl) != FUNCTION_DECL)
    {
      if (nocompression_flags)
	error ("%qs attribute only applies to functions",
	       mips_get_compress_off_name (nocompression_flags));

1400 error ("%qs attribute only applies to functions",
1401 mips_get_compress_on_name (nocompression_flags));
1402 }
1403 else
1404 {
1405 compression_flags |= mips_get_compress_on_flags (DECL_ATTRIBUTES (decl));
1406 nocompression_flags |=
1407 mips_get_compress_off_flags (DECL_ATTRIBUTES (decl));
1408
1409 if (compression_flags && nocompression_flags)
1410 error ("%qE cannot have both %qs and %qs attributes",
1411 DECL_NAME (decl), mips_get_compress_on_name (compression_flags),
1412 mips_get_compress_off_name (nocompression_flags));
1413
1414 if (compression_flags & MASK_MIPS16
1415 && compression_flags & MASK_MICROMIPS)
1416 error ("%qE cannot have both %qs and %qs attributes",
1417 DECL_NAME (decl), "mips16", "micromips");
1418
1419 if (TARGET_FLIP_MIPS16
1420 && !DECL_ARTIFICIAL (decl)
1421 && compression_flags == 0
1422 && nocompression_flags == 0)
1423 {
1424 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
1425 "mips16" attribute, arbitrarily pick one. We must pick the same
1426 setting for duplicate declarations of a function. */
1427 name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
1428 *attributes = tree_cons (get_identifier (name), NULL, *attributes);
1429 name = "nomicromips";
1430 *attributes = tree_cons (get_identifier (name), NULL, *attributes);
1431 }
1432 }
1433 }
1434
1435 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
1436
1437 static tree
1438 mips_merge_decl_attributes (tree olddecl, tree newdecl)
1439 {
1440 unsigned int diff;
1441
1442 diff = (mips_get_compress_on_flags (DECL_ATTRIBUTES (olddecl))
1443 ^ mips_get_compress_on_flags (DECL_ATTRIBUTES (newdecl)));
1444 if (diff)
1445 error ("%qE redeclared with conflicting %qs attributes",
1446 DECL_NAME (newdecl), mips_get_compress_on_name (diff));
1447
1448 diff = (mips_get_compress_off_flags (DECL_ATTRIBUTES (olddecl))
1449 ^ mips_get_compress_off_flags (DECL_ATTRIBUTES (newdecl)));
1450 if (diff)
1451 error ("%qE redeclared with conflicting %qs attributes",
1452 DECL_NAME (newdecl), mips_get_compress_off_name (diff));
1453
1454 return merge_attributes (DECL_ATTRIBUTES (olddecl),
1455 DECL_ATTRIBUTES (newdecl));
1456 }
1457
1458 /* Implement TARGET_CAN_INLINE_P. */
1459
1460 static bool
1461 mips_can_inline_p (tree caller, tree callee)
1462 {
1463 if (mips_get_compress_mode (callee) != mips_get_compress_mode (caller))
1464 return false;
1465 return default_target_can_inline_p (caller, callee);
1466 }
1467 \f
1468 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1469 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1470
1471 static void
1472 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1473 {
1474 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
1475 {
1476 *base_ptr = XEXP (x, 0);
1477 *offset_ptr = INTVAL (XEXP (x, 1));
1478 }
1479 else
1480 {
1481 *base_ptr = x;
1482 *offset_ptr = 0;
1483 }
1484 }
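
/* For example (illustrative), given X == (plus (reg $16) (const_int -32)),
   mips_split_plus returns (reg $16) in *BASE_PTR and -32 in *OFFSET_PTR;
   for any other X it returns X itself and an offset of 0.  */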
\f
static unsigned int mips_build_integer (struct mips_integer_op *,
					unsigned HOST_WIDE_INT);

/* A subroutine of mips_build_integer, with the same interface.
   Assume that the final action in the sequence should be a left shift.  */

static unsigned int
mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
{
  unsigned int i, shift;

  /* Shift VALUE right until its lowest bit is set.  Shift arithmetically
     since signed numbers are easier to load than unsigned ones.  */
  shift = 0;
  while ((value & 1) == 0)
    value /= 2, shift++;

  i = mips_build_integer (codes, value);
  codes[i].code = ASHIFT;
  codes[i].value = shift;
  return i + 1;
}

/* As for mips_build_shift, but assume that the final action will be
   an IOR or PLUS operation.  */

static unsigned int
mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
{
  unsigned HOST_WIDE_INT high;
  unsigned int i;

  high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
  if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
    {
      /* The constant is too complex to load with a simple LUI/ORI pair,
	 so we want to give the recursive call as many trailing zeros as
	 possible.  In this case, we know bit 16 is set and that the
	 low 16 bits form a negative number.  If we subtract that number
	 from VALUE, we will clear at least the lowest 17 bits, maybe more.  */
      i = mips_build_integer (codes, CONST_HIGH_PART (value));
      codes[i].code = PLUS;
      codes[i].value = CONST_LOW_PART (value);
    }
  else
    {
      /* Either this is a simple LUI/ORI pair, or clearing the lowest 16
	 bits gives a value with at least 17 trailing zeros.  */
      i = mips_build_integer (codes, high);
      codes[i].code = IOR;
      codes[i].value = value & 0xffff;
    }
  return i + 1;
}
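
/* Illustrative trace of the PLUS case above (not from the original
   source): for a VALUE whose low 17 bits are 0x18000, CONST_HIGH_PART
   rounds the constant up to the next multiple of 0x10000 and
   CONST_LOW_PART is then -0x8000, so the recursive call sees at least
   17 trailing zeros and the sequence ends with an ADDIU of -0x8000.  */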

/* Fill CODES with a sequence of rtl operations to load VALUE.
   Return the number of operations needed.  */

static unsigned int
mips_build_integer (struct mips_integer_op *codes,
		    unsigned HOST_WIDE_INT value)
{
  if (SMALL_OPERAND (value)
      || SMALL_OPERAND_UNSIGNED (value)
      || LUI_OPERAND (value))
    {
      /* The value can be loaded with a single instruction.  */
      codes[0].code = UNKNOWN;
      codes[0].value = value;
      return 1;
    }
  else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
    {
      /* Either the constant is a simple LUI/ORI combination or its
	 lowest bit is set.  We don't want to shift in this case.  */
      return mips_build_lower (codes, value);
    }
  else if ((value & 0xffff) == 0)
    {
      /* The constant will need at least three actions.  The lowest
	 16 bits are clear, so the final action will be a shift.  */
      return mips_build_shift (codes, value);
    }
  else
    {
      /* The final action could be a shift, add or inclusive OR.
	 Rather than use a complex condition to select the best
	 approach, try both mips_build_shift and mips_build_lower
	 and pick the one that gives the shortest sequence.
	 Note that this case is only used once per constant.  */
      struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
      unsigned int cost, alt_cost;

      cost = mips_build_shift (codes, value);
      alt_cost = mips_build_lower (alt_codes, value);
      if (alt_cost < cost)
	{
	  memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
	  cost = alt_cost;
	}
      return cost;
    }
}
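
/* A short illustrative run (not from the original source): for
   0x123450000 the low 16 bits are clear, so mips_build_shift strips the
   16 trailing zeros and mips_build_integer loads 0x12345 as an LUI/ORI
   pair, giving:

       lui reg, 0x1; ori reg, reg, 0x2345; dsll reg, reg, 16.  */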
\f
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  */

static bool
mips_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  return mips_const_insns (x) > 0;
}
\f
/* Return a SYMBOL_REF for a MIPS16 function called NAME.  */

static rtx
mips16_stub_function (const char *name)
{
  rtx x;

  x = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
  SYMBOL_REF_FLAGS (x) |= (SYMBOL_FLAG_EXTERNAL | SYMBOL_FLAG_FUNCTION);
  return x;
}
\f
/* Return true if symbols of type TYPE require a GOT access.  */

static bool
mips_got_symbol_type_p (enum mips_symbol_type type)
{
  switch (type)
    {
    case SYMBOL_GOT_PAGE_OFST:
    case SYMBOL_GOT_DISP:
      return true;

    default:
      return false;
    }
}

/* Return true if X is a thread-local symbol.  */

static bool
mips_tls_symbol_p (rtx x)
{
  return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
}

/* Return true if SYMBOL_REF X is associated with a global symbol
   (in the STB_GLOBAL sense).  */

static bool
mips_global_symbol_p (const_rtx x)
{
  const_tree decl = SYMBOL_REF_DECL (x);

  if (!decl)
    return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x);

  /* Weakref symbols are not TREE_PUBLIC, but their targets are global
     or weak symbols.  Relocations in the object file will be against
     the target symbol, so it's that symbol's binding that matters here.  */
  return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
}

/* Return true if function X is a libgcc MIPS16 stub function.  */

static bool
mips16_stub_function_p (const_rtx x)
{
  return (GET_CODE (x) == SYMBOL_REF
	  && strncmp (XSTR (x, 0), "__mips16_", 9) == 0);
}

/* Return true if function X is a locally-defined and locally-binding
   MIPS16 function.  */

static bool
mips16_local_function_p (const_rtx x)
{
  return (GET_CODE (x) == SYMBOL_REF
	  && SYMBOL_REF_LOCAL_P (x)
	  && !SYMBOL_REF_EXTERNAL_P (x)
	  && (mips_get_compress_mode (SYMBOL_REF_DECL (x)) & MASK_MIPS16));
}

/* Return true if SYMBOL_REF X binds locally.  */

static bool
mips_symbol_binds_local_p (const_rtx x)
{
  return (SYMBOL_REF_DECL (x)
	  ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
	  : SYMBOL_REF_LOCAL_P (x));
}

/* Return true if rtx constants of mode MODE should be put into a small
   data section.  */

static bool
mips_rtx_constant_in_small_data_p (enum machine_mode mode)
{
  return (!TARGET_EMBEDDED_DATA
	  && TARGET_LOCAL_SDATA
	  && GET_MODE_SIZE (mode) <= mips_small_data_threshold);
}

/* Return true if X should not be moved directly into register $25.
   We need this because many versions of GAS will treat "la $25,foo" as
   part of a call sequence and so allow a global "foo" to be lazily bound.  */

bool
mips_dangerous_for_la25_p (rtx x)
{
  return (!TARGET_EXPLICIT_RELOCS
	  && TARGET_USE_GOT
	  && GET_CODE (x) == SYMBOL_REF
	  && mips_global_symbol_p (x));
}

/* Return true if calls to X might need $25 to be valid on entry.  */

bool
mips_use_pic_fn_addr_reg_p (const_rtx x)
{
  if (!TARGET_USE_PIC_FN_ADDR_REG)
    return false;

  /* MIPS16 stub functions are guaranteed not to use $25.  */
  if (mips16_stub_function_p (x))
    return false;

  if (GET_CODE (x) == SYMBOL_REF)
    {
      /* If PLTs and copy relocations are available, the static linker
	 will make sure that $25 is valid on entry to the target function.  */
      if (TARGET_ABICALLS_PIC0)
	return false;

      /* Locally-defined functions use absolute accesses to set up
	 the global pointer.  */
      if (TARGET_ABSOLUTE_ABICALLS
	  && mips_symbol_binds_local_p (x)
	  && !SYMBOL_REF_EXTERNAL_P (x))
	return false;
    }

  return true;
}

/* Return the method that should be used to access SYMBOL_REF or
   LABEL_REF X in context CONTEXT.  */

static enum mips_symbol_type
mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
{
  if (TARGET_RTP_PIC)
    return SYMBOL_GOT_DISP;

  if (GET_CODE (x) == LABEL_REF)
    {
      /* Only return SYMBOL_PC_RELATIVE if we are generating MIPS16
	 code and if we know that the label is in the current function's
	 text section.  LABEL_REFs are used for jump tables as well as
	 text labels, so we must check whether jump tables live in the
	 text section.  */
      if (TARGET_MIPS16_SHORT_JUMP_TABLES
	  && !LABEL_REF_NONLOCAL_P (x))
	return SYMBOL_PC_RELATIVE;

      if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
	return SYMBOL_GOT_PAGE_OFST;

      return SYMBOL_ABSOLUTE;
    }

  gcc_assert (GET_CODE (x) == SYMBOL_REF);

  if (SYMBOL_REF_TLS_MODEL (x))
    return SYMBOL_TLS;

  if (CONSTANT_POOL_ADDRESS_P (x))
    {
      if (TARGET_MIPS16_TEXT_LOADS)
	return SYMBOL_PC_RELATIVE;

      if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
	return SYMBOL_PC_RELATIVE;

      if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
	return SYMBOL_GP_RELATIVE;
    }

  /* Do not use small-data accesses for weak symbols; they may end up
     being zero.  */
  if (TARGET_GPOPT && SYMBOL_REF_SMALL_P (x) && !SYMBOL_REF_WEAK (x))
    return SYMBOL_GP_RELATIVE;

  /* Don't use GOT accesses for locally-binding symbols when -mno-shared
     is in effect.  */
  if (TARGET_ABICALLS_PIC2
      && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
    {
      /* There are three cases to consider:

	 - o32 PIC (either with or without explicit relocs)
	 - n32/n64 PIC without explicit relocs
	 - n32/n64 PIC with explicit relocs

	 In the first case, both local and global accesses will use an
	 R_MIPS_GOT16 relocation.  We must correctly predict which of
1795 In the first case, both local and global accesses will use an
1796 R_MIPS_GOT16 relocation. We must correctly predict which of
1797 the two semantics (local or global) the assembler and linker
1798 will apply. The choice depends on the symbol's binding rather
1799 than its visibility.
1800
1801 In the second case, the assembler will not use R_MIPS_GOT16
1802 relocations, but it chooses between local and global accesses
1803 in the same way as for o32 PIC.
1804
1805 In the third case we have more freedom since both forms of
1806 access will work for any kind of symbol. However, there seems
1807 little point in doing things differently. */
1808 if (mips_global_symbol_p (x))
1809 return SYMBOL_GOT_DISP;
1810
1811 return SYMBOL_GOT_PAGE_OFST;
1812 }
1813
1814 return SYMBOL_ABSOLUTE;
1815 }
1816
1817 /* Classify the base of symbolic expression X, given that X appears in
1818 context CONTEXT. */
1819
1820 static enum mips_symbol_type
1821 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1822 {
1823 rtx offset;
1824
1825 split_const (x, &x, &offset);
1826 if (UNSPEC_ADDRESS_P (x))
1827 return UNSPEC_ADDRESS_TYPE (x);
1828
1829 return mips_classify_symbol (x, context);
1830 }
1831
1832 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1833 is the alignment in bytes of SYMBOL_REF X. */
1834
1835 static bool
1836 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1837 {
1838 HOST_WIDE_INT align;
1839
1840 align = SYMBOL_REF_DECL (x) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x)) : 1;
1841 return IN_RANGE (offset, 0, align - 1);
1842 }
1843
1844 /* Return true if X is a symbolic constant that can be used in context
1845 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1846
1847 bool
1848 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1849 enum mips_symbol_type *symbol_type)
1850 {
1851 rtx offset;
1852
1853 split_const (x, &x, &offset);
1854 if (UNSPEC_ADDRESS_P (x))
1855 {
1856 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1857 x = UNSPEC_ADDRESS (x);
1858 }
1859 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1860 {
1861 *symbol_type = mips_classify_symbol (x, context);
1862 if (*symbol_type == SYMBOL_TLS)
1863 return false;
1864 }
1865 else
1866 return false;
1867
1868 if (offset == const0_rtx)
1869 return true;
1870
1871 /* Check whether a nonzero offset is valid for the underlying
1872 relocations. */
1873 switch (*symbol_type)
1874 {
1875 case SYMBOL_ABSOLUTE:
1876 case SYMBOL_64_HIGH:
1877 case SYMBOL_64_MID:
1878 case SYMBOL_64_LOW:
1879 /* If the target has 64-bit pointers and the object file only
1880 supports 32-bit symbols, the values of those symbols will be
1881 sign-extended. In this case we can't allow an arbitrary offset
1882 in case the 32-bit value X + OFFSET has a different sign from X. */
1883 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1884 return offset_within_block_p (x, INTVAL (offset));
1885
1886 /* In other cases the relocations can handle any offset. */
1887 return true;
1888
1889 case SYMBOL_PC_RELATIVE:
1890 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1891 In this case, we no longer have access to the underlying constant,
1892 but the original symbol-based access was known to be valid. */
1893 if (GET_CODE (x) == LABEL_REF)
1894 return true;
1895
1896 /* Fall through. */
1897
1898 case SYMBOL_GP_RELATIVE:
1899 /* Make sure that the offset refers to something within the
1900 same object block. This should guarantee that the final
1901 PC- or GP-relative offset is within the 16-bit limit. */
1902 return offset_within_block_p (x, INTVAL (offset));
1903
1904 case SYMBOL_GOT_PAGE_OFST:
1905 case SYMBOL_GOTOFF_PAGE:
1906 /* If the symbol is global, the GOT entry will contain the symbol's
1907 address, and we will apply a 16-bit offset after loading it.
1908 If the symbol is local, the linker should provide enough local
1909 GOT entries for a 16-bit offset, but larger offsets may lead
1910 to GOT overflow. */
1911 return SMALL_INT (offset);
1912
1913 case SYMBOL_TPREL:
1914 case SYMBOL_DTPREL:
1915 /* There is no carry between the HI and LO REL relocations, so the
1916 offset is only valid if we know it won't lead to such a carry. */
1917 return mips_offset_within_alignment_p (x, INTVAL (offset));
1918
1919 case SYMBOL_GOT_DISP:
1920 case SYMBOL_GOTOFF_DISP:
1921 case SYMBOL_GOTOFF_CALL:
1922 case SYMBOL_GOTOFF_LOADGP:
1923 case SYMBOL_TLSGD:
1924 case SYMBOL_TLSLDM:
1925 case SYMBOL_GOTTPREL:
1926 case SYMBOL_TLS:
1927 case SYMBOL_HALF:
1928 return false;
1929 }
1930 gcc_unreachable ();
1931 }
1932 \f
1933 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1934 single instruction. We rely on the fact that, in the worst case,
1935 all instructions involved in a MIPS16 address calculation are
1936 extended ones. */
1937
1938 static int
1939 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1940 {
1941 if (mips_use_pcrel_pool_p[(int) type])
1942 {
1943 if (mode == MAX_MACHINE_MODE)
1944 /* LEAs will be converted into constant-pool references by
1945 mips_reorg. */
1946 type = SYMBOL_PC_RELATIVE;
1947 else
1948 /* The constant must be loaded and then dereferenced. */
1949 return 0;
1950 }
1951
1952 switch (type)
1953 {
1954 case SYMBOL_ABSOLUTE:
1955 /* When using 64-bit symbols, we need 5 preparatory instructions,
1956 such as:
1957
1958 lui $at,%highest(symbol)
1959 daddiu $at,$at,%higher(symbol)
1960 dsll $at,$at,16
1961 daddiu $at,$at,%hi(symbol)
1962 dsll $at,$at,16
1963
1964 The final address is then $at + %lo(symbol). With 32-bit
1965 symbols we just need a preparatory LUI for normal mode and
1966 a preparatory LI and SLL for MIPS16. */
1967 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1968
1969 case SYMBOL_GP_RELATIVE:
1970 /* Treat GP-relative accesses as taking a single instruction on
1971 MIPS16 too; the copy of $gp can often be shared. */
1972 return 1;
1973
1974 case SYMBOL_PC_RELATIVE:
1975 /* PC-relative constants can only be used with ADDIUPC,
1976 DADDIUPC, LWPC and LDPC. */
1977 if (mode == MAX_MACHINE_MODE
1978 || GET_MODE_SIZE (mode) == 4
1979 || GET_MODE_SIZE (mode) == 8)
1980 return 1;
1981
1982 /* The constant must be loaded using ADDIUPC or DADDIUPC first. */
1983 return 0;
1984
1985 case SYMBOL_GOT_DISP:
1986 /* The constant will have to be loaded from the GOT before it
1987 is used in an address. */
1988 if (mode != MAX_MACHINE_MODE)
1989 return 0;
1990
1991 /* Fall through. */
1992
1993 case SYMBOL_GOT_PAGE_OFST:
1994 /* Unless -funit-at-a-time is in effect, we can't be sure whether the
1995 local/global classification is accurate. The worst cases are:
1996
1997 (1) For local symbols when generating o32 or o64 code. The assembler
1998 will use:
1999
2000 lw $at,%got(symbol)
2001 nop
2002
2003 ...and the final address will be $at + %lo(symbol).
2004
2005 (2) For global symbols when -mxgot. The assembler will use:
2006
2007 lui $at,%got_hi(symbol)
2008 (d)addu $at,$at,$gp
2009
2010 ...and the final address will be $at + %got_lo(symbol). */
2011 return 3;
2012
2013 case SYMBOL_GOTOFF_PAGE:
2014 case SYMBOL_GOTOFF_DISP:
2015 case SYMBOL_GOTOFF_CALL:
2016 case SYMBOL_GOTOFF_LOADGP:
2017 case SYMBOL_64_HIGH:
2018 case SYMBOL_64_MID:
2019 case SYMBOL_64_LOW:
2020 case SYMBOL_TLSGD:
2021 case SYMBOL_TLSLDM:
2022 case SYMBOL_DTPREL:
2023 case SYMBOL_GOTTPREL:
2024 case SYMBOL_TPREL:
2025 case SYMBOL_HALF:
2026 /* A 16-bit constant formed by a single relocation, or a 32-bit
2027 constant formed from a high 16-bit relocation and a low 16-bit
2028 relocation. Use mips_split_p to determine which. 32-bit
2029 constants need an "lui; addiu" sequence for normal mode and
2030 an "li; sll; addiu" sequence for MIPS16 mode. */
2031 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
2032
2033 case SYMBOL_TLS:
2034 /* We don't treat a bare TLS symbol as a constant. */
2035 return 0;
2036 }
2037 gcc_unreachable ();
2038 }
2039
2040 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
2041 to load symbols of type TYPE into a register. Return 0 if the given
2042 type of symbol cannot be used as an immediate operand.
2043
2044 Otherwise, return the number of instructions needed to load or store
2045 values of mode MODE to or from addresses of type TYPE. Return 0 if
2046 the given type of symbol is not valid in addresses.
2047
2048 In both cases, instruction counts are based on BASE_INSN_LENGTH. */
2049
2050 static int
2051 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
2052 {
2053 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
2054 }
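
/* For example, assuming BASE_INSN_LENGTH is the 2-byte MIPS16 short
   form or the 4-byte normal-mode instruction, a normal-mode
   "lui; addiu" pair counts as 2 units, while the equivalent MIPS16
   "li; sll; addiu" sequence of extended instructions counts as
   3 * 2 = 6 units.  */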
2055 \f
2056 /* A for_each_rtx callback. Stop the search if *X references a
2057 thread-local symbol. */
2058
2059 static int
2060 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
2061 {
2062 return mips_tls_symbol_p (*x);
2063 }
2064
2065 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
2066
2067 static bool
2068 mips_cannot_force_const_mem (enum machine_mode mode, rtx x)
2069 {
2070 enum mips_symbol_type type;
2071 rtx base, offset;
2072
2073 /* There is no assembler syntax for expressing an address-sized
2074 high part. */
2075 if (GET_CODE (x) == HIGH)
2076 return true;
2077
2078 /* As an optimization, reject constants that mips_legitimize_move
2079 can expand inline.
2080
2081 Suppose we have a multi-instruction sequence that loads constant C
2082 into register R. If R does not get allocated a hard register, and
2083 R is used in an operand that allows both registers and memory
2084 references, reload will consider forcing C into memory and using
2085 one of the instruction's memory alternatives. Returning false
2086 here will force it to use an input reload instead. */
2087 if (CONST_INT_P (x) && mips_legitimate_constant_p (mode, x))
2088 return true;
2089
2090 split_const (x, &base, &offset);
2091 if (mips_symbolic_constant_p (base, SYMBOL_CONTEXT_LEA, &type))
2092 {
2093 /* See whether we explicitly want these symbols in the pool. */
2094 if (mips_use_pcrel_pool_p[(int) type])
2095 return false;
2096
2097 /* The same optimization as for CONST_INT. */
2098 if (SMALL_INT (offset) && mips_symbol_insns (type, MAX_MACHINE_MODE) > 0)
2099 return true;
2100
2101 /* If MIPS16 constant pools live in the text section, they should
2102 not refer to anything that might need run-time relocation. */
2103 if (TARGET_MIPS16_PCREL_LOADS && mips_got_symbol_type_p (type))
2104 return true;
2105 }
2106
2107 /* TLS symbols must be computed by mips_legitimize_move. */
2108 if (for_each_rtx (&x, &mips_tls_symbol_ref_1, NULL))
2109 return true;
2110
2111 return false;
2112 }
2113
2114 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
2115 constants when we're using a per-function constant pool. */
2116
2117 static bool
2118 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
2119 const_rtx x ATTRIBUTE_UNUSED)
2120 {
2121 return !TARGET_MIPS16_PCREL_LOADS;
2122 }
2123 \f
2124 /* Return true if register REGNO is a valid base register for mode MODE.
2125 STRICT_P is true if REG_OK_STRICT is in effect. */
2126
2127 int
2128 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode,
2129 bool strict_p)
2130 {
2131 if (!HARD_REGISTER_NUM_P (regno))
2132 {
2133 if (!strict_p)
2134 return true;
2135 regno = reg_renumber[regno];
2136 }
2137
2138 /* These fake registers will be eliminated to either the stack or
2139 hard frame pointer, both of which are usually valid base registers.
2140 Reload deals with the cases where the eliminated form isn't valid. */
2141 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
2142 return true;
2143
2144 /* In MIPS16 mode, the stack pointer can only address word and doubleword
2145 values, nothing smaller. There are two problems here:
2146
2147 (a) Instantiating virtual registers can introduce new uses of the
2148 stack pointer. If these virtual registers are valid addresses,
2149 the stack pointer should be too.
2150
2151 (b) Most uses of the stack pointer are not made explicit until
2152 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
2153 We don't know until that stage whether we'll be eliminating to the
2154 stack pointer (which needs the restriction) or the hard frame
2155 pointer (which doesn't).
2156
2157 All in all, it seems more consistent to only enforce this restriction
2158 during and after reload. */
2159 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
2160 return !strict_p || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
2161
2162 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
2163 }
2164
2165 /* Return true if X is a valid base register for mode MODE.
2166 STRICT_P is true if REG_OK_STRICT is in effect. */
2167
2168 static bool
2169 mips_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
2170 {
2171 if (!strict_p && GET_CODE (x) == SUBREG)
2172 x = SUBREG_REG (x);
2173
2174 return (REG_P (x)
2175 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
2176 }
2177
2178 /* Return true if, for every base register BASE_REG, (plus BASE_REG X)
2179 can address a value of mode MODE. */
2180
2181 static bool
2182 mips_valid_offset_p (rtx x, enum machine_mode mode)
2183 {
2184 /* Check that X is a signed 16-bit number. */
2185 if (!const_arith_operand (x, Pmode))
2186 return false;
2187
2188 /* We may need to split multiword moves, so make sure that every word
2189 is accessible. */
2190 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
2191 && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
2192 return false;
2193
2194 return true;
2195 }
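
/* For example, a doubleword access on a 32-bit target at offset 0x7ffc
   passes the 16-bit test for its first word but would need offset
   0x8000 for the second word, so the offset is rejected.  */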
2196
2197 /* Return true if a LO_SUM can address a value of mode MODE when the
2198 LO_SUM symbol has type SYMBOL_TYPE. */
2199
2200 static bool
2201 mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, enum machine_mode mode)
2202 {
2203 /* Check that symbols of type SYMBOL_TYPE can be used to access values
2204 of mode MODE. */
2205 if (mips_symbol_insns (symbol_type, mode) == 0)
2206 return false;
2207
2208 /* Check that there is a known low-part relocation. */
2209 if (mips_lo_relocs[symbol_type] == NULL)
2210 return false;
2211
2212 /* We may need to split multiword moves, so make sure that each word
2213 can be accessed without inducing a carry. This is mainly needed
2214 for o64, which has historically only guaranteed 64-bit alignment
2215 for 128-bit types. */
2216 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
2217 && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
2218 return false;
2219
2220 return true;
2221 }
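
/* For example, on o64 a TImode value might only be 64-bit aligned,
   in which case SYMBOL and SYMBOL+8 could fall either side of a %hi
   boundary; splitting the access into two doubleword LO_SUMs would
   then induce a carry that the relocations cannot express.  */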
2222
2223 /* Return true if X is a valid address for machine mode MODE. If it is,
2224 fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
2225 effect. */
2226
2227 static bool
2228 mips_classify_address (struct mips_address_info *info, rtx x,
2229 enum machine_mode mode, bool strict_p)
2230 {
2231 switch (GET_CODE (x))
2232 {
2233 case REG:
2234 case SUBREG:
2235 info->type = ADDRESS_REG;
2236 info->reg = x;
2237 info->offset = const0_rtx;
2238 return mips_valid_base_register_p (info->reg, mode, strict_p);
2239
2240 case PLUS:
2241 info->type = ADDRESS_REG;
2242 info->reg = XEXP (x, 0);
2243 info->offset = XEXP (x, 1);
2244 return (mips_valid_base_register_p (info->reg, mode, strict_p)
2245 && mips_valid_offset_p (info->offset, mode));
2246
2247 case LO_SUM:
2248 info->type = ADDRESS_LO_SUM;
2249 info->reg = XEXP (x, 0);
2250 info->offset = XEXP (x, 1);
2251 /* We have to trust the creator of the LO_SUM to do something vaguely
2252 sane. Target-independent code that creates a LO_SUM should also
2253 create and verify the matching HIGH. Target-independent code that
2254 adds an offset to a LO_SUM must prove that the offset will not
2255 induce a carry. Failure to do either of these things would be
2256 a bug, and we are not required to check for it here. The MIPS
2257 backend itself should only create LO_SUMs for valid symbolic
2258 constants, with the high part being either a HIGH or a copy
2259 of _gp. */
2260 info->symbol_type
2261 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
2262 return (mips_valid_base_register_p (info->reg, mode, strict_p)
2263 && mips_valid_lo_sum_p (info->symbol_type, mode));
2264
2265 case CONST_INT:
2266 /* Small-integer addresses don't occur very often, but they
2267 are legitimate if $0 is a valid base register. */
2268 info->type = ADDRESS_CONST_INT;
2269 return !TARGET_MIPS16 && SMALL_INT (x);
2270
2271 case CONST:
2272 case LABEL_REF:
2273 case SYMBOL_REF:
2274 info->type = ADDRESS_SYMBOLIC;
2275 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
2276 &info->symbol_type)
2277 && mips_symbol_insns (info->symbol_type, mode) > 0
2278 && !mips_split_p[info->symbol_type]);
2279
2280 default:
2281 return false;
2282 }
2283 }
2284
2285 /* Implement TARGET_LEGITIMATE_ADDRESS_P. */
2286
2287 static bool
2288 mips_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2289 {
2290 struct mips_address_info addr;
2291
2292 return mips_classify_address (&addr, x, mode, strict_p);
2293 }
2294
2295 /* Return true if X is a legitimate $sp-based address for mode MODE. */
2296
2297 bool
2298 mips_stack_address_p (rtx x, enum machine_mode mode)
2299 {
2300 struct mips_address_info addr;
2301
2302 return (mips_classify_address (&addr, x, mode, false)
2303 && addr.type == ADDRESS_REG
2304 && addr.reg == stack_pointer_rtx);
2305 }
2306
2307 /* Return true if ADDR matches the pattern for the LWXS load scaled indexed
2308 address instruction. Note that such addresses are not considered
2309 legitimate in the TARGET_LEGITIMATE_ADDRESS_P sense, because their use
2310 is so restricted. */
2311
2312 static bool
2313 mips_lwxs_address_p (rtx addr)
2314 {
2315 if (ISA_HAS_LWXS
2316 && GET_CODE (addr) == PLUS
2317 && REG_P (XEXP (addr, 1)))
2318 {
2319 rtx offset = XEXP (addr, 0);
2320 if (GET_CODE (offset) == MULT
2321 && REG_P (XEXP (offset, 0))
2322 && CONST_INT_P (XEXP (offset, 1))
2323 && INTVAL (XEXP (offset, 1)) == 4)
2324 return true;
2325 }
2326 return false;
2327 }
2328
2329 /* Return true if ADDR matches the pattern for the L{B,H,W,D}{,U}X load
2330 indexed address instruction. Note that such addresses are
2331 not considered legitimate in the TARGET_LEGITIMATE_ADDRESS_P
2332 sense, because their use is so restricted. */
2333
2334 static bool
2335 mips_lx_address_p (rtx addr, enum machine_mode mode)
2336 {
2337 if (GET_CODE (addr) != PLUS
2338 || !REG_P (XEXP (addr, 0))
2339 || !REG_P (XEXP (addr, 1)))
2340 return false;
2341 if (ISA_HAS_LBX && mode == QImode)
2342 return true;
2343 if (ISA_HAS_LHX && mode == HImode)
2344 return true;
2345 if (ISA_HAS_LWX && mode == SImode)
2346 return true;
2347 if (ISA_HAS_LDX && mode == DImode)
2348 return true;
2349 return false;
2350 }
2351 \f
2352 /* Return true if a value at OFFSET bytes from base register BASE can be
2353 accessed using an unextended MIPS16 instruction. MODE is the mode of
2354 the value.
2355
2356 Usually the offset in an unextended instruction is a 5-bit field.
2357 The offset is unsigned and shifted left once for LH and SH, twice
2358 for LW and SW, and so on. An exception is LWSP and SWSP, which have
2359 an 8-bit immediate field that's shifted left twice. */
2360
2361 static bool
2362 mips16_unextended_reference_p (enum machine_mode mode, rtx base,
2363 unsigned HOST_WIDE_INT offset)
2364 {
2365 if (mode != BLKmode && offset % GET_MODE_SIZE (mode) == 0)
2366 {
2367 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
2368 return offset < 256U * GET_MODE_SIZE (mode);
2369 return offset < 32U * GET_MODE_SIZE (mode);
2370 }
2371 return false;
2372 }
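
/* For example, an unextended LW accepts byte offsets 0, 4, ..., 124
   (a 5-bit field scaled by 4), whereas LWSP's 8-bit field accepts
   0, 4, ..., 1020 from the stack pointer.  */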
2373
2374 /* Return the number of instructions needed to load or store a value
2375 of mode MODE at address X, assuming that BASE_INSN_LENGTH is the
2376 length of one instruction. Return 0 if X isn't valid for MODE.
2377 Assume that multiword moves may need to be split into word moves
2378 if MIGHT_SPLIT_P, otherwise assume that a single load or store is
2379 enough. */
2380
2381 int
2382 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2383 {
2384 struct mips_address_info addr;
2385 int factor;
2386
2387 /* BLKmode is used for single unaligned loads and stores and should
2388 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2389 meaningless, so we have to single it out as a special case one way
2390 or the other.) */
2391 if (mode != BLKmode && might_split_p)
2392 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2393 else
2394 factor = 1;
2395
2396 if (mips_classify_address (&addr, x, mode, false))
2397 switch (addr.type)
2398 {
2399 case ADDRESS_REG:
2400 if (TARGET_MIPS16
2401 && !mips16_unextended_reference_p (mode, addr.reg,
2402 UINTVAL (addr.offset)))
2403 return factor * 2;
2404 return factor;
2405
2406 case ADDRESS_LO_SUM:
2407 return TARGET_MIPS16 ? factor * 2 : factor;
2408
2409 case ADDRESS_CONST_INT:
2410 return factor;
2411
2412 case ADDRESS_SYMBOLIC:
2413 return factor * mips_symbol_insns (addr.symbol_type, mode);
2414 }
2415 return 0;
2416 }
2417
2418 /* Return true if X fits within an unsigned field of BITS bits that is
2419 shifted left SHIFT bits before being used. */
2420
2421 bool
2422 mips_unsigned_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = 0)
2423 {
2424 return (x & ((1 << shift) - 1)) == 0 && x < ((unsigned) 1 << (shift + bits));
2425 }
2426
2427 /* Return true if X fits within a signed field of BITS bits that is
2428 shifted left SHIFT bits before being used. */
2429
2430 bool
2431 mips_signed_immediate_p (unsigned HOST_WIDE_INT x, int bits, int shift = 0)
2432 {
2433 x += 1 << (bits + shift - 1);
2434 return mips_unsigned_immediate_p (x, bits, shift);
2435 }
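
/* For example, with BITS == 16 and SHIFT == 0, the bias added above
   is 0x8000, which maps the signed range [-0x8000, 0x7fff] onto the
   unsigned range [0, 0xffff] that mips_unsigned_immediate_p checks.  */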
2436
2437 /* Return true if X is legitimate for accessing values of mode MODE,
2438 if it is based on a MIPS16 register, and if the offset satisfies
2439 OFFSET_PREDICATE. */
2440
2441 bool
2442 m16_based_address_p (rtx x, enum machine_mode mode,
2443 insn_operand_predicate_fn offset_predicate)
2444 {
2445 struct mips_address_info addr;
2446
2447 return (mips_classify_address (&addr, x, mode, false)
2448 && addr.type == ADDRESS_REG
2449 && M16_REG_P (REGNO (addr.reg))
2450 && offset_predicate (addr.offset, mode));
2451 }
2452
2453 /* Return true if X is a legitimate address that conforms to the requirements
2454 for a microMIPS LWSP or SWSP insn. */
2455
2456 bool
2457 lwsp_swsp_address_p (rtx x, enum machine_mode mode)
2458 {
2459 struct mips_address_info addr;
2460
2461 return (mips_classify_address (&addr, x, mode, false)
2462 && addr.type == ADDRESS_REG
2463 && REGNO (addr.reg) == STACK_POINTER_REGNUM
2464 && uw5_operand (addr.offset, mode));
2465 }
2466
2467 /* Return true if X is a legitimate address with a 12-bit offset.
2468 MODE is the mode of the value being accessed. */
2469
2470 bool
2471 umips_12bit_offset_address_p (rtx x, enum machine_mode mode)
2472 {
2473 struct mips_address_info addr;
2474
2475 return (mips_classify_address (&addr, x, mode, false)
2476 && addr.type == ADDRESS_REG
2477 && CONST_INT_P (addr.offset)
2478 && UMIPS_12BIT_OFFSET_P (INTVAL (addr.offset)));
2479 }
2480
2481 /* Return the number of instructions needed to load constant X,
2482 assuming that BASE_INSN_LENGTH is the length of one instruction.
2483 Return 0 if X isn't a valid constant. */
2484
2485 int
2486 mips_const_insns (rtx x)
2487 {
2488 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2489 enum mips_symbol_type symbol_type;
2490 rtx offset;
2491
2492 switch (GET_CODE (x))
2493 {
2494 case HIGH:
2495 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2496 &symbol_type)
2497 || !mips_split_p[symbol_type])
2498 return 0;
2499
2500 /* This is simply an LUI for normal mode. It is an extended
2501 LI followed by an extended SLL for MIPS16. */
2502 return TARGET_MIPS16 ? 4 : 1;
2503
2504 case CONST_INT:
2505 if (TARGET_MIPS16)
2506 /* Unsigned 8-bit constants can be loaded using an unextended
2507 LI instruction. Unsigned 16-bit constants can be loaded
2508 using an extended LI. Negative constants must be loaded
2509 using LI and then negated. */
2510 return (IN_RANGE (INTVAL (x), 0, 255) ? 1
2511 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2512 : IN_RANGE (-INTVAL (x), 0, 255) ? 2
2513 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2514 : 0);
2515
2516 return mips_build_integer (codes, INTVAL (x));
2517
2518 case CONST_DOUBLE:
2519 case CONST_VECTOR:
2520 /* Allow zeros for normal mode, where we can use $0. */
2521 return !TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
2522
2523 case CONST:
2524 if (CONST_GP_P (x))
2525 return 1;
2526
2527 /* See if we can refer to X directly. */
2528 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2529 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2530
2531 /* Otherwise try splitting the constant into a base and offset.
2532 If the offset is a 16-bit value, we can load the base address
2533 into a register and then use (D)ADDIU to add in the offset.
2534 If the offset is larger, we can load the base and offset
2535 into separate registers and add them together with (D)ADDU.
2536 However, the latter is only possible before reload; during
2537 and after reload, we must have the option of forcing the
2538 constant into the pool instead. */
2539 split_const (x, &x, &offset);
2540 if (offset != 0)
2541 {
2542 int n = mips_const_insns (x);
2543 if (n != 0)
2544 {
2545 if (SMALL_INT (offset))
2546 return n + 1;
2547 else if (!targetm.cannot_force_const_mem (GET_MODE (x), x))
2548 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2549 }
2550 }
2551 return 0;
2552
2553 case SYMBOL_REF:
2554 case LABEL_REF:
2555 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2556 MAX_MACHINE_MODE);
2557
2558 default:
2559 return 0;
2560 }
2561 }
2562
2563 /* X is a doubleword constant that can be handled by splitting it into
2564 two words and loading each word separately. Return the number of
2565 instructions required to do this, assuming that BASE_INSN_LENGTH
2566 is the length of one instruction. */
2567
2568 int
2569 mips_split_const_insns (rtx x)
2570 {
2571 unsigned int low, high;
2572
2573 low = mips_const_insns (mips_subword (x, false));
2574 high = mips_const_insns (mips_subword (x, true));
2575 gcc_assert (low > 0 && high > 0);
2576 return low + high;
2577 }
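
/* For example, the DImode constant 0x0000000180000000 on a 32-bit
   target splits into a high word of 1 (a single LI) and a low word
   of 0x80000000 (a single LUI), giving a total of 2.  */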
2578
2579 /* Return the number of instructions needed to implement INSN,
2580 given that it loads from or stores to MEM. Assume that
2581 BASE_INSN_LENGTH is the length of one instruction. */
2582
2583 int
2584 mips_load_store_insns (rtx mem, rtx insn)
2585 {
2586 enum machine_mode mode;
2587 bool might_split_p;
2588 rtx set;
2589
2590 gcc_assert (MEM_P (mem));
2591 mode = GET_MODE (mem);
2592
2593 /* Try to prove that INSN does not need to be split. */
2594 might_split_p = GET_MODE_SIZE (mode) > UNITS_PER_WORD;
2595 if (might_split_p)
2596 {
2597 set = single_set (insn);
2598 if (set && !mips_split_move_insn_p (SET_DEST (set), SET_SRC (set), insn))
2599 might_split_p = false;
2600 }
2601
2602 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2603 }
2604
2605 /* Return the number of instructions needed for an integer division,
2606 assuming that BASE_INSN_LENGTH is the length of one instruction. */
2607
2608 int
2609 mips_idiv_insns (void)
2610 {
2611 int count;
2612
2613 count = 1;
2614 if (TARGET_CHECK_ZERO_DIV)
2615 {
2616 if (GENERATE_DIVIDE_TRAPS)
2617 count++;
2618 else
2619 count += 2;
2620 }
2621
2622 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2623 count++;
2624 return count;
2625 }
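
/* For example, when TARGET_CHECK_ZERO_DIV and GENERATE_DIVIDE_TRAPS
   are both in effect, the division is followed by a single trap
   instruction (a TEQ against register $0), giving a count of 2;
   without trap support the zero check needs a branch and a BREAK
   instead, giving 3.  */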
2626 \f
2627 /* Emit a move from SRC to DEST. Assume that the move expanders can
2628 handle all moves if !can_create_pseudo_p (). The distinction is
2629 important because, unlike emit_move_insn, the move expanders know
2630 how to force Pmode objects into the constant pool even when the
2631 constant pool address is not itself legitimate. */
2632
2633 rtx
2634 mips_emit_move (rtx dest, rtx src)
2635 {
2636 return (can_create_pseudo_p ()
2637 ? emit_move_insn (dest, src)
2638 : emit_move_insn_1 (dest, src));
2639 }
2640
2641 /* Emit a move from SRC to DEST, splitting compound moves into individual
2642 instructions. SPLIT_TYPE is the type of split to perform. */
2643
2644 static void
2645 mips_emit_move_or_split (rtx dest, rtx src, enum mips_split_type split_type)
2646 {
2647 if (mips_split_move_p (dest, src, split_type))
2648 mips_split_move (dest, src, split_type);
2649 else
2650 mips_emit_move (dest, src);
2651 }
2652
2653 /* Emit an instruction of the form (set TARGET (CODE OP0)). */
2654
2655 static void
2656 mips_emit_unary (enum rtx_code code, rtx target, rtx op0)
2657 {
2658 emit_insn (gen_rtx_SET (VOIDmode, target,
2659 gen_rtx_fmt_e (code, GET_MODE (op0), op0)));
2660 }
2661
2662 /* Compute (CODE OP0) and store the result in a new register of mode MODE.
2663 Return that new register. */
2664
2665 static rtx
2666 mips_force_unary (enum machine_mode mode, enum rtx_code code, rtx op0)
2667 {
2668 rtx reg;
2669
2670 reg = gen_reg_rtx (mode);
2671 mips_emit_unary (code, reg, op0);
2672 return reg;
2673 }
2674
2675 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2676
2677 void
2678 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2679 {
2680 emit_insn (gen_rtx_SET (VOIDmode, target,
2681 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2682 }
2683
2684 /* Compute (CODE OP0 OP1) and store the result in a new register
2685 of mode MODE. Return that new register. */
2686
2687 static rtx
2688 mips_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
2689 {
2690 rtx reg;
2691
2692 reg = gen_reg_rtx (mode);
2693 mips_emit_binary (code, reg, op0, op1);
2694 return reg;
2695 }
2696
2697 /* Copy VALUE to a register and return that register. If new pseudos
2698 are allowed, copy it into a new register, otherwise use DEST. */
2699
2700 static rtx
2701 mips_force_temporary (rtx dest, rtx value)
2702 {
2703 if (can_create_pseudo_p ())
2704 return force_reg (Pmode, value);
2705 else
2706 {
2707 mips_emit_move (dest, value);
2708 return dest;
2709 }
2710 }
2711
2712 /* Emit a call sequence with call pattern PATTERN and return the call
2713 instruction itself (which is not necessarily the last instruction
2714 emitted). ORIG_ADDR is the original, unlegitimized address,
2715 ADDR is the legitimized form, and LAZY_P is true if the call
2716 address is lazily-bound. */
2717
2718 static rtx
2719 mips_emit_call_insn (rtx pattern, rtx orig_addr, rtx addr, bool lazy_p)
2720 {
2721 rtx insn, reg;
2722
2723 insn = emit_call_insn (pattern);
2724
2725 if (TARGET_MIPS16 && mips_use_pic_fn_addr_reg_p (orig_addr))
2726 {
2727 /* MIPS16 JALRs only take MIPS16 registers. If the target
2728 function requires $25 to be valid on entry, we must copy it
2729 there separately. The move instruction can be put in the
2730 call's delay slot. */
2731 reg = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
2732 emit_insn_before (gen_move_insn (reg, addr), insn);
2733 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
2734 }
2735
2736 if (lazy_p)
2737 /* Lazy-binding stubs require $gp to be valid on entry. */
2738 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2739
2740 if (TARGET_USE_GOT)
2741 {
2742 /* See the comment above load_call<mode> for details. */
2743 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2744 gen_rtx_REG (Pmode, GOT_VERSION_REGNUM));
2745 emit_insn (gen_update_got_version ());
2746 }
2747 return insn;
2748 }
2749 \f
2750 /* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
2751 then add CONST_INT OFFSET to the result. */
2752
2753 static rtx
2754 mips_unspec_address_offset (rtx base, rtx offset,
2755 enum mips_symbol_type symbol_type)
2756 {
2757 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2758 UNSPEC_ADDRESS_FIRST + symbol_type);
2759 if (offset != const0_rtx)
2760 base = gen_rtx_PLUS (Pmode, base, offset);
2761 return gen_rtx_CONST (Pmode, base);
2762 }
2763
2764 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2765 type SYMBOL_TYPE. */
2766
2767 rtx
2768 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2769 {
2770 rtx base, offset;
2771
2772 split_const (address, &base, &offset);
2773 return mips_unspec_address_offset (base, offset, symbol_type);
2774 }
2775
2776 /* If OP is an UNSPEC address, return the address to which it refers,
2777 otherwise return OP itself. */
2778
2779 rtx
2780 mips_strip_unspec_address (rtx op)
2781 {
2782 rtx base, offset;
2783
2784 split_const (op, &base, &offset);
2785 if (UNSPEC_ADDRESS_P (base))
2786 op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
2787 return op;
2788 }
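
/* For example, given
   (const (plus (unspec [(symbol_ref "foo")] ...) (const_int 4))),
   this function returns (const (plus (symbol_ref "foo") (const_int 4))).  */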
2789
2790 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2791 high part to BASE and return the result. Just return BASE otherwise.
2792 TEMP is as for mips_force_temporary.
2793
2794 The returned expression can be used as the first operand to a LO_SUM. */
2795
2796 static rtx
2797 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2798 enum mips_symbol_type symbol_type)
2799 {
2800 if (mips_split_p[symbol_type])
2801 {
2802 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2803 addr = mips_force_temporary (temp, addr);
2804 base = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2805 }
2806 return base;
2807 }
2808 \f
2809 /* Return an instruction that copies $gp into register REG. We want
2810 GCC to treat the register's value as constant, so that its value
2811 can be rematerialized on demand. */
2812
2813 static rtx
2814 gen_load_const_gp (rtx reg)
2815 {
2816 return PMODE_INSN (gen_load_const_gp, (reg));
2817 }
2818
2819 /* Return a pseudo register that contains the value of $gp throughout
2820 the current function. Such registers are needed by MIPS16 functions,
2821 for which $gp itself is not a valid base register or addition operand. */
2822
2823 static rtx
2824 mips16_gp_pseudo_reg (void)
2825 {
2826 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
2827 {
2828 rtx insn, scan;
2829
2830 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
2831
2832 push_topmost_sequence ();
2833
2834 scan = get_insns ();
2835 while (NEXT_INSN (scan) && !INSN_P (NEXT_INSN (scan)))
2836 scan = NEXT_INSN (scan);
2837
2838 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
2839 insn = emit_insn_after (insn, scan);
2840 INSN_LOCATION (insn) = 0;
2841
2842 pop_topmost_sequence ();
2843 }
2844
2845 return cfun->machine->mips16_gp_pseudo_rtx;
2846 }
2847
2848 /* Return a base register that holds pic_offset_table_rtx.
2849 TEMP, if nonnull, is a scratch Pmode base register. */
2850
2851 rtx
2852 mips_pic_base_register (rtx temp)
2853 {
2854 if (!TARGET_MIPS16)
2855 return pic_offset_table_rtx;
2856
2857 if (currently_expanding_to_rtl)
2858 return mips16_gp_pseudo_reg ();
2859
2860 if (can_create_pseudo_p ())
2861 temp = gen_reg_rtx (Pmode);
2862
2863 if (TARGET_USE_GOT)
2864 /* The first post-reload split exposes all references to $gp
2865 (both uses and definitions). All references must remain
2866 explicit after that point.
2867
2868 It is safe to introduce uses of $gp at any time, so for
2869 simplicity, we do that before the split too. */
2870 mips_emit_move (temp, pic_offset_table_rtx);
2871 else
2872 emit_insn (gen_load_const_gp (temp));
2873 return temp;
2874 }
2875
2876 /* Return the RHS of a load_call<mode> insn. */
2877
2878 static rtx
2879 mips_unspec_call (rtx reg, rtx symbol)
2880 {
2881 rtvec vec;
2882
2883 vec = gen_rtvec (3, reg, symbol, gen_rtx_REG (SImode, GOT_VERSION_REGNUM));
2884 return gen_rtx_UNSPEC (Pmode, vec, UNSPEC_LOAD_CALL);
2885 }
2886
2887 /* If SRC is the RHS of a load_call<mode> insn, return the underlying symbol
2888 reference. Return NULL_RTX otherwise. */
2889
2890 static rtx
2891 mips_strip_unspec_call (rtx src)
2892 {
2893 if (GET_CODE (src) == UNSPEC && XINT (src, 1) == UNSPEC_LOAD_CALL)
2894 return mips_strip_unspec_address (XVECEXP (src, 0, 1));
2895 return NULL_RTX;
2896 }
2897
2898 /* Create and return a GOT reference of type TYPE for address ADDR.
2899 TEMP, if nonnull, is a scratch Pmode base register. */
2900
2901 rtx
2902 mips_got_load (rtx temp, rtx addr, enum mips_symbol_type type)
2903 {
2904 rtx base, high, lo_sum_symbol;
2905
2906 base = mips_pic_base_register (temp);
2907
2908 /* If we used the temporary register to load $gp, we can't use
2909 it for the high part as well. */
2910 if (temp != NULL && reg_overlap_mentioned_p (base, temp))
2911 temp = NULL;
2912
2913 high = mips_unspec_offset_high (temp, base, addr, type);
2914 lo_sum_symbol = mips_unspec_address (addr, type);
2915
2916 if (type == SYMBOL_GOTOFF_CALL)
2917 return mips_unspec_call (high, lo_sum_symbol);
2918 else
2919 return PMODE_INSN (gen_unspec_got, (high, lo_sum_symbol));
2920 }
2921
2922 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2923 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2924 constant in that context and can be split into high and low parts.
2925 If so, and if LOW_OUT is nonnull, emit the high part and store the
2926 low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise.
2927
2928 TEMP is as for mips_force_temporary and is used to load the high
2929 part into a register.
2930
2931 When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
2932 a legitimate SET_SRC for an .md pattern, otherwise the low part
2933 is guaranteed to be a legitimate address for mode MODE. */
2934
2935 bool
2936 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
2937 {
2938 enum mips_symbol_context context;
2939 enum mips_symbol_type symbol_type;
2940 rtx high;
2941
2942 context = (mode == MAX_MACHINE_MODE
2943 ? SYMBOL_CONTEXT_LEA
2944 : SYMBOL_CONTEXT_MEM);
2945 if (GET_CODE (addr) == HIGH && context == SYMBOL_CONTEXT_LEA)
2946 {
2947 addr = XEXP (addr, 0);
2948 if (mips_symbolic_constant_p (addr, context, &symbol_type)
2949 && mips_symbol_insns (symbol_type, mode) > 0
2950 && mips_split_hi_p[symbol_type])
2951 {
2952 if (low_out)
2953 switch (symbol_type)
2954 {
2955 case SYMBOL_GOT_PAGE_OFST:
2956 /* The high part of a page/ofst pair is loaded from the GOT. */
2957 *low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_PAGE);
2958 break;
2959
2960 default:
2961 gcc_unreachable ();
2962 }
2963 return true;
2964 }
2965 }
2966 else
2967 {
2968 if (mips_symbolic_constant_p (addr, context, &symbol_type)
2969 && mips_symbol_insns (symbol_type, mode) > 0
2970 && mips_split_p[symbol_type])
2971 {
2972 if (low_out)
2973 switch (symbol_type)
2974 {
2975 case SYMBOL_GOT_DISP:
2976 /* SYMBOL_GOT_DISP symbols are loaded from the GOT. */
2977 *low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_DISP);
2978 break;
2979
2980 case SYMBOL_GP_RELATIVE:
2981 high = mips_pic_base_register (temp);
2982 *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
2983 break;
2984
2985 default:
2986 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2987 high = mips_force_temporary (temp, high);
2988 *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
2989 break;
2990 }
2991 return true;
2992 }
2993 }
2994 return false;
2995 }
2996
2997 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2998 mips_force_temporary; it is only needed when OFFSET is not a
2999 SMALL_OPERAND. */
3000
3001 static rtx
3002 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
3003 {
3004 if (!SMALL_OPERAND (offset))
3005 {
3006 rtx high;
3007
3008 if (TARGET_MIPS16)
3009 {
3010 /* Load the full offset into a register so that we can use
3011 an unextended instruction for the address itself. */
3012 high = GEN_INT (offset);
3013 offset = 0;
3014 }
3015 else
3016 {
3017 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH.
3018 The addition inside the macro CONST_HIGH_PART may cause an
3019 overflow, so we need to force a sign-extension check. */
3020 high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
3021 offset = CONST_LOW_PART (offset);
3022 }
3023 high = mips_force_temporary (temp, high);
3024 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
3025 }
3026 return plus_constant (Pmode, reg, offset);
3027 }
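
/* For example, a non-MIPS16 addition of 0x8000 splits into
   CONST_HIGH_PART 0x10000 and CONST_LOW_PART -0x8000: the high part
   deliberately overshoots by one LUI step so that adding the
   sign-extended low part lands back on the original offset.  */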
3028 \f
3029 /* The __tls_get_addr symbol. */
3030 static GTY(()) rtx mips_tls_symbol;
3031
3032 /* Return an instruction sequence that calls __tls_get_addr. SYM is
3033 the TLS symbol we are referencing and TYPE is the symbol type to use
3034 (either global dynamic or local dynamic). V0 is an RTX for the
3035 return value location. */
3036
3037 static rtx
3038 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
3039 {
3040 rtx insn, loc, a0;
3041
3042 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
3043
3044 if (!mips_tls_symbol)
3045 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
3046
3047 loc = mips_unspec_address (sym, type);
3048
3049 start_sequence ();
3050
3051 emit_insn (gen_rtx_SET (Pmode, a0,
3052 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
3053 insn = mips_expand_call (MIPS_CALL_NORMAL, v0, mips_tls_symbol,
3054 const0_rtx, NULL_RTX, false);
3055 RTL_CONST_CALL_P (insn) = 1;
3056 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
3057 insn = get_insns ();
3058
3059 end_sequence ();
3060
3061 return insn;
3062 }
3063
3064 /* Return a pseudo register that contains the current thread pointer. */
3065
3066 rtx
3067 mips_expand_thread_pointer (rtx tp)
3068 {
3069 rtx fn;
3070
3071 if (TARGET_MIPS16)
3072 {
3073 mips_need_mips16_rdhwr_p = true;
3074 fn = mips16_stub_function ("__mips16_rdhwr");
3075 SYMBOL_REF_FLAGS (fn) |= SYMBOL_FLAG_LOCAL;
3076 if (!call_insn_operand (fn, VOIDmode))
3077 fn = force_reg (Pmode, fn);
3078 emit_insn (PMODE_INSN (gen_tls_get_tp_mips16, (tp, fn)));
3079 }
3080 else
3081 emit_insn (PMODE_INSN (gen_tls_get_tp, (tp)));
3082 return tp;
3083 }
3084
3085 static rtx
3086 mips_get_tp (void)
3087 {
3088 return mips_expand_thread_pointer (gen_reg_rtx (Pmode));
3089 }
3090
3091 /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
3092 its address. The return value will be both a valid address and a valid
3093 SET_SRC (either a REG or a LO_SUM). */
3094
3095 static rtx
3096 mips_legitimize_tls_address (rtx loc)
3097 {
3098 rtx dest, insn, v0, tp, tmp1, tmp2, eqv, offset;
3099 enum tls_model model;
3100
3101 model = SYMBOL_REF_TLS_MODEL (loc);
3102 /* Only TARGET_ABICALLS code can have more than one module; other
3103 code must be static and should not use a GOT. All TLS models
3104 reduce to local exec in this situation. */
3105 if (!TARGET_ABICALLS)
3106 model = TLS_MODEL_LOCAL_EXEC;
3107
3108 switch (model)
3109 {
3110 case TLS_MODEL_GLOBAL_DYNAMIC:
3111 v0 = gen_rtx_REG (Pmode, GP_RETURN);
3112 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
3113 dest = gen_reg_rtx (Pmode);
3114 emit_libcall_block (insn, dest, v0, loc);
3115 break;
3116
3117 case TLS_MODEL_LOCAL_DYNAMIC:
3118 v0 = gen_rtx_REG (Pmode, GP_RETURN);
3119 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
3120 tmp1 = gen_reg_rtx (Pmode);
3121
3122 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
3123 share the LDM result with other LD model accesses. */
3124 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3125 UNSPEC_TLS_LDM);
3126 emit_libcall_block (insn, tmp1, v0, eqv);
3127
3128 offset = mips_unspec_address (loc, SYMBOL_DTPREL);
3129 if (mips_split_p[SYMBOL_DTPREL])
3130 {
3131 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
3132 dest = gen_rtx_LO_SUM (Pmode, tmp2, offset);
3133 }
3134 else
3135 dest = expand_binop (Pmode, add_optab, tmp1, offset,
3136 0, 0, OPTAB_DIRECT);
3137 break;
3138
3139 case TLS_MODEL_INITIAL_EXEC:
3140 tp = mips_get_tp ();
3141 tmp1 = gen_reg_rtx (Pmode);
3142 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
3143 if (Pmode == DImode)
3144 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
3145 else
3146 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
3147 dest = gen_reg_rtx (Pmode);
3148 emit_insn (gen_add3_insn (dest, tmp1, tp));
3149 break;
3150
3151 case TLS_MODEL_LOCAL_EXEC:
3152 tmp1 = mips_get_tp ();
3153 offset = mips_unspec_address (loc, SYMBOL_TPREL);
3154 if (mips_split_p[SYMBOL_TPREL])
3155 {
3156 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_TPREL);
3157 dest = gen_rtx_LO_SUM (Pmode, tmp2, offset);
3158 }
3159 else
3160 dest = expand_binop (Pmode, add_optab, tmp1, offset,
3161 0, 0, OPTAB_DIRECT);
3162 break;
3163
3164 default:
3165 gcc_unreachable ();
3166 }
3167 return dest;
3168 }
3169 \f
3170 /* If X is not a valid address for mode MODE, force it into a register. */
3171
3172 static rtx
3173 mips_force_address (rtx x, enum machine_mode mode)
3174 {
3175 if (!mips_legitimate_address_p (mode, x, false))
3176 x = force_reg (Pmode, x);
3177 return x;
3178 }
3179
3180 /* This function is used to implement LEGITIMIZE_ADDRESS. If X can
3181 be legitimized in a way that the generic machinery might not expect,
3182 return a new address, otherwise return NULL. MODE is the mode of
3183 the memory being accessed. */
3184
3185 static rtx
3186 mips_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3187 enum machine_mode mode)
3188 {
3189 rtx base, addr;
3190 HOST_WIDE_INT offset;
3191
3192 if (mips_tls_symbol_p (x))
3193 return mips_legitimize_tls_address (x);
3194
3195 /* See if the address can split into a high part and a LO_SUM. */
3196 if (mips_split_symbol (NULL, x, mode, &addr))
3197 return mips_force_address (addr, mode);
3198
3199 /* Handle BASE + OFFSET using mips_add_offset. */
3200 mips_split_plus (x, &base, &offset);
3201 if (offset != 0)
3202 {
3203 if (!mips_valid_base_register_p (base, mode, false))
3204 base = copy_to_mode_reg (Pmode, base);
3205 addr = mips_add_offset (NULL, base, offset);
3206 return mips_force_address (addr, mode);
3207 }
3208
3209 return x;
3210 }
3211
3212 /* Load VALUE into DEST. TEMP is as for mips_force_temporary. */
3213
3214 void
3215 mips_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
3216 {
3217 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
3218 enum machine_mode mode;
3219 unsigned int i, num_ops;
3220 rtx x;
3221
3222 mode = GET_MODE (dest);
3223 num_ops = mips_build_integer (codes, value);
3224
3225 /* Apply each binary operation to X. Invariant: X is a legitimate
3226 source operand for a SET pattern. */
3227 x = GEN_INT (codes[0].value);
3228 for (i = 1; i < num_ops; i++)
3229 {
3230 if (!can_create_pseudo_p ())
3231 {
3232 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
3233 x = temp;
3234 }
3235 else
3236 x = force_reg (mode, x);
3237 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
3238 }
3239
3240 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
3241 }
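
/* For example, if mips_build_integer describes 0x12345678 as the base
   value 0x12340000 followed by an IOR with 0x5678, the loop above
   forces 0x12340000 into a register (an LUI) and then applies the IOR,
   so DEST is set in two instructions.  */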
3242
3243 /* Subroutine of mips_legitimize_move. Move constant SRC into register
3244 DEST given that SRC satisfies immediate_operand but doesn't satisfy
3245 move_operand. */
3246
3247 static void
3248 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
3249 {
3250 rtx base, offset;
3251
3252 /* Split moves of big integers into smaller pieces. */
3253 if (splittable_const_int_operand (src, mode))
3254 {
3255 mips_move_integer (dest, dest, INTVAL (src));
3256 return;
3257 }
3258
3259 /* Split moves of symbolic constants into high/low pairs. */
3260 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
3261 {
3262 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
3263 return;
3264 }
3265
3266 /* Generate the appropriate access sequences for TLS symbols. */
3267 if (mips_tls_symbol_p (src))
3268 {
3269 mips_emit_move (dest, mips_legitimize_tls_address (src));
3270 return;
3271 }
3272
3273 /* If we have (const (plus symbol offset)), and that expression cannot
3274 be forced into memory, load the symbol first and add in the offset.
3275 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
3276 forced into memory, as it usually produces better code. */
3277 split_const (src, &base, &offset);
3278 if (offset != const0_rtx
3279 && (targetm.cannot_force_const_mem (mode, src)
3280 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
3281 {
3282 base = mips_force_temporary (dest, base);
3283 mips_emit_move (dest, mips_add_offset (NULL, base, INTVAL (offset)));
3284 return;
3285 }
3286
3287 src = force_const_mem (mode, src);
3288
3289 /* When using explicit relocs, constant pool references are sometimes
3290 not legitimate addresses. */
3291 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
3292 mips_emit_move (dest, src);
3293 }
3294
3295 /* If (set DEST SRC) is not a valid move instruction, emit an equivalent
3296 sequence that is valid. */
3297
3298 bool
3299 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
3300 {
3301 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
3302 {
3303 mips_emit_move (dest, force_reg (mode, src));
3304 return true;
3305 }
3306
3307 /* We need to deal with constants that would be legitimate
3308 immediate_operands but aren't legitimate move_operands. */
3309 if (CONSTANT_P (src) && !move_operand (src, mode))
3310 {
3311 mips_legitimize_const_move (mode, dest, src);
3312 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
3313 return true;
3314 }
3315 return false;
3316 }
3317 \f
3318 /* Return true if value X in context CONTEXT is a small-data address
3319 that can be rewritten as a LO_SUM. */
3320
3321 static bool
3322 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
3323 {
3324 enum mips_symbol_type symbol_type;
3325
3326 return (mips_lo_relocs[SYMBOL_GP_RELATIVE]
3327 && !mips_split_p[SYMBOL_GP_RELATIVE]
3328 && mips_symbolic_constant_p (x, context, &symbol_type)
3329 && symbol_type == SYMBOL_GP_RELATIVE);
3330 }
3331
3332 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
3333 containing MEM, or null if none. */
3334
3335 static int
3336 mips_small_data_pattern_1 (rtx *loc, void *data)
3337 {
3338 enum mips_symbol_context context;
3339
3340 /* Ignore things like "g" constraints in asms. We make no particular
3341 guarantee about which symbolic constants are acceptable as asm operands
3342 versus which must be forced into a GPR. */
3343 if (GET_CODE (*loc) == LO_SUM || GET_CODE (*loc) == ASM_OPERANDS)
3344 return -1;
3345
3346 if (MEM_P (*loc))
3347 {
3348 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
3349 return 1;
3350 return -1;
3351 }
3352
3353 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
3354 return mips_rewrite_small_data_p (*loc, context);
3355 }
3356
3357 /* Return true if OP refers to small data symbols directly, not through
3358 a LO_SUM. */
3359
3360 bool
3361 mips_small_data_pattern_p (rtx op)
3362 {
3363 return for_each_rtx (&op, mips_small_data_pattern_1, NULL);
3364 }
3365
3366 /* A for_each_rtx callback, used by mips_rewrite_small_data.
3367 DATA is the containing MEM, or null if none. */
3368
3369 static int
3370 mips_rewrite_small_data_1 (rtx *loc, void *data)
3371 {
3372 enum mips_symbol_context context;
3373
3374 if (MEM_P (*loc))
3375 {
3376 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
3377 return -1;
3378 }
3379
3380 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
3381 if (mips_rewrite_small_data_p (*loc, context))
3382 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
3383
3384 if (GET_CODE (*loc) == LO_SUM)
3385 return -1;
3386
3387 return 0;
3388 }
3389
3390 /* Rewrite instruction pattern PATTERN so that it refers to small data
3391 using explicit relocations. */
3392
3393 rtx
3394 mips_rewrite_small_data (rtx pattern)
3395 {
3396 pattern = copy_insn (pattern);
3397 for_each_rtx (&pattern, mips_rewrite_small_data_1, NULL);
3398 return pattern;
3399 }
3400 \f
3401 /* The cost of loading values from the constant pool. It should be
3402 larger than the cost of any constant we want to synthesize inline. */
3403 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
3404
3405 /* Return the cost of X when used as an operand to the MIPS16 instruction
3406 that implements CODE. Return -1 if there is no such instruction, or if
3407 X is not a valid immediate operand for it. */
3408
3409 static int
3410 mips16_constant_cost (int code, HOST_WIDE_INT x)
3411 {
3412 switch (code)
3413 {
3414 case ASHIFT:
3415 case ASHIFTRT:
3416 case LSHIFTRT:
3417 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
3418 other shifts are extended. The shift patterns truncate the shift
3419 count to the right size, so there are no out-of-range values. */
3420 if (IN_RANGE (x, 1, 8))
3421 return 0;
3422 return COSTS_N_INSNS (1);
3423
3424 case PLUS:
3425 if (IN_RANGE (x, -128, 127))
3426 return 0;
3427 if (SMALL_OPERAND (x))
3428 return COSTS_N_INSNS (1);
3429 return -1;
3430
3431 case LEU:
3432 /* Like LE, but reject the always-true case, then fall through. */
3433 if (x == -1)
3434 return -1;
3435 case LE:
3436 /* We add 1 to the immediate and use SLT; fall through. */
3437 x += 1;
3438 case XOR:
3439 /* We can use CMPI for an xor with an unsigned 16-bit X. */
3440 case LT:
3441 case LTU:
3442 if (IN_RANGE (x, 0, 255))
3443 return 0;
3444 if (SMALL_OPERAND_UNSIGNED (x))
3445 return COSTS_N_INSNS (1);
3446 return -1;
3447
3448 case EQ:
3449 case NE:
3450 /* Equality comparisons with 0 are cheap. */
3451 if (x == 0)
3452 return 0;
3453 return -1;
3454
3455 default:
3456 return -1;
3457 }
3458 }
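
/* For example, the MIPS16 comparison "x <= 100" is handled by the LE
   case above as "x < 101"; 101 is within the unextended range
   [0, 255], so the immediate adds no cost.  */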
3459
3460 /* Return true if there is a non-MIPS16 instruction that implements CODE
3461 and if that instruction accepts X as an immediate operand. */
3462
3463 static int
3464 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
3465 {
3466 switch (code)
3467 {
3468 case ASHIFT:
3469 case ASHIFTRT:
3470 case LSHIFTRT:
3471 /* All shift counts are truncated to a valid constant. */
3472 return true;
3473
3474 case ROTATE:
3475 case ROTATERT:
3476 /* Likewise rotates, if the target supports rotates at all. */
3477 return ISA_HAS_ROR;
3478
3479 case AND:
3480 case IOR:
3481 case XOR:
3482 /* These instructions take 16-bit unsigned immediates. */
3483 return SMALL_OPERAND_UNSIGNED (x);
3484
3485 case PLUS:
3486 case LT:
3487 case LTU:
3488 /* These instructions take 16-bit signed immediates. */
3489 return SMALL_OPERAND (x);
3490
3491 case EQ:
3492 case NE:
3493 case GT:
3494 case GTU:
3495 /* The "immediate" forms of these instructions are really
3496 implemented as comparisons with register 0. */
3497 return x == 0;
3498
3499 case GE:
3500 case GEU:
3501 /* Likewise, meaning that the only valid immediate operand is 1. */
3502 return x == 1;
3503
3504 case LE:
3505 /* We add 1 to the immediate and use SLT. */
3506 return SMALL_OPERAND (x + 1);
3507
3508 case LEU:
3509 /* Likewise SLTU, but reject the always-true case. */
3510 return SMALL_OPERAND (x + 1) && x + 1 != 0;
3511
3512 case SIGN_EXTRACT:
3513 case ZERO_EXTRACT:
3514 /* The bit position and size are immediate operands. */
3515 return ISA_HAS_EXT_INS;
3516
3517 default:
3518 /* By default assume that $0 can be used for 0. */
3519 return x == 0;
3520 }
3521 }
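
/* For example, mips_immediate_operand_p (PLUS, 0x7fff) is true,
   since ADDIU takes a signed 16-bit immediate, whereas
   mips_immediate_operand_p (PLUS, 0x8000) is false and the constant
   must first be loaded into a register.  */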
3522
3523 /* Return the cost of binary operation X, given that the instruction
3524 sequence for a word-sized or smaller operation has cost SINGLE_COST
3525 and that the sequence of a double-word operation has cost DOUBLE_COST.
3526 If SPEED is true, optimize for speed otherwise optimize for size. */
3527
3528 static int
3529 mips_binary_cost (rtx x, int single_cost, int double_cost, bool speed)
3530 {
3531 int cost;
3532
3533 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
3534 cost = double_cost;
3535 else
3536 cost = single_cost;
3537 return (cost
3538 + set_src_cost (XEXP (x, 0), speed)
3539 + rtx_cost (XEXP (x, 1), GET_CODE (x), 1, speed));
3540 }
3541
3542 /* Return the cost of floating-point multiplications of mode MODE. */
3543
3544 static int
3545 mips_fp_mult_cost (enum machine_mode mode)
3546 {
3547 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
3548 }
3549
3550 /* Return the cost of floating-point divisions of mode MODE. */
3551
3552 static int
3553 mips_fp_div_cost (enum machine_mode mode)
3554 {
3555 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
3556 }
3557
3558 /* Return the cost of sign-extending OP to mode MODE, not including the
3559 cost of OP itself. */
3560
3561 static int
3562 mips_sign_extend_cost (enum machine_mode mode, rtx op)
3563 {
3564 if (MEM_P (op))
3565 /* Extended loads are as cheap as unextended ones. */
3566 return 0;
3567
3568 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3569 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3570 return 0;
3571
3572 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
3573 /* We can use SEB or SEH. */
3574 return COSTS_N_INSNS (1);
3575
3576 /* We need to use a shift left and a shift right. */
3577 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3578 }
3579
3580 /* Return the cost of zero-extending OP to mode MODE, not including the
3581 cost of OP itself. */
3582
3583 static int
3584 mips_zero_extend_cost (enum machine_mode mode, rtx op)
3585 {
3586 if (MEM_P (op))
3587 /* Extended loads are as cheap as unextended ones. */
3588 return 0;
3589
3590 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3591 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3592 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3593
3594 if (GENERATE_MIPS16E)
3595 /* We can use ZEB or ZEH. */
3596 return COSTS_N_INSNS (1);
3597
3598 if (TARGET_MIPS16)
3599 /* We need to load 0xff or 0xffff into a register and use AND. */
3600 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3601
3602 /* We can use ANDI. */
3603 return COSTS_N_INSNS (1);
3604 }
3605
3606 /* Return the cost of moving between two registers of mode MODE,
3607 assuming that the move will be in pieces of at most UNITS bytes. */
3608
3609 static int
3610 mips_set_reg_reg_piece_cost (enum machine_mode mode, unsigned int units)
3611 {
3612 return COSTS_N_INSNS ((GET_MODE_SIZE (mode) + units - 1) / units);
3613 }
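
/* For example, with 4-byte units a 16-byte mode costs
   COSTS_N_INSNS (4): the cost is simply the mode size divided by the
   unit size, rounded up.  */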
3614
3615 /* Return the cost of moving between two registers of mode MODE. */
3616
3617 static int
3618 mips_set_reg_reg_cost (enum machine_mode mode)
3619 {
3620 switch (GET_MODE_CLASS (mode))
3621 {
3622 case MODE_CC:
3623 return mips_set_reg_reg_piece_cost (mode, GET_MODE_SIZE (CCmode));
3624
3625 case MODE_FLOAT:
3626 case MODE_COMPLEX_FLOAT:
3627 case MODE_VECTOR_FLOAT:
3628 if (TARGET_HARD_FLOAT)
3629 return mips_set_reg_reg_piece_cost (mode, UNITS_PER_HWFPVALUE);
3630 /* Fall through */
3631
3632 default:
3633 return mips_set_reg_reg_piece_cost (mode, UNITS_PER_WORD);
3634 }
3635 }
3636
/* Return the cost of an operand X that can be truncated for free.
SPEED says whether we're optimizing for size or speed.  */
3639
3640 static int
3641 mips_truncated_op_cost (rtx x, bool speed)
3642 {
3643 if (GET_CODE (x) == TRUNCATE)
3644 x = XEXP (x, 0);
3645 return set_src_cost (x, speed);
3646 }
3647
3648 /* Implement TARGET_RTX_COSTS. */
3649
3650 static bool
3651 mips_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
3652 int *total, bool speed)
3653 {
3654 enum machine_mode mode = GET_MODE (x);
3655 bool float_mode_p = FLOAT_MODE_P (mode);
3656 int cost;
3657 rtx addr;
3658
3659 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3660 appear in the instruction stream, and the cost of a comparison is
3661 really the cost of the branch or scc condition. At the time of
3662 writing, GCC only uses an explicit outer COMPARE code when optabs
3663 is testing whether a constant is expensive enough to force into a
3664 register. We want optabs to pass such constants through the MIPS
3665 expanders instead, so make all constants very cheap here. */
3666 if (outer_code == COMPARE)
3667 {
3668 gcc_assert (CONSTANT_P (x));
3669 *total = 0;
3670 return true;
3671 }
3672
3673 switch (code)
3674 {
3675 case CONST_INT:
3676 /* Treat *clear_upper32-style ANDs as having zero cost in the
3677 second operand. The cost is entirely in the first operand.
3678
3679 ??? This is needed because we would otherwise try to CSE
3680 the constant operand. Although that's the right thing for
3681 instructions that continue to be a register operation throughout
3682 compilation, it is disastrous for instructions that could
3683 later be converted into a memory operation. */
3684 if (TARGET_64BIT
3685 && outer_code == AND
3686 && UINTVAL (x) == 0xffffffff)
3687 {
3688 *total = 0;
3689 return true;
3690 }
3691
3692 if (TARGET_MIPS16)
3693 {
3694 cost = mips16_constant_cost (outer_code, INTVAL (x));
3695 if (cost >= 0)
3696 {
3697 *total = cost;
3698 return true;
3699 }
3700 }
3701 else
3702 {
3703 /* When not optimizing for size, we care more about the cost
3704 of hot code, and hot code is often in a loop. If a constant
3705 operand needs to be forced into a register, we will often be
3706 able to hoist the constant load out of the loop, so the load
3707 should not contribute to the cost. */
3708 if (speed || mips_immediate_operand_p (outer_code, INTVAL (x)))
3709 {
3710 *total = 0;
3711 return true;
3712 }
3713 }
3714 /* Fall through. */
3715
3716 case CONST:
3717 case SYMBOL_REF:
3718 case LABEL_REF:
3719 case CONST_DOUBLE:
3720 if (force_to_mem_operand (x, VOIDmode))
3721 {
3722 *total = COSTS_N_INSNS (1);
3723 return true;
3724 }
3725 cost = mips_const_insns (x);
3726 if (cost > 0)
3727 {
3728 /* If the constant is likely to be stored in a GPR, SETs of
3729 single-insn constants are as cheap as register sets; we
3730 never want to CSE them.
3731
3732 Don't reduce the cost of storing a floating-point zero in
3733 FPRs. If we have a zero in an FPR for other reasons, we
3734 can get better cfg-cleanup and delayed-branch results by
3735 using it consistently, rather than using $0 sometimes and
3736 an FPR at other times. Also, moves between floating-point
3737 registers are sometimes cheaper than (D)MTC1 $0. */
3738 if (cost == 1
3739 && outer_code == SET
3740 && !(float_mode_p && TARGET_HARD_FLOAT))
3741 cost = 0;
3742 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3743 want to CSE the constant itself. It is usually better to
3744 have N copies of the last operation in the sequence and one
3745 shared copy of the other operations. (Note that this is
3746 not true for MIPS16 code, where the final operation in the
3747 sequence is often an extended instruction.)
3748
3749 Also, if we have a CONST_INT, we don't know whether it is
3750 for a word or doubleword operation, so we cannot rely on
3751 the result of mips_build_integer. */
3752 else if (!TARGET_MIPS16
3753 && (outer_code == SET || mode == VOIDmode))
3754 cost = 1;
3755 *total = COSTS_N_INSNS (cost);
3756 return true;
3757 }
3758 /* The value will need to be fetched from the constant pool. */
3759 *total = CONSTANT_POOL_COST;
3760 return true;
3761
3762 case MEM:
3763 /* If the address is legitimate, return the number of
3764 instructions it needs. */
3765 addr = XEXP (x, 0);
3766 cost = mips_address_insns (addr, mode, true);
3767 if (cost > 0)
3768 {
3769 *total = COSTS_N_INSNS (cost + 1);
3770 return true;
3771 }
3772 /* Check for a scaled indexed address. */
3773 if (mips_lwxs_address_p (addr)
3774 || mips_lx_address_p (addr, mode))
3775 {
3776 *total = COSTS_N_INSNS (2);
3777 return true;
3778 }
3779 /* Otherwise use the default handling. */
3780 return false;
3781
3782 case FFS:
3783 *total = COSTS_N_INSNS (6);
3784 return false;
3785
3786 case NOT:
3787 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3788 return false;
3789
3790 case AND:
3791 /* Check for a *clear_upper32 pattern and treat it like a zero
3792 extension. See the pattern's comment for details. */
3793 if (TARGET_64BIT
3794 && mode == DImode
3795 && CONST_INT_P (XEXP (x, 1))
3796 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3797 {
3798 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3799 + set_src_cost (XEXP (x, 0), speed));
3800 return true;
3801 }
3802 if (ISA_HAS_CINS && CONST_INT_P (XEXP (x, 1)))
3803 {
3804 rtx op = XEXP (x, 0);
3805 if (GET_CODE (op) == ASHIFT
3806 && CONST_INT_P (XEXP (op, 1))
3807 && mask_low_and_shift_p (mode, XEXP (x, 1), XEXP (op, 1), 32))
3808 {
3809 *total = COSTS_N_INSNS (1) + set_src_cost (XEXP (op, 0), speed);
3810 return true;
3811 }
3812 }
/* (AND (NOT op0) (NOT op1)) is a NOR operation that can be done in
a single instruction.  */
3815 if (!TARGET_MIPS16
3816 && GET_CODE (XEXP (x, 0)) == NOT
3817 && GET_CODE (XEXP (x, 1)) == NOT)
3818 {
3819 cost = GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1;
3820 *total = (COSTS_N_INSNS (cost)
3821 + set_src_cost (XEXP (XEXP (x, 0), 0), speed)
3822 + set_src_cost (XEXP (XEXP (x, 1), 0), speed));
3823 return true;
3824 }
3825
3826 /* Fall through. */
3827
3828 case IOR:
3829 case XOR:
3830 /* Double-word operations use two single-word operations. */
3831 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2),
3832 speed);
3833 return true;
3834
3835 case ASHIFT:
3836 case ASHIFTRT:
3837 case LSHIFTRT:
3838 case ROTATE:
3839 case ROTATERT:
3840 if (CONSTANT_P (XEXP (x, 1)))
3841 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
3842 speed);
3843 else
3844 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12),
3845 speed);
3846 return true;
3847
3848 case ABS:
3849 if (float_mode_p)
3850 *total = mips_cost->fp_add;
3851 else
3852 *total = COSTS_N_INSNS (4);
3853 return false;
3854
3855 case LO_SUM:
3856 /* Low-part immediates need an extended MIPS16 instruction. */
3857 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3858 + set_src_cost (XEXP (x, 0), speed));
3859 return true;
3860
3861 case LT:
3862 case LTU:
3863 case LE:
3864 case LEU:
3865 case GT:
3866 case GTU:
3867 case GE:
3868 case GEU:
3869 case EQ:
3870 case NE:
3871 case UNORDERED:
3872 case LTGT:
3873 /* Branch comparisons have VOIDmode, so use the first operand's
3874 mode instead. */
3875 mode = GET_MODE (XEXP (x, 0));
3876 if (FLOAT_MODE_P (mode))
3877 {
3878 *total = mips_cost->fp_add;
3879 return false;
3880 }
3881 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
3882 speed);
3883 return true;
3884
3885 case MINUS:
3886 if (float_mode_p
3887 && (ISA_HAS_NMADD4_NMSUB4 || ISA_HAS_NMADD3_NMSUB3)
3888 && TARGET_FUSED_MADD
3889 && !HONOR_NANS (mode)
3890 && !HONOR_SIGNED_ZEROS (mode))
3891 {
3892 /* See if we can use NMADD or NMSUB. See mips.md for the
3893 associated patterns. */
3894 rtx op0 = XEXP (x, 0);
3895 rtx op1 = XEXP (x, 1);
3896 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3897 {
3898 *total = (mips_fp_mult_cost (mode)
3899 + set_src_cost (XEXP (XEXP (op0, 0), 0), speed)
3900 + set_src_cost (XEXP (op0, 1), speed)
3901 + set_src_cost (op1, speed));
3902 return true;
3903 }
3904 if (GET_CODE (op1) == MULT)
3905 {
3906 *total = (mips_fp_mult_cost (mode)
3907 + set_src_cost (op0, speed)
3908 + set_src_cost (XEXP (op1, 0), speed)
3909 + set_src_cost (XEXP (op1, 1), speed));
3910 return true;
3911 }
3912 }
3913 /* Fall through. */
3914
3915 case PLUS:
3916 if (float_mode_p)
3917 {
3918 /* If this is part of a MADD or MSUB, treat the PLUS as
3919 being free. */
3920 if ((ISA_HAS_FP_MADD4_MSUB4 || ISA_HAS_FP_MADD3_MSUB3)
3921 && TARGET_FUSED_MADD
3922 && GET_CODE (XEXP (x, 0)) == MULT)
3923 *total = 0;
3924 else
3925 *total = mips_cost->fp_add;
3926 return false;
3927 }
3928
3929 /* Double-word operations require three single-word operations and
3930 an SLTU. The MIPS16 version then needs to move the result of
3931 the SLTU from $24 to a MIPS16 register. */
3932 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3933 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4),
3934 speed);
3935 return true;
3936
3937 case NEG:
3938 if (float_mode_p
3939 && (ISA_HAS_NMADD4_NMSUB4 || ISA_HAS_NMADD3_NMSUB3)
3940 && TARGET_FUSED_MADD
3941 && !HONOR_NANS (mode)
3942 && HONOR_SIGNED_ZEROS (mode))
3943 {
3944 /* See if we can use NMADD or NMSUB. See mips.md for the
3945 associated patterns. */
3946 rtx op = XEXP (x, 0);
3947 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3948 && GET_CODE (XEXP (op, 0)) == MULT)
3949 {
3950 *total = (mips_fp_mult_cost (mode)
3951 + set_src_cost (XEXP (XEXP (op, 0), 0), speed)
3952 + set_src_cost (XEXP (XEXP (op, 0), 1), speed)
3953 + set_src_cost (XEXP (op, 1), speed));
3954 return true;
3955 }
3956 }
3957
3958 if (float_mode_p)
3959 *total = mips_cost->fp_add;
3960 else
3961 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3962 return false;
3963
3964 case MULT:
3965 if (float_mode_p)
3966 *total = mips_fp_mult_cost (mode);
3967 else if (mode == DImode && !TARGET_64BIT)
3968 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3969 where the mulsidi3 always includes an MFHI and an MFLO. */
3970 *total = (speed
3971 ? mips_cost->int_mult_si * 3 + 6
3972 : COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9));
3973 else if (!speed)
3974 *total = COSTS_N_INSNS (ISA_HAS_MUL3 ? 1 : 2) + 1;
3975 else if (mode == DImode)
3976 *total = mips_cost->int_mult_di;
3977 else
3978 *total = mips_cost->int_mult_si;
3979 return false;
3980
3981 case DIV:
3982 /* Check for a reciprocal. */
3983 if (float_mode_p
3984 && ISA_HAS_FP_RECIP_RSQRT (mode)
3985 && flag_unsafe_math_optimizations
3986 && XEXP (x, 0) == CONST1_RTX (mode))
3987 {
3988 if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT)
3989 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3990 division as being free. */
3991 *total = set_src_cost (XEXP (x, 1), speed);
3992 else
3993 *total = (mips_fp_div_cost (mode)
3994 + set_src_cost (XEXP (x, 1), speed));
3995 return true;
3996 }
3997 /* Fall through. */
3998
3999 case SQRT:
4000 case MOD:
4001 if (float_mode_p)
4002 {
4003 *total = mips_fp_div_cost (mode);
4004 return false;
4005 }
4006 /* Fall through. */
4007
4008 case UDIV:
4009 case UMOD:
4010 if (!speed)
4011 {
4012 /* It is our responsibility to make division by a power of 2
4013 as cheap as 2 register additions if we want the division
4014 expanders to be used for such operations; see the setting
4015 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
4016 should always produce shorter code than using
4017 expand_sdiv2_pow2. */
4018 if (TARGET_MIPS16
4019 && CONST_INT_P (XEXP (x, 1))
4020 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
4021 {
4022 *total = COSTS_N_INSNS (2) + set_src_cost (XEXP (x, 0), speed);
4023 return true;
4024 }
4025 *total = COSTS_N_INSNS (mips_idiv_insns ());
4026 }
4027 else if (mode == DImode)
4028 *total = mips_cost->int_div_di;
4029 else
4030 *total = mips_cost->int_div_si;
4031 return false;
4032
4033 case SIGN_EXTEND:
4034 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
4035 return false;
4036
4037 case ZERO_EXTEND:
4038 if (outer_code == SET
4039 && ISA_HAS_BADDU
4040 && GET_MODE (XEXP (x, 0)) == QImode
4041 && GET_CODE (XEXP (x, 0)) == PLUS)
4042 {
4043 rtx plus = XEXP (x, 0);
4044 *total = (COSTS_N_INSNS (1)
4045 + mips_truncated_op_cost (XEXP (plus, 0), speed)
4046 + mips_truncated_op_cost (XEXP (plus, 1), speed));
4047 return true;
4048 }
4049 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
4050 return false;
4051
4052 case FLOAT:
4053 case UNSIGNED_FLOAT:
4054 case FIX:
4055 case FLOAT_EXTEND:
4056 case FLOAT_TRUNCATE:
4057 *total = mips_cost->fp_add;
4058 return false;
4059
4060 case SET:
4061 if (register_operand (SET_DEST (x), VOIDmode)
4062 && reg_or_0_operand (SET_SRC (x), VOIDmode))
4063 {
4064 *total = mips_set_reg_reg_cost (GET_MODE (SET_DEST (x)));
4065 return true;
4066 }
4067 return false;
4068
4069 default:
4070 return false;
4071 }
4072 }
4073
4074 /* Implement TARGET_ADDRESS_COST. */
4075
4076 static int
4077 mips_address_cost (rtx addr, enum machine_mode mode,
4078 addr_space_t as ATTRIBUTE_UNUSED,
4079 bool speed ATTRIBUTE_UNUSED)
4080 {
4081 return mips_address_insns (addr, mode, false);
4082 }
4083 \f
4084 /* Information about a single instruction in a multi-instruction
4085 asm sequence. */
4086 struct mips_multi_member {
4087 /* True if this is a label, false if it is code. */
4088 bool is_label_p;
4089
4090 /* The output_asm_insn format of the instruction. */
4091 const char *format;
4092
4093 /* The operands to the instruction. */
4094 rtx operands[MAX_RECOG_OPERANDS];
4095 };
4096 typedef struct mips_multi_member mips_multi_member;
4097
4098 /* The instructions that make up the current multi-insn sequence. */
4099 static vec<mips_multi_member> mips_multi_members;
4100
4101 /* How many instructions (as opposed to labels) are in the current
4102 multi-insn sequence. */
4103 static unsigned int mips_multi_num_insns;
4104
4105 /* Start a new multi-insn sequence. */
4106
4107 static void
4108 mips_multi_start (void)
4109 {
4110 mips_multi_members.truncate (0);
4111 mips_multi_num_insns = 0;
4112 }
4113
4114 /* Add a new, uninitialized member to the current multi-insn sequence. */
4115
4116 static struct mips_multi_member *
4117 mips_multi_add (void)
4118 {
4119 mips_multi_member empty;
4120 return mips_multi_members.safe_push (empty);
4121 }
4122
4123 /* Add a normal insn with the given asm format to the current multi-insn
4124 sequence. The other arguments are a null-terminated list of operands. */
4125
4126 static void
4127 mips_multi_add_insn (const char *format, ...)
4128 {
4129 struct mips_multi_member *member;
4130 va_list ap;
4131 unsigned int i;
4132 rtx op;
4133
4134 member = mips_multi_add ();
4135 member->is_label_p = false;
4136 member->format = format;
4137 va_start (ap, format);
4138 i = 0;
4139 while ((op = va_arg (ap, rtx)))
4140 member->operands[i++] = op;
4141 va_end (ap);
4142 mips_multi_num_insns++;
4143 }
4144
4145 /* Add the given label definition to the current multi-insn sequence.
4146 The definition should include the colon. */
4147
4148 static void
4149 mips_multi_add_label (const char *label)
4150 {
4151 struct mips_multi_member *member;
4152
4153 member = mips_multi_add ();
4154 member->is_label_p = true;
4155 member->format = label;
4156 }
4157
4158 /* Return the index of the last member of the current multi-insn sequence. */
4159
4160 static unsigned int
4161 mips_multi_last_index (void)
4162 {
4163 return mips_multi_members.length () - 1;
4164 }
4165
4166 /* Add a copy of an existing instruction to the current multi-insn
4167 sequence. I is the index of the instruction that should be copied. */
4168
4169 static void
4170 mips_multi_copy_insn (unsigned int i)
4171 {
4172 struct mips_multi_member *member;
4173
4174 member = mips_multi_add ();
4175 memcpy (member, &mips_multi_members[i], sizeof (*member));
4176 gcc_assert (!member->is_label_p);
4177 }
4178
4179 /* Change the operand of an existing instruction in the current
4180 multi-insn sequence. I is the index of the instruction,
4181 OP is the index of the operand, and X is the new value. */
4182
4183 static void
4184 mips_multi_set_operand (unsigned int i, unsigned int op, rtx x)
4185 {
4186 mips_multi_members[i].operands[op] = x;
4187 }
4188
4189 /* Write out the asm code for the current multi-insn sequence. */
4190
4191 static void
4192 mips_multi_write (void)
4193 {
4194 struct mips_multi_member *member;
4195 unsigned int i;
4196
4197 FOR_EACH_VEC_ELT (mips_multi_members, i, member)
4198 if (member->is_label_p)
4199 fprintf (asm_out_file, "%s\n", member->format);
4200 else
4201 output_asm_insn (member->format, member->operands);
4202 }
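
/* A minimal sketch of how the mips_multi_* routines fit together
   (the format strings and operands here are hypothetical; see the
   synchronization-loop output code elsewhere in this file for real
   uses):

     mips_multi_start ();
     mips_multi_add_label ("1:");
     mips_multi_add_insn ("ll\t%0,%1", dest, mem, NULL);
     mips_multi_add_insn ("sc\t%0,%1", temp, mem, NULL);
     mips_multi_write ();

   Note that the operand list of mips_multi_add_insn must be
   terminated by a null rtx.  */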
4203 \f
4204 /* Return one word of double-word value OP, taking into account the fixed
4205 endianness of certain registers. HIGH_P is true to select the high part,
4206 false to select the low part. */
4207
4208 rtx
4209 mips_subword (rtx op, bool high_p)
4210 {
4211 unsigned int byte, offset;
4212 enum machine_mode mode;
4213
4214 mode = GET_MODE (op);
4215 if (mode == VOIDmode)
4216 mode = TARGET_64BIT ? TImode : DImode;
4217
4218 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
4219 byte = UNITS_PER_WORD;
4220 else
4221 byte = 0;
4222
4223 if (FP_REG_RTX_P (op))
4224 {
4225 /* Paired FPRs are always ordered little-endian. */
4226 offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
4227 return gen_rtx_REG (word_mode, REGNO (op) + offset);
4228 }
4229
4230 if (MEM_P (op))
4231 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
4232
4233 return simplify_gen_subreg (word_mode, op, mode, byte);
4234 }
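
/* For example, for a DImode value held in a pair of GPRs on a
   big-endian 32-bit target, mips_subword (op, true) returns the first
   register of the pair, since big-endian order places the
   most-significant word there.  */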
4235
4236 /* Return true if SRC should be moved into DEST using "MULT $0, $0".
4237 SPLIT_TYPE is the condition under which moves should be split. */
4238
4239 static bool
4240 mips_mult_move_p (rtx dest, rtx src, enum mips_split_type split_type)
4241 {
4242 return ((split_type != SPLIT_FOR_SPEED
4243 || mips_tuning_info.fast_mult_zero_zero_p)
4244 && src == const0_rtx
4245 && REG_P (dest)
4246 && GET_MODE_SIZE (GET_MODE (dest)) == 2 * UNITS_PER_WORD
4247 && (ISA_HAS_DSP_MULT
4248 ? ACC_REG_P (REGNO (dest))
4249 : MD_REG_P (REGNO (dest))));
4250 }
4251
4252 /* Return true if a move from SRC to DEST should be split into two.
4253 SPLIT_TYPE describes the split condition. */
4254
4255 bool
4256 mips_split_move_p (rtx dest, rtx src, enum mips_split_type split_type)
4257 {
4258 /* Check whether the move can be done using some variant of MULT $0,$0. */
4259 if (mips_mult_move_p (dest, src, split_type))
4260 return false;
4261
4262 /* FPR-to-FPR moves can be done in a single instruction, if they're
4263 allowed at all. */
4264 unsigned int size = GET_MODE_SIZE (GET_MODE (dest));
4265 if (size == 8 && FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
4266 return false;
4267
4268 /* Check for floating-point loads and stores. */
4269 if (size == 8 && ISA_HAS_LDC1_SDC1)
4270 {
4271 if (FP_REG_RTX_P (dest) && MEM_P (src))
4272 return false;
4273 if (FP_REG_RTX_P (src) && MEM_P (dest))
4274 return false;
4275 }
4276
4277 /* Otherwise split all multiword moves. */
4278 return size > UNITS_PER_WORD;
4279 }
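
/* For example, on a 32-bit target a DImode move between two GPRs must
   be split into two SImode moves, whereas a DFmode move between two
   FPRs is not split: it is a single MOV.D, and FPR loads and stores
   are single LDC1s and SDC1s when ISA_HAS_LDC1_SDC1.  */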
4280
4281 /* Split a move from SRC to DEST, given that mips_split_move_p holds.
4282 SPLIT_TYPE describes the split condition. */
4283
4284 void
4285 mips_split_move (rtx dest, rtx src, enum mips_split_type split_type)
4286 {
4287 rtx low_dest;
4288
4289 gcc_checking_assert (mips_split_move_p (dest, src, split_type));
4290 if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
4291 {
4292 if (!TARGET_64BIT && GET_MODE (dest) == DImode)
4293 emit_insn (gen_move_doubleword_fprdi (dest, src));
4294 else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
4295 emit_insn (gen_move_doubleword_fprdf (dest, src));
4296 else if (!TARGET_64BIT && GET_MODE (dest) == V2SFmode)
4297 emit_insn (gen_move_doubleword_fprv2sf (dest, src));
4298 else if (!TARGET_64BIT && GET_MODE (dest) == V2SImode)
4299 emit_insn (gen_move_doubleword_fprv2si (dest, src));
4300 else if (!TARGET_64BIT && GET_MODE (dest) == V4HImode)
4301 emit_insn (gen_move_doubleword_fprv4hi (dest, src));
4302 else if (!TARGET_64BIT && GET_MODE (dest) == V8QImode)
4303 emit_insn (gen_move_doubleword_fprv8qi (dest, src));
4304 else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
4305 emit_insn (gen_move_doubleword_fprtf (dest, src));
4306 else
4307 gcc_unreachable ();
4308 }
4309 else if (REG_P (dest) && REGNO (dest) == MD_REG_FIRST)
4310 {
4311 low_dest = mips_subword (dest, false);
4312 mips_emit_move (low_dest, mips_subword (src, false));
4313 if (TARGET_64BIT)
4314 emit_insn (gen_mthidi_ti (dest, mips_subword (src, true), low_dest));
4315 else
4316 emit_insn (gen_mthisi_di (dest, mips_subword (src, true), low_dest));
4317 }
4318 else if (REG_P (src) && REGNO (src) == MD_REG_FIRST)
4319 {
4320 mips_emit_move (mips_subword (dest, false), mips_subword (src, false));
4321 if (TARGET_64BIT)
4322 emit_insn (gen_mfhidi_ti (mips_subword (dest, true), src));
4323 else
4324 emit_insn (gen_mfhisi_di (mips_subword (dest, true), src));
4325 }
4326 else
4327 {
4328 /* The operation can be split into two normal moves. Decide in
4329 which order to do them. */
4330 low_dest = mips_subword (dest, false);
4331 if (REG_P (low_dest)
4332 && reg_overlap_mentioned_p (low_dest, src))
4333 {
4334 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
4335 mips_emit_move (low_dest, mips_subword (src, false));
4336 }
4337 else
4338 {
4339 mips_emit_move (low_dest, mips_subword (src, false));
4340 mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
4341 }
4342 }
4343 }
4344
4345 /* Return the split type for instruction INSN. */
4346
4347 static enum mips_split_type
4348 mips_insn_split_type (rtx insn)
4349 {
4350 basic_block bb = BLOCK_FOR_INSN (insn);
4351 if (bb)
4352 {
4353 if (optimize_bb_for_speed_p (bb))
4354 return SPLIT_FOR_SPEED;
4355 else
4356 return SPLIT_FOR_SIZE;
4357 }
4358 /* Once CFG information has been removed, we should trust the optimization
4359 decisions made by previous passes and only split where necessary. */
4360 return SPLIT_IF_NECESSARY;
4361 }
4362
4363 /* Return true if a move from SRC to DEST in INSN should be split. */
4364
4365 bool
4366 mips_split_move_insn_p (rtx dest, rtx src, rtx insn)
4367 {
4368 return mips_split_move_p (dest, src, mips_insn_split_type (insn));
4369 }
4370
4371 /* Split a move from SRC to DEST in INSN, given that mips_split_move_insn_p
4372 holds. */
4373
4374 void
4375 mips_split_move_insn (rtx dest, rtx src, rtx insn)
4376 {
4377 mips_split_move (dest, src, mips_insn_split_type (insn));
4378 }
4379 \f
4380 /* Return the appropriate instructions to move SRC into DEST. Assume
4381 that SRC is operand 1 and DEST is operand 0. */
4382
4383 const char *
4384 mips_output_move (rtx dest, rtx src)
4385 {
4386 enum rtx_code dest_code, src_code;
4387 enum machine_mode mode;
4388 enum mips_symbol_type symbol_type;
4389 bool dbl_p;
4390
4391 dest_code = GET_CODE (dest);
4392 src_code = GET_CODE (src);
4393 mode = GET_MODE (dest);
4394 dbl_p = (GET_MODE_SIZE (mode) == 8);
4395
4396 if (mips_split_move_p (dest, src, SPLIT_IF_NECESSARY))
4397 return "#";
4398
4399 if ((src_code == REG && GP_REG_P (REGNO (src)))
4400 || (!TARGET_MIPS16 && src == CONST0_RTX (mode)))
4401 {
4402 if (dest_code == REG)
4403 {
4404 if (GP_REG_P (REGNO (dest)))
4405 return "move\t%0,%z1";
4406
4407 if (mips_mult_move_p (dest, src, SPLIT_IF_NECESSARY))
4408 {
4409 if (ISA_HAS_DSP_MULT)
4410 return "mult\t%q0,%.,%.";
4411 else
4412 return "mult\t%.,%.";
4413 }
4414
4415 /* Moves to HI are handled by special .md insns. */
4416 if (REGNO (dest) == LO_REGNUM)
4417 return "mtlo\t%z1";
4418
4419 if (DSP_ACC_REG_P (REGNO (dest)))
4420 {
4421 static char retval[] = "mt__\t%z1,%q0";
4422
4423 retval[2] = reg_names[REGNO (dest)][4];
4424 retval[3] = reg_names[REGNO (dest)][5];
4425 return retval;
4426 }
4427
4428 if (FP_REG_P (REGNO (dest)))
4429 return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";
4430
4431 if (ALL_COP_REG_P (REGNO (dest)))
4432 {
4433 static char retval[] = "dmtc_\t%z1,%0";
4434
4435 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
4436 return dbl_p ? retval : retval + 1;
4437 }
4438 }
4439 if (dest_code == MEM)
4440 switch (GET_MODE_SIZE (mode))
4441 {
4442 case 1: return "sb\t%z1,%0";
4443 case 2: return "sh\t%z1,%0";
4444 case 4: return "sw\t%z1,%0";
4445 case 8: return "sd\t%z1,%0";
4446 }
4447 }
4448 if (dest_code == REG && GP_REG_P (REGNO (dest)))
4449 {
4450 if (src_code == REG)
4451 {
4452 /* Moves from HI are handled by special .md insns. */
4453 if (REGNO (src) == LO_REGNUM)
4454 {
4455 /* When generating VR4120 or VR4130 code, we use MACC and
4456 DMACC instead of MFLO. This avoids both the normal
4457 MIPS III HI/LO hazards and the errata related to
4458 -mfix-vr4130. */
4459 if (ISA_HAS_MACCHI)
4460 return dbl_p ? "dmacc\t%0,%.,%." : "macc\t%0,%.,%.";
4461 return "mflo\t%0";
4462 }
4463
4464 if (DSP_ACC_REG_P (REGNO (src)))
4465 {
4466 static char retval[] = "mf__\t%0,%q1";
4467
4468 retval[2] = reg_names[REGNO (src)][4];
4469 retval[3] = reg_names[REGNO (src)][5];
4470 return retval;
4471 }
4472
4473 if (FP_REG_P (REGNO (src)))
4474 return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";
4475
4476 if (ALL_COP_REG_P (REGNO (src)))
4477 {
4478 static char retval[] = "dmfc_\t%0,%1";
4479
4480 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
4481 return dbl_p ? retval : retval + 1;
4482 }
4483 }
4484
4485 if (src_code == MEM)
4486 switch (GET_MODE_SIZE (mode))
4487 {
4488 case 1: return "lbu\t%0,%1";
4489 case 2: return "lhu\t%0,%1";
4490 case 4: return "lw\t%0,%1";
4491 case 8: return "ld\t%0,%1";
4492 }
4493
4494 if (src_code == CONST_INT)
4495 {
4496 /* Don't use the X format for the operand itself, because that
4497 will give out-of-range numbers for 64-bit hosts and 32-bit
4498 targets. */
4499 if (!TARGET_MIPS16)
4500 return "li\t%0,%1\t\t\t# %X1";
4501
4502 if (SMALL_OPERAND_UNSIGNED (INTVAL (src)))
4503 return "li\t%0,%1";
4504
4505 if (SMALL_OPERAND_UNSIGNED (-INTVAL (src)))
4506 return "#";
4507 }
4508
4509 if (src_code == HIGH)
4510 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
4511
4512 if (CONST_GP_P (src))
4513 return "move\t%0,%1";
4514
4515 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
4516 && mips_lo_relocs[symbol_type] != 0)
4517 {
4518 /* A signed 16-bit constant formed by applying a relocation
4519 operator to a symbolic address. */
4520 gcc_assert (!mips_split_p[symbol_type]);
4521 return "li\t%0,%R1";
4522 }
4523
4524 if (symbolic_operand (src, VOIDmode))
4525 {
4526 gcc_assert (TARGET_MIPS16
4527 ? TARGET_MIPS16_TEXT_LOADS
4528 : !TARGET_EXPLICIT_RELOCS);
4529 return dbl_p ? "dla\t%0,%1" : "la\t%0,%1";
4530 }
4531 }
4532 if (src_code == REG && FP_REG_P (REGNO (src)))
4533 {
4534 if (dest_code == REG && FP_REG_P (REGNO (dest)))
4535 {
4536 if (GET_MODE (dest) == V2SFmode)
4537 return "mov.ps\t%0,%1";
4538 else
4539 return dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1";
4540 }
4541
4542 if (dest_code == MEM)
4543 return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
4544 }
4545 if (dest_code == REG && FP_REG_P (REGNO (dest)))
4546 {
4547 if (src_code == MEM)
4548 return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
4549 }
4550 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
4551 {
4552 static char retval[] = "l_c_\t%0,%1";
4553
4554 retval[1] = (dbl_p ? 'd' : 'w');
4555 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
4556 return retval;
4557 }
4558 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
4559 {
4560 static char retval[] = "s_c_\t%1,%0";
4561
4562 retval[1] = (dbl_p ? 'd' : 'w');
4563 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
4564 return retval;
4565 }
4566 gcc_unreachable ();
4567 }
4568 \f
4569 /* Return true if CMP1 is a suitable second operand for integer ordering
4570 test CODE. See also the *sCC patterns in mips.md. */
4571
4572 static bool
4573 mips_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
4574 {
4575 switch (code)
4576 {
4577 case GT:
4578 case GTU:
4579 return reg_or_0_operand (cmp1, VOIDmode);
4580
4581 case GE:
4582 case GEU:
4583 return !TARGET_MIPS16 && cmp1 == const1_rtx;
4584
4585 case LT:
4586 case LTU:
4587 return arith_operand (cmp1, VOIDmode);
4588
4589 case LE:
4590 return sle_operand (cmp1, VOIDmode);
4591
4592 case LEU:
4593 return sleu_operand (cmp1, VOIDmode);
4594
4595 default:
4596 gcc_unreachable ();
4597 }
4598 }
4599
4600 /* Return true if *CMP1 (of mode MODE) is a valid second operand for
4601 integer ordering test *CODE, or if an equivalent combination can
4602 be formed by adjusting *CODE and *CMP1. When returning true, update
4603 *CODE and *CMP1 with the chosen code and operand, otherwise leave
4604 them alone. */
4605
4606 static bool
4607 mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
4608 enum machine_mode mode)
4609 {
4610 HOST_WIDE_INT plus_one;
4611
4612 if (mips_int_order_operand_ok_p (*code, *cmp1))
4613 return true;
4614
4615 if (CONST_INT_P (*cmp1))
4616 switch (*code)
4617 {
4618 case LE:
4619 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
4620 if (INTVAL (*cmp1) < plus_one)
4621 {
4622 *code = LT;
4623 *cmp1 = force_reg (mode, GEN_INT (plus_one));
4624 return true;
4625 }
4626 break;
4627
4628 case LEU:
4629 plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
4630 if (plus_one != 0)
4631 {
4632 *code = LTU;
4633 *cmp1 = force_reg (mode, GEN_INT (plus_one));
4634 return true;
4635 }
4636 break;
4637
4638 default:
4639 break;
4640 }
4641 return false;
4642 }
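
/* For example, (LE x 0x7fff) in SImode cannot use SLTI directly,
   because 0x8000 does not fit in a signed 16-bit immediate, so the
   test is canonicalized to (LT x R), where R is a register holding
   0x8000.  */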
4643
4644 /* Compare CMP0 and CMP1 using ordering test CODE and store the result
4645 in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
4646 is nonnull, it's OK to set TARGET to the inverse of the result and
4647 flip *INVERT_PTR instead. */
4648
4649 static void
4650 mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
4651 rtx target, rtx cmp0, rtx cmp1)
4652 {
4653 enum machine_mode mode;
4654
4655 /* First see if there is a MIPS instruction that can do this operation.
4656 If not, try doing the same for the inverse operation. If that also
4657 fails, force CMP1 into a register and try again. */
4658 mode = GET_MODE (cmp0);
4659 if (mips_canonicalize_int_order_test (&code, &cmp1, mode))
4660 mips_emit_binary (code, target, cmp0, cmp1);
4661 else
4662 {
4663 enum rtx_code inv_code = reverse_condition (code);
4664 if (!mips_canonicalize_int_order_test (&inv_code, &cmp1, mode))
4665 {
4666 cmp1 = force_reg (mode, cmp1);
4667 mips_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
4668 }
4669 else if (invert_ptr == 0)
4670 {
4671 rtx inv_target;
4672
4673 inv_target = mips_force_binary (GET_MODE (target),
4674 inv_code, cmp0, cmp1);
4675 mips_emit_binary (XOR, target, inv_target, const1_rtx);
4676 }
4677 else
4678 {
4679 *invert_ptr = !*invert_ptr;
4680 mips_emit_binary (inv_code, target, cmp0, cmp1);
4681 }
4682 }
4683 }
4684
4685 /* Return a register that is zero iff CMP0 and CMP1 are equal.
4686 The register will have the same mode as CMP0. */
4687
4688 static rtx
4689 mips_zero_if_equal (rtx cmp0, rtx cmp1)
4690 {
4691 if (cmp1 == const0_rtx)
4692 return cmp0;
4693
4694 if (uns_arith_operand (cmp1, VOIDmode))
4695 return expand_binop (GET_MODE (cmp0), xor_optab,
4696 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
4697
4698 return expand_binop (GET_MODE (cmp0), sub_optab,
4699 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
4700 }
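
/* For example, if CMP1 is a constant that fits XORI's unsigned 16-bit
   immediate field, the result is computed as CMP0 ^ CMP1; larger
   constants fall back to the subtraction CMP0 - CMP1.  */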
4701
4702 /* Convert *CODE into a code that can be used in a floating-point
4703 scc instruction (C.cond.fmt). Return true if the values of
4704 the condition code registers will be inverted, with 0 indicating
4705 that the condition holds. */
4706
4707 static bool
4708 mips_reversed_fp_cond (enum rtx_code *code)
4709 {
4710 switch (*code)
4711 {
4712 case NE:
4713 case LTGT:
4714 case ORDERED:
4715 *code = reverse_condition_maybe_unordered (*code);
4716 return true;
4717
4718 default:
4719 return false;
4720 }
4721 }
4722
4723 /* Allocate a floating-point condition-code register of mode MODE.
4724
4725 These condition code registers are used for certain kinds
4726 of compound operation, such as compare and branches, vconds,
4727 and built-in functions. At expand time, their use is entirely
4728 controlled by MIPS-specific code and is entirely internal
4729 to these compound operations.
4730
4731 We could (and did in the past) expose condition-code values
4732 as pseudo registers and leave the register allocator to pick
4733 appropriate registers. The problem is that it is not practically
4734 possible for the rtl optimizers to guarantee that no spills will
4735 be needed, even when AVOID_CCMODE_COPIES is defined. We would
4736 therefore need spill and reload sequences to handle the worst case.
4737
4738 Although such sequences do exist, they are very expensive and are
4739 not something we'd want to use. This is especially true of CCV2 and
4740 CCV4, where all the shuffling would greatly outweigh whatever benefit
4741 the vectorization itself provides.
4742
4743 The main benefit of having more than one condition-code register
4744 is to allow the pipelining of operations, especially those involving
4745 comparisons and conditional moves. We don't really expect the
4746 registers to be live for long periods, and certainly never want
4747 them to be live across calls.
4748
4749 Also, there should be no penalty attached to using all the available
4750 registers. They are simply bits in the same underlying FPU control
4751 register.
4752
4753 We therefore expose the hardware registers from the outset and use
4754 a simple round-robin allocation scheme. */
4755
4756 static rtx
4757 mips_allocate_fcc (enum machine_mode mode)
4758 {
4759 unsigned int regno, count;
4760
4761 gcc_assert (TARGET_HARD_FLOAT && ISA_HAS_8CC);
4762
4763 if (mode == CCmode)
4764 count = 1;
4765 else if (mode == CCV2mode)
4766 count = 2;
4767 else if (mode == CCV4mode)
4768 count = 4;
4769 else
4770 gcc_unreachable ();
4771
4772 cfun->machine->next_fcc += -cfun->machine->next_fcc & (count - 1);
4773 if (cfun->machine->next_fcc > ST_REG_LAST - ST_REG_FIRST)
4774 cfun->machine->next_fcc = 0;
4775 regno = ST_REG_FIRST + cfun->machine->next_fcc;
4776 cfun->machine->next_fcc += count;
4777 return gen_rtx_REG (mode, regno);
4778 }
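
/* For example, successive CCmode requests return $fcc0, $fcc1, ...,
   $fcc7 and then wrap back to $fcc0.  A CCV2mode request first rounds
   NEXT_FCC up to a multiple of two, so that the two condition codes
   it sets occupy a suitably aligned register pair.  */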
4779
4780 /* Convert a comparison into something that can be used in a branch or
4781 conditional move. On entry, *OP0 and *OP1 are the values being
4782 compared and *CODE is the code used to compare them.
4783
4784 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
4785 If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
4786 otherwise any standard branch condition can be used. The standard branch
4787 conditions are:
4788
4789 - EQ or NE between two registers.
4790 - any comparison between a register and zero. */
4791
4792 static void
4793 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
4794 {
4795 rtx cmp_op0 = *op0;
4796 rtx cmp_op1 = *op1;
4797
4798 if (GET_MODE_CLASS (GET_MODE (*op0)) == MODE_INT)
4799 {
4800 if (!need_eq_ne_p && *op1 == const0_rtx)
4801 ;
4802 else if (*code == EQ || *code == NE)
4803 {
4804 if (need_eq_ne_p)
4805 {
4806 *op0 = mips_zero_if_equal (cmp_op0, cmp_op1);
4807 *op1 = const0_rtx;
4808 }
4809 else
4810 *op1 = force_reg (GET_MODE (cmp_op0), cmp_op1);
4811 }
4812 else
4813 {
4814 /* The comparison needs a separate scc instruction. Store the
4815 result of the scc in *OP0 and compare it against zero. */
4816 bool invert = false;
4817 *op0 = gen_reg_rtx (GET_MODE (cmp_op0));
4818 mips_emit_int_order_test (*code, &invert, *op0, cmp_op0, cmp_op1);
4819 *code = (invert ? EQ : NE);
4820 *op1 = const0_rtx;
4821 }
4822 }
4823 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_op0)))
4824 {
4825 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
4826 mips_emit_binary (*code, *op0, cmp_op0, cmp_op1);
4827 *code = NE;
4828 *op1 = const0_rtx;
4829 }
4830 else
4831 {
4832 enum rtx_code cmp_code;
4833
4834 /* Floating-point tests use a separate C.cond.fmt comparison to
4835 set a condition code register. The branch or conditional move
4836 will then compare that register against zero.
4837
4838 Set CMP_CODE to the code of the comparison instruction and
4839 *CODE to the code that the branch or move should use. */
4840 cmp_code = *code;
4841 *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
4842 *op0 = (ISA_HAS_8CC
4843 ? mips_allocate_fcc (CCmode)
4844 : gen_rtx_REG (CCmode, FPSW_REGNUM));
4845 *op1 = const0_rtx;
4846 mips_emit_binary (cmp_code, *op0, cmp_op0, cmp_op1);
4847 }
4848 }
4849 \f
4850 /* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
and OPERANDS[3].  Store the result in OPERANDS[0].
4852
4853 On 64-bit targets, the mode of the comparison and target will always be
4854 SImode, thus possibly narrower than that of the comparison's operands. */
4855
4856 void
4857 mips_expand_scc (rtx operands[])
4858 {
4859 rtx target = operands[0];
4860 enum rtx_code code = GET_CODE (operands[1]);
4861 rtx op0 = operands[2];
4862 rtx op1 = operands[3];
4863
4864 gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT);
4865
4866 if (code == EQ || code == NE)
4867 {
4868 if (ISA_HAS_SEQ_SNE
4869 && reg_imm10_operand (op1, GET_MODE (op1)))
4870 mips_emit_binary (code, target, op0, op1);
4871 else
4872 {
4873 rtx zie = mips_zero_if_equal (op0, op1);
4874 mips_emit_binary (code, target, zie, const0_rtx);
4875 }
4876 }
4877 else
4878 mips_emit_int_order_test (code, 0, target, op0, op1);
4879 }
4880
4881 /* Compare OPERANDS[1] with OPERANDS[2] using comparison code
4882 CODE and jump to OPERANDS[3] if the condition holds. */
4883
4884 void
4885 mips_expand_conditional_branch (rtx *operands)
4886 {
4887 enum rtx_code code = GET_CODE (operands[0]);
4888 rtx op0 = operands[1];
4889 rtx op1 = operands[2];
4890 rtx condition;
4891
4892 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
4893 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4894 emit_jump_insn (gen_condjump (condition, operands[3]));
4895 }
4896
4897 /* Implement:
4898
4899 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
4900 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
4901
4902 void
4903 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
4904 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
4905 {
4906 rtx cmp_result;
4907 bool reversed_p;
4908
4909 reversed_p = mips_reversed_fp_cond (&cond);
4910 cmp_result = mips_allocate_fcc (CCV2mode);
4911 emit_insn (gen_scc_ps (cmp_result,
4912 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
4913 if (reversed_p)
4914 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
4915 cmp_result));
4916 else
4917 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
4918 cmp_result));
4919 }
4920
4921 /* Perform the comparison in OPERANDS[1]. Move OPERANDS[2] into OPERANDS[0]
4922 if the condition holds, otherwise move OPERANDS[3] into OPERANDS[0]. */
4923
4924 void
4925 mips_expand_conditional_move (rtx *operands)
4926 {
4927 rtx cond;
4928 enum rtx_code code = GET_CODE (operands[1]);
4929 rtx op0 = XEXP (operands[1], 0);
4930 rtx op1 = XEXP (operands[1], 1);
4931
4932 mips_emit_compare (&code, &op0, &op1, true);
4933 cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1);
4934 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
4935 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
4936 operands[2], operands[3])));
4937 }
4938
4939 /* Perform the comparison in COMPARISON, then trap if the condition holds. */
4940
4941 void
4942 mips_expand_conditional_trap (rtx comparison)
4943 {
4944 rtx op0, op1;
4945 enum machine_mode mode;
4946 enum rtx_code code;
4947
4948 /* MIPS conditional trap instructions don't have GT or LE flavors,
4949 so we must swap the operands and convert to LT and GE respectively. */
4950 code = GET_CODE (comparison);
4951 switch (code)
4952 {
4953 case GT:
4954 case LE:
4955 case GTU:
4956 case LEU:
4957 code = swap_condition (code);
4958 op0 = XEXP (comparison, 1);
4959 op1 = XEXP (comparison, 0);
4960 break;
4961
4962 default:
4963 op0 = XEXP (comparison, 0);
4964 op1 = XEXP (comparison, 1);
4965 break;
4966 }
4967
4968 mode = GET_MODE (XEXP (comparison, 0));
4969 op0 = force_reg (mode, op0);
4970 if (!arith_operand (op1, mode))
4971 op1 = force_reg (mode, op1);
4972
4973 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
4974 gen_rtx_fmt_ee (code, mode, op0, op1),
4975 const0_rtx));
4976 }
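
/* For example, (GT x y) becomes (LT y x) above, so the instruction
   emitted is (trap_if (lt y x) 0); the two forms test the same
   condition with the operands swapped.  */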
4977 \f
4978 /* Initialize *CUM for a call to a function of type FNTYPE. */
4979
4980 void
4981 mips_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype)
4982 {
4983 memset (cum, 0, sizeof (*cum));
4984 cum->prototype = (fntype && prototype_p (fntype));
4985 cum->gp_reg_found = (cum->prototype && stdarg_p (fntype));
4986 }
4987
4988 /* Fill INFO with information about a single argument. CUM is the
4989 cumulative state for earlier arguments. MODE is the mode of this
4990 argument and TYPE is its type (if known). NAMED is true if this
4991 is a named (fixed) argument rather than a variable one. */
4992
4993 static void
4994 mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
4995 enum machine_mode mode, const_tree type, bool named)
4996 {
4997 bool doubleword_aligned_p;
4998 unsigned int num_bytes, num_words, max_regs;
4999
5000 /* Work out the size of the argument. */
5001 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
5002 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5003
5004 /* Decide whether it should go in a floating-point register, assuming
5005 one is free. Later code checks for availability.
5006
5007 The checks against UNITS_PER_FPVALUE handle the soft-float and
5008 single-float cases. */
5009 switch (mips_abi)
5010 {
5011 case ABI_EABI:
5012 /* The EABI conventions have traditionally been defined in terms
5013 of TYPE_MODE, regardless of the actual type. */
5014 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
5015 || mode == V2SFmode)
5016 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
5017 break;
5018
5019 case ABI_32:
5020 case ABI_O64:
5021 /* Only leading floating-point scalars are passed in
floating-point registers.  We also handle vector floats the same
way, which is OK because they are not covered by the standard ABI.  */
5024 info->fpr_p = (!cum->gp_reg_found
5025 && cum->arg_number < 2
5026 && (type == 0
5027 || SCALAR_FLOAT_TYPE_P (type)
5028 || VECTOR_FLOAT_TYPE_P (type))
5029 && (GET_MODE_CLASS (mode) == MODE_FLOAT
5030 || mode == V2SFmode)
5031 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
5032 break;
5033
5034 case ABI_N32:
5035 case ABI_64:
5036 /* Scalar, complex and vector floating-point types are passed in
5037 floating-point registers, as long as this is a named rather
5038 than a variable argument. */
5039 info->fpr_p = (named
5040 && (type == 0 || FLOAT_TYPE_P (type))
5041 && (GET_MODE_CLASS (mode) == MODE_FLOAT
5042 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5043 || mode == V2SFmode)
5044 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
5045
5046 /* ??? According to the ABI documentation, the real and imaginary
5047 parts of complex floats should be passed in individual registers.
5048 The real and imaginary parts of stack arguments are supposed
5049 to be contiguous and there should be an extra word of padding
5050 at the end.
5051
5052 This has two problems. First, it makes it impossible to use a
5053 single "void *" va_list type, since register and stack arguments
5054 are passed differently. (At the time of writing, MIPSpro cannot
5055 handle complex float varargs correctly.) Second, it's unclear
5056 what should happen when there is only one register free.
5057
5058 For now, we assume that named complex floats should go into FPRs
5059 if there are two FPRs free, otherwise they should be passed in the
5060 same way as a struct containing two floats. */
5061 if (info->fpr_p
5062 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5063 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
5064 {
5065 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
5066 info->fpr_p = false;
5067 else
5068 num_words = 2;
5069 }
5070 break;
5071
5072 default:
5073 gcc_unreachable ();
5074 }
5075
5076 /* See whether the argument has doubleword alignment. */
5077 doubleword_aligned_p = (mips_function_arg_boundary (mode, type)
5078 > BITS_PER_WORD);
5079
5080 /* Set REG_OFFSET to the register count we're interested in.
5081 The EABI allocates the floating-point registers separately,
5082 but the other ABIs allocate them like integer registers. */
5083 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
5084 ? cum->num_fprs
5085 : cum->num_gprs);
5086
5087 /* Advance to an even register if the argument is doubleword-aligned. */
5088 if (doubleword_aligned_p)
5089 info->reg_offset += info->reg_offset & 1;
5090
5091 /* Work out the offset of a stack argument. */
5092 info->stack_offset = cum->stack_words;
5093 if (doubleword_aligned_p)
5094 info->stack_offset += info->stack_offset & 1;
5095
5096 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
5097
5098 /* Partition the argument between registers and stack. */
5099 info->reg_words = MIN (num_words, max_regs);
5100 info->stack_words = num_words - info->reg_words;
5101 }
5102
5103 /* INFO describes a register argument that has the normal format for the
5104 argument's mode. Return the register it uses, assuming that FPRs are
5105 available if HARD_FLOAT_P. */
5106
5107 static unsigned int
5108 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
5109 {
5110 if (!info->fpr_p || !hard_float_p)
5111 return GP_ARG_FIRST + info->reg_offset;
5112 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
5113 /* In o32, the second argument is always passed in $f14
5114 for TARGET_DOUBLE_FLOAT, regardless of whether the
5115 first argument was a word or doubleword. */
5116 return FP_ARG_FIRST + 2;
5117 else
5118 return FP_ARG_FIRST + info->reg_offset;
5119 }
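
/* For example, under o32 with TARGET_DOUBLE_FLOAT, a function taking
   two double arguments receives them in $f12 and $f14: the second
   argument has a register offset of 2 and hits the special case
   above.  */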
5120
5121 /* Implement TARGET_STRICT_ARGUMENT_NAMING. */
5122
5123 static bool
5124 mips_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
5125 {
5126 return !TARGET_OLDABI;
5127 }
5128
5129 /* Implement TARGET_FUNCTION_ARG. */
5130
5131 static rtx
5132 mips_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
5133 const_tree type, bool named)
5134 {
5135 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5136 struct mips_arg_info info;
5137
5138 /* We will be called with a mode of VOIDmode after the last argument
5139 has been seen. Whatever we return will be passed to the call expander.
5140 If we need a MIPS16 fp_code, return a REG with the code stored as
5141 the mode. */
5142 if (mode == VOIDmode)
5143 {
5144 if (TARGET_MIPS16 && cum->fp_code != 0)
5145 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
5146 else
5147 return NULL;
5148 }
5149
5150 mips_get_arg_info (&info, cum, mode, type, named);
5151
5152 /* Return straight away if the whole argument is passed on the stack. */
5153 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
5154 return NULL;
5155
5156 /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
5157 contains a double in its entirety, then that 64-bit chunk is passed
5158 in a floating-point register. */
5159 if (TARGET_NEWABI
5160 && TARGET_HARD_FLOAT
5161 && named
5162 && type != 0
5163 && TREE_CODE (type) == RECORD_TYPE
5164 && TYPE_SIZE_UNIT (type)
5165 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
5166 {
5167 tree field;
5168
5169 /* First check to see if there is any such field. */
5170 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5171 if (TREE_CODE (field) == FIELD_DECL
5172 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
5173 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
5174 && tree_fits_shwi_p (bit_position (field))
5175 && int_bit_position (field) % BITS_PER_WORD == 0)
5176 break;
5177
5178 if (field != 0)
5179 {
5180 /* Now handle the special case by returning a PARALLEL
5181 indicating where each 64-bit chunk goes. INFO.REG_WORDS
5182 chunks are passed in registers. */
5183 unsigned int i;
5184 HOST_WIDE_INT bitpos;
5185 rtx ret;
5186
5187 /* assign_parms checks the mode of ENTRY_PARM, so we must
5188 use the actual mode here. */
5189 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
5190
5191 bitpos = 0;
5192 field = TYPE_FIELDS (type);
5193 for (i = 0; i < info.reg_words; i++)
5194 {
5195 rtx reg;
5196
5197 for (; field; field = DECL_CHAIN (field))
5198 if (TREE_CODE (field) == FIELD_DECL
5199 && int_bit_position (field) >= bitpos)
5200 break;
5201
5202 if (field
5203 && int_bit_position (field) == bitpos
5204 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
5205 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
5206 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
5207 else
5208 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
5209
5210 XVECEXP (ret, 0, i)
5211 = gen_rtx_EXPR_LIST (VOIDmode, reg,
5212 GEN_INT (bitpos / BITS_PER_UNIT));
5213
5214 bitpos += BITS_PER_WORD;
5215 }
5216 return ret;
5217 }
5218 }
5219
5220 /* Handle the n32/n64 conventions for passing complex floating-point
5221 arguments in FPR pairs. The real part goes in the lower register
5222 and the imaginary part goes in the upper register. */
5223 if (TARGET_NEWABI
5224 && info.fpr_p
5225 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5226 {
5227 rtx real, imag;
5228 enum machine_mode inner;
5229 unsigned int regno;
5230
5231 inner = GET_MODE_INNER (mode);
5232 regno = FP_ARG_FIRST + info.reg_offset;
5233 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
5234 {
5235 /* Real part in registers, imaginary part on stack. */
5236 gcc_assert (info.stack_words == info.reg_words);
5237 return gen_rtx_REG (inner, regno);
5238 }
5239 else
5240 {
5241 gcc_assert (info.stack_words == 0);
5242 real = gen_rtx_EXPR_LIST (VOIDmode,
5243 gen_rtx_REG (inner, regno),
5244 const0_rtx);
5245 imag = gen_rtx_EXPR_LIST (VOIDmode,
5246 gen_rtx_REG (inner,
5247 regno + info.reg_words / 2),
5248 GEN_INT (GET_MODE_SIZE (inner)));
5249 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
5250 }
5251 }
5252
5253 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
5254 }
5255
5256 /* Implement TARGET_FUNCTION_ARG_ADVANCE. */
5257
5258 static void
5259 mips_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
5260 const_tree type, bool named)
5261 {
5262 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
5263 struct mips_arg_info info;
5264
5265 mips_get_arg_info (&info, cum, mode, type, named);
5266
5267 if (!info.fpr_p)
5268 cum->gp_reg_found = true;
5269
5270 /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
5271 an explanation of what this code does. It assumes that we're using
5272 either the o32 or the o64 ABI, both of which pass at most 2 arguments
5273 in FPRs. */
5274 if (cum->arg_number < 2 && info.fpr_p)
5275 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
5276
5277 /* Advance the register count. This has the effect of setting
5278 num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
5279 argument required us to skip the final GPR and pass the whole
5280 argument on the stack. */
5281 if (mips_abi != ABI_EABI || !info.fpr_p)
5282 cum->num_gprs = info.reg_offset + info.reg_words;
5283 else if (info.reg_words > 0)
5284 cum->num_fprs += MAX_FPRS_PER_FMT;
5285
5286 /* Advance the stack word count. */
5287 if (info.stack_words > 0)
5288 cum->stack_words = info.stack_offset + info.stack_words;
5289
5290 cum->arg_number++;
5291 }
5292
5293 /* Implement TARGET_ARG_PARTIAL_BYTES. */
5294
5295 static int
5296 mips_arg_partial_bytes (cumulative_args_t cum,
5297 enum machine_mode mode, tree type, bool named)
5298 {
5299 struct mips_arg_info info;
5300
5301 mips_get_arg_info (&info, get_cumulative_args (cum), mode, type, named);
5302 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
5303 }
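
/* Editor's illustration (a sketch, not compiler code): if a 16-byte
   structure is passed on a 64-bit target when only one GPR remains,
   mips_get_arg_info sets info.reg_words and info.stack_words to 1 each,
   so the hook above reports 1 * UNITS_PER_WORD == 8 bytes passed in
   registers, with the remainder going on the stack.  */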
5304
5305 /* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
5306 least PARM_BOUNDARY bits of alignment, but will be given anything up
5307 to STACK_BOUNDARY bits if the type requires it. */
5308
5309 static unsigned int
5310 mips_function_arg_boundary (enum machine_mode mode, const_tree type)
5311 {
5312 unsigned int alignment;
5313
5314 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
5315 if (alignment < PARM_BOUNDARY)
5316 alignment = PARM_BOUNDARY;
5317 if (alignment > STACK_BOUNDARY)
5318 alignment = STACK_BOUNDARY;
5319 return alignment;
5320 }
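
/* Editor's illustration, assuming the usual o32 values PARM_BOUNDARY
   == 32 and STACK_BOUNDARY == 64: a "char" argument is raised to 32
   bits, a "double" keeps its natural 64 bits, and a type that asked
   for 128-bit alignment would be capped at 64.  (A sketch of the
   clamping above, not compiler code.)  */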
5321
5322 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
5323 upward rather than downward. In other words, return true if the
5324 first byte of the stack slot has useful data, false if the last
5325 byte does. */
5326
5327 bool
5328 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
5329 {
5330 /* On little-endian targets, the first byte of every stack argument
5331 is passed in the first byte of the stack slot. */
5332 if (!BYTES_BIG_ENDIAN)
5333 return true;
5334
5335 /* Otherwise, integral types are padded downward: the last byte of a
5336 stack argument is passed in the last byte of the stack slot. */
5337 if (type != 0
5338 ? (INTEGRAL_TYPE_P (type)
5339 || POINTER_TYPE_P (type)
5340 || FIXED_POINT_TYPE_P (type))
5341 : (SCALAR_INT_MODE_P (mode)
5342 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
5343 return false;
5344
5345 /* Big-endian o64 pads floating-point arguments downward. */
5346 if (mips_abi == ABI_O64)
5347 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
5348 return false;
5349
5350 /* Other types are padded upward for o32, o64, n32 and n64. */
5351 if (mips_abi != ABI_EABI)
5352 return true;
5353
5354 /* Arguments smaller than a stack slot are padded downward. */
5355 if (mode != BLKmode)
5356 return GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY;
5357 else
5358 return int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT);
5359 }
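
/* Editor's illustration of the rules above (a sketch, not compiler
   code): on a big-endian o32 target a "short" stack argument is padded
   downward, so its two bytes occupy the end of the four-byte slot,
   whereas under EABI a two-byte structure is smaller than a stack slot
   and is likewise padded downward.  */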
5360
5361 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
5362 if the least significant byte of the register has useful data. Return
5363 the opposite if the most significant byte does. */
5364
5365 bool
5366 mips_pad_reg_upward (enum machine_mode mode, tree type)
5367 {
5368 /* No shifting is required for floating-point arguments. */
5369 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
5370 return !BYTES_BIG_ENDIAN;
5371
5372 /* Otherwise, apply the same padding to register arguments as we do
5373 to stack arguments. */
5374 return mips_pad_arg_upward (mode, type);
5375 }
5376
5377 /* Return nonzero when an argument must be passed by reference. */
5378
5379 static bool
5380 mips_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
5381 enum machine_mode mode, const_tree type,
5382 bool named ATTRIBUTE_UNUSED)
5383 {
5384 if (mips_abi == ABI_EABI)
5385 {
5386 int size;
5387
5388 /* ??? How should SCmode be handled? */
5389 if (mode == DImode || mode == DFmode
5390 || mode == DQmode || mode == UDQmode
5391 || mode == DAmode || mode == UDAmode)
5392 return 0;
5393
5394 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
5395 return size == -1 || size > UNITS_PER_WORD;
5396 }
5397 else
5398 {
5399 /* If we have a variable-sized parameter, we have no choice. */
5400 return targetm.calls.must_pass_in_stack (mode, type);
5401 }
5402 }
5403
5404 /* Implement TARGET_CALLEE_COPIES. */
5405
5406 static bool
5407 mips_callee_copies (cumulative_args_t cum ATTRIBUTE_UNUSED,
5408 enum machine_mode mode ATTRIBUTE_UNUSED,
5409 const_tree type ATTRIBUTE_UNUSED, bool named)
5410 {
5411 return mips_abi == ABI_EABI && named;
5412 }
5413 \f
5414 /* See whether VALTYPE is a record whose fields should be returned in
5415 floating-point registers. If so, return the number of fields and
5416 list them in FIELDS (which should have two elements). Return 0
5417 otherwise.
5418
5419 For n32 & n64, a structure with one or two fields is returned in
5420 floating-point registers as long as every field has a floating-point
5421 type. */
5422
5423 static int
5424 mips_fpr_return_fields (const_tree valtype, tree *fields)
5425 {
5426 tree field;
5427 int i;
5428
5429 if (!TARGET_NEWABI)
5430 return 0;
5431
5432 if (TREE_CODE (valtype) != RECORD_TYPE)
5433 return 0;
5434
5435 i = 0;
5436 for (field = TYPE_FIELDS (valtype); field != 0; field = DECL_CHAIN (field))
5437 {
5438 if (TREE_CODE (field) != FIELD_DECL)
5439 continue;
5440
5441 if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
5442 return 0;
5443
5444 if (i == 2)
5445 return 0;
5446
5447 fields[i++] = field;
5448 }
5449 return i;
5450 }
5451
5452 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
5453 a value in the most significant part of $2/$3 if:
5454
5455 - the target is big-endian;
5456
5457 - the value has a structure or union type (we generalize this to
5458 cover aggregates from other languages too); and
5459
5460 - the structure is not returned in floating-point registers. */
5461
5462 static bool
5463 mips_return_in_msb (const_tree valtype)
5464 {
5465 tree fields[2];
5466
5467 return (TARGET_NEWABI
5468 && TARGET_BIG_ENDIAN
5469 && AGGREGATE_TYPE_P (valtype)
5470 && mips_fpr_return_fields (valtype, fields) == 0);
5471 }
5472
5473 /* Return true if the function return value MODE will get returned in a
5474 floating-point register. */
5475
5476 static bool
5477 mips_return_mode_in_fpr_p (enum machine_mode mode)
5478 {
5479 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
5480 || mode == V2SFmode
5481 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5482 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
5483 }
5484
5485 /* Return the representation of an FPR return register when the
5486 value being returned in FP_RETURN has mode VALUE_MODE and the
5487 return type itself has mode TYPE_MODE. On NewABI targets,
5488 the two modes may be different for structures like:
5489
5490 struct __attribute__((packed)) foo { float f; }
5491
5492 where we return the SFmode value of "f" in FP_RETURN, but where
5493 the structure itself has mode BLKmode. */
5494
5495 static rtx
5496 mips_return_fpr_single (enum machine_mode type_mode,
5497 enum machine_mode value_mode)
5498 {
5499 rtx x;
5500
5501 x = gen_rtx_REG (value_mode, FP_RETURN);
5502 if (type_mode != value_mode)
5503 {
5504 x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
5505 x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
5506 }
5507 return x;
5508 }
5509
5510 /* Return a composite value in a pair of floating-point registers.
5511 MODE1 and OFFSET1 are the mode and byte offset for the first value,
5512 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
5513 complete value.
5514
5515 For n32 & n64, $f0 always holds the first value and $f2 the second.
5516 Otherwise the values are packed together as closely as possible. */
5517
5518 static rtx
5519 mips_return_fpr_pair (enum machine_mode mode,
5520 enum machine_mode mode1, HOST_WIDE_INT offset1,
5521 enum machine_mode mode2, HOST_WIDE_INT offset2)
5522 {
5523 int inc;
5524
5525 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
5526 return gen_rtx_PARALLEL
5527 (mode,
5528 gen_rtvec (2,
5529 gen_rtx_EXPR_LIST (VOIDmode,
5530 gen_rtx_REG (mode1, FP_RETURN),
5531 GEN_INT (offset1)),
5532 gen_rtx_EXPR_LIST (VOIDmode,
5533 gen_rtx_REG (mode2, FP_RETURN + inc),
5534 GEN_INT (offset2))));
5535
5536 }
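
/* Editor's illustration: for a "complex float" return under n32,
   mips_return_fpr_pair (SCmode, SFmode, 0, SFmode, 4) builds RTL of
   roughly this shape, assuming FP_RETURN is $f0:

       (parallel:SC [(expr_list (reg:SF $f0) (const_int 0))
                     (expr_list (reg:SF $f2) (const_int 4))])

   (A sketch, not verbatim RTL dump output.)  */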
5537
5538 /* Implement TARGET_FUNCTION_VALUE and TARGET_LIBCALL_VALUE.
5539 For normal calls, VALTYPE is the return type and MODE is VOIDmode.
5540 For libcalls, VALTYPE is null and MODE is the mode of the return value. */
5541
5542 static rtx
5543 mips_function_value_1 (const_tree valtype, const_tree fn_decl_or_type,
5544 enum machine_mode mode)
5545 {
5546 if (valtype)
5547 {
5548 tree fields[2];
5549 int unsigned_p;
5550 const_tree func;
5551
5552 if (fn_decl_or_type && DECL_P (fn_decl_or_type))
5553 func = fn_decl_or_type;
5554 else
5555 func = NULL;
5556
5557 mode = TYPE_MODE (valtype);
5558 unsigned_p = TYPE_UNSIGNED (valtype);
5559
5560 /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
5561 return values, promote the mode here too. */
5562 mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
5563
5564 /* Handle structures whose fields are returned in $f0/$f2. */
5565 switch (mips_fpr_return_fields (valtype, fields))
5566 {
5567 case 1:
5568 return mips_return_fpr_single (mode,
5569 TYPE_MODE (TREE_TYPE (fields[0])));
5570
5571 case 2:
5572 return mips_return_fpr_pair (mode,
5573 TYPE_MODE (TREE_TYPE (fields[0])),
5574 int_byte_position (fields[0]),
5575 TYPE_MODE (TREE_TYPE (fields[1])),
5576 int_byte_position (fields[1]));
5577 }
5578
5579 /* If a value is returned in the most significant part of a register, see
5580 whether we have to round the mode up to a whole number of words. */
5581 if (mips_return_in_msb (valtype))
5582 {
5583 HOST_WIDE_INT size = int_size_in_bytes (valtype);
5584 if (size % UNITS_PER_WORD != 0)
5585 {
5586 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
5587 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5588 }
5589 }
5590
5591 /* For EABI, the class of return register depends entirely on MODE.
5592 For example, "struct { some_type x; }" and "union { some_type x; }"
5593 are returned in the same way as a bare "some_type" would be.
5594 Other ABIs only use FPRs for scalar, complex or vector types. */
5595 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
5596 return gen_rtx_REG (mode, GP_RETURN);
5597 }
5598
5599 if (!TARGET_MIPS16)
5600 {
5601 /* Handle long doubles for n32 & n64. */
5602 if (mode == TFmode)
5603 return mips_return_fpr_pair (mode,
5604 DImode, 0,
5605 DImode, GET_MODE_SIZE (mode) / 2);
5606
5607 if (mips_return_mode_in_fpr_p (mode))
5608 {
5609 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5610 return mips_return_fpr_pair (mode,
5611 GET_MODE_INNER (mode), 0,
5612 GET_MODE_INNER (mode),
5613 GET_MODE_SIZE (mode) / 2);
5614 else
5615 return gen_rtx_REG (mode, FP_RETURN);
5616 }
5617 }
5618
5619 return gen_rtx_REG (mode, GP_RETURN);
5620 }
5621
5622 /* Implement TARGET_FUNCTION_VALUE. */
5623
5624 static rtx
5625 mips_function_value (const_tree valtype, const_tree fn_decl_or_type,
5626 bool outgoing ATTRIBUTE_UNUSED)
5627 {
5628 return mips_function_value_1 (valtype, fn_decl_or_type, VOIDmode);
5629 }
5630
5631 /* Implement TARGET_LIBCALL_VALUE. */
5632
5633 static rtx
5634 mips_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
5635 {
5636 return mips_function_value_1 (NULL_TREE, NULL_TREE, mode);
5637 }
5638
5639 /* Implement TARGET_FUNCTION_VALUE_REGNO_P.
5640
5641 On the MIPS, R2, R3, F0 and F2 are the only registers thus used.
5642 Currently, only R2 and F0 are implemented here (C has no complex type).
5643
5644 static bool
5645 mips_function_value_regno_p (const unsigned int regno)
5646 {
5647 if (regno == GP_RETURN
5648 || regno == FP_RETURN
5649 || (LONG_DOUBLE_TYPE_SIZE == 128
5650 && FP_RETURN != GP_RETURN
5651 && regno == FP_RETURN + 2))
5652 return true;
5653
5654 return false;
5655 }
5656
5657 /* Implement TARGET_RETURN_IN_MEMORY. Under the o32 and o64 ABIs,
5658 all BLKmode objects are returned in memory. Under the n32, n64
5659 and embedded ABIs, small structures are returned in a register.
5660 Objects with varying size must still be returned in memory, of
5661 course. */
5662
5663 static bool
5664 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5665 {
5666 return (TARGET_OLDABI
5667 ? TYPE_MODE (type) == BLKmode
5668 : !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD));
5669 }
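
/* Editor's illustration (a sketch of the test above): under n64, where
   UNITS_PER_WORD is 8, a structure of up to 16 bytes is returned in
   $2/$3, while a 17-byte structure, or one whose size is variable
   (int_size_in_bytes returns -1), is returned in memory.  */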
5670 \f
5671 /* Implement TARGET_SETUP_INCOMING_VARARGS. */
5672
5673 static void
5674 mips_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
5675 tree type, int *pretend_size ATTRIBUTE_UNUSED,
5676 int no_rtl)
5677 {
5678 CUMULATIVE_ARGS local_cum;
5679 int gp_saved, fp_saved;
5680
5681 /* The caller has advanced CUM up to, but not beyond, the last named
5682 argument. Advance a local copy of CUM past the last "real" named
5683 argument, to find out how many registers are left over. */
5684 local_cum = *get_cumulative_args (cum);
5685 mips_function_arg_advance (pack_cumulative_args (&local_cum), mode, type,
5686 true);
5687
5688 /* Find out how many registers we need to save. */
5689 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
5690 fp_saved = (EABI_FLOAT_VARARGS_P
5691 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
5692 : 0);
5693
5694 if (!no_rtl)
5695 {
5696 if (gp_saved > 0)
5697 {
5698 rtx ptr, mem;
5699
5700 ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
5701 REG_PARM_STACK_SPACE (cfun->decl)
5702 - gp_saved * UNITS_PER_WORD);
5703 mem = gen_frame_mem (BLKmode, ptr);
5704 set_mem_alias_set (mem, get_varargs_alias_set ());
5705
5706 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
5707 mem, gp_saved);
5708 }
5709 if (fp_saved > 0)
5710 {
5711 /* We can't use move_block_from_reg, because it will use
5712 the wrong mode. */
5713 enum machine_mode mode;
5714 int off, i;
5715
5716 /* Set OFF to the offset from virtual_incoming_args_rtx of
5717 the first float register. The FP save area lies below
5718 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
5719 off = (-gp_saved * UNITS_PER_WORD) & -UNITS_PER_FPVALUE;
5720 off -= fp_saved * UNITS_PER_FPREG;
5721
5722 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
5723
5724 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
5725 i += MAX_FPRS_PER_FMT)
5726 {
5727 rtx ptr, mem;
5728
5729 ptr = plus_constant (Pmode, virtual_incoming_args_rtx, off);
5730 mem = gen_frame_mem (mode, ptr);
5731 set_mem_alias_set (mem, get_varargs_alias_set ());
5732 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
5733 off += UNITS_PER_HWFPVALUE;
5734 }
5735 }
5736 }
5737 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
5738 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
5739 + fp_saved * UNITS_PER_FPREG);
5740 }
5741
5742 /* Implement TARGET_BUILTIN_VA_LIST. */
5743
5744 static tree
5745 mips_build_builtin_va_list (void)
5746 {
5747 if (EABI_FLOAT_VARARGS_P)
5748 {
5749 /* We keep three pointers and two offsets.
5750
5751 Two pointers are to the overflow area, which starts at the CFA.
5752 One of these is constant, for addressing into the GPR save area
5753 below it. The other is advanced up the stack through the
5754 overflow region.
5755
5756 The third pointer is to the bottom of the GPR save area.
5757 Since the FPR save area is just below it, we can address
5758 FPR slots off this pointer.
5759
5760 We also keep two one-byte offsets, which are to be subtracted
5761 from the constant pointers to yield addresses in the GPR and
5762 FPR save areas. These are downcounted as float or non-float
5763 arguments are used, and when they get to zero, the argument
5764 must be obtained from the overflow region. */
5765 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
5766 tree array, index;
5767
5768 record = lang_hooks.types.make_type (RECORD_TYPE);
5769
5770 f_ovfl = build_decl (BUILTINS_LOCATION,
5771 FIELD_DECL, get_identifier ("__overflow_argptr"),
5772 ptr_type_node);
5773 f_gtop = build_decl (BUILTINS_LOCATION,
5774 FIELD_DECL, get_identifier ("__gpr_top"),
5775 ptr_type_node);
5776 f_ftop = build_decl (BUILTINS_LOCATION,
5777 FIELD_DECL, get_identifier ("__fpr_top"),
5778 ptr_type_node);
5779 f_goff = build_decl (BUILTINS_LOCATION,
5780 FIELD_DECL, get_identifier ("__gpr_offset"),
5781 unsigned_char_type_node);
5782 f_foff = build_decl (BUILTINS_LOCATION,
5783 FIELD_DECL, get_identifier ("__fpr_offset"),
5784 unsigned_char_type_node);
5785 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
5786 warn on every user file. */
5787 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
5788 array = build_array_type (unsigned_char_type_node,
5789 build_index_type (index));
5790 f_res = build_decl (BUILTINS_LOCATION,
5791 FIELD_DECL, get_identifier ("__reserved"), array);
5792
5793 DECL_FIELD_CONTEXT (f_ovfl) = record;
5794 DECL_FIELD_CONTEXT (f_gtop) = record;
5795 DECL_FIELD_CONTEXT (f_ftop) = record;
5796 DECL_FIELD_CONTEXT (f_goff) = record;
5797 DECL_FIELD_CONTEXT (f_foff) = record;
5798 DECL_FIELD_CONTEXT (f_res) = record;
5799
5800 TYPE_FIELDS (record) = f_ovfl;
5801 DECL_CHAIN (f_ovfl) = f_gtop;
5802 DECL_CHAIN (f_gtop) = f_ftop;
5803 DECL_CHAIN (f_ftop) = f_goff;
5804 DECL_CHAIN (f_goff) = f_foff;
5805 DECL_CHAIN (f_foff) = f_res;
5806
5807 layout_type (record);
5808 return record;
5809 }
5810 else
5811 /* Otherwise, we use 'void *'. */
5812 return ptr_type_node;
5813 }
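
/* Editor's illustration: the record built above corresponds to this C
   structure.  This is a sketch, not code used by the compiler; the
   struct tag is invented, and the __reserved size assumes 32-bit
   pointers (GET_MODE_SIZE (ptr_mode) - 2 == 2):

       struct mips_eabi_va_list {
         void *__overflow_argptr;     // next overflow stack argument
         void *__gpr_top;             // top of the GPR save area
         void *__fpr_top;             // top of the FPR save area
         unsigned char __gpr_offset;  // bytes left in the GPR save area
         unsigned char __fpr_offset;  // bytes left in the FPR save area
         unsigned char __reserved[2]; // pads the record to pointer size
       };  */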
5814
5815 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
5816
5817 static void
5818 mips_va_start (tree valist, rtx nextarg)
5819 {
5820 if (EABI_FLOAT_VARARGS_P)
5821 {
5822 const CUMULATIVE_ARGS *cum;
5823 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
5824 tree ovfl, gtop, ftop, goff, foff;
5825 tree t;
5826 int gpr_save_area_size;
5827 int fpr_save_area_size;
5828 int fpr_offset;
5829
5830 cum = &crtl->args.info;
5831 gpr_save_area_size
5832 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
5833 fpr_save_area_size
5834 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
5835
5836 f_ovfl = TYPE_FIELDS (va_list_type_node);
5837 f_gtop = DECL_CHAIN (f_ovfl);
5838 f_ftop = DECL_CHAIN (f_gtop);
5839 f_goff = DECL_CHAIN (f_ftop);
5840 f_foff = DECL_CHAIN (f_goff);
5841
5842 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
5843 NULL_TREE);
5844 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
5845 NULL_TREE);
5846 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
5847 NULL_TREE);
5848 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
5849 NULL_TREE);
5850 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
5851 NULL_TREE);
5852
5853 /* Emit code to initialize OVFL, which points to the next varargs
5854 stack argument. CUM->STACK_WORDS gives the number of stack
5855 words used by named arguments. */
5856 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
5857 if (cum->stack_words > 0)
5858 t = fold_build_pointer_plus_hwi (t, cum->stack_words * UNITS_PER_WORD);
5859 t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
5860 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5861
5862 /* Emit code to initialize GTOP, the top of the GPR save area. */
5863 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
5864 t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
5865 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5866
5867 /* Emit code to initialize FTOP, the top of the FPR save area.
5868 This address is gpr_save_area_bytes below GTOP, rounded
5869 down to the next fp-aligned boundary. */
5870 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
5871 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
5872 fpr_offset &= -UNITS_PER_FPVALUE;
5873 if (fpr_offset)
5874 t = fold_build_pointer_plus_hwi (t, -fpr_offset);
5875 t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
5876 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5877
5878 /* Emit code to initialize GOFF, the offset from GTOP of the
5879 next GPR argument. */
5880 t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
5881 build_int_cst (TREE_TYPE (goff), gpr_save_area_size));
5882 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5883
5884 /* Likewise emit code to initialize FOFF, the offset from FTOP
5885 of the next FPR argument. */
5886 t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
5887 build_int_cst (TREE_TYPE (foff), fpr_save_area_size));
5888 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5889 }
5890 else
5891 {
5892 nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
5893 std_expand_builtin_va_start (valist, nextarg);
5894 }
5895 }
5896
5897 /* Like std_gimplify_va_arg_expr, but apply alignment to zero-sized
5898 types as well. */
5899
5900 static tree
5901 mips_std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
5902 gimple_seq *post_p)
5903 {
5904 tree addr, t, type_size, rounded_size, valist_tmp;
5905 unsigned HOST_WIDE_INT align, boundary;
5906 bool indirect;
5907
5908 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
5909 if (indirect)
5910 type = build_pointer_type (type);
5911
5912 align = PARM_BOUNDARY / BITS_PER_UNIT;
5913 boundary = targetm.calls.function_arg_boundary (TYPE_MODE (type), type);
5914
5915 /* When the caller aligns a parameter on the stack, alignment beyond
5916 MAX_SUPPORTED_STACK_ALIGNMENT is capped at
5917 MAX_SUPPORTED_STACK_ALIGNMENT. Match the caller's cap here in
5918 the callee. */
5919 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
5920 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
5921
5922 boundary /= BITS_PER_UNIT;
5923
5924 /* Hoist the valist value into a temporary for the moment. */
5925 valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
5926
5927 /* The va_list pointer is aligned to PARM_BOUNDARY. If the argument
5928 actually requires greater alignment, we must perform dynamic alignment. */
5929 if (boundary > align)
5930 {
5931 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
5932 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
5933 gimplify_and_add (t, pre_p);
5934
5935 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
5936 fold_build2 (BIT_AND_EXPR, TREE_TYPE (valist),
5937 valist_tmp,
5938 build_int_cst (TREE_TYPE (valist), -boundary)));
5939 gimplify_and_add (t, pre_p);
5940 }
5941 else
5942 boundary = align;
5943
5944 /* If the actual alignment is less than the alignment of the type,
5945 adjust the type accordingly so that we don't assume strict alignment
5946 when dereferencing the pointer. */
5947 boundary *= BITS_PER_UNIT;
5948 if (boundary < TYPE_ALIGN (type))
5949 {
5950 type = build_variant_type_copy (type);
5951 TYPE_ALIGN (type) = boundary;
5952 }
5953
5954 /* Compute the rounded size of the type. */
5955 type_size = size_in_bytes (type);
5956 rounded_size = round_up (type_size, align);
5957
5958 /* Reduce rounded_size so it's sharable with the postqueue. */
5959 gimplify_expr (&rounded_size, pre_p, post_p, is_gimple_val, fb_rvalue);
5960
5961 /* Get AP. */
5962 addr = valist_tmp;
5963 if (PAD_VARARGS_DOWN && !integer_zerop (rounded_size))
5964 {
5965 /* Small args are padded downward. */
5966 t = fold_build2_loc (input_location, GT_EXPR, sizetype,
5967 rounded_size, size_int (align));
5968 t = fold_build3 (COND_EXPR, sizetype, t, size_zero_node,
5969 size_binop (MINUS_EXPR, rounded_size, type_size));
5970 addr = fold_build_pointer_plus (addr, t);
5971 }
5972
5973 /* Compute new value for AP. */
5974 t = fold_build_pointer_plus (valist_tmp, rounded_size);
5975 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
5976 gimplify_and_add (t, pre_p);
5977
5978 addr = fold_convert (build_pointer_type (type), addr);
5979
5980 if (indirect)
5981 addr = build_va_arg_indirect_ref (addr);
5982
5983 return build_va_arg_indirect_ref (addr);
5984 }
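
/* Editor's illustration of the dynamic alignment above: with align ==
   4 and boundary == 8, the two statements emitted are effectively

       ap = ap + 7;    // boundary - 1
       ap = ap & -8;   // mask down to a multiple of boundary

   which together round the va_list pointer up to the next 8-byte
   boundary.  (A sketch, not compiler output.)  */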
5985
5986 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
5987
5988 static tree
5989 mips_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
5990 gimple_seq *post_p)
5991 {
5992 tree addr;
5993 bool indirect_p;
5994
5995 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5996 if (indirect_p)
5997 type = build_pointer_type (type);
5998
5999 if (!EABI_FLOAT_VARARGS_P)
6000 addr = mips_std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6001 else
6002 {
6003 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
6004 tree ovfl, top, off, align;
6005 HOST_WIDE_INT size, rsize, osize;
6006 tree t, u;
6007
6008 f_ovfl = TYPE_FIELDS (va_list_type_node);
6009 f_gtop = DECL_CHAIN (f_ovfl);
6010 f_ftop = DECL_CHAIN (f_gtop);
6011 f_goff = DECL_CHAIN (f_ftop);
6012 f_foff = DECL_CHAIN (f_goff);
6013
6014 /* Let:
6015
6016 TOP be the top of the GPR or FPR save area;
6017 OFF be the offset from TOP of the next register;
6018 ADDR_RTX be the address of the argument;
6019 SIZE be the number of bytes in the argument type;
6020 RSIZE be the number of bytes used to store the argument
6021 when it's in the register save area; and
6022 OSIZE be the number of bytes used to store it when it's
6023 in the stack overflow area.
6024
6025 The code we want is:
6026
6027 1: off &= -rsize; // round down
6028 2: if (off != 0)
6029 3: {
6030 4: addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
6031 5: off -= rsize;
6032 6: }
6033 7: else
6034 8: {
6035 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
6036 10: addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);
6037 11: ovfl += osize;
6038 12: }
6039
6040 [1] and [9] can sometimes be optimized away. */
6041
6042 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
6043 NULL_TREE);
6044 size = int_size_in_bytes (type);
6045
6046 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
6047 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
6048 {
6049 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop),
6050 unshare_expr (valist), f_ftop, NULL_TREE);
6051 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff),
6052 unshare_expr (valist), f_foff, NULL_TREE);
6053
6054 /* When va_start saves FPR arguments to the stack, each slot
6055 takes up UNITS_PER_HWFPVALUE bytes, regardless of the
6056 argument's precision. */
6057 rsize = UNITS_PER_HWFPVALUE;
6058
6059 /* Overflow arguments are padded to UNITS_PER_WORD bytes
6060 (= PARM_BOUNDARY bits). This can be different from RSIZE
6061 in two cases:
6062
6063 (1) On 32-bit targets when TYPE is a structure such as:
6064
6065 struct s { float f; };
6066
6067 Such structures are passed in paired FPRs, so RSIZE
6068 will be 8 bytes. However, the structure only takes
6069 up 4 bytes of memory, so OSIZE will only be 4.
6070
6071 (2) In combinations such as -mgp64 -msingle-float
6072 -fshort-double. Doubles passed in registers will then take
6073 up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
6074 stack take up UNITS_PER_WORD bytes. */
6075 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
6076 }
6077 else
6078 {
6079 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop),
6080 unshare_expr (valist), f_gtop, NULL_TREE);
6081 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff),
6082 unshare_expr (valist), f_goff, NULL_TREE);
6083 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6084 if (rsize > UNITS_PER_WORD)
6085 {
6086 /* [1] Emit code for: off &= -rsize. */
6087 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), unshare_expr (off),
6088 build_int_cst (TREE_TYPE (off), -rsize));
6089 gimplify_assign (unshare_expr (off), t, pre_p);
6090 }
6091 osize = rsize;
6092 }
6093
6094 /* [2] Emit code to branch if off == 0. */
6095 t = build2 (NE_EXPR, boolean_type_node, unshare_expr (off),
6096 build_int_cst (TREE_TYPE (off), 0));
6097 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
6098
6099 /* [5] Emit code for: off -= rsize. We do this as a form of
6100 post-decrement not available in C. */
6101 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
6102 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
6103
6104 /* [4] Emit code for:
6105 addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0). */
6106 t = fold_convert (sizetype, t);
6107 t = fold_build1 (NEGATE_EXPR, sizetype, t);
6108 t = fold_build_pointer_plus (top, t);
6109 if (BYTES_BIG_ENDIAN && rsize > size)
6110 t = fold_build_pointer_plus_hwi (t, rsize - size);
6111 COND_EXPR_THEN (addr) = t;
6112
6113 if (osize > UNITS_PER_WORD)
6114 {
6115 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
6116 t = fold_build_pointer_plus_hwi (unshare_expr (ovfl), osize - 1);
6117 u = build_int_cst (TREE_TYPE (t), -osize);
6118 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
6119 align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl),
6120 unshare_expr (ovfl), t);
6121 }
6122 else
6123 align = NULL;
6124
6125 /* [10, 11] Emit code for:
6126 addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
6127 ovfl += osize. */
6128 u = fold_convert (TREE_TYPE (ovfl), build_int_cst (NULL_TREE, osize));
6129 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
6130 if (BYTES_BIG_ENDIAN && osize > size)
6131 t = fold_build_pointer_plus_hwi (t, osize - size);
6132
6133 /* String [9] and [10, 11] together. */
6134 if (align)
6135 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
6136 COND_EXPR_ELSE (addr) = t;
6137
6138 addr = fold_convert (build_pointer_type (type), addr);
6139 addr = build_va_arg_indirect_ref (addr);
6140 }
6141
6142 if (indirect_p)
6143 addr = build_va_arg_indirect_ref (addr);
6144
6145 return addr;
6146 }
6147 \f
6148 /* Declare a unique, locally-binding function called NAME, then start
6149 its definition. */
6150
6151 static void
6152 mips_start_unique_function (const char *name)
6153 {
6154 tree decl;
6155
6156 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
6157 get_identifier (name),
6158 build_function_type_list (void_type_node, NULL_TREE));
6159 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
6160 NULL_TREE, void_type_node);
6161 TREE_PUBLIC (decl) = 1;
6162 TREE_STATIC (decl) = 1;
6163
6164 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
6165
6166 targetm.asm_out.unique_section (decl, 0);
6167 switch_to_section (get_named_section (decl, NULL, 0));
6168
6169 targetm.asm_out.globalize_label (asm_out_file, name);
6170 fputs ("\t.hidden\t", asm_out_file);
6171 assemble_name (asm_out_file, name);
6172 putc ('\n', asm_out_file);
6173 }
6174
6175 /* Start a definition of function NAME. MIPS16_P indicates whether the
6176 function contains MIPS16 code. */
6177
6178 static void
6179 mips_start_function_definition (const char *name, bool mips16_p)
6180 {
6181 if (mips16_p)
6182 fprintf (asm_out_file, "\t.set\tmips16\n");
6183 else
6184 fprintf (asm_out_file, "\t.set\tnomips16\n");
6185
6186 if (TARGET_MICROMIPS)
6187 fprintf (asm_out_file, "\t.set\tmicromips\n");
6188 #ifdef HAVE_GAS_MICROMIPS
6189 else
6190 fprintf (asm_out_file, "\t.set\tnomicromips\n");
6191 #endif
6192
6193 if (!flag_inhibit_size_directive)
6194 {
6195 fputs ("\t.ent\t", asm_out_file);
6196 assemble_name (asm_out_file, name);
6197 fputs ("\n", asm_out_file);
6198 }
6199
6200 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, name, "function");
6201
6202 /* Start the definition proper. */
6203 assemble_name (asm_out_file, name);
6204 fputs (":\n", asm_out_file);
6205 }
6206
6207 /* End a function definition started by mips_start_function_definition. */
6208
6209 static void
6210 mips_end_function_definition (const char *name)
6211 {
6212 if (!flag_inhibit_size_directive)
6213 {
6214 fputs ("\t.end\t", asm_out_file);
6215 assemble_name (asm_out_file, name);
6216 fputs ("\n", asm_out_file);
6217 }
6218 }
6219 \f
6220 /* Output a definition of the __mips16_rdhwr function. */
6221
6222 static void
6223 mips_output_mips16_rdhwr (void)
6224 {
6225 const char *name;
6226
6227 name = "__mips16_rdhwr";
6228 mips_start_unique_function (name);
6229 mips_start_function_definition (name, false);
6230 fprintf (asm_out_file,
6231 "\t.set\tpush\n"
6232 "\t.set\tmips32r2\n"
6233 "\t.set\tnoreorder\n"
6234 "\trdhwr\t$3,$29\n"
6235 "\t.set\tpop\n"
6236 "\tj\t$31\n");
6237 mips_end_function_definition (name);
6238 }
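
/* Editor's note: hardware register $29 read by the RDHWR above is the
   UserLocal register, which holds the thread pointer for TLS.  MIPS16
   has no RDHWR encoding, so MIPS16 code reaches it through this
   normal-mode helper, with the result returned in $3.  */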
6239 \f
6240 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
6241
6242 static bool
6243 mips_ok_for_lazy_binding_p (rtx x)
6244 {
6245 return (TARGET_USE_GOT
6246 && GET_CODE (x) == SYMBOL_REF
6247 && !SYMBOL_REF_BIND_NOW_P (x)
6248 && !mips_symbol_binds_local_p (x));
6249 }
6250
6251 /* Load function address ADDR into register DEST. TYPE is as for
6252 mips_expand_call. Return true if we used an explicit lazy-binding
6253 sequence. */
6254
6255 static bool
6256 mips_load_call_address (enum mips_call_type type, rtx dest, rtx addr)
6257 {
6258 /* If we're generating PIC, and this call is to a global function,
6259 try to allow its address to be resolved lazily. This isn't
6260 possible for sibcalls when $gp is call-saved because the value
6261 of $gp on entry to the stub would be our caller's gp, not ours. */
6262 if (TARGET_EXPLICIT_RELOCS
6263 && !(type == MIPS_CALL_SIBCALL && TARGET_CALL_SAVED_GP)
6264 && mips_ok_for_lazy_binding_p (addr))
6265 {
6266 addr = mips_got_load (dest, addr, SYMBOL_GOTOFF_CALL);
6267 emit_insn (gen_rtx_SET (VOIDmode, dest, addr));
6268 return true;
6269 }
6270 else
6271 {
6272 mips_emit_move (dest, addr);
6273 return false;
6274 }
6275 }
6276 \f
6277 /* Each locally-defined hard-float MIPS16 function has a local symbol
6278 associated with it. This hash table maps the function symbol (FUNC)
6279 to the local symbol (LOCAL). */
6280 struct GTY(()) mips16_local_alias {
6281 rtx func;
6282 rtx local;
6283 };
6284 static GTY ((param_is (struct mips16_local_alias))) htab_t mips16_local_aliases;
6285
6286 /* Hash table callbacks for mips16_local_aliases. */
6287
6288 static hashval_t
6289 mips16_local_aliases_hash (const void *entry)
6290 {
6291 const struct mips16_local_alias *alias;
6292
6293 alias = (const struct mips16_local_alias *) entry;
6294 return htab_hash_string (XSTR (alias->func, 0));
6295 }
6296
6297 static int
6298 mips16_local_aliases_eq (const void *entry1, const void *entry2)
6299 {
6300 const struct mips16_local_alias *alias1, *alias2;
6301
6302 alias1 = (const struct mips16_local_alias *) entry1;
6303 alias2 = (const struct mips16_local_alias *) entry2;
6304 return rtx_equal_p (alias1->func, alias2->func);
6305 }
6306
6307 /* FUNC is the symbol for a locally-defined hard-float MIPS16 function.
6308 Return a local alias for it, creating a new one if necessary. */
6309
6310 static rtx
6311 mips16_local_alias (rtx func)
6312 {
6313 struct mips16_local_alias *alias, tmp_alias;
6314 void **slot;
6315
6316 /* Create the hash table if this is the first call. */
6317 if (mips16_local_aliases == NULL)
6318 mips16_local_aliases = htab_create_ggc (37, mips16_local_aliases_hash,
6319 mips16_local_aliases_eq, NULL);
6320
6321 /* Look up the function symbol, creating a new entry if need be. */
6322 tmp_alias.func = func;
6323 slot = htab_find_slot (mips16_local_aliases, &tmp_alias, INSERT);
6324 gcc_assert (slot != NULL);
6325
6326 alias = (struct mips16_local_alias *) *slot;
6327 if (alias == NULL)
6328 {
6329 const char *func_name, *local_name;
6330 rtx local;
6331
6332 /* Create a new SYMBOL_REF for the local symbol. The choice of
6333 __fn_local_* is based on the __fn_stub_* names that we've
6334 traditionally used for the non-MIPS16 stub. */
6335 func_name = targetm.strip_name_encoding (XSTR (func, 0));
6336 local_name = ACONCAT (("__fn_local_", func_name, NULL));
6337 local = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (local_name));
6338 SYMBOL_REF_FLAGS (local) = SYMBOL_REF_FLAGS (func) | SYMBOL_FLAG_LOCAL;
6339
6340 /* Create a new structure to represent the mapping. */
6341 alias = ggc_alloc_mips16_local_alias ();
6342 alias->func = func;
6343 alias->local = local;
6344 *slot = alias;
6345 }
6346 return alias->local;
6347 }
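
/* Editor's illustration: for a function symbol "foo" (a hypothetical
   name), the code above produces the local alias "__fn_local_foo",
   matching the "__fn_stub_foo" name used for its non-MIPS16 stub.  */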
6348 \f
6349 /* A chained list of functions for which mips16_build_call_stub has already
6350 generated a stub. NAME is the name of the function and FP_RET_P is true
6351 if the function returns a value in floating-point registers. */
6352 struct mips16_stub {
6353 struct mips16_stub *next;
6354 char *name;
6355 bool fp_ret_p;
6356 };
6357 static struct mips16_stub *mips16_stubs;
6358
6359 /* Return the two-character string that identifies floating-point
6360 return mode MODE in the name of a MIPS16 function stub. */
6361
6362 static const char *
6363 mips16_call_stub_mode_suffix (enum machine_mode mode)
6364 {
6365 if (mode == SFmode)
6366 return "sf";
6367 else if (mode == DFmode)
6368 return "df";
6369 else if (mode == SCmode)
6370 return "sc";
6371 else if (mode == DCmode)
6372 return "dc";
6373 else if (mode == V2SFmode)
6374 return "df";
6375 else
6376 gcc_unreachable ();
6377 }
6378
6379 /* Write instructions to move a 32-bit value between general register
6380 GPREG and floating-point register FPREG. DIRECTION is 't' to move
6381 from GPREG to FPREG and 'f' to move in the opposite direction. */
6382
6383 static void
6384 mips_output_32bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
6385 {
6386 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
6387 reg_names[gpreg], reg_names[fpreg]);
6388 }
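
/* Editor's illustration: mips_output_32bit_xfer ('t', GP_ARG_FIRST,
   FP_ARG_FIRST) prints something like "mtc1 $4,$f12", moving GPR $4
   into FPR $f12; direction 'f' prints the corresponding "mfc1".
   (Register names are a sketch based on the o32 argument registers.)  */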
6389
6390 /* Likewise for 64-bit values. */
6391
6392 static void
6393 mips_output_64bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
6394 {
6395 if (TARGET_64BIT)
6396 fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
6397 reg_names[gpreg], reg_names[fpreg]);
6398 else if (TARGET_FLOAT64)
6399 {
6400 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
6401 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
6402 fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
6403 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
6404 }
6405 else
6406 {
6407 /* Move the least-significant word. */
6408 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
6409 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
6410 /* ...then the most significant word. */
6411 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
6412 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg + 1]);
6413 }
6414 }
6415
6416 /* Write out code to move floating-point arguments into or out of
6417 general registers. FP_CODE is the code describing which arguments
6418 are present (see the comment above the definition of CUMULATIVE_ARGS
6419 in mips.h). DIRECTION is as for mips_output_32bit_xfer. */
6420
6421 static void
6422 mips_output_args_xfer (int fp_code, char direction)
6423 {
6424 unsigned int gparg, fparg, f;
6425 CUMULATIVE_ARGS cum;
6426
6427 /* This code only works for o32 and o64. */
6428 gcc_assert (TARGET_OLDABI);
6429
6430 mips_init_cumulative_args (&cum, NULL);
6431
6432 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
6433 {
6434 enum machine_mode mode;
6435 struct mips_arg_info info;
6436
6437 if ((f & 3) == 1)
6438 mode = SFmode;
6439 else if ((f & 3) == 2)
6440 mode = DFmode;
6441 else
6442 gcc_unreachable ();
6443
6444 mips_get_arg_info (&info, &cum, mode, NULL, true);
6445 gparg = mips_arg_regno (&info, false);
6446 fparg = mips_arg_regno (&info, true);
6447
6448 if (mode == SFmode)
6449 mips_output_32bit_xfer (direction, gparg, fparg);
6450 else
6451 mips_output_64bit_xfer (direction, gparg, fparg);
6452
6453 mips_function_arg_advance (pack_cumulative_args (&cum), mode, NULL, true);
6454 }
6455 }
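
/* Editor's illustration: for arguments (float, double),
   mips_function_arg_advance builds fp_code == 1 | (2 << 2) == 9, so
   the loop above emits an SFmode transfer for the first argument and
   a DFmode transfer for the second.  (A sketch of the encoding, using
   the o32 rules assumed above.)  */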
6456
6457 /* Write a MIPS16 stub for the current function. This stub is used
6458 for functions which take arguments in the floating-point registers.
6459 It is normal-mode code that moves the floating-point arguments
6460 into the general registers and then jumps to the MIPS16 code. */
6461
6462 static void
6463 mips16_build_function_stub (void)
6464 {
6465 const char *fnname, *alias_name, *separator;
6466 char *secname, *stubname;
6467 tree stubdecl;
6468 unsigned int f;
6469 rtx symbol, alias;
6470
6471 /* Create the name of the stub, and its unique section. */
6472 symbol = XEXP (DECL_RTL (current_function_decl), 0);
6473 alias = mips16_local_alias (symbol);
6474
6475 fnname = targetm.strip_name_encoding (XSTR (symbol, 0));
6476 alias_name = targetm.strip_name_encoding (XSTR (alias, 0));
6477 secname = ACONCAT ((".mips16.fn.", fnname, NULL));
6478 stubname = ACONCAT (("__fn_stub_", fnname, NULL));
6479
6480 /* Build a decl for the stub. */
6481 stubdecl = build_decl (BUILTINS_LOCATION,
6482 FUNCTION_DECL, get_identifier (stubname),
6483 build_function_type_list (void_type_node, NULL_TREE));
6484 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
6485 DECL_RESULT (stubdecl) = build_decl (BUILTINS_LOCATION,
6486 RESULT_DECL, NULL_TREE, void_type_node);
6487
6488 /* Output a comment. */
6489 fprintf (asm_out_file, "\t# Stub function for %s (",
6490 current_function_name ());
6491 separator = "";
6492 for (f = (unsigned int) crtl->args.info.fp_code; f != 0; f >>= 2)
6493 {
6494 fprintf (asm_out_file, "%s%s", separator,
6495 (f & 3) == 1 ? "float" : "double");
6496 separator = ", ";
6497 }
6498 fprintf (asm_out_file, ")\n");
6499
6500 /* Start the function definition. */
6501 assemble_start_function (stubdecl, stubname);
6502 mips_start_function_definition (stubname, false);
6503
6504 /* If generating pic2 code, either set up the global pointer or
6505 switch to pic0. */
6506 if (TARGET_ABICALLS_PIC2)
6507 {
6508 if (TARGET_ABSOLUTE_ABICALLS)
6509 fprintf (asm_out_file, "\t.option\tpic0\n");
6510 else
6511 {
6512 output_asm_insn ("%(.cpload\t%^%)", NULL);
6513 /* Emit an R_MIPS_NONE relocation to tell the linker what the
6514 target function is. Use a local GOT access when loading the
6515 symbol, to cut down on the number of unnecessary GOT entries
6516 for stubs that aren't needed. */
6517 output_asm_insn (".reloc\t0,R_MIPS_NONE,%0", &symbol);
6518 symbol = alias;
6519 }
6520 }
6521
6522 /* Load the address of the MIPS16 function into $25. Do this first so
6523 that targets with coprocessor interlocks can use an MFC1 to fill the
6524 delay slot. */
6525 output_asm_insn ("la\t%^,%0", &symbol);
6526
6527 /* Move the arguments from floating-point registers to general registers. */
6528 mips_output_args_xfer (crtl->args.info.fp_code, 'f');
6529
6530 /* Jump to the MIPS16 function. */
6531 output_asm_insn ("jr\t%^", NULL);
6532
6533 if (TARGET_ABICALLS_PIC2 && TARGET_ABSOLUTE_ABICALLS)
6534 fprintf (asm_out_file, "\t.option\tpic2\n");
6535
6536 mips_end_function_definition (stubname);
6537
6538 /* If the linker needs to create a dynamic symbol for the target
6539 function, it will associate the symbol with the stub (which,
6540 unlike the target function, follows the proper calling conventions).
6541 It is therefore useful to have a local alias for the target function,
6542 so that it can still be identified as MIPS16 code. As an optimization,
6543 this symbol can also be used for indirect MIPS16 references from
6544 within this file. */
6545 ASM_OUTPUT_DEF (asm_out_file, alias_name, fnname);
6546
6547 switch_to_section (function_section (current_function_decl));
6548 }
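
/* Editor's sketch of the stub emitted above for a MIPS16 function
   "double foo (double)" on a little-endian o32 target.  This is an
   illustration, not verbatim compiler output; the exact directives
   and registers vary with the options in force:

       # Stub function for foo (double)
       .ent    __fn_stub_foo
   __fn_stub_foo:
       la      $25,foo
       mfc1    $4,$f12
       mfc1    $5,$f13
       jr      $25
       .end    __fn_stub_foo
   __fn_local_foo = foo  */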
6549
6550 /* The current function is a MIPS16 function that returns a value in an FPR.
6551 Copy the return value from its soft-float to its hard-float location.
6552 libgcc2 has special non-MIPS16 helper functions for each case. */
6553
6554 static void
6555 mips16_copy_fpr_return_value (void)
6556 {
6557 rtx fn, insn, retval;
6558 tree return_type;
6559 enum machine_mode return_mode;
6560 const char *name;
6561
6562 return_type = DECL_RESULT (current_function_decl);
6563 return_mode = DECL_MODE (return_type);
6564
6565 name = ACONCAT (("__mips16_ret_",
6566 mips16_call_stub_mode_suffix (return_mode),
6567 NULL));
6568 fn = mips16_stub_function (name);
6569
6570 /* The function takes arguments in $2 (and possibly $3), so calls
6571 to it cannot be lazily bound. */
6572 SYMBOL_REF_FLAGS (fn) |= SYMBOL_FLAG_BIND_NOW;
6573
6574 /* Model the call as something that takes the GPR return value as
6575 argument and returns an "updated" value. */
6576 retval = gen_rtx_REG (return_mode, GP_RETURN);
6577 insn = mips_expand_call (MIPS_CALL_EPILOGUE, retval, fn,
6578 const0_rtx, NULL_RTX, false);
6579 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
6580 }
6581
6582 /* Consider building a stub for a MIPS16 call to function *FN_PTR.
6583 RETVAL is the location of the return value, or null if this is
6584 a "call" rather than a "call_value". ARGS_SIZE is the size of the
6585 arguments and FP_CODE is the code built by mips_function_arg;
6586 see the comment before the fp_code field in CUMULATIVE_ARGS for details.
6587
6588 There are three alternatives:
6589
6590 - If a stub was needed, emit the call and return the call insn itself.
6591
6592 - If we can avoid using a stub by redirecting the call, set *FN_PTR
6593 to the new target and return null.
6594
6595 - If *FN_PTR doesn't need a stub, return null and leave *FN_PTR
6596 unmodified.
6597
6598 A stub is needed for calls to functions that, in normal mode,
6599 receive arguments in FPRs or return values in FPRs. The stub
6600 copies the arguments from their soft-float positions to their
6601 hard-float positions, calls the real function, then copies the
6602 return value from its hard-float position to its soft-float
6603 position.
6604
6605 We can emit a JAL to *FN_PTR even when *FN_PTR might need a stub.
6606 If *FN_PTR turns out to refer to a non-MIPS16 function, the linker
6607 automatically redirects the JAL to the stub; otherwise the JAL
6608 continues to call FN directly. */
6609
6610 static rtx
6611 mips16_build_call_stub (rtx retval, rtx *fn_ptr, rtx args_size, int fp_code)
6612 {
6613 const char *fnname;
6614 bool fp_ret_p;
6615 struct mips16_stub *l;
6616 rtx insn, fn;
6617
6618 /* We don't need to do anything if we aren't in MIPS16 mode, or if
6619 we were invoked with the -msoft-float option. */
6620 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
6621 return NULL_RTX;
6622
6623 /* Figure out whether the value might come back in a floating-point
6624 register. */
6625 fp_ret_p = retval && mips_return_mode_in_fpr_p (GET_MODE (retval));
6626
6627 /* We don't need to do anything if there were no floating-point
6628 arguments and the value will not be returned in a floating-point
6629 register. */
6630 if (fp_code == 0 && !fp_ret_p)
6631 return NULL_RTX;
6632
6633 /* We don't need to do anything if this is a call to a special
6634 MIPS16 support function. */
6635 fn = *fn_ptr;
6636 if (mips16_stub_function_p (fn))
6637 return NULL_RTX;
6638
6639 /* If we're calling a locally-defined MIPS16 function, we know that
6640 it will return values in both the "soft-float" and "hard-float"
6641 registers. There is no need to use a stub to move the latter
6642 to the former. */
6643 if (fp_code == 0 && mips16_local_function_p (fn))
6644 return NULL_RTX;
6645
6646 /* This code will only work for the o32 and o64 ABIs. The other ABIs
6647 require more sophisticated support. */
6648 gcc_assert (TARGET_OLDABI);
6649
6650 /* If we're calling via a function pointer, use one of the magic
6651 libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
6652 Each stub expects the function address to arrive in register $2. */
6653 if (GET_CODE (fn) != SYMBOL_REF
6654 || !call_insn_operand (fn, VOIDmode))
6655 {
6656 char buf[30];
6657 rtx stub_fn, insn, addr;
6658 bool lazy_p;
6659
6660 /* If this is a locally-defined and locally-binding function,
6661 avoid the stub by calling the local alias directly. */
6662 if (mips16_local_function_p (fn))
6663 {
6664 *fn_ptr = mips16_local_alias (fn);
6665 return NULL_RTX;
6666 }
6667
6668 /* Create a SYMBOL_REF for the libgcc.a function. */
6669 if (fp_ret_p)
6670 sprintf (buf, "__mips16_call_stub_%s_%d",
6671 mips16_call_stub_mode_suffix (GET_MODE (retval)),
6672 fp_code);
6673 else
6674 sprintf (buf, "__mips16_call_stub_%d", fp_code);
6675 stub_fn = mips16_stub_function (buf);
6676
6677 /* The function uses $2 as an argument, so calls to it
6678 cannot be lazily bound. */
6679 SYMBOL_REF_FLAGS (stub_fn) |= SYMBOL_FLAG_BIND_NOW;
6680
6681 /* Load the target function into $2. */
6682 addr = gen_rtx_REG (Pmode, GP_REG_FIRST + 2);
6683 lazy_p = mips_load_call_address (MIPS_CALL_NORMAL, addr, fn);
6684
6685 /* Emit the call. */
6686 insn = mips_expand_call (MIPS_CALL_NORMAL, retval, stub_fn,
6687 args_size, NULL_RTX, lazy_p);
6688
6689 /* Tell GCC that this call does indeed use the value of $2. */
6690 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), addr);
6691
6692 /* If we are handling a floating-point return value, we need to
6693 save $18 in the function prologue. Putting a note on the
6694 call will mean that df_regs_ever_live_p ($18) will be true if the
6695 call is not eliminated, and we can check that in the prologue
6696 code. */
6697 if (fp_ret_p)
6698 CALL_INSN_FUNCTION_USAGE (insn) =
6699 gen_rtx_EXPR_LIST (VOIDmode,
6700 gen_rtx_CLOBBER (VOIDmode,
6701 gen_rtx_REG (word_mode, 18)),
6702 CALL_INSN_FUNCTION_USAGE (insn));
6703
6704 return insn;
6705 }
6706
6707 /* We know the function we are going to call. If we have already
6708 built a stub, we don't need to do anything further. */
6709 fnname = targetm.strip_name_encoding (XSTR (fn, 0));
6710 for (l = mips16_stubs; l != NULL; l = l->next)
6711 if (strcmp (l->name, fnname) == 0)
6712 break;
6713
6714 if (l == NULL)
6715 {
6716 const char *separator;
6717 char *secname, *stubname;
6718 tree stubid, stubdecl;
6719 unsigned int f;
6720
6721 /* If the function does not return in FPRs, the special stub
6722 section is named
6723 .mips16.call.FNNAME
6724
6725 If the function does return in FPRs, the stub section is named
6726 .mips16.call.fp.FNNAME
6727
6728 Build a decl for the stub. */
6729 secname = ACONCAT ((".mips16.call.", fp_ret_p ? "fp." : "",
6730 fnname, NULL));
6731 stubname = ACONCAT (("__call_stub_", fp_ret_p ? "fp_" : "",
6732 fnname, NULL));
6733 stubid = get_identifier (stubname);
6734 stubdecl = build_decl (BUILTINS_LOCATION,
6735 FUNCTION_DECL, stubid,
6736 build_function_type_list (void_type_node,
6737 NULL_TREE));
6738 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
6739 DECL_RESULT (stubdecl) = build_decl (BUILTINS_LOCATION,
6740 RESULT_DECL, NULL_TREE,
6741 void_type_node);
6742
6743 /* Output a comment. */
6744 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
6745 (fp_ret_p
6746 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
6747 : ""),
6748 fnname);
6749 separator = "";
6750 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
6751 {
6752 fprintf (asm_out_file, "%s%s", separator,
6753 (f & 3) == 1 ? "float" : "double");
6754 separator = ", ";
6755 }
6756 fprintf (asm_out_file, ")\n");
6757
6758 /* Start the function definition. */
6759 assemble_start_function (stubdecl, stubname);
6760 mips_start_function_definition (stubname, false);
6761
6762 if (fp_ret_p)
6763 {
6764 fprintf (asm_out_file, "\t.cfi_startproc\n");
6765
6766 /* Create a fake CFA 4 bytes below the stack pointer.
6767 This works around unwinders (like libgcc's) that expect
6768 the CFA for non-signal frames to be unique. */
6769 fprintf (asm_out_file, "\t.cfi_def_cfa 29,-4\n");
6770
6771 /* "Save" $sp in itself so we don't use the fake CFA.
6772 This is: DW_CFA_val_expression r29, { DW_OP_reg29 }. */
6773 fprintf (asm_out_file, "\t.cfi_escape 0x16,29,1,0x6d\n");
6774 }
6775 else
6776 {
6777 /* Load the address of the MIPS16 function into $25. Do this
6778 first so that targets with coprocessor interlocks can use
6779 an MFC1 to fill the delay slot. */
6780 if (TARGET_EXPLICIT_RELOCS)
6781 {
6782 output_asm_insn ("lui\t%^,%%hi(%0)", &fn);
6783 output_asm_insn ("addiu\t%^,%^,%%lo(%0)", &fn);
6784 }
6785 else
6786 output_asm_insn ("la\t%^,%0", &fn);
6787 }
6788
6789 /* Move the arguments from general registers to floating-point
6790 registers. */
6791 mips_output_args_xfer (fp_code, 't');
6792
6793 if (fp_ret_p)
6794 {
6795 /* Save the return address in $18 and call the non-MIPS16 function.
6796 The stub's caller knows that $18 might be clobbered, even though
6797 $18 is usually a call-saved register. */
6798 fprintf (asm_out_file, "\tmove\t%s,%s\n",
6799 reg_names[GP_REG_FIRST + 18], reg_names[RETURN_ADDR_REGNUM]);
6800 output_asm_insn (MIPS_CALL ("jal", &fn, 0, -1), &fn);
6801 fprintf (asm_out_file, "\t.cfi_register 31,18\n");
6802
6803 /* Move the result from floating-point registers to
6804 general registers. */
6805 switch (GET_MODE (retval))
6806 {
6807 case SCmode:
6808 mips_output_32bit_xfer ('f', GP_RETURN + TARGET_BIG_ENDIAN,
6809 TARGET_BIG_ENDIAN
6810 ? FP_REG_FIRST + MAX_FPRS_PER_FMT
6811 : FP_REG_FIRST);
6812 mips_output_32bit_xfer ('f', GP_RETURN + TARGET_LITTLE_ENDIAN,
6813 TARGET_LITTLE_ENDIAN
6814 ? FP_REG_FIRST + MAX_FPRS_PER_FMT
6815 : FP_REG_FIRST);
6816 if (GET_MODE (retval) == SCmode && TARGET_64BIT)
6817 {
6818 /* On 64-bit targets, complex floats are returned in
6819 a single GPR, such that "sd" on a suitably-aligned
6820 target would store the value correctly. */
6821 fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
6822 reg_names[GP_RETURN + TARGET_BIG_ENDIAN],
6823 reg_names[GP_RETURN + TARGET_BIG_ENDIAN]);
6824 fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
6825 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN],
6826 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN]);
6827 fprintf (asm_out_file, "\tdsrl\t%s,%s,32\n",
6828 reg_names[GP_RETURN + TARGET_BIG_ENDIAN],
6829 reg_names[GP_RETURN + TARGET_BIG_ENDIAN]);
6830 fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
6831 reg_names[GP_RETURN],
6832 reg_names[GP_RETURN],
6833 reg_names[GP_RETURN + 1]);
6834 }
6835 break;
6836
6837 case SFmode:
6838 mips_output_32bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
6839 break;
6840
6841 case DCmode:
6842 mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
6843 FP_REG_FIRST + MAX_FPRS_PER_FMT);
6844 /* Fall through. */
6845 case DFmode:
6846 case V2SFmode:
6847 mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
6848 break;
6849
6850 default:
6851 gcc_unreachable ();
6852 }
6853 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 18]);
6854 fprintf (asm_out_file, "\t.cfi_endproc\n");
6855 }
6856 else
6857 {
6858 /* Jump to the previously-loaded address. */
6859 output_asm_insn ("jr\t%^", NULL);
6860 }
6861
6862 #ifdef ASM_DECLARE_FUNCTION_SIZE
6863 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
6864 #endif
6865
6866 mips_end_function_definition (stubname);
6867
6868 /* Record this stub. */
6869 l = XNEW (struct mips16_stub);
6870 l->name = xstrdup (fnname);
6871 l->fp_ret_p = fp_ret_p;
6872 l->next = mips16_stubs;
6873 mips16_stubs = l;
6874 }
6875
6876 /* If we expect a floating-point return value, but we've built a
6877 stub which does not expect one, then we're in trouble. We can't
6878 use the existing stub, because it won't handle the floating-point
6879 value. We can't build a new stub, because the linker won't know
6880 which stub to use for the various calls in this object file.
6881 Fortunately, this case is illegal, since it means that a function
6882 was declared in two different ways in a single compilation. */
6883 if (fp_ret_p && !l->fp_ret_p)
6884 error ("cannot handle inconsistent calls to %qs", fnname);
6885
6886 if (retval == NULL_RTX)
6887 insn = gen_call_internal_direct (fn, args_size);
6888 else
6889 insn = gen_call_value_internal_direct (retval, fn, args_size);
6890 insn = mips_emit_call_insn (insn, fn, fn, false);
6891
6892 /* If we are calling a stub which handles a floating-point return
6893 value, we need to arrange to save $18 in the prologue. We do this
6894 by marking the function call as using the register. The prologue
6895 will later see that it is used, and emit code to save it. */
6896 if (fp_ret_p)
6897 CALL_INSN_FUNCTION_USAGE (insn) =
6898 gen_rtx_EXPR_LIST (VOIDmode,
6899 gen_rtx_CLOBBER (VOIDmode,
6900 gen_rtx_REG (word_mode, 18)),
6901 CALL_INSN_FUNCTION_USAGE (insn));
6902
6903 return insn;
6904 }
6905 \f
6906 /* Expand a call of type TYPE. RESULT is where the result will go (null
6907 for "call"s and "sibcall"s), ADDR is the address of the function,
6908 ARGS_SIZE is the size of the arguments and AUX is the value passed
6909 to us by mips_function_arg. LAZY_P is true if this call already
6910 involves a lazily-bound function address (such as when calling
6911 functions through a MIPS16 hard-float stub).
6912
6913 Return the call itself. */
6914
6915 rtx
6916 mips_expand_call (enum mips_call_type type, rtx result, rtx addr,
6917 rtx args_size, rtx aux, bool lazy_p)
6918 {
6919 rtx orig_addr, pattern, insn;
6920 int fp_code;
6921
6922 fp_code = aux == 0 ? 0 : (int) GET_MODE (aux);
6923 insn = mips16_build_call_stub (result, &addr, args_size, fp_code);
6924 if (insn)
6925 {
6926 gcc_assert (!lazy_p && type == MIPS_CALL_NORMAL);
6927 return insn;
6928 }
6929
6930 orig_addr = addr;
6931 if (!call_insn_operand (addr, VOIDmode))
6932 {
6933 if (type == MIPS_CALL_EPILOGUE)
6934 addr = MIPS_EPILOGUE_TEMP (Pmode);
6935 else
6936 addr = gen_reg_rtx (Pmode);
6937 lazy_p |= mips_load_call_address (type, addr, orig_addr);
6938 }
6939
6940 if (result == 0)
6941 {
6942 rtx (*fn) (rtx, rtx);
6943
6944 if (type == MIPS_CALL_SIBCALL)
6945 fn = gen_sibcall_internal;
6946 else
6947 fn = gen_call_internal;
6948
6949 pattern = fn (addr, args_size);
6950 }
6951 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
6952 {
6953 /* Handle return values created by mips_return_fpr_pair. */
6954 rtx (*fn) (rtx, rtx, rtx, rtx);
6955 rtx reg1, reg2;
6956
6957 if (type == MIPS_CALL_SIBCALL)
6958 fn = gen_sibcall_value_multiple_internal;
6959 else
6960 fn = gen_call_value_multiple_internal;
6961
6962 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
6963 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
6964 pattern = fn (reg1, addr, args_size, reg2);
6965 }
6966 else
6967 {
6968 rtx (*fn) (rtx, rtx, rtx);
6969
6970 if (type == MIPS_CALL_SIBCALL)
6971 fn = gen_sibcall_value_internal;
6972 else
6973 fn = gen_call_value_internal;
6974
6975 /* Handle return values created by mips_return_fpr_single. */
6976 if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
6977 result = XEXP (XVECEXP (result, 0, 0), 0);
6978 pattern = fn (result, addr, args_size);
6979 }
6980
6981 return mips_emit_call_insn (pattern, orig_addr, addr, lazy_p);
6982 }
6983
6984 /* Split call instruction INSN into a $gp-clobbering call and
6985 (where necessary) an instruction to restore $gp from its save slot.
6986 CALL_PATTERN is the pattern of the new call. */
6987
6988 void
6989 mips_split_call (rtx insn, rtx call_pattern)
6990 {
6991 emit_call_insn (call_pattern);
6992 if (!find_reg_note (insn, REG_NORETURN, 0))
6993 /* Pick a temporary register that is suitable for both MIPS16 and
6994 non-MIPS16 code. $4 and $5 are used for returning complex double
6995 values in soft-float code, so $6 is the first suitable candidate. */
6996 mips_restore_gp_from_cprestore_slot (gen_rtx_REG (Pmode, GP_ARG_FIRST + 2));
6997 }
6998
6999 /* Return true if a call to DECL may need to use JALX. */
7000
7001 static bool
7002 mips_call_may_need_jalx_p (tree decl)
7003 {
7004 /* If the current translation unit would use a different mode for DECL,
7005 assume that the call needs JALX. */
7006 if (mips_get_compress_mode (decl) != TARGET_COMPRESSION)
7007 return true;
7008
7009 /* mips_get_compress_mode is always accurate for locally-binding
7010 functions in the current translation unit. */
7011 if (!DECL_EXTERNAL (decl) && targetm.binds_local_p (decl))
7012 return false;
7013
7014 /* When -minterlink-compressed is in effect, assume that functions
7015 could use a different encoding mode unless an attribute explicitly
7016 tells us otherwise. */
7017 if (TARGET_INTERLINK_COMPRESSED)
7018 {
7019 if (!TARGET_COMPRESSION
7020 && mips_get_compress_off_flags (DECL_ATTRIBUTES (decl)) == 0
7021 return true;
7022 if (TARGET_COMPRESSION
7023 && mips_get_compress_on_flags (DECL_ATTRIBUTES (decl)) == 0)
7024 return true;
7025 }
7026
7027 return false;
7028 }
7029
7030 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
7031
7032 static bool
7033 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7034 {
7035 if (!TARGET_SIBCALLS)
7036 return false;
7037
7038 /* Interrupt handlers need special epilogue code and therefore can't
7039 use sibcalls. */
7040 if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
7041 return false;
7042
7043 /* Direct Js are only possible for functions that use the same ISA encoding.
7044 There is no JX counterpart of JALX. */
7045 if (decl
7046 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode)
7047 && mips_call_may_need_jalx_p (decl))
7048 return false;
7049
7050 /* Sibling calls should not prevent lazy binding. Lazy-binding stubs
7051 require $gp to be valid on entry, so sibcalls can only use stubs
7052 if $gp is call-clobbered. */
7053 if (decl
7054 && TARGET_CALL_SAVED_GP
7055 && !TARGET_ABICALLS_PIC0
7056 && !targetm.binds_local_p (decl))
7057 return false;
7058
7059 /* Otherwise OK. */
7060 return true;
7061 }
7062 \f
7063 /* Emit code to move general operand SRC into condition-code
7064 register DEST given that SCRATCH is a scratch TFmode FPR.
7065 The sequence is:
7066
7067 FP1 = SRC
7068 FP2 = 0.0f
7069 DEST = FP2 < FP1
7070
7071 where FP1 and FP2 are single-precision FPRs taken from SCRATCH. */
7072
7073 void
7074 mips_expand_fcc_reload (rtx dest, rtx src, rtx scratch)
7075 {
7076 rtx fp1, fp2;
7077
7078 /* Change the source to SFmode. */
7079 if (MEM_P (src))
7080 src = adjust_address (src, SFmode, 0);
7081 else if (REG_P (src) || GET_CODE (src) == SUBREG)
7082 src = gen_rtx_REG (SFmode, true_regnum (src));
7083
7084 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
7085 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
7086
7087 mips_emit_move (copy_rtx (fp1), src);
7088 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
7089 emit_insn (gen_slt_sf (dest, fp2, fp1));
7090 }
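
/* Schematically, assuming SCRATCH starts at $f0 and MAX_FPRS_PER_FMT == 2
   (a sketch only; the exact moves depend on where SRC lives), the
   sequence above is:

	mtc1	src,$f0		# FP1 = SRC
	mtc1	$0,$f2		# FP2 = 0.0f
	c.lt.s	dest,$f2,$f0	# DEST = FP2 < FP1  */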
7091 \f
7092 /* Implement MOVE_BY_PIECES_P. */
7093
7094 bool
7095 mips_move_by_pieces_p (unsigned HOST_WIDE_INT size, unsigned int align)
7096 {
7097 if (HAVE_movmemsi)
7098 {
7099 /* movmemsi is meant to generate code that is at least as good as
7100 move_by_pieces. However, movmemsi effectively uses a by-pieces
7101 implementation both for moves smaller than a word and for
7102 word-aligned moves of no more than MIPS_MAX_MOVE_BYTES_STRAIGHT
7103 bytes. We should allow the tree-level optimisers to do such
7104 moves by pieces, as it often exposes other optimization
7105 opportunities. We might as well continue to use movmemsi at
7106 the rtl level though, as it produces better code when
7107 scheduling is disabled (such as at -O). */
7108 if (currently_expanding_to_rtl)
7109 return false;
7110 if (align < BITS_PER_WORD)
7111 return size < UNITS_PER_WORD;
7112 return size <= MIPS_MAX_MOVE_BYTES_STRAIGHT;
7113 }
7114 /* The default value. If this becomes a target hook, we should
7115 call the default definition instead. */
7116 return (move_by_pieces_ninsns (size, align, MOVE_MAX_PIECES + 1)
7117 < (unsigned int) MOVE_RATIO (optimize_insn_for_speed_p ()));
7118 }
7119
7120 /* Implement STORE_BY_PIECES_P. */
7121
7122 bool
7123 mips_store_by_pieces_p (unsigned HOST_WIDE_INT size, unsigned int align)
7124 {
7125 /* Storing by pieces involves moving constants into registers
7126 of size MIN (ALIGN, BITS_PER_WORD), then storing them.
7127 We need to decide whether it is cheaper to load the address of
7128 constant data into a register and use a block move instead. */
7129
7130 /* If the data is only byte aligned, then:
7131
7132 (a1) A block move of less than 4 bytes would involve 3 LBs and
7133 3 SBs. We might as well use 3 single-instruction LIs and 3 SBs
7134 instead.
7135
7136 (a2) A block move of 4 bytes from aligned source data can use an
7137 LW/SWL/SWR sequence. This is often better than the 4 LIs and
7138 4 SBs that we would generate when storing by pieces. */
7139 if (align <= BITS_PER_UNIT)
7140 return size < 4;
7141
7142 /* If the data is 2-byte aligned, then:
7143
7144 (b1) A block move of less than 4 bytes would use a combination of LBs,
7145 LHs, SBs and SHs. We get better code by using single-instruction
7146 LIs, SBs and SHs instead.
7147
7148 (b2) A block move of 4 bytes from aligned source data would again use
7149 an LW/SWL/SWR sequence. In most cases, loading the address of
7150 the source data would require at least one extra instruction.
7151 It is often more efficient to use 2 single-instruction LIs and
7152 2 SHs instead.
7153
7154 (b3) A block move of up to 3 additional bytes would be like (b1).
7155
7156 (b4) A block move of 8 bytes from aligned source data can use two
7157 LW/SWL/SWR sequences or a single LD/SDL/SDR sequence. Both
7158 sequences are better than the 4 LIs and 4 SHs that we'd generate
7159 when storing by pieces.
7160
7161 The reasoning for higher alignments is similar:
7162
7163 (c1) A block move of less than 4 bytes would be the same as (b1).
7164
7165 (c2) A block move of 4 bytes would use an LW/SW sequence. Again,
7166 loading the address of the source data would typically require
7167 at least one extra instruction. It is generally better to use
7168 LUI/ORI/SW instead.
7169
7170 (c3) A block move of up to 3 additional bytes would be like (b1).
7171
7172 (c4) A block move of 8 bytes can use two LW/SW sequences or a single
7173 LD/SD sequence, and in these cases we've traditionally preferred
7174 the memory copy over the more bulky constant moves. */
7175 return size < 8;
7176 }
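
/* As an illustration of case (a1) above, storing a 3-byte byte-aligned
   constant by pieces costs roughly (illustrative register choices):

	li	$2,1
	sb	$2,0($4)
	li	$2,2
	sb	$2,1($4)
	li	$2,3
	sb	$2,2($4)

   which beats loading the address of equivalent constant data and then
   copying it with 3 LBs and 3 SBs.  */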
7177
7178 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
7179 Assume that the areas do not overlap. */
7180
7181 static void
7182 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
7183 {
7184 HOST_WIDE_INT offset, delta;
7185 unsigned HOST_WIDE_INT bits;
7186 int i;
7187 enum machine_mode mode;
7188 rtx *regs;
7189
7190 /* Work out how many bits to move at a time. If both operands have
7191 half-word alignment, it is usually better to move in half words.
7192 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
7193 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
7194 Otherwise move word-sized chunks. */
7195 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
7196 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
7197 bits = BITS_PER_WORD / 2;
7198 else
7199 bits = BITS_PER_WORD;
7200
7201 mode = mode_for_size (bits, MODE_INT, 0);
7202 delta = bits / BITS_PER_UNIT;
7203
7204 /* Allocate a buffer for the temporary registers. */
7205 regs = XALLOCAVEC (rtx, length / delta);
7206
7207 /* Load as many BITS-sized chunks as possible. Use a normal load if
7208 the source has enough alignment, otherwise use left/right pairs. */
7209 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
7210 {
7211 regs[i] = gen_reg_rtx (mode);
7212 if (MEM_ALIGN (src) >= bits)
7213 mips_emit_move (regs[i], adjust_address (src, mode, offset));
7214 else
7215 {
7216 rtx part = adjust_address (src, BLKmode, offset);
7217 set_mem_size (part, delta);
7218 if (!mips_expand_ext_as_unaligned_load (regs[i], part, bits, 0, 0))
7219 gcc_unreachable ();
7220 }
7221 }
7222
7223 /* Copy the chunks to the destination. */
7224 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
7225 if (MEM_ALIGN (dest) >= bits)
7226 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
7227 else
7228 {
7229 rtx part = adjust_address (dest, BLKmode, offset);
7230 set_mem_size (part, delta);
7231 if (!mips_expand_ins_as_unaligned_store (part, regs[i], bits, 0))
7232 gcc_unreachable ();
7233 }
7234
7235 /* Mop up any left-over bytes. */
7236 if (offset < length)
7237 {
7238 src = adjust_address (src, BLKmode, offset);
7239 dest = adjust_address (dest, BLKmode, offset);
7240 move_by_pieces (dest, src, length - offset,
7241 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
7242 }
7243 }
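
/* For example, copying one word from a byte-aligned source on a
   big-endian target uses a left/right pair on each side (illustrative
   register choices):

	lwl	$2,0($4)	# most-significant bytes
	lwr	$2,3($4)	# least-significant bytes
	swl	$2,0($5)
	swr	$2,3($5)  */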
7244
7245 /* Helper function for doing a loop-based block operation on memory
7246 reference MEM. Each iteration of the loop will operate on LENGTH
7247 bytes of MEM.
7248
7249 Create a new base register for use within the loop and point it to
7250 the start of MEM. Create a new memory reference that uses this
7251 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
7252
7253 static void
7254 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
7255 rtx *loop_reg, rtx *loop_mem)
7256 {
7257 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
7258
7259 /* Although the new mem does not refer to a known location,
7260 it does keep up to LENGTH bytes of alignment. */
7261 *loop_mem = change_address (mem, BLKmode, *loop_reg);
7262 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
7263 }
7264
7265 /* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
7266 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
7267 the memory regions do not overlap. */
7268
7269 static void
7270 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
7271 HOST_WIDE_INT bytes_per_iter)
7272 {
7273 rtx label, src_reg, dest_reg, final_src, test;
7274 HOST_WIDE_INT leftover;
7275
7276 leftover = length % bytes_per_iter;
7277 length -= leftover;
7278
7279 /* Create registers and memory references for use within the loop. */
7280 mips_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
7281 mips_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
7282
7283 /* Calculate the value that SRC_REG should have after the last iteration
7284 of the loop. */
7285 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
7286 0, 0, OPTAB_WIDEN);
7287
7288 /* Emit the start of the loop. */
7289 label = gen_label_rtx ();
7290 emit_label (label);
7291
7292 /* Emit the loop body. */
7293 mips_block_move_straight (dest, src, bytes_per_iter);
7294
7295 /* Move on to the next block. */
7296 mips_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
7297 mips_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
7298
7299 /* Emit the loop condition. */
7300 test = gen_rtx_NE (VOIDmode, src_reg, final_src);
7301 if (Pmode == DImode)
7302 emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
7303 else
7304 emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
7305
7306 /* Mop up any left-over bytes. */
7307 if (leftover)
7308 mips_block_move_straight (dest, src, leftover);
7309 }
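
/* The emitted loop has roughly this shape (pseudocode sketch):

     final_src = src + (length - leftover);
   loop:
     copy bytes_per_iter bytes straight;
     src += bytes_per_iter;
     dest += bytes_per_iter;
     if (src != final_src) goto loop;
     copy the leftover bytes straight;  */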
7310
7311 /* Expand a movmemsi instruction, which copies LENGTH bytes from
7312 memory reference SRC to memory reference DEST. */
7313
7314 bool
7315 mips_expand_block_move (rtx dest, rtx src, rtx length)
7316 {
7317 if (CONST_INT_P (length))
7318 {
7319 if (INTVAL (length) <= MIPS_MAX_MOVE_BYTES_STRAIGHT)
7320 {
7321 mips_block_move_straight (dest, src, INTVAL (length));
7322 return true;
7323 }
7324 else if (optimize)
7325 {
7326 mips_block_move_loop (dest, src, INTVAL (length),
7327 MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER);
7328 return true;
7329 }
7330 }
7331 return false;
7332 }
7333 \f
7334 /* Expand a loop of synci insns for the address range [BEGIN, END). */
7335
7336 void
7337 mips_expand_synci_loop (rtx begin, rtx end)
7338 {
7339 rtx inc, label, end_label, cmp_result, mask, length;
7340
7341 /* Create end_label. */
7342 end_label = gen_label_rtx ();
7343
7344 /* Check if begin equals end. */
7345 cmp_result = gen_rtx_EQ (VOIDmode, begin, end);
7346 emit_jump_insn (gen_condjump (cmp_result, end_label));
7347
7348 /* Load INC with the cache line size (rdhwr INC,$1). */
7349 inc = gen_reg_rtx (Pmode);
7350 emit_insn (PMODE_INSN (gen_rdhwr_synci_step, (inc)));
7351
7352 /* Check if inc is 0. */
7353 cmp_result = gen_rtx_EQ (VOIDmode, inc, const0_rtx);
7354 emit_jump_insn (gen_condjump (cmp_result, end_label));
7355
7356 /* Calculate mask. */
7357 mask = mips_force_unary (Pmode, NEG, inc);
7358
7359 /* Mask out begin by mask. */
7360 begin = mips_force_binary (Pmode, AND, begin, mask);
7361
7362 /* Calculate length. */
7363 length = mips_force_binary (Pmode, MINUS, end, begin);
7364
7365 /* Loop back to here. */
7366 label = gen_label_rtx ();
7367 emit_label (label);
7368
7369 emit_insn (gen_synci (begin));
7370
7371 /* Update length. */
7372 mips_emit_binary (MINUS, length, length, inc);
7373
7374 /* Update begin. */
7375 mips_emit_binary (PLUS, begin, begin, inc);
7376
7377 /* Check if length is greater than 0. */
7378 cmp_result = gen_rtx_GT (VOIDmode, length, const0_rtx);
7379 emit_jump_insn (gen_condjump (cmp_result, label));
7380
7381 emit_label (end_label);
7382 }
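
/* In pseudocode, the sequence emitted above is roughly:

     if (begin == end) goto done;
     inc = <cache line size from RDHWR>;
     if (inc == 0) goto done;
     begin &= -inc;		/- round down to a cache-line boundary
     length = end - begin;
   loop:
     synci 0(begin);
     length -= inc;
     begin += inc;
     if (length > 0) goto loop;
   done:  */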
7383 \f
7384 /* Expand a QI or HI mode atomic memory operation.
7385
7386 GENERATOR contains a pointer to the gen_* function that generates
7387 the SI mode underlying atomic operation using masks that we
7388 calculate.
7389
7390 RESULT is the return register for the operation. Its value is NULL
7391 if unused.
7392
7393 MEM is the location of the atomic access.
7394
7395 OLDVAL is the first operand for the operation.
7396
7397 NEWVAL is the optional second operand for the operation. Its value
7398 is NULL if unused. */
7399
7400 void
7401 mips_expand_atomic_qihi (union mips_gen_fn_ptrs generator,
7402 rtx result, rtx mem, rtx oldval, rtx newval)
7403 {
7404 rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask;
7405 rtx unshifted_mask_reg, mask, inverted_mask, si_op;
7406 rtx res = NULL;
7407 enum machine_mode mode;
7408
7409 mode = GET_MODE (mem);
7410
7411 /* Compute the address of the containing SImode value. */
7412 orig_addr = force_reg (Pmode, XEXP (mem, 0));
7413 memsi_addr = mips_force_binary (Pmode, AND, orig_addr,
7414 force_reg (Pmode, GEN_INT (-4)));
7415
7416 /* Create a memory reference for it. */
7417 memsi = gen_rtx_MEM (SImode, memsi_addr);
7418 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
7419 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
7420
7421 /* Work out the byte offset of the QImode or HImode value,
7422 counting from the least significant byte. */
7423 shift = mips_force_binary (Pmode, AND, orig_addr, GEN_INT (3));
7424 if (TARGET_BIG_ENDIAN)
7425 mips_emit_binary (XOR, shift, shift, GEN_INT (mode == QImode ? 3 : 2));
7426
7427 /* Multiply by eight to convert the shift value from bytes to bits. */
7428 mips_emit_binary (ASHIFT, shift, shift, GEN_INT (3));
7429
7430 /* Make the final shift an SImode value, so that it can be used in
7431 SImode operations. */
7432 shiftsi = force_reg (SImode, gen_lowpart (SImode, shift));
7433
7434 /* Set MASK to an inclusive mask of the QImode or HImode value. */
7435 unshifted_mask = GEN_INT (GET_MODE_MASK (mode));
7436 unshifted_mask_reg = force_reg (SImode, unshifted_mask);
7437 mask = mips_force_binary (SImode, ASHIFT, unshifted_mask_reg, shiftsi);
7438
7439 /* Compute the equivalent exclusive mask. */
7440 inverted_mask = gen_reg_rtx (SImode);
7441 emit_insn (gen_rtx_SET (VOIDmode, inverted_mask,
7442 gen_rtx_NOT (SImode, mask)));
7443
7444 /* Shift the old value into place. */
7445 if (oldval != const0_rtx)
7446 {
7447 oldval = convert_modes (SImode, mode, oldval, true);
7448 oldval = force_reg (SImode, oldval);
7449 oldval = mips_force_binary (SImode, ASHIFT, oldval, shiftsi);
7450 }
7451
7452 /* Do the same for the new value. */
7453 if (newval && newval != const0_rtx)
7454 {
7455 newval = convert_modes (SImode, mode, newval, true);
7456 newval = force_reg (SImode, newval);
7457 newval = mips_force_binary (SImode, ASHIFT, newval, shiftsi);
7458 }
7459
7460 /* Do the SImode atomic access. */
7461 if (result)
7462 res = gen_reg_rtx (SImode);
7463 if (newval)
7464 si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, newval);
7465 else if (result)
7466 si_op = generator.fn_5 (res, memsi, mask, inverted_mask, oldval);
7467 else
7468 si_op = generator.fn_4 (memsi, mask, inverted_mask, oldval);
7469
7470 emit_insn (si_op);
7471
7472 if (result)
7473 {
7474 /* Shift and convert the result. */
7475 mips_emit_binary (AND, res, res, mask);
7476 mips_emit_binary (LSHIFTRT, res, res, shiftsi);
7477 mips_emit_move (result, gen_lowpart (GET_MODE (result), res));
7478 }
7479 }
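
/* As a worked example, a little-endian HImode access at byte offset 2
   within its aligned word gives:

     shift = (addr & 3) * 8 = 16
     mask = 0xffff << 16 = 0xffff0000
     inverted_mask = 0x0000ffff

   so the SImode operation updates only the upper halfword, and the
   result is shifted right by 16 and truncated back to HImode.  */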
7480
7481 /* Return true if it is possible to use left/right accesses for a
7482 bitfield of WIDTH bits starting BITPOS bits into BLKmode memory OP.
7483 When returning true, update *LEFT and *RIGHT as follows:
7484
7485 *LEFT is a QImode reference to the first byte if big endian or
7486 the last byte if little endian. This address can be used in the
7487 left-side instructions (LWL, SWL, LDL, SDL).
7488
7489 *RIGHT is a QImode reference to the opposite end of the field and
7490 can be used in the partnering right-side instruction (LWR, SWR, LDR, SDR). */
7491
7492 static bool
7493 mips_get_unaligned_mem (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos,
7494 rtx *left, rtx *right)
7495 {
7496 rtx first, last;
7497
7498 /* Check that the size is valid. */
7499 if (width != 32 && (!TARGET_64BIT || width != 64))
7500 return false;
7501
7502 /* We can only access byte-aligned values. Since we are always passed
7503 a reference to the first byte of the field, it is not necessary to
7504 do anything with BITPOS after this check. */
7505 if (bitpos % BITS_PER_UNIT != 0)
7506 return false;
7507
7508 /* Reject aligned bitfields: we want to use a normal load or store
7509 instead of a left/right pair. */
7510 if (MEM_ALIGN (op) >= width)
7511 return false;
7512
7513 /* Get references to both ends of the field. */
7514 first = adjust_address (op, QImode, 0);
7515 last = adjust_address (op, QImode, width / BITS_PER_UNIT - 1);
7516
7517 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
7518 correspond to the MSB and RIGHT to the LSB. */
7519 if (TARGET_BIG_ENDIAN)
7520 *left = first, *right = last;
7521 else
7522 *left = last, *right = first;
7523
7524 return true;
7525 }
7526
7527 /* Try to use left/right loads to expand an "extv" or "extzv" pattern.
7528 DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
7529 the operation is the equivalent of:
7530
7531 (set DEST (*_extract SRC WIDTH BITPOS))
7532
7533 Return true on success. */
7534
7535 bool
7536 mips_expand_ext_as_unaligned_load (rtx dest, rtx src, HOST_WIDE_INT width,
7537 HOST_WIDE_INT bitpos, bool unsigned_p)
7538 {
7539 rtx left, right, temp;
7540 rtx dest1 = NULL_RTX;
7541
7542 /* If TARGET_64BIT, the destination of a 32-bit "extv" or "extzv" will
7543 be in DImode; create a new SImode temp and emit an extension at the end. */
7544 if (GET_MODE (dest) == DImode
7545 && REG_P (dest)
7546 && GET_MODE_BITSIZE (SImode) == width)
7547 {
7548 dest1 = dest;
7549 dest = gen_reg_rtx (SImode);
7550 }
7551
7552 if (!mips_get_unaligned_mem (src, width, bitpos, &left, &right))
7553 return false;
7554
7555 temp = gen_reg_rtx (GET_MODE (dest));
7556 if (GET_MODE (dest) == DImode)
7557 {
7558 emit_insn (gen_mov_ldl (temp, src, left));
7559 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
7560 }
7561 else
7562 {
7563 emit_insn (gen_mov_lwl (temp, src, left));
7564 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
7565 }
7566
7567 /* If we were loading 32 bits and the original register was DImode,
7568 sign- or zero-extend into the original dest. */
7569 if (dest1)
7570 {
7571 if (unsigned_p)
7572 emit_insn (gen_zero_extendsidi2 (dest1, dest));
7573 else
7574 emit_insn (gen_extendsidi2 (dest1, dest));
7575 }
7576 return true;
7577 }
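
/* Conceptually, the expansion above for a 32-bit field is (a sketch;
   TEMP and DEST become one register after allocation):

	lwl	temp,LEFT
	lwr	dest,RIGHT	# merged with temp

   followed, when the caller's destination was DImode, by a zero
   extension (extzv) or sign extension (extv) of DEST into DEST1.  */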
7578
7579 /* Try to use left/right stores to expand an "ins" pattern. DEST, WIDTH,
7580 BITPOS and SRC are the operands passed to the expander; the operation
7581 is the equivalent of:
7582
7583 (set (zero_extract DEST WIDTH BITPOS) SRC)
7584
7585 Return true on success. */
7586
7587 bool
7588 mips_expand_ins_as_unaligned_store (rtx dest, rtx src, HOST_WIDE_INT width,
7589 HOST_WIDE_INT bitpos)
7590 {
7591 rtx left, right;
7592 enum machine_mode mode;
7593
7594 if (!mips_get_unaligned_mem (dest, width, bitpos, &left, &right))
7595 return false;
7596
7597 mode = mode_for_size (width, MODE_INT, 0);
7598 src = gen_lowpart (mode, src);
7599 if (mode == DImode)
7600 {
7601 emit_insn (gen_mov_sdl (dest, src, left));
7602 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
7603 }
7604 else
7605 {
7606 emit_insn (gen_mov_swl (dest, src, left));
7607 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
7608 }
7609 return true;
7610 }
7611
7612 /* Return true if X is a MEM with the same size as MODE. */
7613
7614 bool
7615 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
7616 {
7617 return (MEM_P (x)
7618 && MEM_SIZE_KNOWN_P (x)
7619 && MEM_SIZE (x) == GET_MODE_SIZE (mode));
7620 }
7621
7622 /* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
7623 source of an "ext" instruction or the destination of an "ins"
7624 instruction. OP must be a register operand and the following
7625 conditions must hold:
7626
7627 0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
7628 0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
7629 0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
7630
7631 Also reject lengths equal to a word as they are better handled
7632 by the move patterns. */
7633
7634 bool
7635 mips_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
7636 {
7637 if (!ISA_HAS_EXT_INS
7638 || !register_operand (op, VOIDmode)
7639 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
7640 return false;
7641
7642 if (!IN_RANGE (width, 1, GET_MODE_BITSIZE (GET_MODE (op)) - 1))
7643 return false;
7644
7645 if (bitpos < 0 || bitpos + width > GET_MODE_BITSIZE (GET_MODE (op)))
7646 return false;
7647
7648 return true;
7649 }
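
/* For example, in SImode, (zero_extract OP 8 4) is acceptable: an 8-bit
   field at bit 4 lies within the 32-bit word.  A width of 32 is
   rejected by the IN_RANGE test above, since full-word extracts are
   better handled by the move patterns.  */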
7650
7651 /* Check whether MASK and SHIFT are valid for a mask-low-and-shift-left
7652 operation, where MAXLEN is the maximum length of consecutive bits that
7653 can make up MASK. MODE is the mode of the operation. See
7654 mask_low_and_shift_len for the actual definition. */
7655
7656 bool
7657 mask_low_and_shift_p (enum machine_mode mode, rtx mask, rtx shift, int maxlen)
7658 {
7659 return IN_RANGE (mask_low_and_shift_len (mode, mask, shift), 1, maxlen);
7660 }
7661
7662 /* Return true iff OP1 and OP2 are valid operands together for the
7663 *and<MODE>3 and *and<MODE>3_mips16 patterns. For the cases to consider,
7664 see the table in the comment before the pattern. */
7665
7666 bool
7667 and_operands_ok (enum machine_mode mode, rtx op1, rtx op2)
7668 {
7669 return (memory_operand (op1, mode)
7670 ? and_load_operand (op2, mode)
7671 : and_reg_operand (op2, mode));
7672 }
7673
7674 /* The canonical form of a mask-low-and-shift-left operation is
7675 (and (ashift X SHIFT) MASK) where MASK has the lower SHIFT number of bits
7676 cleared. Thus we need to shift MASK to the right before checking if it
7677 is a valid mask value. MODE is the mode of the operation. Return the
7678 length of the mask if it is valid, otherwise return -1. */
7679
7680 int
7681 mask_low_and_shift_len (enum machine_mode mode, rtx mask, rtx shift)
7682 {
7683 HOST_WIDE_INT shval;
7684
7685 shval = INTVAL (shift) & (GET_MODE_BITSIZE (mode) - 1);
7686 return exact_log2 ((UINTVAL (mask) >> shval) + 1);
7687 }
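
/* For example, MASK = 0xff00 with SHIFT = 8 in SImode gives
   (0xff00 >> 8) + 1 = 0x100 and exact_log2 (0x100) = 8: a valid 8-bit
   mask.  A shifted MASK that is not of the form (1 << len) - 1 makes
   exact_log2 return -1.  */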
7688 \f
7689 /* Return true if -msplit-addresses is selected and should be honored.
7690
7691 -msplit-addresses is a half-way house between explicit relocations
7692 and the traditional assembler macros. It can split absolute 32-bit
7693 symbolic constants into a high/lo_sum pair but uses macros for other
7694 sorts of access.
7695
7696 Like explicit relocation support for REL targets, it relies
7697 on GNU extensions in the assembler and the linker.
7698
7699 Although this code should work for -O0, it has traditionally
7700 been treated as an optimization. */
7701
7702 static bool
7703 mips_split_addresses_p (void)
7704 {
7705 return (TARGET_SPLIT_ADDRESSES
7706 && optimize
7707 && !TARGET_MIPS16
7708 && !flag_pic
7709 && !ABI_HAS_64BIT_SYMBOLS);
7710 }
7711
7712 /* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs. */
7713
7714 static void
7715 mips_init_relocs (void)
7716 {
7717 memset (mips_split_p, '\0', sizeof (mips_split_p));
7718 memset (mips_split_hi_p, '\0', sizeof (mips_split_hi_p));
7719 memset (mips_use_pcrel_pool_p, '\0', sizeof (mips_use_pcrel_pool_p));
7720 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
7721 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
7722
7723 if (TARGET_MIPS16_PCREL_LOADS)
7724 mips_use_pcrel_pool_p[SYMBOL_ABSOLUTE] = true;
7725 else
7726 {
7727 if (ABI_HAS_64BIT_SYMBOLS)
7728 {
7729 if (TARGET_EXPLICIT_RELOCS)
7730 {
7731 mips_split_p[SYMBOL_64_HIGH] = true;
7732 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
7733 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
7734
7735 mips_split_p[SYMBOL_64_MID] = true;
7736 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
7737 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
7738
7739 mips_split_p[SYMBOL_64_LOW] = true;
7740 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
7741 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
7742
7743 mips_split_p[SYMBOL_ABSOLUTE] = true;
7744 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
7745 }
7746 }
7747 else
7748 {
7749 if (TARGET_EXPLICIT_RELOCS
7750 || mips_split_addresses_p ()
7751 || TARGET_MIPS16)
7752 {
7753 mips_split_p[SYMBOL_ABSOLUTE] = true;
7754 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
7755 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
7756 }
7757 }
7758 }
7759
7760 if (TARGET_MIPS16)
7761 {
7762 /* The high part is provided by a pseudo copy of $gp. */
7763 mips_split_p[SYMBOL_GP_RELATIVE] = true;
7764 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
7765 }
7766 else if (TARGET_EXPLICIT_RELOCS)
7767 /* Small data constants are kept whole until after reload,
7768 then lowered by mips_rewrite_small_data. */
7769 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
7770
7771 if (TARGET_EXPLICIT_RELOCS)
7772 {
7773 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
7774 if (TARGET_NEWABI)
7775 {
7776 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
7777 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
7778 }
7779 else
7780 {
7781 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
7782 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
7783 }
7784 if (TARGET_MIPS16)
7785 /* Expose the use of $28 as soon as possible. */
7786 mips_split_hi_p[SYMBOL_GOT_PAGE_OFST] = true;
7787
7788 if (TARGET_XGOT)
7789 {
7790 /* The HIGH and LO_SUM are matched by special .md patterns. */
7791 mips_split_p[SYMBOL_GOT_DISP] = true;
7792
7793 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
7794 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
7795 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
7796
7797 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
7798 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
7799 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
7800 }
7801 else
7802 {
7803 if (TARGET_NEWABI)
7804 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
7805 else
7806 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
7807 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
7808 if (TARGET_MIPS16)
7809 /* Expose the use of $28 as soon as possible. */
7810 mips_split_p[SYMBOL_GOT_DISP] = true;
7811 }
7812 }
7813
7814 if (TARGET_NEWABI)
7815 {
7816 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
7817 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
7818 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
7819 }
7820
7821 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
7822 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
7823
7824 if (TARGET_MIPS16_PCREL_LOADS)
7825 {
7826 mips_use_pcrel_pool_p[SYMBOL_DTPREL] = true;
7827 mips_use_pcrel_pool_p[SYMBOL_TPREL] = true;
7828 }
7829 else
7830 {
7831 mips_split_p[SYMBOL_DTPREL] = true;
7832 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
7833 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
7834
7835 mips_split_p[SYMBOL_TPREL] = true;
7836 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
7837 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
7838 }
7839
7840 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
7841 mips_lo_relocs[SYMBOL_HALF] = "%half(";
7842 }
7843
7844 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
7845 in context CONTEXT. RELOCS is the array of relocations to use. */
7846
7847 static void
7848 mips_print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
7849 const char **relocs)
7850 {
7851 enum mips_symbol_type symbol_type;
7852 const char *p;
7853
7854 symbol_type = mips_classify_symbolic_expression (op, context);
7855 gcc_assert (relocs[symbol_type]);
7856
7857 fputs (relocs[symbol_type], file);
7858 output_addr_const (file, mips_strip_unspec_address (op));
7859 for (p = relocs[symbol_type]; *p != 0; p++)
7860 if (*p == '(')
7861 fputc (')', file);
7862 }
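
/* For example, with RELOCS == mips_lo_relocs and a SYMBOL_ABSOLUTE
   operand "foo", this prints "%lo(foo)".  The closing loop emits one
   ")" per "(" in the relocation prefix, so a prefix such as
   "%hi(%neg(%gp_rel(" is terminated with ")))".  */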
7863
7864 /* Start a new block with the given asm switch enabled. If we need
7865 to print a directive, emit PREFIX before it and SUFFIX after it. */
7866
7867 static void
7868 mips_push_asm_switch_1 (struct mips_asm_switch *asm_switch,
7869 const char *prefix, const char *suffix)
7870 {
7871 if (asm_switch->nesting_level == 0)
7872 fprintf (asm_out_file, "%s.set\tno%s%s", prefix, asm_switch->name, suffix);
7873 asm_switch->nesting_level++;
7874 }
7875
7876 /* Likewise, but end a block. */
7877
7878 static void
7879 mips_pop_asm_switch_1 (struct mips_asm_switch *asm_switch,
7880 const char *prefix, const char *suffix)
7881 {
7882 gcc_assert (asm_switch->nesting_level);
7883 asm_switch->nesting_level--;
7884 if (asm_switch->nesting_level == 0)
7885 fprintf (asm_out_file, "%s.set\t%s%s", prefix, asm_switch->name, suffix);
7886 }
7887
7888 /* Wrappers around mips_push_asm_switch_1 and mips_pop_asm_switch_1
7889 that either print a complete line or print nothing. */
7890
7891 void
7892 mips_push_asm_switch (struct mips_asm_switch *asm_switch)
7893 {
7894 mips_push_asm_switch_1 (asm_switch, "\t", "\n");
7895 }
7896
7897 void
7898 mips_pop_asm_switch (struct mips_asm_switch *asm_switch)
7899 {
7900 mips_pop_asm_switch_1 (asm_switch, "\t", "\n");
7901 }
7902
7903 /* Print the text for PRINT_OPERAND punctuation character CH to FILE.
7904 The punctuation characters are:
7905
7906 '(' Start a nested ".set noreorder" block.
7907 ')' End a nested ".set noreorder" block.
7908 '[' Start a nested ".set noat" block.
7909 ']' End a nested ".set noat" block.
7910 '<' Start a nested ".set nomacro" block.
7911 '>' End a nested ".set nomacro" block.
7912 '*' Behave like %(%< if generating a delayed-branch sequence.
7913 '#' Print a nop if in a ".set noreorder" block.
7914 '/' Like '#', but do nothing within a delayed-branch sequence.
7915 '?' Print "l" if mips_branch_likely is true
7916 '~' Print a nop if mips_branch_likely is true
7917 '.' Print the name of the register with a hard-wired zero (zero or $0).
7918 '@' Print the name of the assembler temporary register (at or $1).
7919 '^' Print the name of the pic call-through register (t9 or $25).
7920 '+' Print the name of the gp register (usually gp or $28).
7921 '$' Print the name of the stack pointer register (sp or $29).
7922 ':' Print "c" to use the compact version if the delay slot is a nop.
7923 '!' Print "s" to use the short version if the delay slot contains a
7924 16-bit instruction.
7925
7926 See also mips_init_print_operand_punct. */
7927
7928 static void
7929 mips_print_operand_punctuation (FILE *file, int ch)
7930 {
7931 switch (ch)
7932 {
7933 case '(':
7934 mips_push_asm_switch_1 (&mips_noreorder, "", "\n\t");
7935 break;
7936
7937 case ')':
7938 mips_pop_asm_switch_1 (&mips_noreorder, "\n\t", "");
7939 break;
7940
7941 case '[':
7942 mips_push_asm_switch_1 (&mips_noat, "", "\n\t");
7943 break;
7944
7945 case ']':
7946 mips_pop_asm_switch_1 (&mips_noat, "\n\t", "");
7947 break;
7948
7949 case '<':
7950 mips_push_asm_switch_1 (&mips_nomacro, "", "\n\t");
7951 break;
7952
7953 case '>':
7954 mips_pop_asm_switch_1 (&mips_nomacro, "\n\t", "");
7955 break;
7956
7957 case '*':
7958 if (final_sequence != 0)
7959 {
7960 mips_print_operand_punctuation (file, '(');
7961 mips_print_operand_punctuation (file, '<');
7962 }
7963 break;
7964
7965 case '#':
7966 if (mips_noreorder.nesting_level > 0)
7967 fputs ("\n\tnop", file);
7968 break;
7969
7970 case '/':
7971 /* Print an extra newline so that the delayed insn is separated
7972 from the following ones. This looks neater and is consistent
7973 with non-nop delayed sequences. */
7974 if (mips_noreorder.nesting_level > 0 && final_sequence == 0)
7975 fputs ("\n\tnop\n", file);
7976 break;
7977
7978 case '?':
7979 if (mips_branch_likely)
7980 putc ('l', file);
7981 break;
7982
7983 case '~':
7984 if (mips_branch_likely)
7985 fputs ("\n\tnop", file);
7986 break;
7987
7988 case '.':
7989 fputs (reg_names[GP_REG_FIRST + 0], file);
7990 break;
7991
7992 case '@':
7993 fputs (reg_names[AT_REGNUM], file);
7994 break;
7995
7996 case '^':
7997 fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
7998 break;
7999
8000 case '+':
8001 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
8002 break;
8003
8004 case '$':
8005 fputs (reg_names[STACK_POINTER_REGNUM], file);
8006 break;
8007
8008 case ':':
8009 /* When final_sequence is 0, the delay slot will be a nop. We can
8010 use the compact version for microMIPS. */
8011 if (final_sequence == 0)
8012 putc ('c', file);
8013 break;
8014
8015 case '!':
8016 /* If the delay slot is a nop or contains a 16-bit instruction,
8017 use the short version. */
8018 if (final_sequence == 0
8019 || get_attr_length (XVECEXP (final_sequence, 0, 1)) == 2)
8020 putc ('s', file);
8021 break;
8022
8023 default:
8024 gcc_unreachable ();
8025 break;
8026 }
8027 }
8028
8029 /* Initialize mips_print_operand_punct. */
8030
8031 static void
8032 mips_init_print_operand_punct (void)
8033 {
8034 const char *p;
8035
8036 for (p = "()[]<>*#/?~.@^+$:!"; *p; p++)
8037 mips_print_operand_punct[(unsigned char) *p] = true;
8038 }
8039
8040 /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
8041 associated with condition CODE. Print the condition part of the
8042 opcode to FILE. */
8043
8044 static void
8045 mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
8046 {
8047 switch (code)
8048 {
8049 case EQ:
8050 case NE:
8051 case GT:
8052 case GE:
8053 case LT:
8054 case LE:
8055 case GTU:
8056 case GEU:
8057 case LTU:
8058 case LEU:
8059 /* Conveniently, the MIPS names for these conditions are the same
8060 as their RTL equivalents. */
8061 fputs (GET_RTX_NAME (code), file);
8062 break;
8063
8064 default:
8065 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
8066 break;
8067 }
8068 }
8069
8070 /* Likewise floating-point branches. */
8071
8072 static void
8073 mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
8074 {
8075 switch (code)
8076 {
8077 case EQ:
8078 fputs ("c1f", file);
8079 break;
8080
8081 case NE:
8082 fputs ("c1t", file);
8083 break;
8084
8085 default:
8086 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
8087 break;
8088 }
8089 }
8090
8091 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
8092
8093 static bool
8094 mips_print_operand_punct_valid_p (unsigned char code)
8095 {
8096 return mips_print_operand_punct[code];
8097 }
8098
8099 /* Implement TARGET_PRINT_OPERAND. The MIPS-specific operand codes are:
8100
8101 'X' Print CONST_INT OP in hexadecimal format.
8102 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
8103 'd' Print CONST_INT OP in decimal.
8104 'm' Print one less than CONST_INT OP in decimal.
8105 'h' Print the high-part relocation associated with OP, after stripping
8106 any outermost HIGH.
8107 'R' Print the low-part relocation associated with OP.
8108 'C' Print the integer branch condition for comparison OP.
8109 'N' Print the inverse of the integer branch condition for comparison OP.
8110 'F' Print the FPU branch condition for comparison OP.
8111 'W' Print the inverse of the FPU branch condition for comparison OP.
8112 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
8113 'z' for (eq:?I ...), 'n' for (ne:?I ...).
8114 't' Like 'T', but with the EQ/NE cases reversed
8115 'Y' Print mips_fp_conditions[INTVAL (OP)]
8116 'Z' Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
8117 'q' Print a DSP accumulator register.
8118 'D' Print the second part of a double-word register or memory operand.
8119 'L' Print the low-order register in a double-word register operand.
8120 'M' Print the high-order register in a double-word register operand.
8121 'z' Print $0 if OP is zero, otherwise print OP normally.
8122 'b' Print the address of a memory operand, without offset. */
8123
8124 static void
8125 mips_print_operand (FILE *file, rtx op, int letter)
8126 {
8127 enum rtx_code code;
8128
8129 if (mips_print_operand_punct_valid_p (letter))
8130 {
8131 mips_print_operand_punctuation (file, letter);
8132 return;
8133 }
8134
8135 gcc_assert (op);
8136 code = GET_CODE (op);
8137
8138 switch (letter)
8139 {
8140 case 'X':
8141 if (CONST_INT_P (op))
8142 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
8143 else
8144 output_operand_lossage ("invalid use of '%%%c'", letter);
8145 break;
8146
8147 case 'x':
8148 if (CONST_INT_P (op))
8149 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
8150 else
8151 output_operand_lossage ("invalid use of '%%%c'", letter);
8152 break;
8153
8154 case 'd':
8155 if (CONST_INT_P (op))
8156 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
8157 else
8158 output_operand_lossage ("invalid use of '%%%c'", letter);
8159 break;
8160
8161 case 'm':
8162 if (CONST_INT_P (op))
8163 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1);
8164 else
8165 output_operand_lossage ("invalid use of '%%%c'", letter);
8166 break;
8167
8168 case 'h':
8169 if (code == HIGH)
8170 op = XEXP (op, 0);
8171 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
8172 break;
8173
8174 case 'R':
8175 mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
8176 break;
8177
8178 case 'C':
8179 mips_print_int_branch_condition (file, code, letter);
8180 break;
8181
8182 case 'N':
8183 mips_print_int_branch_condition (file, reverse_condition (code), letter);
8184 break;
8185
8186 case 'F':
8187 mips_print_float_branch_condition (file, code, letter);
8188 break;
8189
8190 case 'W':
8191 mips_print_float_branch_condition (file, reverse_condition (code),
8192 letter);
8193 break;
8194
8195 case 'T':
8196 case 't':
8197 {
8198 int truth = (code == NE) == (letter == 'T');
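/* Index into "zfnt": the false cases 'z'/'f' come first and the true
cases 'n'/'t' second; within each pair, CCmode selects the FP
letter ('f' or 't'). */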
8199 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
8200 }
8201 break;
8202
8203 case 'Y':
8204 if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
8205 fputs (mips_fp_conditions[UINTVAL (op)], file);
8206 else
8207 output_operand_lossage ("'%%%c' is not a valid operand prefix",
8208 letter);
8209 break;
8210
8211 case 'Z':
8212 if (ISA_HAS_8CC)
8213 {
8214 mips_print_operand (file, op, 0);
8215 fputc (',', file);
8216 }
8217 break;
8218
8219 case 'q':
8220 if (code == REG && MD_REG_P (REGNO (op)))
8221 fprintf (file, "$ac0");
8222 else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
8223 fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
8224 else
8225 output_operand_lossage ("invalid use of '%%%c'", letter);
8226 break;
8227
8228 default:
8229 switch (code)
8230 {
8231 case REG:
8232 {
8233 unsigned int regno = REGNO (op);
8234 if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
8235 || (letter == 'L' && TARGET_BIG_ENDIAN)
8236 || letter == 'D')
8237 regno++;
8238 else if (letter && letter != 'z' && letter != 'M' && letter != 'L')
8239 output_operand_lossage ("invalid use of '%%%c'", letter);
8240 /* We need to print $0 .. $31 for COP0 registers. */
8241 if (COP0_REG_P (regno))
8242 fprintf (file, "$%s", &reg_names[regno][4]);
8243 else
8244 fprintf (file, "%s", reg_names[regno]);
8245 }
8246 break;
8247
8248 case MEM:
8249 if (letter == 'D')
8250 output_address (plus_constant (Pmode, XEXP (op, 0), 4));
8251 else if (letter == 'b')
8252 {
8253 gcc_assert (REG_P (XEXP (op, 0)));
8254 mips_print_operand (file, XEXP (op, 0), 0);
8255 }
8256 else if (letter && letter != 'z')
8257 output_operand_lossage ("invalid use of '%%%c'", letter);
8258 else
8259 output_address (XEXP (op, 0));
8260 break;
8261
8262 default:
8263 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
8264 fputs (reg_names[GP_REG_FIRST], file);
8265 else if (letter && letter != 'z')
8266 output_operand_lossage ("invalid use of '%%%c'", letter);
8267 else if (CONST_GP_P (op))
8268 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
8269 else
8270 output_addr_const (file, mips_strip_unspec_address (op));
8271 break;
8272 }
8273 }
8274 }
8275
8276 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
8277
8278 static void
8279 mips_print_operand_address (FILE *file, rtx x)
8280 {
8281 struct mips_address_info addr;
8282
8283 if (mips_classify_address (&addr, x, word_mode, true))
8284 switch (addr.type)
8285 {
8286 case ADDRESS_REG:
8287 mips_print_operand (file, addr.offset, 0);
8288 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
8289 return;
8290
8291 case ADDRESS_LO_SUM:
8292 mips_print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
8293 mips_lo_relocs);
8294 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
8295 return;
8296
8297 case ADDRESS_CONST_INT:
8298 output_addr_const (file, x);
8299 fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
8300 return;
8301
8302 case ADDRESS_SYMBOLIC:
8303 output_addr_const (file, mips_strip_unspec_address (x));
8304 return;
8305 }
8306 gcc_unreachable ();
8307 }
8308 \f
8309 /* Implement TARGET_ENCODE_SECTION_INFO. */
8310
8311 static void
8312 mips_encode_section_info (tree decl, rtx rtl, int first)
8313 {
8314 default_encode_section_info (decl, rtl, first);
8315
8316 if (TREE_CODE (decl) == FUNCTION_DECL)
8317 {
8318 rtx symbol = XEXP (rtl, 0);
8319 tree type = TREE_TYPE (decl);
8320
8321 /* Encode whether the symbol is short or long. */
8322 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
8323 || mips_far_type_p (type))
8324 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
8325 }
8326 }
8327
8328 /* Implement TARGET_SELECT_RTX_SECTION. */
8329
8330 static section *
8331 mips_select_rtx_section (enum machine_mode mode, rtx x,
8332 unsigned HOST_WIDE_INT align)
8333 {
8334 /* ??? Consider using mergeable small data sections. */
8335 if (mips_rtx_constant_in_small_data_p (mode))
8336 return get_named_section (NULL, ".sdata", 0);
8337
8338 return default_elf_select_rtx_section (mode, x, align);
8339 }
8340
8341 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
8342
8343 The complication here is that, with the combination TARGET_ABICALLS
8344 && !TARGET_ABSOLUTE_ABICALLS && !TARGET_GPWORD, jump tables will use
8345 absolute addresses, and should therefore not be included in the
8346 read-only part of a DSO. Handle such cases by selecting a normal
8347 data section instead of a read-only one. The logic apes that in
8348 default_function_rodata_section. */
8349
8350 static section *
8351 mips_function_rodata_section (tree decl)
8352 {
8353 if (!TARGET_ABICALLS || TARGET_ABSOLUTE_ABICALLS || TARGET_GPWORD)
8354 return default_function_rodata_section (decl);
8355
8356 if (decl && DECL_SECTION_NAME (decl))
8357 {
8358 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8359 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
8360 {
8361 char *rname = ASTRDUP (name);
8362 rname[14] = 'd';
8363 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
8364 }
8365 else if (flag_function_sections
8366 && flag_data_sections
8367 && strncmp (name, ".text.", 6) == 0)
8368 {
8369 char *rname = ASTRDUP (name);
8370 memcpy (rname + 1, "data", 4);
8371 return get_section (rname, SECTION_WRITE, decl);
8372 }
8373 }
8374 return data_section;
8375 }
8376
8377 /* Implement TARGET_IN_SMALL_DATA_P. */
8378
8379 static bool
8380 mips_in_small_data_p (const_tree decl)
8381 {
8382 unsigned HOST_WIDE_INT size;
8383
8384 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
8385 return false;
8386
8387 /* We don't yet generate small-data references for -mabicalls
8388 or VxWorks RTP code. See the related -G handling in
8389 mips_option_override. */
8390 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
8391 return false;
8392
8393 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
8394 {
8395 const char *name;
8396
8397 /* Reject anything that isn't in a known small-data section. */
8398 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8399 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
8400 return false;
8401
8402 /* If a symbol is defined externally, the assembler will use the
8403 usual -G rules when deciding how to implement macros. */
8404 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
8405 return true;
8406 }
8407 else if (TARGET_EMBEDDED_DATA)
8408 {
8409 /* Don't put constants into the small data section: we want them
8410 to be in ROM rather than RAM. */
8411 if (TREE_CODE (decl) != VAR_DECL)
8412 return false;
8413
8414 if (TREE_READONLY (decl)
8415 && !TREE_SIDE_EFFECTS (decl)
8416 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
8417 return false;
8418 }
8419
8420 /* Enforce -mlocal-sdata. */
8421 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
8422 return false;
8423
8424 /* Enforce -mextern-sdata. */
8425 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
8426 {
8427 if (DECL_EXTERNAL (decl))
8428 return false;
8429 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
8430 return false;
8431 }
8432
8433 /* We have traditionally not treated zero-sized objects as small data,
8434 so this is now effectively part of the ABI. */
8435 size = int_size_in_bytes (TREE_TYPE (decl));
8436 return size > 0 && size <= mips_small_data_threshold;
8437 }
8438
8439 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
8440 anchors for small data: the GP register acts as an anchor in that
8441 case. We also don't want to use them for PC-relative accesses,
8442 where the PC acts as an anchor. */
8443
8444 static bool
8445 mips_use_anchors_for_symbol_p (const_rtx symbol)
8446 {
8447 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
8448 {
8449 case SYMBOL_PC_RELATIVE:
8450 case SYMBOL_GP_RELATIVE:
8451 return false;
8452
8453 default:
8454 return default_use_anchors_for_symbol_p (symbol);
8455 }
8456 }
8457 \f
8458 /* The MIPS debug format wants all automatic variables and arguments
8459 to be in terms of the virtual frame pointer (stack pointer before
8460 any adjustment in the function), while the MIPS 3.0 linker wants
8461 the frame pointer to be the stack pointer after the initial
8462 adjustment. So, we do the adjustment here. The arg pointer (which
8463 is eliminated) points to the virtual frame pointer, while the frame
8464 pointer (which may be eliminated) points to the stack pointer after
8465 the initial adjustments. */
8466
8467 HOST_WIDE_INT
8468 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
8469 {
8470 rtx offset2 = const0_rtx;
8471 rtx reg = eliminate_constant_term (addr, &offset2);
8472
8473 if (offset == 0)
8474 offset = INTVAL (offset2);
8475
8476 if (reg == stack_pointer_rtx
8477 || reg == frame_pointer_rtx
8478 || reg == hard_frame_pointer_rtx)
8479 {
8480 offset -= cfun->machine->frame.total_size;
8481 if (reg == hard_frame_pointer_rtx)
8482 offset += cfun->machine->frame.hard_frame_pointer_offset;
8483 }
8484
8485 return offset;
8486 }
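
/* For example, with a 32-byte frame whose hard frame pointer sits
   16 bytes into it (illustrative numbers), 8($sp) yields a debugger
   offset of 8 - 32 = -24, while 8($fp) yields 8 - 32 + 16 = -8.  */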
8487 \f
8488 /* Implement ASM_OUTPUT_EXTERNAL. */
8489
8490 void
8491 mips_output_external (FILE *file, tree decl, const char *name)
8492 {
8493 default_elf_asm_output_external (file, decl, name);
8494
8495 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
8496 set in order to avoid putting out names that are never really
8497 used. */
8498 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
8499 {
8500 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
8501 {
8502 /* When using assembler macros, emit .extern directives for
8503 all small-data externs so that the assembler knows how
8504 big they are.
8505
8506 In most cases it would be safe (though pointless) to emit
8507 .externs for other symbols too. One exception is when an
8508 object is within the -G limit but declared by the user to
8509 be in a section other than .sbss or .sdata. */
8510 fputs ("\t.extern\t", file);
8511 assemble_name (file, name);
8512 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
8513 int_size_in_bytes (TREE_TYPE (decl)));
8514 }
8515 }
8516 }
8517
8518 /* Implement TARGET_ASM_OUTPUT_SOURCE_FILENAME. */
8519
8520 static void
8521 mips_output_filename (FILE *stream, const char *name)
8522 {
8523 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
8524 directives. */
8525 if (write_symbols == DWARF2_DEBUG)
8526 return;
8527 else if (mips_output_filename_first_time)
8528 {
8529 mips_output_filename_first_time = 0;
8530 num_source_filenames += 1;
8531 current_function_file = name;
8532 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8533 output_quoted_string (stream, name);
8534 putc ('\n', stream);
8535 }
8536 /* If we are emitting stabs, let dbxout.c handle this (except for
8537 the mips_output_filename_first_time case). */
8538 else if (write_symbols == DBX_DEBUG)
8539 return;
8540 else if (name != current_function_file
8541 && strcmp (name, current_function_file) != 0)
8542 {
8543 num_source_filenames += 1;
8544 current_function_file = name;
8545 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8546 output_quoted_string (stream, name);
8547 putc ('\n', stream);
8548 }
8549 }
8550
8551 /* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
8552
8553 static void ATTRIBUTE_UNUSED
8554 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
8555 {
8556 switch (size)
8557 {
8558 case 4:
8559 fputs ("\t.dtprelword\t", file);
8560 break;
8561
8562 case 8:
8563 fputs ("\t.dtpreldword\t", file);
8564 break;
8565
8566 default:
8567 gcc_unreachable ();
8568 }
8569 output_addr_const (file, x);
8570 fputs ("+0x8000", file);
8571 }
8572
8573 /* Implement TARGET_DWARF_REGISTER_SPAN. */
8574
8575 static rtx
8576 mips_dwarf_register_span (rtx reg)
8577 {
8578 rtx high, low;
8579 enum machine_mode mode;
8580
8581 /* By default, GCC maps increasing register numbers to increasing
8582 memory locations, but paired FPRs are always little-endian,
8583 regardless of the prevailing endianness. */
8584 mode = GET_MODE (reg);
8585 if (FP_REG_P (REGNO (reg))
8586 && TARGET_BIG_ENDIAN
8587 && MAX_FPRS_PER_FMT > 1
8588 && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
8589 {
8590 gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
8591 high = mips_subword (reg, true);
8592 low = mips_subword (reg, false);
8593 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
8594 }
8595
8596 return NULL_RTX;
8597 }
8598
8599 /* The DSP ALU can bypass data with no delay for the following pairs. */
8600 enum insn_code dspalu_bypass_table[][2] =
8601 {
8602 {CODE_FOR_mips_addsc, CODE_FOR_mips_addwc},
8603 {CODE_FOR_mips_cmpu_eq_qb, CODE_FOR_mips_pick_qb},
8604 {CODE_FOR_mips_cmpu_lt_qb, CODE_FOR_mips_pick_qb},
8605 {CODE_FOR_mips_cmpu_le_qb, CODE_FOR_mips_pick_qb},
8606 {CODE_FOR_mips_cmp_eq_ph, CODE_FOR_mips_pick_ph},
8607 {CODE_FOR_mips_cmp_lt_ph, CODE_FOR_mips_pick_ph},
8608 {CODE_FOR_mips_cmp_le_ph, CODE_FOR_mips_pick_ph},
8609 {CODE_FOR_mips_wrdsp, CODE_FOR_mips_insv}
8610 };
8611
8612 int
8613 mips_dspalu_bypass_p (rtx out_insn, rtx in_insn)
8614 {
8615 int i;
8616 int num_bypass = ARRAY_SIZE (dspalu_bypass_table);
8617 enum insn_code out_icode = (enum insn_code) INSN_CODE (out_insn);
8618 enum insn_code in_icode = (enum insn_code) INSN_CODE (in_insn);
8619
8620 for (i = 0; i < num_bypass; i++)
8621 {
8622 if (out_icode == dspalu_bypass_table[i][0]
8623 && in_icode == dspalu_bypass_table[i][1])
8624 return true;
8625 }
8626
8627 return false;
8628 }
8629 /* Implement ASM_OUTPUT_ASCII. */
8630
8631 void
8632 mips_output_ascii (FILE *stream, const char *string, size_t len)
8633 {
8634 size_t i;
8635 int cur_pos;
8636
8637 cur_pos = 17;
8638 fprintf (stream, "\t.ascii\t\"");
8639 for (i = 0; i < len; i++)
8640 {
8641 int c;
8642
8643 c = (unsigned char) string[i];
8644 if (ISPRINT (c))
8645 {
8646 if (c == '\\' || c == '\"')
8647 {
8648 putc ('\\', stream);
8649 cur_pos++;
8650 }
8651 putc (c, stream);
8652 cur_pos++;
8653 }
8654 else
8655 {
8656 fprintf (stream, "\\%03o", c);
8657 cur_pos += 4;
8658 }
8659
8660 if (cur_pos > 72 && i + 1 < len)
8661 {
8662 cur_pos = 17;
8663 fprintf (stream, "\"\n\t.ascii\t\"");
8664 }
8665 }
8666 fprintf (stream, "\"\n");
8667 }
8668
8669 /* Return the pseudo-op for full SYMBOL_(D)TPREL address *ADDR.
8670 Update *ADDR with the operand that should be printed. */
8671
8672 const char *
8673 mips_output_tls_reloc_directive (rtx *addr)
8674 {
8675 enum mips_symbol_type type;
8676
8677 type = mips_classify_symbolic_expression (*addr, SYMBOL_CONTEXT_LEA);
8678 *addr = mips_strip_unspec_address (*addr);
8679 switch (type)
8680 {
8681 case SYMBOL_DTPREL:
8682 return Pmode == SImode ? ".dtprelword\t%0" : ".dtpreldword\t%0";
8683
8684 case SYMBOL_TPREL:
8685 return Pmode == SImode ? ".tprelword\t%0" : ".tpreldword\t%0";
8686
8687 default:
8688 gcc_unreachable ();
8689 }
8690 }
8691
8692 /* Emit either a label, .comm, or .lcomm directive. When using assembler
8693 macros, mark the symbol as written so that mips_asm_output_external
8694 won't emit an .extern for it. STREAM is the output file, NAME is the
8695 name of the symbol, INIT_STRING is the string that should be written
8696 before the symbol and FINAL_STRING is the string that should be
8697 written after it. FINAL_STRING is a printf format that consumes the
8698 remaining arguments. */
8699
8700 void
8701 mips_declare_object (FILE *stream, const char *name, const char *init_string,
8702 const char *final_string, ...)
8703 {
8704 va_list ap;
8705
8706 fputs (init_string, stream);
8707 assemble_name (stream, name);
8708 va_start (ap, final_string);
8709 vfprintf (stream, final_string, ap);
8710 va_end (ap);
8711
8712 if (!TARGET_EXPLICIT_RELOCS)
8713 {
8714 tree name_tree = get_identifier (name);
8715 TREE_ASM_WRITTEN (name_tree) = 1;
8716 }
8717 }
8718
8719 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
8720 NAME is the name of the object and ALIGN is the required alignment
8721 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
8722 alignment argument. */
8723
8724 void
8725 mips_declare_common_object (FILE *stream, const char *name,
8726 const char *init_string,
8727 unsigned HOST_WIDE_INT size,
8728 unsigned int align, bool takes_alignment_p)
8729 {
8730 if (!takes_alignment_p)
8731 {
8732 size += (align / BITS_PER_UNIT) - 1;
8733 size -= size % (align / BITS_PER_UNIT);
8734 mips_declare_object (stream, name, init_string,
8735 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
8736 }
8737 else
8738 mips_declare_object (stream, name, init_string,
8739 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
8740 size, align / BITS_PER_UNIT);
8741 }
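/* Worked example of the !TAKES_ALIGNMENT_P rounding above: with
   SIZE == 10 and ALIGN == 64 bits (8 bytes), SIZE first becomes
   10 + 7 == 17 and then 17 - (17 % 8) == 16; that is, the size is
   rounded up to the next multiple of the alignment, because the
   two-operand directive cannot express the alignment itself.  */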
8742
8743 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
8744 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
8745
8746 void
8747 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
8748 unsigned HOST_WIDE_INT size,
8749 unsigned int align)
8750 {
8751 /* If the target wants uninitialized const declarations in
8752 .rdata then don't put them in .comm. */
8753 if (TARGET_EMBEDDED_DATA
8754 && TARGET_UNINIT_CONST_IN_RODATA
8755 && TREE_CODE (decl) == VAR_DECL
8756 && TREE_READONLY (decl)
8757 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
8758 {
8759 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
8760 targetm.asm_out.globalize_label (stream, name);
8761
8762 switch_to_section (readonly_data_section);
8763 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
8764 mips_declare_object (stream, name, "",
8765 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
8766 size);
8767 }
8768 else
8769 mips_declare_common_object (stream, name, "\n\t.comm\t",
8770 size, align, true);
8771 }
8772
8773 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8774 extern int size_directive_output;
8775
8776 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
8777 definitions except that it uses mips_declare_object to emit the label. */
8778
8779 void
8780 mips_declare_object_name (FILE *stream, const char *name,
8781 tree decl ATTRIBUTE_UNUSED)
8782 {
8783 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8784 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8785 #endif
8786
8787 size_directive_output = 0;
8788 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
8789 {
8790 HOST_WIDE_INT size;
8791
8792 size_directive_output = 1;
8793 size = int_size_in_bytes (TREE_TYPE (decl));
8794 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8795 }
8796
8797 mips_declare_object (stream, name, "", ":\n");
8798 }
8799
8800 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
8801
8802 void
8803 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
8804 {
8805 const char *name;
8806
8807 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
8808 if (!flag_inhibit_size_directive
8809 && DECL_SIZE (decl) != 0
8810 && !at_end
8811 && top_level
8812 && DECL_INITIAL (decl) == error_mark_node
8813 && !size_directive_output)
8814 {
8815 HOST_WIDE_INT size;
8816
8817 size_directive_output = 1;
8818 size = int_size_in_bytes (TREE_TYPE (decl));
8819 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8820 }
8821 }
8822 #endif
8823 \f
8824 /* Return the FOO in the name of the ".mdebug.FOO" section associated
8825 with the current ABI. */
8826
8827 static const char *
8828 mips_mdebug_abi_name (void)
8829 {
8830 switch (mips_abi)
8831 {
8832 case ABI_32:
8833 return "abi32";
8834 case ABI_O64:
8835 return "abiO64";
8836 case ABI_N32:
8837 return "abiN32";
8838 case ABI_64:
8839 return "abi64";
8840 case ABI_EABI:
8841 return TARGET_64BIT ? "eabi64" : "eabi32";
8842 default:
8843 gcc_unreachable ();
8844 }
8845 }
8846
8847 /* Implement TARGET_ASM_FILE_START. */
8848
8849 static void
8850 mips_file_start (void)
8851 {
8852 default_file_start ();
8853
8854 /* Generate a special section to describe the ABI switches used to
8855 produce the resultant binary. */
8856
8857 /* Record the ABI itself. Modern versions of binutils encode
8858 this information in the ELF header flags, but GDB needs the
8859 information in order to correctly debug binaries produced by
8860 older binutils. See the function mips_gdbarch_init in
8861 gdb/mips-tdep.c. */
8862 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
8863 mips_mdebug_abi_name ());
8864
8865 /* There is no ELF header flag to distinguish long32 forms of the
8866 EABI from long64 forms. Emit a special section to help tools
8867 such as GDB. Do the same for o64, which is sometimes used with
8868 -mlong64. */
8869 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
8870 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
8871 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
8872
8873 /* Record the NaN encoding. */
8874 if (HAVE_AS_NAN || mips_nan != MIPS_IEEE_754_DEFAULT)
8875 fprintf (asm_out_file, "\t.nan\t%s\n",
8876 mips_nan == MIPS_IEEE_754_2008 ? "2008" : "legacy");
8877
8878 #ifdef HAVE_AS_GNU_ATTRIBUTE
8879 {
8880 int attr;
8881
8882 /* No floating-point operations, -mno-float. */
8883 if (TARGET_NO_FLOAT)
8884 attr = 0;
8885 /* Soft-float code, -msoft-float. */
8886 else if (!TARGET_HARD_FLOAT_ABI)
8887 attr = 3;
8888 /* Single-float code, -msingle-float. */
8889 else if (!TARGET_DOUBLE_FLOAT)
8890 attr = 2;
8891 /* 64-bit FP registers on a 32-bit target, -mips32r2 -mfp64. */
8892 else if (!TARGET_64BIT && TARGET_FLOAT64)
8893 attr = 4;
8894 /* Regular FP code, FP regs same size as GP regs, -mdouble-float. */
8895 else
8896 attr = 1;
8897
8898 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", attr);
8899 }
8900 #endif
8901
8902 /* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
8903 if (TARGET_ABICALLS)
8904 {
8905 fprintf (asm_out_file, "\t.abicalls\n");
8906 if (TARGET_ABICALLS_PIC0)
8907 fprintf (asm_out_file, "\t.option\tpic0\n");
8908 }
8909
8910 if (flag_verbose_asm)
8911 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
8912 ASM_COMMENT_START,
8913 mips_small_data_threshold, mips_arch_info->name, mips_isa);
8914 }
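/* A sketch of the preamble this might produce for o32 abicalls code
   with a legacy-NaN FPU and an assembler that supports .gnu_attribute
   (the exact lines depend on the configuration):

	.section .mdebug.abi32
	.previous
	.nan	legacy
	.gnu_attribute 4, 1
	.abicalls  */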
8915
8916 /* Implement TARGET_ASM_CODE_END. */
8917
8918 static void
8919 mips_code_end (void)
8920 {
8921 if (mips_need_mips16_rdhwr_p)
8922 mips_output_mips16_rdhwr ();
8923 }
8924 \f
8925 /* Make the last instruction frame-related and note that it performs
8926 the operation described by FRAME_PATTERN. */
8927
8928 static void
8929 mips_set_frame_expr (rtx frame_pattern)
8930 {
8931 rtx insn;
8932
8933 insn = get_last_insn ();
8934 RTX_FRAME_RELATED_P (insn) = 1;
8935 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
8936 frame_pattern,
8937 REG_NOTES (insn));
8938 }
8939
8940 /* Return a frame-related rtx that stores REG at MEM.
8941 REG must be a single register. */
8942
8943 static rtx
8944 mips_frame_set (rtx mem, rtx reg)
8945 {
8946 rtx set;
8947
8948 set = gen_rtx_SET (VOIDmode, mem, reg);
8949 RTX_FRAME_RELATED_P (set) = 1;
8950
8951 return set;
8952 }
8953
8954 /* Record that the epilogue has restored call-saved register REG. */
8955
8956 static void
8957 mips_add_cfa_restore (rtx reg)
8958 {
8959 mips_epilogue.cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8960 mips_epilogue.cfa_restores);
8961 }
8962 \f
8963 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
8964 mips16e_s2_s8_regs[X], it must also save the registers in indexes
8965 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
8966 static const unsigned char mips16e_s2_s8_regs[] = {
8967 30, 23, 22, 21, 20, 19, 18
8968 };
8969 static const unsigned char mips16e_a0_a3_regs[] = {
8970 4, 5, 6, 7
8971 };
8972
8973 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
8974 ordered from the uppermost in memory to the lowest in memory. */
8975 static const unsigned char mips16e_save_restore_regs[] = {
8976 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
8977 };
8978
8979 /* Return the index of the lowest X in the range [0, SIZE) for which
8980 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
8981
8982 static unsigned int
8983 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
8984 unsigned int size)
8985 {
8986 unsigned int i;
8987
8988 for (i = 0; i < size; i++)
8989 if (BITSET_P (mask, regs[i]))
8990 break;
8991
8992 return i;
8993 }
8994
8995 /* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
8996 is the number of set bits. If *MASK_PTR contains REGS[X] for some X
8997 in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
8998 is true for all indexes (X, SIZE). */
8999
9000 static void
9001 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
9002 unsigned int size, unsigned int *num_regs_ptr)
9003 {
9004 unsigned int i;
9005
9006 i = mips16e_find_first_register (*mask_ptr, regs, size);
9007 for (i++; i < size; i++)
9008 if (!BITSET_P (*mask_ptr, regs[i]))
9009 {
9010 *num_regs_ptr += 1;
9011 *mask_ptr |= 1 << regs[i];
9012 }
9013 }
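/* Worked example: if *MASK_PTR contains only $22, whose index in
   mips16e_s2_s8_regs is 2, the loop above also sets $21, $20, $19
   and $18 (indexes 3 onwards) and bumps *NUM_REGS_PTR by 4, matching
   the SAVE/RESTORE restriction described before those arrays.  */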
9014
9015 /* Return a simplified form of X using the register values in REG_VALUES.
9016 REG_VALUES[R] is the last value assigned to hard register R, or null
9017 if R has not been modified.
9018
9019 This function is rather limited, but is good enough for our purposes. */
9020
9021 static rtx
9022 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
9023 {
9024 x = avoid_constant_pool_reference (x);
9025
9026 if (UNARY_P (x))
9027 {
9028 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
9029 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
9030 x0, GET_MODE (XEXP (x, 0)));
9031 }
9032
9033 if (ARITHMETIC_P (x))
9034 {
9035 rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
9036 rtx x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
9037 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
9038 }
9039
9040 if (REG_P (x)
9041 && reg_values[REGNO (x)]
9042 && !rtx_unstable_p (reg_values[REGNO (x)]))
9043 return reg_values[REGNO (x)];
9044
9045 return x;
9046 }
9047
9048 /* Return true if (set DEST SRC) stores an argument register into its
9049 caller-allocated save slot, storing the number of that argument
9050 register in *REGNO_PTR if so. REG_VALUES is as for
9051 mips16e_collect_propagate_value. */
9052
9053 static bool
9054 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
9055 unsigned int *regno_ptr)
9056 {
9057 unsigned int argno, regno;
9058 HOST_WIDE_INT offset, required_offset;
9059 rtx addr, base;
9060
9061 /* Check that this is a word-mode store. */
9062 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
9063 return false;
9064
9065 /* Check that the register being saved is an unmodified argument
9066 register. */
9067 regno = REGNO (src);
9068 if (!IN_RANGE (regno, GP_ARG_FIRST, GP_ARG_LAST) || reg_values[regno])
9069 return false;
9070 argno = regno - GP_ARG_FIRST;
9071
9072 /* Check whether the address is an appropriate stack-pointer or
9073 frame-pointer access. */
9074 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
9075 mips_split_plus (addr, &base, &offset);
9076 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
9077 if (base == hard_frame_pointer_rtx)
9078 required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
9079 else if (base != stack_pointer_rtx)
9080 return false;
9081 if (offset != required_offset)
9082 return false;
9083
9084 *regno_ptr = regno;
9085 return true;
9086 }
9087
9088 /* A subroutine of mips_expand_prologue, called only when generating
9089 MIPS16e SAVE instructions. Search the start of the function for any
9090 instructions that save argument registers into their caller-allocated
9091 save slots. Delete such instructions and return a value N such that
9092 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
9093 instructions redundant. */
9094
9095 static unsigned int
9096 mips16e_collect_argument_saves (void)
9097 {
9098 rtx reg_values[FIRST_PSEUDO_REGISTER];
9099 rtx insn, next, set, dest, src;
9100 unsigned int nargs, regno;
9101
9102 push_topmost_sequence ();
9103 nargs = 0;
9104 memset (reg_values, 0, sizeof (reg_values));
9105 for (insn = get_insns (); insn; insn = next)
9106 {
9107 next = NEXT_INSN (insn);
9108 if (NOTE_P (insn) || DEBUG_INSN_P (insn))
9109 continue;
9110
9111 if (!INSN_P (insn))
9112 break;
9113
9114 set = PATTERN (insn);
9115 if (GET_CODE (set) != SET)
9116 break;
9117
9118 dest = SET_DEST (set);
9119 src = SET_SRC (set);
9120 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
9121 {
9122 if (!BITSET_P (cfun->machine->frame.mask, regno))
9123 {
9124 delete_insn (insn);
9125 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
9126 }
9127 }
9128 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
9129 reg_values[REGNO (dest)]
9130 = mips16e_collect_propagate_value (src, reg_values);
9131 else
9132 break;
9133 }
9134 pop_topmost_sequence ();
9135
9136 return nargs;
9137 }
9138
9139 /* Return a move between register REGNO and memory location SP + OFFSET.
9140 REG_PARM_P is true if SP + OFFSET belongs to REG_PARM_STACK_SPACE.
9141 Make the move a load if RESTORE_P, otherwise make it a store. */
9142
9143 static rtx
9144 mips16e_save_restore_reg (bool restore_p, bool reg_parm_p,
9145 HOST_WIDE_INT offset, unsigned int regno)
9146 {
9147 rtx reg, mem;
9148
9149 mem = gen_frame_mem (SImode, plus_constant (Pmode, stack_pointer_rtx,
9150 offset));
9151 reg = gen_rtx_REG (SImode, regno);
9152 if (restore_p)
9153 {
9154 mips_add_cfa_restore (reg);
9155 return gen_rtx_SET (VOIDmode, reg, mem);
9156 }
9157 if (reg_parm_p)
9158 return gen_rtx_SET (VOIDmode, mem, reg);
9159 return mips_frame_set (mem, reg);
9160 }
9161
9162 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
9163 The instruction must:
9164
9165 - Allocate or deallocate SIZE bytes in total; SIZE is known
9166 to be nonzero.
9167
9168 - Save or restore as many registers in *MASK_PTR as possible.
9169 The instruction saves the first registers at the top of the
9170 allocated area, with the other registers below it.
9171
9172 - Save NARGS argument registers above the allocated area.
9173
9174 (NARGS is always zero if RESTORE_P.)
9175
9176 The SAVE and RESTORE instructions cannot save and restore all general
9177 registers, so there may be some registers left over for the caller to
9178 handle. Destructively modify *MASK_PTR so that it contains the registers
9179 that still need to be saved or restored. The caller can save these
9180 registers in the memory immediately below *OFFSET_PTR, which is a
9181 byte offset from the bottom of the allocated stack area. */
9182
9183 static rtx
9184 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
9185 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
9186 HOST_WIDE_INT size)
9187 {
9188 rtx pattern, set;
9189 HOST_WIDE_INT offset, top_offset;
9190 unsigned int i, regno;
9191 int n;
9192
9193 gcc_assert (cfun->machine->frame.num_fp == 0);
9194
9195 /* Calculate the number of elements in the PARALLEL. We need one element
9196 for the stack adjustment, one for each argument register save, and one
9197 for each additional register move. */
9198 n = 1 + nargs;
9199 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
9200 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
9201 n++;
9202
9203 /* Create the final PARALLEL. */
9204 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
9205 n = 0;
9206
9207 /* Add the stack pointer adjustment. */
9208 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
9209 plus_constant (Pmode, stack_pointer_rtx,
9210 restore_p ? size : -size));
9211 RTX_FRAME_RELATED_P (set) = 1;
9212 XVECEXP (pattern, 0, n++) = set;
9213
9214 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
9215 top_offset = restore_p ? size : 0;
9216
9217 /* Save the arguments. */
9218 for (i = 0; i < nargs; i++)
9219 {
9220 offset = top_offset + i * UNITS_PER_WORD;
9221 set = mips16e_save_restore_reg (restore_p, true, offset,
9222 GP_ARG_FIRST + i);
9223 XVECEXP (pattern, 0, n++) = set;
9224 }
9225
9226 /* Then fill in the other register moves. */
9227 offset = top_offset;
9228 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
9229 {
9230 regno = mips16e_save_restore_regs[i];
9231 if (BITSET_P (*mask_ptr, regno))
9232 {
9233 offset -= UNITS_PER_WORD;
9234 set = mips16e_save_restore_reg (restore_p, false, offset, regno);
9235 XVECEXP (pattern, 0, n++) = set;
9236 *mask_ptr &= ~(1 << regno);
9237 }
9238 }
9239
9240 /* Tell the caller what offset it should use for the remaining registers. */
9241 *offset_ptr = size + (offset - top_offset);
9242
9243 gcc_assert (n == XVECLEN (pattern, 0));
9244
9245 return pattern;
9246 }
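/* For illustration, a SAVE of $31 and $16 with SIZE == 16 and
   NARGS == 0 yields a PARALLEL along the lines of:

     (parallel [(set sp (plus sp (const_int -16)))
		(set (mem (plus sp (const_int -4))) (reg 31))
		(set (mem (plus sp (const_int -8))) (reg 16))])

   (offsets relative to the old stack pointer), and *OFFSET_PTR comes
   back as 16 - 8 == 8, the byte offset below which the caller may
   store any registers that SAVE could not handle.  */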
9247
9248 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
9249 pointer. Return true if PATTERN matches the kind of instruction
9250 generated by mips16e_build_save_restore. If INFO is nonnull,
9251 initialize it when returning true. */
9252
9253 bool
9254 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
9255 struct mips16e_save_restore_info *info)
9256 {
9257 unsigned int i, nargs, mask, extra;
9258 HOST_WIDE_INT top_offset, save_offset, offset;
9259 rtx set, reg, mem, base;
9260 int n;
9261
9262 if (!GENERATE_MIPS16E_SAVE_RESTORE)
9263 return false;
9264
9265 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
9266 top_offset = adjust > 0 ? adjust : 0;
9267
9268 /* Interpret all other members of the PARALLEL. */
9269 save_offset = top_offset - UNITS_PER_WORD;
9270 mask = 0;
9271 nargs = 0;
9272 i = 0;
9273 for (n = 1; n < XVECLEN (pattern, 0); n++)
9274 {
9275 /* Check that we have a SET. */
9276 set = XVECEXP (pattern, 0, n);
9277 if (GET_CODE (set) != SET)
9278 return false;
9279
9280 /* Check that the SET is a load (if restoring) or a store
9281 (if saving). */
9282 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
9283 if (!MEM_P (mem))
9284 return false;
9285
9286 /* Check that the address is the sum of the stack pointer and a
9287 possibly-zero constant offset. */
9288 mips_split_plus (XEXP (mem, 0), &base, &offset);
9289 if (base != stack_pointer_rtx)
9290 return false;
9291
9292 /* Check that SET's other operand is a register. */
9293 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
9294 if (!REG_P (reg))
9295 return false;
9296
9297 /* Check for argument saves. */
9298 if (offset == top_offset + nargs * UNITS_PER_WORD
9299 && REGNO (reg) == GP_ARG_FIRST + nargs)
9300 nargs++;
9301 else if (offset == save_offset)
9302 {
9303 while (mips16e_save_restore_regs[i++] != REGNO (reg))
9304 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
9305 return false;
9306
9307 mask |= 1 << REGNO (reg);
9308 save_offset -= UNITS_PER_WORD;
9309 }
9310 else
9311 return false;
9312 }
9313
9314 /* Check that the restrictions on register ranges are met. */
9315 extra = 0;
9316 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
9317 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
9318 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
9319 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
9320 if (extra != 0)
9321 return false;
9322
9323 /* Make sure that the topmost argument register is not saved twice.
9324 The checks above ensure that the same is then true for the other
9325 argument registers. */
9326 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
9327 return false;
9328
9329 /* Pass back information, if requested. */
9330 if (info)
9331 {
9332 info->nargs = nargs;
9333 info->mask = mask;
9334 info->size = (adjust > 0 ? adjust : -adjust);
9335 }
9336
9337 return true;
9338 }
9339
9340 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
9341 for the register range [MIN_REG, MAX_REG]. Return a pointer to
9342 the null terminator. */
9343
9344 static char *
9345 mips16e_add_register_range (char *s, unsigned int min_reg,
9346 unsigned int max_reg)
9347 {
9348 if (min_reg != max_reg)
9349 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
9350 else
9351 s += sprintf (s, ",%s", reg_names[min_reg]);
9352 return s;
9353 }
9354
9355 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
9356 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
9357
9358 const char *
9359 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
9360 {
9361 static char buffer[300];
9362
9363 struct mips16e_save_restore_info info;
9364 unsigned int i, end;
9365 char *s;
9366
9367 /* Parse the pattern. */
9368 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
9369 gcc_unreachable ();
9370
9371 /* Add the mnemonic. */
9372 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
9373 s += strlen (s);
9374
9375 /* Save the arguments. */
9376 if (info.nargs > 1)
9377 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
9378 reg_names[GP_ARG_FIRST + info.nargs - 1]);
9379 else if (info.nargs == 1)
9380 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
9381
9382 /* Emit the amount of stack space to allocate or deallocate. */
9383 s += sprintf (s, "%d", (int) info.size);
9384
9385 /* Save or restore $16. */
9386 if (BITSET_P (info.mask, 16))
9387 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
9388
9389 /* Save or restore $17. */
9390 if (BITSET_P (info.mask, 17))
9391 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
9392
9393 /* Save or restore registers in the range $s2...$s8, which
9394 mips16e_s2_s8_regs lists in decreasing order. Note that this
9395 is a software register range; the hardware registers are not
9396 numbered consecutively. */
9397 end = ARRAY_SIZE (mips16e_s2_s8_regs);
9398 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
9399 if (i < end)
9400 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
9401 mips16e_s2_s8_regs[i]);
9402
9403 /* Save or restore registers in the range $a0...$a3. */
9404 end = ARRAY_SIZE (mips16e_a0_a3_regs);
9405 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
9406 if (i < end)
9407 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
9408 mips16e_a0_a3_regs[end - 1]);
9409
9410 /* Save or restore $31. */
9411 if (BITSET_P (info.mask, RETURN_ADDR_REGNUM))
9412 s += sprintf (s, ",%s", reg_names[RETURN_ADDR_REGNUM]);
9413
9414 return buffer;
9415 }
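/* For example, a 64-byte frame that saves $16, $17 and $31 with no
   argument saves assembles to roughly:

     save	64,$16,$17,$31

   and the corresponding epilogue pattern prints the same operands
   with the "restore" mnemonic.  */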
9416 \f
9417 /* Return true if the current function returns its value in a floating-point
9418 register in MIPS16 mode. */
9419
9420 static bool
9421 mips16_cfun_returns_in_fpr_p (void)
9422 {
9423 tree return_type = DECL_RESULT (current_function_decl);
9424 return (TARGET_MIPS16
9425 && TARGET_HARD_FLOAT_ABI
9426 && !aggregate_value_p (return_type, current_function_decl)
9427 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
9428 }
9429
9430 /* Return true if predicate PRED is true for at least one instruction.
9431 Cache the result in *CACHE, and assume that the result is true
9432 if *CACHE is already true. */
9433
9434 static bool
9435 mips_find_gp_ref (bool *cache, bool (*pred) (rtx))
9436 {
9437 rtx insn;
9438
9439 if (!*cache)
9440 {
9441 push_topmost_sequence ();
9442 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9443 if (USEFUL_INSN_P (insn) && pred (insn))
9444 {
9445 *cache = true;
9446 break;
9447 }
9448 pop_topmost_sequence ();
9449 }
9450 return *cache;
9451 }
9452
9453 /* Return true if INSN refers to the global pointer in an "inflexible" way.
9454 See mips_cfun_has_inflexible_gp_ref_p for details. */
9455
9456 static bool
9457 mips_insn_has_inflexible_gp_ref_p (rtx insn)
9458 {
9459 /* Uses of pic_offset_table_rtx in CALL_INSN_FUNCTION_USAGE
9460 indicate that the target could be a traditional MIPS
9461 lazily-binding stub. */
9462 return find_reg_fusage (insn, USE, pic_offset_table_rtx);
9463 }
9464
9465 /* Return true if the current function refers to the global pointer
9466 in a way that forces $28 to be valid. This means that we can't
9467 change the choice of global pointer, even for NewABI code.
9468
9469 One example of this (and one which needs several checks) is that
9470 $28 must be valid when calling traditional MIPS lazy-binding stubs.
9471 (This restriction does not apply to PLTs.) */
9472
9473 static bool
9474 mips_cfun_has_inflexible_gp_ref_p (void)
9475 {
9476 /* If the function has a nonlocal goto, $28 must hold the correct
9477 global pointer for the target function. That is, the target
9478 of the goto implicitly uses $28. */
9479 if (crtl->has_nonlocal_goto)
9480 return true;
9481
9482 if (TARGET_ABICALLS_PIC2)
9483 {
9484 /* Symbolic accesses implicitly use the global pointer unless
9485 -mexplicit-relocs is in effect. JAL macros to symbolic addresses
9486 might go to traditional MIPS lazy-binding stubs. */
9487 if (!TARGET_EXPLICIT_RELOCS)
9488 return true;
9489
9490 /* FUNCTION_PROFILER includes a JAL to _mcount, which again
9491 can be lazily-bound. */
9492 if (crtl->profile)
9493 return true;
9494
9495 /* MIPS16 functions that return in FPRs need to call an
9496 external libgcc routine. This call is only made explicit
9497 during mips_expand_epilogue, and it too might be lazily bound. */
9498 if (mips16_cfun_returns_in_fpr_p ())
9499 return true;
9500 }
9501
9502 return mips_find_gp_ref (&cfun->machine->has_inflexible_gp_insn_p,
9503 mips_insn_has_inflexible_gp_ref_p);
9504 }
9505
9506 /* Return true if INSN refers to the global pointer in a "flexible" way.
9507 See mips_cfun_has_flexible_gp_ref_p for details. */
9508
9509 static bool
9510 mips_insn_has_flexible_gp_ref_p (rtx insn)
9511 {
9512 return (get_attr_got (insn) != GOT_UNSET
9513 || mips_small_data_pattern_p (PATTERN (insn))
9514 || reg_overlap_mentioned_p (pic_offset_table_rtx, PATTERN (insn)));
9515 }
9516
9517 /* Return true if the current function references the global pointer,
9518 but only in ways that do not inherently require the global pointer
9519 to be $28. Assume !mips_cfun_has_inflexible_gp_ref_p (). */
9520
9521 static bool
9522 mips_cfun_has_flexible_gp_ref_p (void)
9523 {
9524 /* Reload can sometimes introduce constant pool references
9525 into a function that otherwise didn't need them. For example,
9526 suppose we have an instruction like:
9527
9528 (set (reg:DF R1) (float:DF (reg:SI R2)))
9529
9530 If R2 turns out to be a constant such as 1, the instruction may
9531 have a REG_EQUAL note saying that R1 == 1.0. Reload then has
9532 the option of using this constant if R2 doesn't get allocated
9533 to a register.
9534
9535 In cases like these, reload will have added the constant to the
9536 pool but no instruction will yet refer to it. */
9537 if (TARGET_ABICALLS_PIC2 && !reload_completed && crtl->uses_const_pool)
9538 return true;
9539
9540 return mips_find_gp_ref (&cfun->machine->has_flexible_gp_insn_p,
9541 mips_insn_has_flexible_gp_ref_p);
9542 }
9543
9544 /* Return the register that should be used as the global pointer
9545 within this function. Return INVALID_REGNUM if the function
9546 doesn't need a global pointer. */
9547
9548 static unsigned int
9549 mips_global_pointer (void)
9550 {
9551 unsigned int regno;
9552
9553 /* $gp is always available unless we're using a GOT. */
9554 if (!TARGET_USE_GOT)
9555 return GLOBAL_POINTER_REGNUM;
9556
9557 /* If there are inflexible references to $gp, we must use the
9558 standard register. */
9559 if (mips_cfun_has_inflexible_gp_ref_p ())
9560 return GLOBAL_POINTER_REGNUM;
9561
9562 /* If there are no current references to $gp, then the only uses
9563 we can introduce later are those involved in long branches. */
9564 if (TARGET_ABSOLUTE_JUMPS && !mips_cfun_has_flexible_gp_ref_p ())
9565 return INVALID_REGNUM;
9566
9567 /* If the global pointer is call-saved, try to use a call-clobbered
9568 alternative. */
9569 if (TARGET_CALL_SAVED_GP && crtl->is_leaf)
9570 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
9571 if (!df_regs_ever_live_p (regno)
9572 && call_really_used_regs[regno]
9573 && !fixed_regs[regno]
9574 && regno != PIC_FUNCTION_ADDR_REGNUM)
9575 return regno;
9576
9577 return GLOBAL_POINTER_REGNUM;
9578 }
9579
9580 /* Return true if the current function's prologue must load the global
9581 pointer value into pic_offset_table_rtx and store the same value in
9582 the function's cprestore slot (if any).
9583
9584 One problem we have to deal with is that, when emitting GOT-based
9585 position independent code, long-branch sequences will need to load
9586 the address of the branch target from the GOT. We don't know until
9587 the very end of compilation whether (and where) the function needs
9588 long branches, so we must ensure that _any_ branch can access the
9589 global pointer in some form. However, we do not want to pessimize
9590 the usual case in which all branches are short.
9591
9592 We handle this as follows:
9593
9594 (1) During reload, we set cfun->machine->global_pointer to
9595 INVALID_REGNUM if we _know_ that the current function
9596 doesn't need a global pointer. This is only valid if
9597 long branches don't need the GOT.
9598
9599 Otherwise, we assume that we might need a global pointer
9600 and pick an appropriate register.
9601
9602 (2) If cfun->machine->global_pointer != INVALID_REGNUM,
9603 we ensure that the global pointer is available at every
9604 block boundary bar entry and exit. We do this in one of two ways:
9605
9606 - If the function has a cprestore slot, we ensure that this
9607 slot is valid at every branch. However, as explained in
9608 point (6) below, there is no guarantee that pic_offset_table_rtx
9609 itself is valid if new uses of the global pointer are introduced
9610 after the first post-epilogue split.
9611
9612 We guarantee that the cprestore slot is valid by loading it
9613 into a fake register, CPRESTORE_SLOT_REGNUM. We then make
9614 this register live at every block boundary bar function entry
9615 and exit. It is then invalid to move the load (and thus the
9616 preceding store) across a block boundary.
9617
9618 - If the function has no cprestore slot, we guarantee that
9619 pic_offset_table_rtx itself is valid at every branch.
9620
9621 See mips_eh_uses for the handling of the register liveness.
9622
9623 (3) During prologue and epilogue generation, we emit "ghost"
9624 placeholder instructions to manipulate the global pointer.
9625
9626 (4) During prologue generation, we set cfun->machine->must_initialize_gp_p
9627 and cfun->machine->must_restore_gp_when_clobbered_p if we already know
9628 that the function needs a global pointer. (There is no need to set
9629 them earlier than this, and doing it as late as possible leads to
9630 fewer false positives.)
9631
9632 (5) If cfun->machine->must_initialize_gp_p is true during a
9633 split_insns pass, we split the ghost instructions into real
9634 instructions. These split instructions can then be optimized in
9635 the usual way. Otherwise, we keep the ghost instructions intact,
9636 and optimize for the case where they aren't needed. We still
9637 have the option of splitting them later, if we need to introduce
9638 new uses of the global pointer.
9639
9640 For example, the scheduler ignores a ghost instruction that
9641 stores $28 to the stack, but it handles the split form of
9642 the ghost instruction as an ordinary store.
9643
9644 (6) [OldABI only.] If cfun->machine->must_restore_gp_when_clobbered_p
9645 is true during the first post-epilogue split_insns pass, we split
9646 calls and restore_gp patterns into instructions that explicitly
9647 load pic_offset_table_rtx from the cprestore slot. Otherwise,
9648 we split these patterns into instructions that _don't_ load from
9649 the cprestore slot.
9650
9651 If cfun->machine->must_restore_gp_when_clobbered_p is true at the
9652 time of the split, then any instructions that exist at that time
9653 can make free use of pic_offset_table_rtx. However, if we want
9654 to introduce new uses of the global pointer after the split,
9655 we must explicitly load the value from the cprestore slot, since
9656 pic_offset_table_rtx itself might not be valid at a given point
9657 in the function.
9658
9659 The idea is that we want to be able to delete redundant
9660 loads from the cprestore slot in the usual case where no
9661 long branches are needed.
9662
9663 (7) If cfun->machine->must_initialize_gp_p is still false at the end
9664 of md_reorg, we decide whether the global pointer is needed for
9665 long branches. If so, we set cfun->machine->must_initialize_gp_p
9666 to true and split the ghost instructions into real instructions
9667 at that stage.
9668
9669 Note that the ghost instructions must have a zero length for three reasons:
9670
9671 - Giving the length of the underlying $gp sequence might cause
9672 us to use long branches in cases where they aren't really needed.
9673
9674 - They would perturb things like alignment calculations.
9675
9676 - More importantly, the hazard detection in md_reorg relies on
9677 empty instructions having a zero length.
9678
9679 If we find a long branch and split the ghost instructions at the
9680 end of md_reorg, the split could introduce more long branches.
9681 That isn't a problem though, because we still do the split before
9682 the final shorten_branches pass.
9683
9684 This is extremely ugly, but it seems like the best compromise between
9685 correctness and efficiency. */
9686
9687 bool
9688 mips_must_initialize_gp_p (void)
9689 {
9690 return cfun->machine->must_initialize_gp_p;
9691 }
9692
9693 /* Return true if REGNO is a register that is ordinarily call-clobbered
9694 but must nevertheless be preserved by an interrupt handler. */
9695
9696 static bool
9697 mips_interrupt_extra_call_saved_reg_p (unsigned int regno)
9698 {
9699 if (MD_REG_P (regno))
9700 return true;
9701
9702 if (TARGET_DSP && DSP_ACC_REG_P (regno))
9703 return true;
9704
9705 if (GP_REG_P (regno) && !cfun->machine->use_shadow_register_set_p)
9706 {
9707 /* $0 is hard-wired. */
9708 if (regno == GP_REG_FIRST)
9709 return false;
9710
9711 /* The interrupt handler can treat kernel registers as
9712 scratch registers. */
9713 if (KERNEL_REG_P (regno))
9714 return false;
9715
9716 /* The function will return the stack pointer to its original value
9717 anyway. */
9718 if (regno == STACK_POINTER_REGNUM)
9719 return false;
9720
9721 /* Otherwise, return true for registers that aren't ordinarily
9722 call-clobbered. */
9723 return call_really_used_regs[regno];
9724 }
9725
9726 return false;
9727 }
9728
9729 /* Return true if the current function should treat register REGNO
9730 as call-saved. */
9731
9732 static bool
9733 mips_cfun_call_saved_reg_p (unsigned int regno)
9734 {
9735 /* If the user makes an ordinarily-call-saved register global,
9736 that register is no longer call-saved. */
9737 if (global_regs[regno])
9738 return false;
9739
9740 /* Interrupt handlers need to save extra registers. */
9741 if (cfun->machine->interrupt_handler_p
9742 && mips_interrupt_extra_call_saved_reg_p (regno))
9743 return true;
9744
9745 /* call_insns preserve $28 unless they explicitly say otherwise,
9746 so call_really_used_regs[] treats $28 as call-saved. However,
9747 we want the ABI property rather than the default call_insn
9748 property here. */
9749 return (regno == GLOBAL_POINTER_REGNUM
9750 ? TARGET_CALL_SAVED_GP
9751 : !call_really_used_regs[regno]);
9752 }
9753
9754 /* Return true if the function body might clobber register REGNO.
9755 We know that REGNO is call-saved. */
9756
9757 static bool
9758 mips_cfun_might_clobber_call_saved_reg_p (unsigned int regno)
9759 {
9760 /* Some functions should be treated as clobbering all call-saved
9761 registers. */
9762 if (crtl->saves_all_registers)
9763 return true;
9764
9765 /* DF handles cases where a register is explicitly referenced in
9766 the rtl. Incoming values are passed in call-clobbered registers,
9767 so we can assume that any live call-saved register is set within
9768 the function. */
9769 if (df_regs_ever_live_p (regno))
9770 return true;
9771
9772 /* Check for registers that are clobbered by FUNCTION_PROFILER.
9773 These clobbers are not explicit in the rtl. */
9774 if (crtl->profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
9775 return true;
9776
9777 /* If we're using a call-saved global pointer, the function's
9778 prologue will need to set it up. */
9779 if (cfun->machine->global_pointer == regno)
9780 return true;
9781
9782 /* The function's prologue will need to set the frame pointer if
9783 frame_pointer_needed. */
9784 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
9785 return true;
9786
9787 /* If a MIPS16 function returns a value in FPRs, its epilogue
9788 will need to call an external libgcc routine. This yet-to-be
9789 generated call_insn will clobber $31. */
9790 if (regno == RETURN_ADDR_REGNUM && mips16_cfun_returns_in_fpr_p ())
9791 return true;
9792
9793 /* If REGNO is ordinarily call-clobbered, we must assume that any
9794 called function could modify it. */
9795 if (cfun->machine->interrupt_handler_p
9796 && !crtl->is_leaf
9797 && mips_interrupt_extra_call_saved_reg_p (regno))
9798 return true;
9799
9800 return false;
9801 }
9802
9803 /* Return true if the current function must save register REGNO. */
9804
9805 static bool
9806 mips_save_reg_p (unsigned int regno)
9807 {
9808 if (mips_cfun_call_saved_reg_p (regno))
9809 {
9810 if (mips_cfun_might_clobber_call_saved_reg_p (regno))
9811 return true;
9812
9813 /* Save both registers in an FPR pair if either one is used. This is
9814 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
9815 register to be used without the even register. */
9816 if (FP_REG_P (regno)
9817 && MAX_FPRS_PER_FMT == 2
9818 && mips_cfun_might_clobber_call_saved_reg_p (regno + 1))
9819 return true;
9820 }
9821
9822 /* We need to save the incoming return address if __builtin_eh_return
9823 is being used to set a different return address. */
9824 if (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return)
9825 return true;
9826
9827 return false;
9828 }
9829
9830 /* Populate the current function's mips_frame_info structure.
9831
9832 MIPS stack frames look like:
9833
9834 +-------------------------------+
9835 | |
9836 | incoming stack arguments |
9837 | |
9838 +-------------------------------+
9839 | |
9840 | caller-allocated save area |
9841 A | for register arguments |
9842 | |
9843 +-------------------------------+ <-- incoming stack pointer
9844 | |
9845 | callee-allocated save area |
9846 B | for arguments that are |
9847 | split between registers and |
9848 | the stack |
9849 | |
9850 +-------------------------------+ <-- arg_pointer_rtx
9851 | |
9852 C | callee-allocated save area |
9853 | for register varargs |
9854 | |
9855 +-------------------------------+ <-- frame_pointer_rtx
9856 | | + cop0_sp_offset
9857 | COP0 reg save area | + UNITS_PER_WORD
9858 | |
9859 +-------------------------------+ <-- frame_pointer_rtx + acc_sp_offset
9860 | | + UNITS_PER_WORD
9861 | accumulator save area |
9862 | |
9863 +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
9864 | | + UNITS_PER_HWFPVALUE
9865 | FPR save area |
9866 | |
9867 +-------------------------------+ <-- stack_pointer_rtx + gp_sp_offset
9868 | | + UNITS_PER_WORD
9869 | GPR save area |
9870 | |
9871 +-------------------------------+ <-- frame_pointer_rtx with
9872 | | \ -fstack-protector
9873 | local variables | | var_size
9874 | | /
9875 +-------------------------------+
9876 | | \
9877 | $gp save area | | cprestore_size
9878 | | /
9879 P +-------------------------------+ <-- hard_frame_pointer_rtx for
9880 | | \ MIPS16 code
9881 | outgoing stack arguments | |
9882 | | |
9883 +-------------------------------+ | args_size
9884 | | |
9885 | caller-allocated save area | |
9886 | for register arguments | |
9887 | | /
9888 +-------------------------------+ <-- stack_pointer_rtx
9889 frame_pointer_rtx without
9890 -fstack-protector
9891 hard_frame_pointer_rtx for
9892 non-MIPS16 code.
9893
9894 At least two of A, B and C will be empty.
9895
9896 Dynamic stack allocations such as alloca insert data at point P.
9897 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
9898 hard_frame_pointer_rtx unchanged. */
9899
9900 static void
9901 mips_compute_frame_info (void)
9902 {
9903 struct mips_frame_info *frame;
9904 HOST_WIDE_INT offset, size;
9905 unsigned int regno, i;
9906
9907 /* Set this function's interrupt properties. */
9908 if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
9909 {
9910 if (!ISA_MIPS32R2)
9911 error ("the %<interrupt%> attribute requires a MIPS32r2 processor");
9912 else if (TARGET_HARD_FLOAT)
9913 error ("the %<interrupt%> attribute requires %<-msoft-float%>");
9914 else if (TARGET_MIPS16)
9915 error ("interrupt handlers cannot be MIPS16 functions");
9916 else
9917 {
9918 cfun->machine->interrupt_handler_p = true;
9919 cfun->machine->use_shadow_register_set_p =
9920 mips_use_shadow_register_set_p (TREE_TYPE (current_function_decl));
9921 cfun->machine->keep_interrupts_masked_p =
9922 mips_keep_interrupts_masked_p (TREE_TYPE (current_function_decl));
9923 cfun->machine->use_debug_exception_return_p =
9924 mips_use_debug_exception_return_p (TREE_TYPE
9925 (current_function_decl));
9926 }
9927 }
9928
9929 frame = &cfun->machine->frame;
9930 memset (frame, 0, sizeof (*frame));
9931 size = get_frame_size ();
9932
9933 cfun->machine->global_pointer = mips_global_pointer ();
9934
9935 /* The first two blocks contain the outgoing argument area and the $gp save
9936 slot. This area isn't needed in leaf functions, but if the
9937 target-independent frame size is nonzero, we have already committed to
9938 allocating these in STARTING_FRAME_OFFSET for !FRAME_GROWS_DOWNWARD. */
9939 if ((size == 0 || FRAME_GROWS_DOWNWARD) && crtl->is_leaf)
9940 {
9941 /* The MIPS 3.0 linker does not like functions that dynamically
9942 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
9943 looks like we are trying to create a second frame pointer to the
9944 function, so allocate some stack space to make it happy. */
9945 if (cfun->calls_alloca)
9946 frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
9947 else
9948 frame->args_size = 0;
9949 frame->cprestore_size = 0;
9950 }
9951 else
9952 {
9953 frame->args_size = crtl->outgoing_args_size;
9954 frame->cprestore_size = MIPS_GP_SAVE_AREA_SIZE;
9955 }
9956 offset = frame->args_size + frame->cprestore_size;
9957
9958 /* Move above the local variables. */
9959 frame->var_size = MIPS_STACK_ALIGN (size);
9960 offset += frame->var_size;
9961
9962 /* Find out which GPRs we need to save. */
9963 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
9964 if (mips_save_reg_p (regno))
9965 {
9966 frame->num_gp++;
9967 frame->mask |= 1 << (regno - GP_REG_FIRST);
9968 }
9969
9970 /* If this function calls eh_return, we must also save and restore the
9971 EH data registers. */
9972 if (crtl->calls_eh_return)
9973 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
9974 {
9975 frame->num_gp++;
9976 frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
9977 }
9978
9979 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
9980 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
9981 save all later registers too. */
9982 if (GENERATE_MIPS16E_SAVE_RESTORE)
9983 {
9984 mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
9985 ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
9986 mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
9987 ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
9988 }
9989
9990 /* Move above the GPR save area. */
9991 if (frame->num_gp > 0)
9992 {
9993 offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
9994 frame->gp_sp_offset = offset - UNITS_PER_WORD;
9995 }
9996
9997 /* Find out which FPRs we need to save. This loop must iterate over
9998 the same space as its companion in mips_for_each_saved_gpr_and_fpr. */
9999 if (TARGET_HARD_FLOAT)
10000 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
10001 if (mips_save_reg_p (regno))
10002 {
10003 frame->num_fp += MAX_FPRS_PER_FMT;
10004 frame->fmask |= ~(~0 << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
10005 }
10006
10007 /* Move above the FPR save area. */
10008 if (frame->num_fp > 0)
10009 {
10010 offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
10011 frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
10012 }
10013
10014 /* Add in space for the interrupt context information. */
10015 if (cfun->machine->interrupt_handler_p)
10016 {
10017 /* Check HI/LO. */
10018 if (mips_save_reg_p (LO_REGNUM) || mips_save_reg_p (HI_REGNUM))
10019 {
10020 frame->num_acc++;
10021 frame->acc_mask |= (1 << 0);
10022 }
10023
10024 /* Check accumulators 1, 2, 3. */
10025 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
10026 if (mips_save_reg_p (i) || mips_save_reg_p (i + 1))
10027 {
10028 frame->num_acc++;
10029 frame->acc_mask |= 1 << (((i - DSP_ACC_REG_FIRST) / 2) + 1);
10030 }
10031
10032 /* All interrupt context functions need space to preserve STATUS. */
10033 frame->num_cop0_regs++;
10034
10035 /* If we don't keep interrupts masked, we need to save EPC. */
10036 if (!cfun->machine->keep_interrupts_masked_p)
10037 frame->num_cop0_regs++;
10038 }
10039
10040 /* Move above the accumulator save area. */
10041 if (frame->num_acc > 0)
10042 {
10043 /* Each accumulator needs 2 words. */
10044 offset += frame->num_acc * 2 * UNITS_PER_WORD;
10045 frame->acc_sp_offset = offset - UNITS_PER_WORD;
10046 }
10047
10048 /* Move above the COP0 register save area. */
10049 if (frame->num_cop0_regs > 0)
10050 {
10051 offset += frame->num_cop0_regs * UNITS_PER_WORD;
10052 frame->cop0_sp_offset = offset - UNITS_PER_WORD;
10053 }
10054
10055 /* Move above the callee-allocated varargs save area. */
10056 offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
10057 frame->arg_pointer_offset = offset;
10058
10059 /* Move above the callee-allocated area for pretend stack arguments. */
10060 offset += crtl->args.pretend_args_size;
10061 frame->total_size = offset;
10062
10063 /* Work out the offsets of the save areas from the top of the frame. */
10064 if (frame->gp_sp_offset > 0)
10065 frame->gp_save_offset = frame->gp_sp_offset - offset;
10066 if (frame->fp_sp_offset > 0)
10067 frame->fp_save_offset = frame->fp_sp_offset - offset;
10068 if (frame->acc_sp_offset > 0)
10069 frame->acc_save_offset = frame->acc_sp_offset - offset;
10070 if (frame->num_cop0_regs > 0)
10071 frame->cop0_save_offset = frame->cop0_sp_offset - offset;
10072
10073 /* MIPS16 code offsets the frame pointer by the size of the outgoing
10074 arguments. This tends to increase the chances of using unextended
10075 instructions for local variables and incoming arguments. */
10076 if (TARGET_MIPS16)
10077 frame->hard_frame_pointer_offset = frame->args_size;
10078 }
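/* A worked sketch with hypothetical o32 figures (UNITS_PER_WORD == 4,
   8-byte stack alignment): args_size == 16, cprestore_size == 8,
   32 bytes of locals and two saved GPRs ($16 and $31).  The GPR save
   area occupies MIPS_STACK_ALIGN (2 * 4) == 8 bytes, so

     total_size     = 16 + 8 + 32 + 8 = 64
     gp_sp_offset   = 64 - UNITS_PER_WORD = 60
     gp_save_offset = 60 - 64 = -4

   i.e. the topmost GPR slot lies 60 bytes above the outgoing stack
   pointer and 4 bytes below the incoming one.  */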
10079
10080 /* Return the style of GP load sequence that is being used for the
10081 current function. */
10082
10083 enum mips_loadgp_style
10084 mips_current_loadgp_style (void)
10085 {
10086 if (!TARGET_USE_GOT || cfun->machine->global_pointer == INVALID_REGNUM)
10087 return LOADGP_NONE;
10088
10089 if (TARGET_RTP_PIC)
10090 return LOADGP_RTP;
10091
10092 if (TARGET_ABSOLUTE_ABICALLS)
10093 return LOADGP_ABSOLUTE;
10094
10095 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
10096 }
10097
10098 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
10099
10100 static bool
10101 mips_frame_pointer_required (void)
10102 {
10103 /* If the function contains dynamic stack allocations, we need to
10104 use the frame pointer to access the static parts of the frame. */
10105 if (cfun->calls_alloca)
10106 return true;
10107
10108 /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
10109 reload may be unable to compute the address of a local variable,
10110 since there is no way to add a large constant to the stack pointer
10111 without using a second temporary register. */
10112 if (TARGET_MIPS16)
10113 {
10114 mips_compute_frame_info ();
10115 if (!SMALL_OPERAND (cfun->machine->frame.total_size))
10116 return true;
10117 }
10118
10119 return false;
10120 }
10121
10122 /* Make sure that we're not trying to eliminate to the wrong hard frame
10123 pointer. */
10124
10125 static bool
10126 mips_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
10127 {
10128 return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
10129 }
10130
10131 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
10132 or argument pointer. TO is either the stack pointer or hard frame
10133 pointer. */
10134
10135 HOST_WIDE_INT
10136 mips_initial_elimination_offset (int from, int to)
10137 {
10138 HOST_WIDE_INT offset;
10139
10140 mips_compute_frame_info ();
10141
10142 /* Set OFFSET to the offset from the end-of-prologue stack pointer. */
10143 switch (from)
10144 {
10145 case FRAME_POINTER_REGNUM:
10146 if (FRAME_GROWS_DOWNWARD)
10147 offset = (cfun->machine->frame.args_size
10148 + cfun->machine->frame.cprestore_size
10149 + cfun->machine->frame.var_size);
10150 else
10151 offset = 0;
10152 break;
10153
10154 case ARG_POINTER_REGNUM:
10155 offset = cfun->machine->frame.arg_pointer_offset;
10156 break;
10157
10158 default:
10159 gcc_unreachable ();
10160 }
10161
10162 if (to == HARD_FRAME_POINTER_REGNUM)
10163 offset -= cfun->machine->frame.hard_frame_pointer_offset;
10164
10165 return offset;
10166 }
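/* Continuing the sketch above: eliminating from ARG_POINTER_REGNUM to
   STACK_POINTER_REGNUM returns frame.arg_pointer_offset (64 there),
   while eliminating to HARD_FRAME_POINTER_REGNUM additionally
   subtracts hard_frame_pointer_offset, which is nonzero only for
   MIPS16 code, where it equals args_size.  */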
10167 \f
10168 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. */
10169
10170 static void
10171 mips_extra_live_on_entry (bitmap regs)
10172 {
10173 if (TARGET_USE_GOT)
10174 {
10175 /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
10176 the global pointer. */
10177 if (!TARGET_ABSOLUTE_ABICALLS)
10178 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
10179
10180 /* The prologue may set MIPS16_PIC_TEMP_REGNUM to the value of
10181 the global pointer. */
10182 if (TARGET_MIPS16)
10183 bitmap_set_bit (regs, MIPS16_PIC_TEMP_REGNUM);
10184
10185 /* See the comment above load_call<mode> for details. */
10186 bitmap_set_bit (regs, GOT_VERSION_REGNUM);
10187 }
10188 }
10189
10190 /* Implement RETURN_ADDR_RTX. We do not support moving back to a
10191 previous frame. */
10192
10193 rtx
10194 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
10195 {
10196 if (count != 0)
10197 return const0_rtx;
10198
10199 return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
10200 }
10201
10202 /* Emit code to change the current function's return address to
10203 ADDRESS. SCRATCH is available as a scratch register, if needed.
10204 ADDRESS and SCRATCH are both word-mode GPRs. */
10205
10206 void
10207 mips_set_return_address (rtx address, rtx scratch)
10208 {
10209 rtx slot_address;
10210
10211 gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
10212 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
10213 cfun->machine->frame.gp_sp_offset);
10214 mips_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
10215 }
10216
10217 /* Return true if the current function has a cprestore slot. */
10218
10219 bool
10220 mips_cfun_has_cprestore_slot_p (void)
10221 {
10222 return (cfun->machine->global_pointer != INVALID_REGNUM
10223 && cfun->machine->frame.cprestore_size > 0);
10224 }
10225
10226 /* Fill *BASE and *OFFSET such that *BASE + *OFFSET refers to the
10227 cprestore slot. LOAD_P is true if the caller wants to load from
10228 the cprestore slot; it is false if the caller wants to store to
10229 the slot. */
10230
10231 static void
10232 mips_get_cprestore_base_and_offset (rtx *base, HOST_WIDE_INT *offset,
10233 bool load_p)
10234 {
10235 const struct mips_frame_info *frame;
10236
10237 frame = &cfun->machine->frame;
10238 /* .cprestore always uses the stack pointer instead of the frame pointer.
10239 We have a free choice for direct stores for non-MIPS16 functions,
10240 and for MIPS16 functions whose cprestore slot is in range of the
10241 stack pointer. Using the stack pointer would sometimes give more
10242 (early) scheduling freedom, but using the frame pointer would
10243 sometimes give more (late) scheduling freedom. It's hard to
10244 predict which applies to a given function, so let's keep things
10245 simple.
10246
10247 Loads must always use the frame pointer in functions that call
10248 alloca, and there's little benefit to using the stack pointer
10249 otherwise. */
10250 if (frame_pointer_needed && !(TARGET_CPRESTORE_DIRECTIVE && !load_p))
10251 {
10252 *base = hard_frame_pointer_rtx;
10253 *offset = frame->args_size - frame->hard_frame_pointer_offset;
10254 }
10255 else
10256 {
10257 *base = stack_pointer_rtx;
10258 *offset = frame->args_size;
10259 }
10260 }
10261
10262 /* Return true if X is the load or store address of the cprestore slot;
10263 LOAD_P says which. */
10264
10265 bool
10266 mips_cprestore_address_p (rtx x, bool load_p)
10267 {
10268 rtx given_base, required_base;
10269 HOST_WIDE_INT given_offset, required_offset;
10270
10271 mips_split_plus (x, &given_base, &given_offset);
10272 mips_get_cprestore_base_and_offset (&required_base, &required_offset, load_p);
10273 return given_base == required_base && given_offset == required_offset;
10274 }
10275
10276 /* Return a MEM rtx for the cprestore slot. LOAD_P is true if we are
10277 going to load from it, false if we are going to store to it.
10278 Use TEMP as a temporary register if need be. */
10279
10280 static rtx
10281 mips_cprestore_slot (rtx temp, bool load_p)
10282 {
10283 rtx base;
10284 HOST_WIDE_INT offset;
10285
10286 mips_get_cprestore_base_and_offset (&base, &offset, load_p);
10287 return gen_frame_mem (Pmode, mips_add_offset (temp, base, offset));
10288 }
10289
10290 /* Emit instructions to save global pointer value GP into cprestore
10291 slot MEM. OFFSET is the offset that MEM applies to the base register.
10292
10293 MEM may not be a legitimate address. If it isn't, TEMP is a
10294 temporary register that can be used, otherwise it is a SCRATCH. */
10295
10296 void
10297 mips_save_gp_to_cprestore_slot (rtx mem, rtx offset, rtx gp, rtx temp)
10298 {
10299 if (TARGET_CPRESTORE_DIRECTIVE)
10300 {
10301 gcc_assert (gp == pic_offset_table_rtx);
10302 emit_insn (PMODE_INSN (gen_cprestore, (mem, offset)));
10303 }
10304 else
10305 mips_emit_move (mips_cprestore_slot (temp, false), gp);
10306 }
10307
10308 /* Restore $gp from its save slot, using TEMP as a temporary base register
10309 if need be. This function is for o32 and o64 abicalls only.
10310
10311 See mips_must_initialize_gp_p for details about how we manage the
10312 global pointer. */
10313
10314 void
10315 mips_restore_gp_from_cprestore_slot (rtx temp)
10316 {
10317 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI && epilogue_completed);
10318
10319 if (!cfun->machine->must_restore_gp_when_clobbered_p)
10320 {
10321 emit_note (NOTE_INSN_DELETED);
10322 return;
10323 }
10324
10325 if (TARGET_MIPS16)
10326 {
10327 mips_emit_move (temp, mips_cprestore_slot (temp, true));
10328 mips_emit_move (pic_offset_table_rtx, temp);
10329 }
10330 else
10331 mips_emit_move (pic_offset_table_rtx, mips_cprestore_slot (temp, true));
10332 if (!TARGET_EXPLICIT_RELOCS)
10333 emit_insn (gen_blockage ());
10334 }
10335 \f
10336 /* A function to save or store a register. The first argument is the
10337 register and the second is the stack slot. */
10338 typedef void (*mips_save_restore_fn) (rtx, rtx);
10339
10340 /* Use FN to save or restore register REGNO. MODE is the register's
10341 mode and OFFSET is the offset of its save slot from the current
10342 stack pointer. */
10343
10344 static void
10345 mips_save_restore_reg (enum machine_mode mode, int regno,
10346 HOST_WIDE_INT offset, mips_save_restore_fn fn)
10347 {
10348 rtx mem;
10349
10350 mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx,
10351 offset));
10352 fn (gen_rtx_REG (mode, regno), mem);
10353 }
10354
10355 /* Call FN for each accumulator that is saved by the current function.
10356 SP_OFFSET is the offset of the current stack pointer from the start
10357 of the frame. */
10358
10359 static void
10360 mips_for_each_saved_acc (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
10361 {
10362 HOST_WIDE_INT offset;
10363 int regno;
10364
10365 offset = cfun->machine->frame.acc_sp_offset - sp_offset;
10366 if (BITSET_P (cfun->machine->frame.acc_mask, 0))
10367 {
10368 mips_save_restore_reg (word_mode, LO_REGNUM, offset, fn);
10369 offset -= UNITS_PER_WORD;
10370 mips_save_restore_reg (word_mode, HI_REGNUM, offset, fn);
10371 offset -= UNITS_PER_WORD;
10372 }
10373
10374 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
10375 if (BITSET_P (cfun->machine->frame.acc_mask,
10376 ((regno - DSP_ACC_REG_FIRST) / 2) + 1))
10377 {
10378 mips_save_restore_reg (word_mode, regno, offset, fn);
10379 offset -= UNITS_PER_WORD;
10380 }
10381 }
10382
10383 /* Save register REG to MEM. Make the instruction frame-related. */
10384
10385 static void
10386 mips_save_reg (rtx reg, rtx mem)
10387 {
10388 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
10389 {
10390 rtx x1, x2;
10391
10392 mips_emit_move_or_split (mem, reg, SPLIT_IF_NECESSARY);
10393
10394 x1 = mips_frame_set (mips_subword (mem, false),
10395 mips_subword (reg, false));
10396 x2 = mips_frame_set (mips_subword (mem, true),
10397 mips_subword (reg, true));
10398 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
10399 }
10400 else
10401 mips_emit_save_slot_move (mem, reg, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
10402 }
10403
10404 /* Capture the register combinations that are allowed in a SWM or LWM
10405 instruction. The entries are ordered by number of registers set in
10406 the mask. We also ignore the single register encodings because a
10407 normal SW/LW is preferred. */
10408
10409 static const unsigned int umips_swm_mask[17] = {
10410 0xc0ff0000, 0x80ff0000, 0x40ff0000, 0x807f0000,
10411 0x00ff0000, 0x803f0000, 0x007f0000, 0x801f0000,
10412 0x003f0000, 0x800f0000, 0x001f0000, 0x80070000,
10413 0x000f0000, 0x80030000, 0x00070000, 0x80010000,
10414 0x00030000
10415 };
10416
10417 static const unsigned int umips_swm_encoding[17] = {
10418 25, 24, 9, 23, 8, 22, 7, 21, 6, 20, 5, 19, 4, 18, 3, 17, 2
10419 };
10420
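/* An illustrative reading of the tables above: the mask 0xc0ff0000
   covers $16-$23 plus $30 and $31 and has the encoding 25 (0x19).
   The low nibble (9) counts the registers handled by the $16-$23/$30
   loop in umips_build_save_restore below, the high nibble (1) says
   that $31 is included as well, and the register count is recovered
   as (0x19 & 0xf) + (0x19 >> 4) == 10.  */
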
10421 /* Try to use a microMIPS LWM or SWM instruction to save or restore
10422 as many GPRs in *MASK as possible. *OFFSET is the offset from the
10423 stack pointer of the topmost save slot.
10424
10425 Remove from *MASK all registers that were handled using LWM and SWM.
10426 Update *OFFSET so that it points to the first unused save slot. */
10427
10428 static bool
10429 umips_build_save_restore (mips_save_restore_fn fn,
10430 unsigned *mask, HOST_WIDE_INT *offset)
10431 {
10432 int nregs;
10433 unsigned int i, j;
10434 rtx pattern, set, reg, mem;
10435 HOST_WIDE_INT this_offset;
10436 rtx this_base;
10437
10438 /* Try matching $16 to $31 (s0 to ra). */
10439 for (i = 0; i < ARRAY_SIZE (umips_swm_mask); i++)
10440 if ((*mask & 0xffff0000) == umips_swm_mask[i])
10441 break;
10442
10443 if (i == ARRAY_SIZE (umips_swm_mask))
10444 return false;
10445
10446 /* Get the offset of the lowest save slot. */
10447 nregs = (umips_swm_encoding[i] & 0xf) + (umips_swm_encoding[i] >> 4);
10448 this_offset = *offset - UNITS_PER_WORD * (nregs - 1);
10449
10450 /* LWM/SWM can only support offsets from -2048 to 2047. */
10451 if (!UMIPS_12BIT_OFFSET_P (this_offset))
10452 return false;
10453
10454 /* Create the final PARALLEL. */
10455 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nregs));
10456 this_base = stack_pointer_rtx;
10457
10458 /* For registers $16-$23 and $30. */
10459 for (j = 0; j < (umips_swm_encoding[i] & 0xf); j++)
10460 {
10461 HOST_WIDE_INT offset = this_offset + j * UNITS_PER_WORD;
10462 mem = gen_frame_mem (SImode, plus_constant (Pmode, this_base, offset));
10463 unsigned int regno = (j != 8) ? 16 + j : 30;
10464 *mask &= ~(1 << regno);
10465 reg = gen_rtx_REG (SImode, regno);
10466 if (fn == mips_save_reg)
10467 set = mips_frame_set (mem, reg);
10468 else
10469 {
10470 set = gen_rtx_SET (VOIDmode, reg, mem);
10471 mips_add_cfa_restore (reg);
10472 }
10473 XVECEXP (pattern, 0, j) = set;
10474 }
10475
10476 /* For register $31. */
10477 if (umips_swm_encoding[i] >> 4)
10478 {
10479 HOST_WIDE_INT offset = this_offset + j * UNITS_PER_WORD;
10480 *mask &= ~(1 << 31);
10481 mem = gen_frame_mem (SImode, plus_constant (Pmode, this_base, offset));
10482 reg = gen_rtx_REG (SImode, 31);
10483 if (fn == mips_save_reg)
10484 set = mips_frame_set (mem, reg);
10485 else
10486 {
10487 set = gen_rtx_SET (VOIDmode, reg, mem);
10488 mips_add_cfa_restore (reg);
10489 }
10490 XVECEXP (pattern, 0, j) = set;
10491 }
10492
10493 pattern = emit_insn (pattern);
10494 if (fn == mips_save_reg)
10495 RTX_FRAME_RELATED_P (pattern) = 1;
10496
10497 /* Adjust the last offset. */
10498 *offset -= UNITS_PER_WORD * nregs;
10499
10500 return true;
10501 }
10502
10503 /* Call FN for each register that is saved by the current function.
10504 SP_OFFSET is the offset of the current stack pointer from the start
10505 of the frame. */
10506
10507 static void
10508 mips_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset,
10509 mips_save_restore_fn fn)
10510 {
10511 enum machine_mode fpr_mode;
10512 int regno;
10513 const struct mips_frame_info *frame = &cfun->machine->frame;
10514 HOST_WIDE_INT offset;
10515 unsigned int mask;
10516
10517 /* Save registers starting from high to low. Debuggers prefer the
10518 return address register to be stored at func+4, and saving in this
10519 order also means that we do not need a nop in the epilogue if at
10520 least one register is reloaded in addition to the return address. */
10521 offset = frame->gp_sp_offset - sp_offset;
10522 mask = frame->mask;
10523
10524 if (TARGET_MICROMIPS)
10525 umips_build_save_restore (fn, &mask, &offset);
10526
10527 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
10528 if (BITSET_P (mask, regno - GP_REG_FIRST))
10529 {
10530 /* Record the ra offset for use by mips_function_profiler. */
10531 if (regno == RETURN_ADDR_REGNUM)
10532 cfun->machine->frame.ra_fp_offset = offset + sp_offset;
10533 mips_save_restore_reg (word_mode, regno, offset, fn);
10534 offset -= UNITS_PER_WORD;
10535 }
10536
10537 /* This loop must iterate over the same space as its companion in
10538 mips_compute_frame_info. */
10539 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
10540 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
10541 for (regno = FP_REG_LAST - MAX_FPRS_PER_FMT + 1;
10542 regno >= FP_REG_FIRST;
10543 regno -= MAX_FPRS_PER_FMT)
10544 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
10545 {
10546 mips_save_restore_reg (fpr_mode, regno, offset, fn);
10547 offset -= GET_MODE_SIZE (fpr_mode);
10548 }
10549 }
10550
10551 /* Return true if a move between register REGNO and its save slot (MEM)
10552 can be done in a single move. LOAD_P is true if we are loading
10553 from the slot, false if we are storing to it. */
10554
10555 static bool
10556 mips_direct_save_slot_move_p (unsigned int regno, rtx mem, bool load_p)
10557 {
10558 /* There is a specific MIPS16 instruction for saving $31 to the stack. */
10559 if (TARGET_MIPS16 && !load_p && regno == RETURN_ADDR_REGNUM)
10560 return false;
10561
10562 return mips_secondary_reload_class (REGNO_REG_CLASS (regno),
10563 GET_MODE (mem), mem, load_p) == NO_REGS;
10564 }
10565
10566 /* Emit a move from SRC to DEST, given that one of them is a register
10567 save slot and that the other is a register. TEMP is a temporary
10568 GPR of the same mode that is available if need be. */
10569
10570 void
10571 mips_emit_save_slot_move (rtx dest, rtx src, rtx temp)
10572 {
10573 unsigned int regno;
10574 rtx mem;
10575
10576 if (REG_P (src))
10577 {
10578 regno = REGNO (src);
10579 mem = dest;
10580 }
10581 else
10582 {
10583 regno = REGNO (dest);
10584 mem = src;
10585 }
10586
10587 if (regno == cfun->machine->global_pointer && !mips_must_initialize_gp_p ())
10588 {
10589 /* We don't yet know whether we'll need this instruction or not.
10590 Postpone the decision by emitting a ghost move. This move
10591 is specifically not frame-related; only the split version is. */
10592 if (TARGET_64BIT)
10593 emit_insn (gen_move_gpdi (dest, src));
10594 else
10595 emit_insn (gen_move_gpsi (dest, src));
10596 return;
10597 }
10598
10599 if (regno == HI_REGNUM)
10600 {
10601 if (REG_P (dest))
10602 {
10603 mips_emit_move (temp, src);
10604 if (TARGET_64BIT)
10605 emit_insn (gen_mthisi_di (gen_rtx_REG (TImode, MD_REG_FIRST),
10606 temp, gen_rtx_REG (DImode, LO_REGNUM)));
10607 else
10608 emit_insn (gen_mthisi_di (gen_rtx_REG (DImode, MD_REG_FIRST),
10609 temp, gen_rtx_REG (SImode, LO_REGNUM)));
10610 }
10611 else
10612 {
10613 if (TARGET_64BIT)
10614 emit_insn (gen_mfhidi_ti (temp,
10615 gen_rtx_REG (TImode, MD_REG_FIRST)));
10616 else
10617 emit_insn (gen_mfhisi_di (temp,
10618 gen_rtx_REG (DImode, MD_REG_FIRST)));
10619 mips_emit_move (dest, temp);
10620 }
10621 }
10622 else if (mips_direct_save_slot_move_p (regno, mem, mem == src))
10623 mips_emit_move (dest, src);
10624 else
10625 {
10626 gcc_assert (!reg_overlap_mentioned_p (dest, temp));
10627 mips_emit_move (temp, src);
10628 mips_emit_move (dest, temp);
10629 }
10630 if (MEM_P (dest))
10631 mips_set_frame_expr (mips_frame_set (dest, src));
10632 }
10633 \f
10634 /* If we're generating n32 or n64 abicalls, and the current function
10635 does not use $28 as its global pointer, emit a cplocal directive.
10636 Use pic_offset_table_rtx as the argument to the directive. */
10637
10638 static void
10639 mips_output_cplocal (void)
10640 {
10641 if (!TARGET_EXPLICIT_RELOCS
10642 && mips_must_initialize_gp_p ()
10643 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
10644 output_asm_insn (".cplocal %+", 0);
10645 }
10646
10647 /* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */
10648
10649 static void
10650 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
10651 {
10652 const char *fnname;
10653
10654 /* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
10655 floating-point arguments. */
10656 if (TARGET_MIPS16
10657 && TARGET_HARD_FLOAT_ABI
10658 && crtl->args.info.fp_code != 0)
10659 mips16_build_function_stub ();
10660
10661 /* Get the function name the same way that toplev.c does before calling
10662 assemble_start_function. This is needed so that the name used here
10663 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
10664 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
10665 mips_start_function_definition (fnname, TARGET_MIPS16);
10666
10667 /* Output MIPS-specific frame information. */
10668 if (!flag_inhibit_size_directive)
10669 {
10670 const struct mips_frame_info *frame;
10671
10672 frame = &cfun->machine->frame;
10673
10674 /* .frame FRAMEREG, FRAMESIZE, RETREG. */
10675 fprintf (file,
10676 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
10677 "# vars= " HOST_WIDE_INT_PRINT_DEC
10678 ", regs= %d/%d"
10679 ", args= " HOST_WIDE_INT_PRINT_DEC
10680 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
10681 reg_names[frame_pointer_needed
10682 ? HARD_FRAME_POINTER_REGNUM
10683 : STACK_POINTER_REGNUM],
10684 (frame_pointer_needed
10685 ? frame->total_size - frame->hard_frame_pointer_offset
10686 : frame->total_size),
10687 reg_names[RETURN_ADDR_REGNUM],
10688 frame->var_size,
10689 frame->num_gp, frame->num_fp,
10690 frame->args_size,
10691 frame->cprestore_size);
10692
10693 /* .mask MASK, OFFSET. */
10694 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
10695 frame->mask, frame->gp_save_offset);
10696
10697 /* .fmask MASK, OFFSET. */
10698 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
10699 frame->fmask, frame->fp_save_offset);
10700 }
10701
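/* For illustration only: a small o32 function that saves $16 and $31
   might emit something like

	.frame	$sp,40,$31	# vars= 8, regs= 2/0, args= 16, gp= 8
	.mask	0x80010000,-4
	.fmask	0x00000000,0

   where 0x80010000 has bits 31 ($31) and 16 ($16) set.  The exact
   numbers depend on the frame layout.  */
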
10702 /* Handle the initialization of $gp for SVR4 PIC, if applicable.
10703 Also emit the ".set noreorder; .set nomacro" sequence for functions
10704 that need it. */
10705 if (mips_must_initialize_gp_p ()
10706 && mips_current_loadgp_style () == LOADGP_OLDABI)
10707 {
10708 if (TARGET_MIPS16)
10709 {
10710 /* This is a fixed-form sequence. The position of the
10711 first two instructions is important because of the
10712 way _gp_disp is defined. */
10713 output_asm_insn ("li\t$2,%%hi(_gp_disp)", 0);
10714 output_asm_insn ("addiu\t$3,$pc,%%lo(_gp_disp)", 0);
10715 output_asm_insn ("sll\t$2,16", 0);
10716 output_asm_insn ("addu\t$2,$3", 0);
10717 }
10718 else
10719 {
10720 /* .cpload must be in a .set noreorder but not a
10721 .set nomacro block. */
10722 mips_push_asm_switch (&mips_noreorder);
10723 output_asm_insn (".cpload\t%^", 0);
10724 if (!cfun->machine->all_noreorder_p)
10725 mips_pop_asm_switch (&mips_noreorder);
10726 else
10727 mips_push_asm_switch (&mips_nomacro);
10728 }
10729 }
10730 else if (cfun->machine->all_noreorder_p)
10731 {
10732 mips_push_asm_switch (&mips_noreorder);
10733 mips_push_asm_switch (&mips_nomacro);
10734 }
10735
10736 /* Tell the assembler which register we're using as the global
10737 pointer. This is needed for thunks, since they can use either
10738 explicit relocs or assembler macros. */
10739 mips_output_cplocal ();
10740 }
10741
10742 /* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE. */
10743
10744 static void
10745 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
10746 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
10747 {
10748 const char *fnname;
10749
10750 /* Reinstate the normal $gp. */
10751 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
10752 mips_output_cplocal ();
10753
10754 if (cfun->machine->all_noreorder_p)
10755 {
10756 mips_pop_asm_switch (&mips_nomacro);
10757 mips_pop_asm_switch (&mips_noreorder);
10758 }
10759
10760 /* Get the function name the same way that toplev.c does before calling
10761 assemble_start_function. This is needed so that the name used here
10762 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
10763 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
10764 mips_end_function_definition (fnname);
10765 }
10766 \f
10767 /* Emit an optimisation barrier for accesses to the current frame. */
10768
10769 static void
10770 mips_frame_barrier (void)
10771 {
10772 emit_clobber (gen_frame_mem (BLKmode, stack_pointer_rtx));
10773 }
10774
10775
10776 /* The __gnu_local_gp symbol. */
10777
10778 static GTY(()) rtx mips_gnu_local_gp;
10779
10780 /* If we're generating n32 or n64 abicalls, emit instructions
10781 to set up the global pointer. */
10782
10783 static void
10784 mips_emit_loadgp (void)
10785 {
10786 rtx addr, offset, incoming_address, base, index, pic_reg;
10787
10788 pic_reg = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
10789 switch (mips_current_loadgp_style ())
10790 {
10791 case LOADGP_ABSOLUTE:
10792 if (mips_gnu_local_gp == NULL)
10793 {
10794 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
10795 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
10796 }
10797 emit_insn (PMODE_INSN (gen_loadgp_absolute,
10798 (pic_reg, mips_gnu_local_gp)));
10799 break;
10800
10801 case LOADGP_OLDABI:
10802 /* Added by mips_output_function_prologue. */
10803 break;
10804
10805 case LOADGP_NEWABI:
10806 addr = XEXP (DECL_RTL (current_function_decl), 0);
10807 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
10808 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
10809 emit_insn (PMODE_INSN (gen_loadgp_newabi,
10810 (pic_reg, offset, incoming_address)));
10811 break;
10812
10813 case LOADGP_RTP:
10814 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
10815 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
10816 emit_insn (PMODE_INSN (gen_loadgp_rtp, (pic_reg, base, index)));
10817 break;
10818
10819 default:
10820 return;
10821 }
10822
10823 if (TARGET_MIPS16)
10824 emit_insn (PMODE_INSN (gen_copygp_mips16,
10825 (pic_offset_table_rtx, pic_reg)));
10826
10827 /* Emit a blockage if there are implicit uses of the GP register.
10828 This includes profiled functions, because FUNCTION_PROFILER uses
10829 a jal macro. */
10830 if (!TARGET_EXPLICIT_RELOCS || crtl->profile)
10831 emit_insn (gen_loadgp_blockage ());
10832 }
10833
10834 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
10835
10836 #if PROBE_INTERVAL > 32768
10837 #error Cannot use indexed addressing mode for stack probing
10838 #endif
10839
10840 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
10841 inclusive. These are offsets from the current stack pointer. */
10842
10843 static void
10844 mips_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
10845 {
10846 if (TARGET_MIPS16)
10847 sorry ("-fstack-check=specific not implemented for MIPS16");
10848
10849 /* See if we have a constant small number of probes to generate. If so,
10850 that's the easy case. */
10851 if (first + size <= 32768)
10852 {
10853 HOST_WIDE_INT i;
10854
10855 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
10856 it exceeds SIZE. If only one probe is needed, this will not
10857 generate any code. Then probe at FIRST + SIZE. */
10858 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
10859 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
10860 -(first + i)));
10861
10862 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
10863 -(first + size)));
10864 }
10865
10866 /* Otherwise, do the same as above, but in a loop. Note that we must be
10867 extra careful with variables wrapping around because we might be at
10868 the very top (or the very bottom) of the address space and we have
10869 to be able to handle this case properly; in particular, we use an
10870 equality test for the loop condition. */
10871 else
10872 {
10873 HOST_WIDE_INT rounded_size;
10874 rtx r3 = MIPS_PROLOGUE_TEMP (Pmode);
10875 rtx r12 = MIPS_PROLOGUE_TEMP2 (Pmode);
10876
10877 /* Sanity check for the addressing mode we're going to use. */
10878 gcc_assert (first <= 32768);
10879
10880
10881 /* Step 1: round SIZE to the previous multiple of the interval. */
10882
10883 rounded_size = size & -PROBE_INTERVAL;
10884
10885
10886 /* Step 2: compute initial and final value of the loop counter. */
10887
10888 /* TEST_ADDR = SP + FIRST. */
10889 emit_insn (gen_rtx_SET (VOIDmode, r3,
10890 plus_constant (Pmode, stack_pointer_rtx,
10891 -first)));
10892
10893 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
10894 if (rounded_size > 32768)
10895 {
10896 emit_move_insn (r12, GEN_INT (rounded_size));
10897 emit_insn (gen_rtx_SET (VOIDmode, r12,
10898 gen_rtx_MINUS (Pmode, r3, r12)));
10899 }
10900 else
10901 emit_insn (gen_rtx_SET (VOIDmode, r12,
10902 plus_constant (Pmode, r3, -rounded_size)));
10903
10904
10905 /* Step 3: the loop
10906
10907 while (TEST_ADDR != LAST_ADDR)
10908 {
10909 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
10910 probe at TEST_ADDR
10911 }
10912
10913 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
10914 until it is equal to ROUNDED_SIZE. */
10915
10916 emit_insn (PMODE_INSN (gen_probe_stack_range, (r3, r3, r12)));
10917
10918
10919 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
10920 that SIZE is equal to ROUNDED_SIZE. */
10921
10922 if (size != rounded_size)
10923 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
10924 }
10925
10926 /* Make sure nothing is scheduled before we are done. */
10927 emit_insn (gen_blockage ());
10928 }
10929
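/* A worked example of the loop case, assuming the default 4K
   PROBE_INTERVAL: for FIRST == 28672 and SIZE == 10000 we have
   FIRST + SIZE > 32768, and ROUNDED_SIZE == 8192, so the loop
   probes at SP - 32768 and SP - 36864, and the final probe for
   the residual 1808 bytes (SIZE != ROUNDED_SIZE) lands at
   SP - 38672.  */
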
10930 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
10931 absolute addresses. */
10932
10933 const char *
10934 mips_output_probe_stack_range (rtx reg1, rtx reg2)
10935 {
10936 static int labelno = 0;
10937 char loop_lab[32], end_lab[32], tmp[64];
10938 rtx xops[2];
10939
10940 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
10941 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
10942
10943 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
10944
10945 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
10946 xops[0] = reg1;
10947 xops[1] = reg2;
10948 strcpy (tmp, "%(%<beq\t%0,%1,");
10949 output_asm_insn (strcat (tmp, &end_lab[1]), xops);
10950
10951 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
10952 xops[1] = GEN_INT (-PROBE_INTERVAL);
10953 if (TARGET_64BIT && TARGET_LONG64)
10954 output_asm_insn ("daddiu\t%0,%0,%1", xops);
10955 else
10956 output_asm_insn ("addiu\t%0,%0,%1", xops);
10957
10958 /* Probe at TEST_ADDR and branch. */
10959 fprintf (asm_out_file, "\tb\t");
10960 assemble_name_raw (asm_out_file, loop_lab);
10961 fputc ('\n', asm_out_file);
10962 if (TARGET_64BIT)
10963 output_asm_insn ("sd\t$0,0(%0)%)", xops);
10964 else
10965 output_asm_insn ("sw\t$0,0(%0)%)", xops);
10966
10967 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
10968
10969 return "";
10970 }
10971
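/* With a 4K probe interval, TEST_ADDR in $3 and LAST_ADDR in $12,
   the function above emits roughly the following 32-bit sequence
   (label names are illustrative):

	LPSRL0:
		beq	$3,$12,LPSRE0
		addiu	$3,$3,-4096
		b	LPSRL0
		sw	$0,0($3)	# fills the branch delay slot
	LPSRE0:
*/
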
10972 /* A for_each_rtx callback. Stop the search if *X is a kernel register. */
10973
10974 static int
10975 mips_kernel_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
10976 {
10977 return REG_P (*x) && KERNEL_REG_P (REGNO (*x));
10978 }
10979
10980 /* Expand the "prologue" pattern. */
10981
10982 void
10983 mips_expand_prologue (void)
10984 {
10985 const struct mips_frame_info *frame;
10986 HOST_WIDE_INT size;
10987 unsigned int nargs;
10988 rtx insn;
10989
10990 if (cfun->machine->global_pointer != INVALID_REGNUM)
10991 {
10992 /* Check whether an insn uses pic_offset_table_rtx, either explicitly
10993 or implicitly. If so, we can commit to using a global pointer
10994 straight away, otherwise we need to defer the decision. */
10995 if (mips_cfun_has_inflexible_gp_ref_p ()
10996 || mips_cfun_has_flexible_gp_ref_p ())
10997 {
10998 cfun->machine->must_initialize_gp_p = true;
10999 cfun->machine->must_restore_gp_when_clobbered_p = true;
11000 }
11001
11002 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
11003 }
11004
11005 frame = &cfun->machine->frame;
11006 size = frame->total_size;
11007
11008 if (flag_stack_usage_info)
11009 current_function_static_stack_size = size;
11010
11011 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
11012 {
11013 if (crtl->is_leaf && !cfun->calls_alloca)
11014 {
11015 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
11016 mips_emit_probe_stack_range (STACK_CHECK_PROTECT,
11017 size - STACK_CHECK_PROTECT);
11018 }
11019 else if (size > 0)
11020 mips_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
11021 }
11022
11023 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
11024 bytes beforehand; this is enough to cover the register save area
11025 without going out of range. */
11026 if (((frame->mask | frame->fmask | frame->acc_mask) != 0)
11027 || frame->num_cop0_regs > 0)
11028 {
11029 HOST_WIDE_INT step1;
11030
11031 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
11032 if (GENERATE_MIPS16E_SAVE_RESTORE)
11033 {
11034 HOST_WIDE_INT offset;
11035 unsigned int mask, regno;
11036
11037 /* Try to merge argument stores into the save instruction. */
11038 nargs = mips16e_collect_argument_saves ();
11039
11040 /* Build the save instruction. */
11041 mask = frame->mask;
11042 insn = mips16e_build_save_restore (false, &mask, &offset,
11043 nargs, step1);
11044 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
11045 mips_frame_barrier ();
11046 size -= step1;
11047
11048 /* Check if we need to save other registers. */
11049 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
11050 if (BITSET_P (mask, regno - GP_REG_FIRST))
11051 {
11052 offset -= UNITS_PER_WORD;
11053 mips_save_restore_reg (word_mode, regno,
11054 offset, mips_save_reg);
11055 }
11056 }
11057 else
11058 {
11059 if (cfun->machine->interrupt_handler_p)
11060 {
11061 HOST_WIDE_INT offset;
11062 rtx mem;
11063
11064 /* If this interrupt is using a shadow register set, we need to
11065 get the stack pointer from the previous register set. */
11066 if (cfun->machine->use_shadow_register_set_p)
11067 emit_insn (gen_mips_rdpgpr (stack_pointer_rtx,
11068 stack_pointer_rtx));
11069
11070 if (!cfun->machine->keep_interrupts_masked_p)
11071 {
11072 /* Move from COP0 Cause to K0. */
11073 emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K0_REG_NUM),
11074 gen_rtx_REG (SImode,
11075 COP0_CAUSE_REG_NUM)));
11076 /* Move from COP0 EPC to K1. */
11077 emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
11078 gen_rtx_REG (SImode,
11079 COP0_EPC_REG_NUM)));
11080 }
11081
11082 /* Allocate the first part of the frame. */
11083 insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
11084 GEN_INT (-step1));
11085 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
11086 mips_frame_barrier ();
11087 size -= step1;
11088
11089 /* Start at the uppermost location for saving. */
11090 offset = frame->cop0_sp_offset - size;
11091 if (!cfun->machine->keep_interrupts_masked_p)
11092 {
11093 /* Push EPC into its stack slot. */
11094 mem = gen_frame_mem (word_mode,
11095 plus_constant (Pmode, stack_pointer_rtx,
11096 offset));
11097 mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
11098 offset -= UNITS_PER_WORD;
11099 }
11100
11101 /* Move from COP0 Status to K1. */
11102 emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
11103 gen_rtx_REG (SImode,
11104 COP0_STATUS_REG_NUM)));
11105
11106 /* Right justify the RIPL in k0. */
11107 if (!cfun->machine->keep_interrupts_masked_p)
11108 emit_insn (gen_lshrsi3 (gen_rtx_REG (SImode, K0_REG_NUM),
11109 gen_rtx_REG (SImode, K0_REG_NUM),
11110 GEN_INT (CAUSE_IPL)));
11111
11112 /* Push Status into its stack slot. */
11113 mem = gen_frame_mem (word_mode,
11114 plus_constant (Pmode, stack_pointer_rtx,
11115 offset));
11116 mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
11117 offset -= UNITS_PER_WORD;
11118
11119 /* Insert the RIPL into our copy of SR (k1) as the new IPL. */
11120 if (!cfun->machine->keep_interrupts_masked_p)
11121 emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
11122 GEN_INT (6),
11123 GEN_INT (SR_IPL),
11124 gen_rtx_REG (SImode, K0_REG_NUM)));
11125
11126 if (!cfun->machine->keep_interrupts_masked_p)
11127 /* Enable interrupts by clearing the KSU, ERL and EXL bits.
11128 IE is already the correct value, so we don't have to do
11129 anything explicit. */
11130 emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
11131 GEN_INT (4),
11132 GEN_INT (SR_EXL),
11133 gen_rtx_REG (SImode, GP_REG_FIRST)));
11134 else
11135 /* Disable interrupts by clearing the KSU, ERL, EXL,
11136 and IE bits. */
11137 emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
11138 GEN_INT (5),
11139 GEN_INT (SR_IE),
11140 gen_rtx_REG (SImode, GP_REG_FIRST)));
11141 }
11142 else
11143 {
11144 insn = gen_add3_insn (stack_pointer_rtx,
11145 stack_pointer_rtx,
11146 GEN_INT (-step1));
11147 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
11148 mips_frame_barrier ();
11149 size -= step1;
11150 }
11151 mips_for_each_saved_acc (size, mips_save_reg);
11152 mips_for_each_saved_gpr_and_fpr (size, mips_save_reg);
11153 }
11154 }
11155
11156 /* Allocate the rest of the frame. */
11157 if (size > 0)
11158 {
11159 if (SMALL_OPERAND (-size))
11160 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
11161 stack_pointer_rtx,
11162 GEN_INT (-size)))) = 1;
11163 else
11164 {
11165 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
11166 if (TARGET_MIPS16)
11167 {
11168 /* There are no instructions to add or subtract registers
11169 from the stack pointer, so use the frame pointer as a
11170 temporary. We should always be using a frame pointer
11171 in this case anyway. */
11172 gcc_assert (frame_pointer_needed);
11173 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
11174 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
11175 hard_frame_pointer_rtx,
11176 MIPS_PROLOGUE_TEMP (Pmode)));
11177 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
11178 }
11179 else
11180 emit_insn (gen_sub3_insn (stack_pointer_rtx,
11181 stack_pointer_rtx,
11182 MIPS_PROLOGUE_TEMP (Pmode)));
11183
11184 /* Describe the combined effect of the previous instructions. */
11185 mips_set_frame_expr
11186 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
11187 plus_constant (Pmode, stack_pointer_rtx, -size)));
11188 }
11189 mips_frame_barrier ();
11190 }
11191
11192 /* Set up the frame pointer, if we're using one. */
11193 if (frame_pointer_needed)
11194 {
11195 HOST_WIDE_INT offset;
11196
11197 offset = frame->hard_frame_pointer_offset;
11198 if (offset == 0)
11199 {
11200 insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
11201 RTX_FRAME_RELATED_P (insn) = 1;
11202 }
11203 else if (SMALL_OPERAND (offset))
11204 {
11205 insn = gen_add3_insn (hard_frame_pointer_rtx,
11206 stack_pointer_rtx, GEN_INT (offset));
11207 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
11208 }
11209 else
11210 {
11211 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
11212 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
11213 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
11214 hard_frame_pointer_rtx,
11215 MIPS_PROLOGUE_TEMP (Pmode)));
11216 mips_set_frame_expr
11217 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
11218 plus_constant (Pmode, stack_pointer_rtx, offset)));
11219 }
11220 }
11221
11222 mips_emit_loadgp ();
11223
11224 /* Initialize the $gp save slot. */
11225 if (mips_cfun_has_cprestore_slot_p ())
11226 {
11227 rtx base, mem, gp, temp;
11228 HOST_WIDE_INT offset;
11229
11230 mips_get_cprestore_base_and_offset (&base, &offset, false);
11231 mem = gen_frame_mem (Pmode, plus_constant (Pmode, base, offset));
11232 gp = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
11233 temp = (SMALL_OPERAND (offset)
11234 ? gen_rtx_SCRATCH (Pmode)
11235 : MIPS_PROLOGUE_TEMP (Pmode));
11236 emit_insn (PMODE_INSN (gen_potential_cprestore,
11237 (mem, GEN_INT (offset), gp, temp)));
11238
11239 mips_get_cprestore_base_and_offset (&base, &offset, true);
11240 mem = gen_frame_mem (Pmode, plus_constant (Pmode, base, offset));
11241 emit_insn (PMODE_INSN (gen_use_cprestore, (mem)));
11242 }
11243
11244 /* We need to search back to the last use of K0 or K1. */
11245 if (cfun->machine->interrupt_handler_p)
11246 {
11247 for (insn = get_last_insn (); insn != NULL_RTX; insn = PREV_INSN (insn))
11248 if (INSN_P (insn)
11249 && for_each_rtx (&PATTERN (insn), mips_kernel_reg_p, NULL))
11250 break;
11251 /* Emit a move from K1 to COP0 Status after insn. */
11252 gcc_assert (insn != NULL_RTX);
11253 emit_insn_after (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
11254 gen_rtx_REG (SImode, K1_REG_NUM)),
11255 insn);
11256 }
11257
11258 /* If we are profiling, make sure no instructions are scheduled before
11259 the call to mcount. */
11260 if (crtl->profile)
11261 emit_insn (gen_blockage ());
11262 }
11263 \f
11264 /* Attach all pending register saves to the previous instruction.
11265 Return that instruction. */
11266
11267 static rtx
11268 mips_epilogue_emit_cfa_restores (void)
11269 {
11270 rtx insn;
11271
11272 insn = get_last_insn ();
11273 gcc_assert (insn && !REG_NOTES (insn));
11274 if (mips_epilogue.cfa_restores)
11275 {
11276 RTX_FRAME_RELATED_P (insn) = 1;
11277 REG_NOTES (insn) = mips_epilogue.cfa_restores;
11278 mips_epilogue.cfa_restores = 0;
11279 }
11280 return insn;
11281 }
11282
11283 /* Like mips_epilogue_emit_cfa_restores, but also record that the CFA is
11284 now at REG + OFFSET. */
11285
11286 static void
11287 mips_epilogue_set_cfa (rtx reg, HOST_WIDE_INT offset)
11288 {
11289 rtx insn;
11290
11291 insn = mips_epilogue_emit_cfa_restores ();
11292 if (reg != mips_epilogue.cfa_reg || offset != mips_epilogue.cfa_offset)
11293 {
11294 RTX_FRAME_RELATED_P (insn) = 1;
11295 REG_NOTES (insn) = alloc_reg_note (REG_CFA_DEF_CFA,
11296 plus_constant (Pmode, reg, offset),
11297 REG_NOTES (insn));
11298 mips_epilogue.cfa_reg = reg;
11299 mips_epilogue.cfa_offset = offset;
11300 }
11301 }
11302
11303 /* Emit instructions to restore register REG from slot MEM. Also update
11304 the cfa_restores list. */
11305
11306 static void
11307 mips_restore_reg (rtx reg, rtx mem)
11308 {
11309 /* There's no MIPS16 instruction to load $31 directly. Load into
11310 $7 instead and adjust the return insn appropriately. */
11311 if (TARGET_MIPS16 && REGNO (reg) == RETURN_ADDR_REGNUM)
11312 reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);
11313 else if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
11314 {
11315 mips_add_cfa_restore (mips_subword (reg, true));
11316 mips_add_cfa_restore (mips_subword (reg, false));
11317 }
11318 else
11319 mips_add_cfa_restore (reg);
11320
11321 mips_emit_save_slot_move (reg, mem, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
11322 if (REGNO (reg) == REGNO (mips_epilogue.cfa_reg))
11323 /* The CFA is currently defined in terms of the register whose
11324 value we have just restored. Redefine the CFA in terms of
11325 the stack pointer. */
11326 mips_epilogue_set_cfa (stack_pointer_rtx,
11327 mips_epilogue.cfa_restore_sp_offset);
11328 }
11329
11330 /* Emit code to set the stack pointer to BASE + OFFSET, given that
11331 BASE + OFFSET is NEW_FRAME_SIZE bytes below the top of the frame.
11332 BASE, if not the stack pointer, is available as a temporary. */
11333
11334 static void
11335 mips_deallocate_stack (rtx base, rtx offset, HOST_WIDE_INT new_frame_size)
11336 {
11337 if (base == stack_pointer_rtx && offset == const0_rtx)
11338 return;
11339
11340 mips_frame_barrier ();
11341 if (offset == const0_rtx)
11342 {
11343 emit_move_insn (stack_pointer_rtx, base);
11344 mips_epilogue_set_cfa (stack_pointer_rtx, new_frame_size);
11345 }
11346 else if (TARGET_MIPS16 && base != stack_pointer_rtx)
11347 {
11348 emit_insn (gen_add3_insn (base, base, offset));
11349 mips_epilogue_set_cfa (base, new_frame_size);
11350 emit_move_insn (stack_pointer_rtx, base);
11351 }
11352 else
11353 {
11354 emit_insn (gen_add3_insn (stack_pointer_rtx, base, offset));
11355 mips_epilogue_set_cfa (stack_pointer_rtx, new_frame_size);
11356 }
11357 }
11358
11359 /* Emit any instructions needed before a return. */
11360
11361 void
11362 mips_expand_before_return (void)
11363 {
11364 /* When using a call-clobbered gp, we start out with unified call
11365 insns that include instructions to restore the gp. We then split
11366 these unified calls after reload. These split calls explicitly
11367 clobber gp, so there is no need to define
11368 PIC_OFFSET_TABLE_REG_CALL_CLOBBERED.
11369
11370 For consistency, we should also insert an explicit clobber of $28
11371 before return insns, so that the post-reload optimizers know that
11372 the register is not live on exit. */
11373 if (TARGET_CALL_CLOBBERED_GP)
11374 emit_clobber (pic_offset_table_rtx);
11375 }
11376
11377 /* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
11378 says which. */
11379
11380 void
11381 mips_expand_epilogue (bool sibcall_p)
11382 {
11383 const struct mips_frame_info *frame;
11384 HOST_WIDE_INT step1, step2;
11385 rtx base, adjust, insn;
11386 bool use_jraddiusp_p = false;
11387
11388 if (!sibcall_p && mips_can_use_return_insn ())
11389 {
11390 emit_jump_insn (gen_return ());
11391 return;
11392 }
11393
11394 /* In MIPS16 mode, if the return value should go into a floating-point
11395 register, we need to call a helper routine to copy it over. */
11396 if (mips16_cfun_returns_in_fpr_p ())
11397 mips16_copy_fpr_return_value ();
11398
11399 /* Split the frame into two. STEP1 is the amount of stack we should
11400 deallocate before restoring the registers. STEP2 is the amount we
11401 should deallocate afterwards.
11402
11403 Start off by assuming that no registers need to be restored. */
11404 frame = &cfun->machine->frame;
11405 step1 = frame->total_size;
11406 step2 = 0;
11407
11408 /* Work out which register holds the frame address. */
11409 if (!frame_pointer_needed)
11410 base = stack_pointer_rtx;
11411 else
11412 {
11413 base = hard_frame_pointer_rtx;
11414 step1 -= frame->hard_frame_pointer_offset;
11415 }
11416 mips_epilogue.cfa_reg = base;
11417 mips_epilogue.cfa_offset = step1;
11418 mips_epilogue.cfa_restores = NULL_RTX;
11419
11420 /* If we need to restore registers, deallocate as much stack as
11421 possible in the second step without going out of range. */
11422 if ((frame->mask | frame->fmask | frame->acc_mask) != 0
11423 || frame->num_cop0_regs > 0)
11424 {
11425 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
11426 step1 -= step2;
11427 }
11428
11429 /* Get an rtx for STEP1 that we can add to BASE. */
11430 adjust = GEN_INT (step1);
11431 if (!SMALL_OPERAND (step1))
11432 {
11433 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
11434 adjust = MIPS_EPILOGUE_TEMP (Pmode);
11435 }
11436 mips_deallocate_stack (base, adjust, step2);
11437
11438 /* If we're using addressing macros, $gp is implicitly used by all
11439 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
11440 from the stack. */
11441 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
11442 emit_insn (gen_blockage ());
11443
11444 mips_epilogue.cfa_restore_sp_offset = step2;
11445 if (GENERATE_MIPS16E_SAVE_RESTORE && frame->mask != 0)
11446 {
11447 unsigned int regno, mask;
11448 HOST_WIDE_INT offset;
11449 rtx restore;
11450
11451 /* Generate the restore instruction. */
11452 mask = frame->mask;
11453 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
11454
11455 /* Restore any other registers manually. */
11456 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
11457 if (BITSET_P (mask, regno - GP_REG_FIRST))
11458 {
11459 offset -= UNITS_PER_WORD;
11460 mips_save_restore_reg (word_mode, regno, offset, mips_restore_reg);
11461 }
11462
11463 /* Restore the remaining registers and deallocate the final bit
11464 of the frame. */
11465 mips_frame_barrier ();
11466 emit_insn (restore);
11467 mips_epilogue_set_cfa (stack_pointer_rtx, 0);
11468 }
11469 else
11470 {
11471 /* Restore the registers. */
11472 mips_for_each_saved_acc (frame->total_size - step2, mips_restore_reg);
11473 mips_for_each_saved_gpr_and_fpr (frame->total_size - step2,
11474 mips_restore_reg);
11475
11476 if (cfun->machine->interrupt_handler_p)
11477 {
11478 HOST_WIDE_INT offset;
11479 rtx mem;
11480
11481 offset = frame->cop0_sp_offset - (frame->total_size - step2);
11482 if (!cfun->machine->keep_interrupts_masked_p)
11483 {
11484 /* Restore the original EPC. */
11485 mem = gen_frame_mem (word_mode,
11486 plus_constant (Pmode, stack_pointer_rtx,
11487 offset));
11488 mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
11489 offset -= UNITS_PER_WORD;
11490
11491 /* Move to COP0 EPC. */
11492 emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_EPC_REG_NUM),
11493 gen_rtx_REG (SImode, K0_REG_NUM)));
11494 }
11495
11496 /* Restore the original Status. */
11497 mem = gen_frame_mem (word_mode,
11498 plus_constant (Pmode, stack_pointer_rtx,
11499 offset));
11500 mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
11501 offset -= UNITS_PER_WORD;
11502
11503 /* If we don't use a shadow register set, we need to update SP. */
11504 if (!cfun->machine->use_shadow_register_set_p)
11505 mips_deallocate_stack (stack_pointer_rtx, GEN_INT (step2), 0);
11506 else
11507 /* The choice of position is somewhat arbitrary in this case. */
11508 mips_epilogue_emit_cfa_restores ();
11509
11510 /* Move to COP0 Status. */
11511 emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
11512 gen_rtx_REG (SImode, K0_REG_NUM)));
11513 }
11514 else if (TARGET_MICROMIPS
11515 && !crtl->calls_eh_return
11516 && !sibcall_p
11517 && step2 > 0
11518 && mips_unsigned_immediate_p (step2, 5, 2))
11519 use_jraddiusp_p = true;
11520 else
11521 /* Deallocate the final bit of the frame. */
11522 mips_deallocate_stack (stack_pointer_rtx, GEN_INT (step2), 0);
11523 }
11524
11525 if (!use_jraddiusp_p)
11526 gcc_assert (!mips_epilogue.cfa_restores);
11527
11528 /* Add in the __builtin_eh_return stack adjustment. We need to
11529 use a temporary in MIPS16 code. */
11530 if (crtl->calls_eh_return)
11531 {
11532 if (TARGET_MIPS16)
11533 {
11534 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
11535 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
11536 MIPS_EPILOGUE_TEMP (Pmode),
11537 EH_RETURN_STACKADJ_RTX));
11538 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
11539 }
11540 else
11541 emit_insn (gen_add3_insn (stack_pointer_rtx,
11542 stack_pointer_rtx,
11543 EH_RETURN_STACKADJ_RTX));
11544 }
11545
11546 if (!sibcall_p)
11547 {
11548 mips_expand_before_return ();
11549 if (cfun->machine->interrupt_handler_p)
11550 {
11551 /* Interrupt handlers generate eret or deret. */
11552 if (cfun->machine->use_debug_exception_return_p)
11553 emit_jump_insn (gen_mips_deret ());
11554 else
11555 emit_jump_insn (gen_mips_eret ());
11556 }
11557 else
11558 {
11559 rtx pat;
11560
11561 /* When generating MIPS16 code, the normal
11562 mips_for_each_saved_gpr_and_fpr path will restore the return
11563 address into $7 rather than $31. */
11564 if (TARGET_MIPS16
11565 && !GENERATE_MIPS16E_SAVE_RESTORE
11566 && BITSET_P (frame->mask, RETURN_ADDR_REGNUM))
11567 {
11568 /* simple_returns cannot rely on values that are only available
11569 on paths through the epilogue (because return paths that do
11570 not pass through the epilogue may nevertheless reuse a
11571 simple_return that occurs at the end of the epilogue).
11572 Use a normal return here instead. */
11573 rtx reg = gen_rtx_REG (Pmode, GP_REG_FIRST + 7);
11574 pat = gen_return_internal (reg);
11575 }
11576 else if (use_jraddiusp_p)
11577 pat = gen_jraddiusp (GEN_INT (step2));
11578 else
11579 {
11580 rtx reg = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
11581 pat = gen_simple_return_internal (reg);
11582 }
11583 emit_jump_insn (pat);
11584 if (use_jraddiusp_p)
11585 mips_epilogue_set_cfa (stack_pointer_rtx, step2);
11586 }
11587 }
11588
11589 /* Search from the beginning to the first use of K0 or K1. */
11590 if (cfun->machine->interrupt_handler_p
11591 && !cfun->machine->keep_interrupts_masked_p)
11592 {
11593 for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
11594 if (INSN_P (insn)
11595 && for_each_rtx (&PATTERN (insn), mips_kernel_reg_p, NULL))
11596 break;
11597 gcc_assert (insn != NULL_RTX);
11598 /* Insert code to disable interrupts before the first use of K0 or K1. */
11599 emit_insn_before (gen_mips_di (), insn);
11600 emit_insn_before (gen_mips_ehb (), insn);
11601 }
11602 }
11603 \f
11604 /* Return true if this function is known to have a null epilogue.
11605 This allows the optimizer to omit jumps to jumps if no stack
11606 was created. */
11607
11608 bool
11609 mips_can_use_return_insn (void)
11610 {
11611 /* Interrupt handlers need to go through the epilogue. */
11612 if (cfun->machine->interrupt_handler_p)
11613 return false;
11614
11615 if (!reload_completed)
11616 return false;
11617
11618 if (crtl->profile)
11619 return false;
11620
11621 /* In MIPS16 mode, a function that returns a floating-point value
11622 needs to arrange to copy the return value into the floating-point
11623 registers. */
11624 if (mips16_cfun_returns_in_fpr_p ())
11625 return false;
11626
11627 return cfun->machine->frame.total_size == 0;
11628 }
11629 \f
11630 /* Return true if register REGNO can store a value of mode MODE.
11631 The result of this function is cached in mips_hard_regno_mode_ok. */
11632
11633 static bool
11634 mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
11635 {
11636 unsigned int size;
11637 enum mode_class mclass;
11638
11639 if (mode == CCV2mode)
11640 return (ISA_HAS_8CC
11641 && ST_REG_P (regno)
11642 && (regno - ST_REG_FIRST) % 2 == 0);
11643
11644 if (mode == CCV4mode)
11645 return (ISA_HAS_8CC
11646 && ST_REG_P (regno)
11647 && (regno - ST_REG_FIRST) % 4 == 0);
11648
11649 if (mode == CCmode)
11650 return ISA_HAS_8CC ? ST_REG_P (regno) : regno == FPSW_REGNUM;
11651
11652 size = GET_MODE_SIZE (mode);
11653 mclass = GET_MODE_CLASS (mode);
11654
11655 if (GP_REG_P (regno))
11656 return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;
11657
11658 if (FP_REG_P (regno)
11659 && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
11660 || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
11661 {
11662 /* Allow 64-bit vector modes for Loongson-2E/2F. */
11663 if (TARGET_LOONGSON_VECTORS
11664 && (mode == V2SImode
11665 || mode == V4HImode
11666 || mode == V8QImode
11667 || mode == DImode))
11668 return true;
11669
11670 if (mclass == MODE_FLOAT
11671 || mclass == MODE_COMPLEX_FLOAT
11672 || mclass == MODE_VECTOR_FLOAT)
11673 return size <= UNITS_PER_FPVALUE;
11674
11675 /* Allow integer modes that fit into a single register. We need
11676 to put integers into FPRs when using instructions like CVT
11677 and TRUNC. There's no point allowing sizes smaller than a word,
11678 because the FPU has no appropriate load/store instructions. */
11679 if (mclass == MODE_INT)
11680 return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
11681 }
11682
11683 if (ACC_REG_P (regno)
11684 && (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
11685 {
11686 if (MD_REG_P (regno))
11687 {
11688 /* After a multiplication or division, clobbering HI makes
11689 the value of LO unpredictable, and vice versa. This means
11690 that, for all interesting cases, HI and LO are effectively
11691 a single register.
11692
11693 We model this by requiring that any value that uses HI
11694 also uses LO. */
11695 if (size <= UNITS_PER_WORD * 2)
11696 return regno == (size <= UNITS_PER_WORD ? LO_REGNUM : MD_REG_FIRST);
11697 }
11698 else
11699 {
11700 /* DSP accumulators do not have the same restrictions as
11701 HI and LO, so we can treat them as normal doubleword
11702 registers. */
11703 if (size <= UNITS_PER_WORD)
11704 return true;
11705
11706 if (size <= UNITS_PER_WORD * 2
11707 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)
11708 return true;
11709 }
11710 }
11711
11712 if (ALL_COP_REG_P (regno))
11713 return mclass == MODE_INT && size <= UNITS_PER_WORD;
11714
11715 if (regno == GOT_VERSION_REGNUM)
11716 return mode == SImode;
11717
11718 return false;
11719 }
11720
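/* An example of the HI/LO rule above: on a 32-bit target a DImode
   value (UNITS_PER_WORD * 2 bytes) is accepted only at MD_REG_FIRST
   and so occupies HI and LO together, whereas a SImode value must
   live in LO alone.  */
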
11721 /* Implement HARD_REGNO_NREGS. */
11722
11723 unsigned int
11724 mips_hard_regno_nregs (int regno, enum machine_mode mode)
11725 {
11726 if (ST_REG_P (regno))
11727 /* The size of FP status registers is always 4, because they only hold
11728 CCmode values, and CCmode is always considered to be 4 bytes wide. */
11729 return (GET_MODE_SIZE (mode) + 3) / 4;
11730
11731 if (FP_REG_P (regno))
11732 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
11733
11734 /* All other registers are word-sized. */
11735 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
11736 }
11737
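/* For example, with 32-bit FPRs (UNITS_PER_FPREG == 4) a DFmode value
   needs (8 + 4 - 1) / 4 == 2 registers from the function above, while
   with 64-bit FPRs it fits in one.  */
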
11738 /* Implement CLASS_MAX_NREGS, taking the maximum of the cases
11739 in mips_hard_regno_nregs. */
11740
11741 int
11742 mips_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
11743 {
11744 int size;
11745 HARD_REG_SET left;
11746
11747 size = 0x8000;
11748 COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
11749 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
11750 {
11751 if (HARD_REGNO_MODE_OK (ST_REG_FIRST, mode))
11752 size = MIN (size, 4);
11753 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]);
11754 }
11755 if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
11756 {
11757 if (HARD_REGNO_MODE_OK (FP_REG_FIRST, mode))
11758 size = MIN (size, UNITS_PER_FPREG);
11759 AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
11760 }
11761 if (!hard_reg_set_empty_p (left))
11762 size = MIN (size, UNITS_PER_WORD);
11763 return (GET_MODE_SIZE (mode) + size - 1) / size;
11764 }
11765
11766 /* Implement CANNOT_CHANGE_MODE_CLASS. */
11767
11768 bool
11769 mips_cannot_change_mode_class (enum machine_mode from,
11770 enum machine_mode to,
11771 enum reg_class rclass)
11772 {
11773 /* Allow conversions between different Loongson integer vectors,
11774 and between those vectors and DImode. */
11775 if (GET_MODE_SIZE (from) == 8 && GET_MODE_SIZE (to) == 8
11776 && INTEGRAL_MODE_P (from) && INTEGRAL_MODE_P (to))
11777 return false;
11778
11779 /* Otherwise, there are several problems with changing the modes of
11780 values in floating-point registers:
11781
11782 - When a multi-word value is stored in paired floating-point
11783 registers, the first register always holds the low word. We
11784 therefore can't allow FPRs to change between single-word and
11785 multi-word modes on big-endian targets.
11786
11787 - GCC assumes that each word of a multiword register can be
11788 accessed individually using SUBREGs. This is not true for
11789 floating-point registers if they are bigger than a word.
11790
11791 - Loading a 32-bit value into a 64-bit floating-point register
11792 will not sign-extend the value, despite what LOAD_EXTEND_OP
11793 says. We can't allow FPRs to change from SImode to a wider
11794 mode on 64-bit targets.
11795
11796 - If the FPU has already interpreted a value in one format, we
11797 must not ask it to treat the value as having a different
11798 format.
11799
11800 We therefore disallow all mode changes involving FPRs. */
11801
11802 return reg_classes_intersect_p (FP_REGS, rclass);
11803 }
11804
11805 /* Implement target hook small_register_classes_for_mode_p. */
11806
11807 static bool
11808 mips_small_register_classes_for_mode_p (enum machine_mode mode
11809 ATTRIBUTE_UNUSED)
11810 {
11811 return TARGET_MIPS16;
11812 }
11813
11814 /* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
11815
11816 static bool
11817 mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
11818 {
11819 switch (mode)
11820 {
11821 case SFmode:
11822 return TARGET_HARD_FLOAT;
11823
11824 case DFmode:
11825 return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
11826
11827 case V2SFmode:
11828 return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
11829
11830 default:
11831 return false;
11832 }
11833 }
11834
11835 /* Implement MODES_TIEABLE_P. */
11836
11837 bool
11838 mips_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
11839 {
11840 /* FPRs allow no mode punning, so it's not worth tying modes if we'd
11841 prefer to put one of them in FPRs. */
11842 return (mode1 == mode2
11843 || (!mips_mode_ok_for_mov_fmt_p (mode1)
11844 && !mips_mode_ok_for_mov_fmt_p (mode2)));
11845 }
11846
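/* For example, with hard float enabled SImode and SFmode cannot be
   tied, because SFmode qualifies for mov.fmt and therefore prefers
   FPRs; with -msoft-float neither mode does, so tying is allowed.  */
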
11847 /* Implement TARGET_PREFERRED_RELOAD_CLASS. */
11848
11849 static reg_class_t
11850 mips_preferred_reload_class (rtx x, reg_class_t rclass)
11851 {
11852 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, rclass))
11853 return LEA_REGS;
11854
11855 if (reg_class_subset_p (FP_REGS, rclass)
11856 && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
11857 return FP_REGS;
11858
11859 if (reg_class_subset_p (GR_REGS, rclass))
11860 rclass = GR_REGS;
11861
11862 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, rclass))
11863 rclass = M16_REGS;
11864
11865 return rclass;
11866 }
11867
11868 /* RCLASS is a class involved in a REGISTER_MOVE_COST calculation.
11869 Return a "canonical" class to represent it in later calculations. */
11870
11871 static reg_class_t
11872 mips_canonicalize_move_class (reg_class_t rclass)
11873 {
11874 /* All moves involving accumulator registers have the same cost. */
11875 if (reg_class_subset_p (rclass, ACC_REGS))
11876 rclass = ACC_REGS;
11877
11878 /* Likewise promote subclasses of general registers to the most
11879 interesting containing class. */
11880 if (TARGET_MIPS16 && reg_class_subset_p (rclass, M16_REGS))
11881 rclass = M16_REGS;
11882 else if (reg_class_subset_p (rclass, GENERAL_REGS))
11883 rclass = GENERAL_REGS;
11884
11885 return rclass;
11886 }
11887
11888 /* Return the cost of moving a value of mode MODE from a register of
11889 class FROM to a GPR. Return 0 for classes that are unions of other
11890 classes handled by this function. */
11891
11892 static int
11893 mips_move_to_gpr_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
11894 reg_class_t from)
11895 {
11896 switch (from)
11897 {
11898 case GENERAL_REGS:
11899 /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
11900 return 2;
11901
11902 case ACC_REGS:
11903 /* MFLO and MFHI. */
11904 return 6;
11905
11906 case FP_REGS:
11907 /* MFC1, etc. */
11908 return 4;
11909
11910 case ST_REGS:
11911 /* LUI followed by MOVF. */
11912 return 4;
11913
11914 case COP0_REGS:
11915 case COP2_REGS:
11916 case COP3_REGS:
11917 /* This choice of value is historical. */
11918 return 5;
11919
11920 default:
11921 return 0;
11922 }
11923 }
11924
11925 /* Return the cost of moving a value of mode MODE from a GPR to a
11926 register of class TO. Return 0 for classes that are unions of
11927 other classes handled by this function. */
11928
11929 static int
11930 mips_move_from_gpr_cost (enum machine_mode mode, reg_class_t to)
11931 {
11932 switch (to)
11933 {
11934 case GENERAL_REGS:
11935 /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro. */
11936 return 2;
11937
11938 case ACC_REGS:
11939 /* MTLO and MTHI. */
11940 return 6;
11941
11942 case FP_REGS:
11943 /* MTC1, etc. */
11944 return 4;
11945
11946 case ST_REGS:
11947 /* A secondary reload through an FPR scratch. */
11948 return (mips_register_move_cost (mode, GENERAL_REGS, FP_REGS)
11949 + mips_register_move_cost (mode, FP_REGS, ST_REGS));
11950
11951 case COP0_REGS:
11952 case COP2_REGS:
11953 case COP3_REGS:
11954 /* This choice of value is historical. */
11955 return 5;
11956
11957 default:
11958 return 0;
11959 }
11960 }
11961
11962 /* Implement TARGET_REGISTER_MOVE_COST. Return 0 for classes whose
11963 cost is the maximum of the move costs of their subclasses; regclass
11964 will work out that maximum for us. */
11965
11966 static int
11967 mips_register_move_cost (enum machine_mode mode,
11968 reg_class_t from, reg_class_t to)
11969 {
11970 reg_class_t dregs;
11971 int cost1, cost2;
11972
11973 from = mips_canonicalize_move_class (from);
11974 to = mips_canonicalize_move_class (to);
11975
11976 /* Handle moves that can be done without using general-purpose registers. */
11977 if (from == FP_REGS)
11978 {
11979 if (to == FP_REGS && mips_mode_ok_for_mov_fmt_p (mode))
11980 /* MOV.FMT. */
11981 return 4;
11982 if (to == ST_REGS)
11983 /* The sequence generated by mips_expand_fcc_reload. */
11984 return 8;
11985 }
11986
11987 /* Handle cases in which only one class deviates from the ideal. */
11988 dregs = TARGET_MIPS16 ? M16_REGS : GENERAL_REGS;
11989 if (from == dregs)
11990 return mips_move_from_gpr_cost (mode, to);
11991 if (to == dregs)
11992 return mips_move_to_gpr_cost (mode, from);
11993
11994 /* Handle cases that require a GPR temporary. */
11995 cost1 = mips_move_to_gpr_cost (mode, from);
11996 if (cost1 != 0)
11997 {
11998 cost2 = mips_move_from_gpr_cost (mode, to);
11999 if (cost2 != 0)
12000 return cost1 + cost2;
12001 }
12002
12003 return 0;
12004 }
12005
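/* A worked example of the GPR-temporary case above: moving a word
   from FP_REGS to ACC_REGS has no direct path, so the cost is
   mips_move_to_gpr_cost (FP_REGS) == 4 (MFC1) plus
   mips_move_from_gpr_cost (ACC_REGS) == 6 (MTLO/MTHI), i.e. 10.  */
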
12006 /* Implement TARGET_MEMORY_MOVE_COST. */
12007
12008 static int
12009 mips_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
12010 {
12011 return (mips_cost->memory_latency
12012 + memory_move_secondary_cost (mode, rclass, in));
12013 }
12014
12015 /* Return the register class required for a secondary register when
12016 copying between one of the registers in RCLASS and value X, which
12017 has mode MODE. X is the source of the move if IN_P, otherwise it
12018 is the destination. Return NO_REGS if no secondary register is
12019 needed. */
12020
12021 enum reg_class
12022 mips_secondary_reload_class (enum reg_class rclass,
12023 enum machine_mode mode, rtx x, bool in_p)
12024 {
12025 int regno;
12026
12027 /* If X is a constant that cannot be loaded into $25, it must be loaded
12028 into some other GPR. No other register class allows a direct move. */
12029 if (mips_dangerous_for_la25_p (x))
12030 return reg_class_subset_p (rclass, LEA_REGS) ? NO_REGS : LEA_REGS;
12031
12032 regno = true_regnum (x);
12033 if (TARGET_MIPS16)
12034 {
12035 /* In MIPS16 mode, every move must involve a member of M16_REGS. */
12036 if (!reg_class_subset_p (rclass, M16_REGS) && !M16_REG_P (regno))
12037 return M16_REGS;
12038
12039 return NO_REGS;
12040 }
12041
12042 /* Copying from accumulator registers to anywhere other than a general
12043 register requires a temporary general register. */
12044 if (reg_class_subset_p (rclass, ACC_REGS))
12045 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
12046 if (ACC_REG_P (regno))
12047 return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
12048
12049 /* We can only copy a value to a condition code register from a
12050 floating-point register, and even then we require a scratch
12051 floating-point register. We can only copy a value out of a
12052 condition-code register into a general register. */
12053 if (reg_class_subset_p (rclass, ST_REGS))
12054 {
12055 if (in_p)
12056 return FP_REGS;
12057 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
12058 }
12059 if (ST_REG_P (regno))
12060 {
12061 if (!in_p)
12062 return FP_REGS;
12063 return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
12064 }
12065
12066 if (reg_class_subset_p (rclass, FP_REGS))
12067 {
12068 if (MEM_P (x)
12069 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
12070 /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use
12071 pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */
12072 return NO_REGS;
12073
12074 if (GP_REG_P (regno) || x == CONST0_RTX (mode))
12075 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
12076 return NO_REGS;
12077
12078 if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (mode, x))
12079 /* We can force the constant to memory and use lwc1
12080 and ldc1. As above, we will use pairs of lwc1s if
12081 ldc1 is not supported. */
12082 return NO_REGS;
12083
12084 if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
12085 /* In this case we can use mov.fmt. */
12086 return NO_REGS;
12087
12088 /* Otherwise, we need to reload through an integer register. */
12089 return GR_REGS;
12090 }
12091 if (FP_REG_P (regno))
12092 return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
12093
12094 return NO_REGS;
12095 }
12096
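/* An example of the rules above: loading a value into a condition-code
   register (in_p) always goes through an FP_REGS scratch, while a
   condition-code value can only be copied out into a general register,
   matching the MOVF-based sequences costed earlier.  */
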
12097 /* Implement TARGET_MODE_REP_EXTENDED. */
12098
12099 static int
12100 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
12101 {
12102 /* On 64-bit targets, SImode register values are sign-extended to DImode. */
12103 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
12104 return SIGN_EXTEND;
12105
12106 return UNKNOWN;
12107 }
12108 \f
12109 /* Implement TARGET_VALID_POINTER_MODE. */
12110
12111 static bool
12112 mips_valid_pointer_mode (enum machine_mode mode)
12113 {
12114 return mode == SImode || (TARGET_64BIT && mode == DImode);
12115 }
12116
12117 /* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */
12118
12119 static bool
12120 mips_vector_mode_supported_p (enum machine_mode mode)
12121 {
12122 switch (mode)
12123 {
12124 case V2SFmode:
12125 return TARGET_PAIRED_SINGLE_FLOAT;
12126
12127 case V2HImode:
12128 case V4QImode:
12129 case V2HQmode:
12130 case V2UHQmode:
12131 case V2HAmode:
12132 case V2UHAmode:
12133 case V4QQmode:
12134 case V4UQQmode:
12135 return TARGET_DSP;
12136
12137 case V2SImode:
12138 case V4HImode:
12139 case V8QImode:
12140 return TARGET_LOONGSON_VECTORS;
12141
12142 default:
12143 return false;
12144 }
12145 }
12146
12147 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
12148
12149 static bool
12150 mips_scalar_mode_supported_p (enum machine_mode mode)
12151 {
12152 if (ALL_FIXED_POINT_MODE_P (mode)
12153 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
12154 return true;
12155
12156 return default_scalar_mode_supported_p (mode);
12157 }
12158 \f
12159 /* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
12160
12161 static enum machine_mode
12162 mips_preferred_simd_mode (enum machine_mode mode ATTRIBUTE_UNUSED)
12163 {
12164 if (TARGET_PAIRED_SINGLE_FLOAT
12165 && mode == SFmode)
12166 return V2SFmode;
12167 return word_mode;
12168 }
12169
12170 /* Implement TARGET_INIT_LIBFUNCS. */
12171
12172 static void
12173 mips_init_libfuncs (void)
12174 {
12175 if (TARGET_FIX_VR4120)
12176 {
12177 /* Register the special divsi3 and modsi3 functions needed to work
12178 around VR4120 division errata. */
12179 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
12180 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
12181 }
12182
12183 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
12184 {
12185 /* Register the MIPS16 -mhard-float stubs. */
12186 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
12187 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
12188 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
12189 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
12190
12191 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
12192 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
12193 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
12194 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
12195 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
12196 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
12197 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
12198
12199 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
12200 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
12201 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
12202
12203 if (TARGET_DOUBLE_FLOAT)
12204 {
12205 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
12206 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
12207 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
12208 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
12209
12210 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
12211 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
12212 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
12213 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
12214 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
12215 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
12216 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
12217
12218 set_conv_libfunc (sext_optab, DFmode, SFmode,
12219 "__mips16_extendsfdf2");
12220 set_conv_libfunc (trunc_optab, SFmode, DFmode,
12221 "__mips16_truncdfsf2");
12222 set_conv_libfunc (sfix_optab, SImode, DFmode,
12223 "__mips16_fix_truncdfsi");
12224 set_conv_libfunc (sfloat_optab, DFmode, SImode,
12225 "__mips16_floatsidf");
12226 set_conv_libfunc (ufloat_optab, DFmode, SImode,
12227 "__mips16_floatunsidf");
12228 }
12229 }
12230
12231 /* The MIPS16 ISA does not have an encoding for "sync", so we rely
12232 on an external non-MIPS16 routine to implement __sync_synchronize.
12233 Similarly for the rest of the ll/sc libfuncs. */
12234 if (TARGET_MIPS16)
12235 {
12236 synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
12237 init_sync_libfuncs (UNITS_PER_WORD);
12238 }
12239 }
12240
12241 /* Build up a multi-insn sequence that loads label TARGET into $AT. */
12242
12243 static void
12244 mips_process_load_label (rtx target)
12245 {
12246 rtx base, gp, intop;
12247 HOST_WIDE_INT offset;
12248
12249 mips_multi_start ();
12250 switch (mips_abi)
12251 {
12252 case ABI_N32:
12253 mips_multi_add_insn ("lw\t%@,%%got_page(%0)(%+)", target, 0);
12254 mips_multi_add_insn ("addiu\t%@,%@,%%got_ofst(%0)", target, 0);
12255 break;
12256
12257 case ABI_64:
12258 mips_multi_add_insn ("ld\t%@,%%got_page(%0)(%+)", target, 0);
12259 mips_multi_add_insn ("daddiu\t%@,%@,%%got_ofst(%0)", target, 0);
12260 break;
12261
12262 default:
12263 gp = pic_offset_table_rtx;
12264 if (mips_cfun_has_cprestore_slot_p ())
12265 {
12266 gp = gen_rtx_REG (Pmode, AT_REGNUM);
12267 mips_get_cprestore_base_and_offset (&base, &offset, true);
12268 if (!SMALL_OPERAND (offset))
12269 {
12270 intop = GEN_INT (CONST_HIGH_PART (offset));
12271 mips_multi_add_insn ("lui\t%0,%1", gp, intop, 0);
12272 mips_multi_add_insn ("addu\t%0,%0,%1", gp, base, 0);
12273
12274 base = gp;
12275 offset = CONST_LOW_PART (offset);
12276 }
12277 intop = GEN_INT (offset);
12278 if (ISA_HAS_LOAD_DELAY)
12279 mips_multi_add_insn ("lw\t%0,%1(%2)%#", gp, intop, base, 0);
12280 else
12281 mips_multi_add_insn ("lw\t%0,%1(%2)", gp, intop, base, 0);
12282 }
12283 if (ISA_HAS_LOAD_DELAY)
12284 mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)%#", target, gp, 0);
12285 else
12286 mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)", target, gp, 0);
12287 mips_multi_add_insn ("addiu\t%@,%@,%%lo(%0)", target, 0);
12288 break;
12289 }
12290 }
12291
12292 /* Return the number of instructions needed to load a label into $AT. */
12293
12294 static unsigned int
12295 mips_load_label_num_insns (void)
12296 {
12297 if (cfun->machine->load_label_num_insns == 0)
12298 {
12299 mips_process_load_label (pc_rtx);
12300 cfun->machine->load_label_num_insns = mips_multi_num_insns;
12301 }
12302 return cfun->machine->load_label_num_insns;
12303 }
12304
12305 /* Emit an asm sequence to start a noat block and load the address
12306 of a label into $1. */
12307
12308 void
12309 mips_output_load_label (rtx target)
12310 {
12311 mips_push_asm_switch (&mips_noat);
12312 if (TARGET_EXPLICIT_RELOCS)
12313 {
12314 mips_process_load_label (target);
12315 mips_multi_write ();
12316 }
12317 else
12318 {
12319 if (Pmode == DImode)
12320 output_asm_insn ("dla\t%@,%0", &target);
12321 else
12322 output_asm_insn ("la\t%@,%0", &target);
12323 }
12324 }
12325
12326 /* Return the length of INSN. LENGTH is the initial length computed by
12327 attributes in the machine-description file. */
12328
12329 int
12330 mips_adjust_insn_length (rtx insn, int length)
12331 {
12332 /* mips.md uses MAX_PIC_BRANCH_LENGTH as a placeholder for the length
12333 of a PIC long-branch sequence. Substitute the correct value. */
12334 if (length == MAX_PIC_BRANCH_LENGTH
12335 && JUMP_P (insn)
12336 && INSN_CODE (insn) >= 0
12337 && get_attr_type (insn) == TYPE_BRANCH)
12338 {
12339 /* Add the branch-over instruction and its delay slot, if this
12340 is a conditional branch. */
12341 length = simplejump_p (insn) ? 0 : 8;
12342
12343 /* Add the size of a load into $AT. */
12344 length += BASE_INSN_LENGTH * mips_load_label_num_insns ();
12345
12346 /* Add the length of an indirect jump, ignoring the delay slot. */
12347 length += TARGET_COMPRESSION ? 2 : 4;
12348 }
12349
12350 /* An unconditional jump has an unfilled delay slot if it is not part
12351 of a sequence. A conditional jump normally has a delay slot, but
12352 does not on MIPS16. */
12353 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
12354 length += TARGET_MIPS16 ? 2 : 4;
12355
12356 /* See how many nops might be needed to avoid hardware hazards. */
12357 if (!cfun->machine->ignore_hazard_length_p
12358 && INSN_P (insn)
12359 && INSN_CODE (insn) >= 0)
12360 switch (get_attr_hazard (insn))
12361 {
12362 case HAZARD_NONE:
12363 break;
12364
12365 case HAZARD_DELAY:
12366 length += NOP_INSN_LENGTH;
12367 break;
12368
12369 case HAZARD_HILO:
12370 length += NOP_INSN_LENGTH * 2;
12371 break;
12372 }
12373
12374 return length;
12375 }
12376
12377 /* Return the assembly code for INSN, which has the operands given by
12378 OPERANDS, and which branches to OPERANDS[0] if some condition is true.
12379 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[0]
12380 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
12381 version of BRANCH_IF_TRUE. */
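
/* In outline (a sketch only; the delay-slot and annulled-branch
handling is covered by the code itself), the out-of-range case
below emits:

b<inverse-cond> NOT_TAKEN
<delay slot>
j TAKEN # if TARGET_ABSOLUTE_JUMPS
...or...
<load TAKEN into $at>
jr $at
<delay slot>
NOT_TAKEN: */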
12382
12383 const char *
12384 mips_output_conditional_branch (rtx insn, rtx *operands,
12385 const char *branch_if_true,
12386 const char *branch_if_false)
12387 {
12388 unsigned int length;
12389 rtx taken, not_taken;
12390
12391 gcc_assert (LABEL_P (operands[0]));
12392
12393 length = get_attr_length (insn);
12394 if (length <= 8)
12395 {
12396 /* Just a simple conditional branch. */
12397 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
12398 return branch_if_true;
12399 }
12400
12401 /* Generate a reversed branch around a direct jump. This fallback does
12402 not use branch-likely instructions. */
12403 mips_branch_likely = false;
12404 not_taken = gen_label_rtx ();
12405 taken = operands[0];
12406
12407 /* Generate the reversed branch to NOT_TAKEN. */
12408 operands[0] = not_taken;
12409 output_asm_insn (branch_if_false, operands);
12410
12411 /* If INSN has a delay slot, we must provide delay slots for both the
12412 branch to NOT_TAKEN and the conditional jump. We must also ensure
12413 that INSN's delay slot is executed in the appropriate cases. */
12414 if (final_sequence)
12415 {
12416 /* This first delay slot will always be executed, so use INSN's
12417 delay slot if it is not annulled. */
12418 if (!INSN_ANNULLED_BRANCH_P (insn))
12419 {
12420 final_scan_insn (XVECEXP (final_sequence, 0, 1),
12421 asm_out_file, optimize, 1, NULL);
12422 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
12423 }
12424 else
12425 output_asm_insn ("nop", 0);
12426 fprintf (asm_out_file, "\n");
12427 }
12428
12429 /* Output the unconditional branch to TAKEN. */
12430 if (TARGET_ABSOLUTE_JUMPS)
12431 output_asm_insn (MIPS_ABSOLUTE_JUMP ("j\t%0%/"), &taken);
12432 else
12433 {
12434 mips_output_load_label (taken);
12435 output_asm_insn ("jr\t%@%]%/", 0);
12436 }
12437
12438 /* Now deal with its delay slot; see above. */
12439 if (final_sequence)
12440 {
12441 /* This delay slot will only be executed if the branch is taken.
12442 Use INSN's delay slot if it is annulled. */
12443 if (INSN_ANNULLED_BRANCH_P (insn))
12444 {
12445 final_scan_insn (XVECEXP (final_sequence, 0, 1),
12446 asm_out_file, optimize, 1, NULL);
12447 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
12448 }
12449 else
12450 output_asm_insn ("nop", 0);
12451 fprintf (asm_out_file, "\n");
12452 }
12453
12454 /* Output NOT_TAKEN. */
12455 targetm.asm_out.internal_label (asm_out_file, "L",
12456 CODE_LABEL_NUMBER (not_taken));
12457 return "";
12458 }
12459
12460 /* Return the assembly code for INSN, which branches to OPERANDS[0]
12461 if some ordering condition is true. The condition is given by
12462 OPERANDS[1] if !INVERTED_P, otherwise it is the inverse of
12463 OPERANDS[1]. OPERANDS[2] is the comparison's first operand;
12464 its second is always zero. */
12465
12466 const char *
12467 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
12468 {
12469 const char *branch[2];
12470
12471 /* Make BRANCH[1] branch to OPERANDS[0] when the condition is true.
12472 Make BRANCH[0] branch on the inverse condition. */
12473 switch (GET_CODE (operands[1]))
12474 {
12475 /* These cases are equivalent to comparisons against zero. */
12476 case LEU:
12477 inverted_p = !inverted_p;
12478 /* Fall through. */
12479 case GTU:
12480 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%0");
12481 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%0");
12482 break;
12483
12484 /* These cases are always true or always false. */
12485 case LTU:
12486 inverted_p = !inverted_p;
12487 /* Fall through. */
12488 case GEU:
12489 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%0");
12490 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%0");
12491 break;
12492
12493 default:
12494 branch[!inverted_p] = MIPS_BRANCH ("b%C1z", "%2,%0");
12495 branch[inverted_p] = MIPS_BRANCH ("b%N1z", "%2,%0");
12496 break;
12497 }
12498 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
12499 }
12500 \f
12501 /* Start a block of code that needs access to the LL, SC and SYNC
12502 instructions. */
12503
12504 static void
12505 mips_start_ll_sc_sync_block (void)
12506 {
12507 if (!ISA_HAS_LL_SC)
12508 {
12509 output_asm_insn (".set\tpush", 0);
12510 if (TARGET_64BIT)
12511 output_asm_insn (".set\tmips3", 0);
12512 else
12513 output_asm_insn (".set\tmips2", 0);
12514 }
12515 }
12516
12517 /* End a block started by mips_start_ll_sc_sync_block. */
12518
12519 static void
12520 mips_end_ll_sc_sync_block (void)
12521 {
12522 if (!ISA_HAS_LL_SC)
12523 output_asm_insn (".set\tpop", 0);
12524 }
12525
12526 /* Output and/or return the asm template for a sync instruction. */
12527
12528 const char *
12529 mips_output_sync (void)
12530 {
12531 mips_start_ll_sc_sync_block ();
12532 output_asm_insn ("sync", 0);
12533 mips_end_ll_sc_sync_block ();
12534 return "";
12535 }
12536
12537 /* Return the asm template associated with sync_insn1 value TYPE.
12538 IS_64BIT_P is true if we want a 64-bit rather than 32-bit operation. */
12539
12540 static const char *
12541 mips_sync_insn1_template (enum attr_sync_insn1 type, bool is_64bit_p)
12542 {
12543 switch (type)
12544 {
12545 case SYNC_INSN1_MOVE:
12546 return "move\t%0,%z2";
12547 case SYNC_INSN1_LI:
12548 return "li\t%0,%2";
12549 case SYNC_INSN1_ADDU:
12550 return is_64bit_p ? "daddu\t%0,%1,%z2" : "addu\t%0,%1,%z2";
12551 case SYNC_INSN1_ADDIU:
12552 return is_64bit_p ? "daddiu\t%0,%1,%2" : "addiu\t%0,%1,%2";
12553 case SYNC_INSN1_SUBU:
12554 return is_64bit_p ? "dsubu\t%0,%1,%z2" : "subu\t%0,%1,%z2";
12555 case SYNC_INSN1_AND:
12556 return "and\t%0,%1,%z2";
12557 case SYNC_INSN1_ANDI:
12558 return "andi\t%0,%1,%2";
12559 case SYNC_INSN1_OR:
12560 return "or\t%0,%1,%z2";
12561 case SYNC_INSN1_ORI:
12562 return "ori\t%0,%1,%2";
12563 case SYNC_INSN1_XOR:
12564 return "xor\t%0,%1,%z2";
12565 case SYNC_INSN1_XORI:
12566 return "xori\t%0,%1,%2";
12567 }
12568 gcc_unreachable ();
12569 }
12570
12571 /* Return the asm template associated with sync_insn2 value TYPE. */
12572
12573 static const char *
12574 mips_sync_insn2_template (enum attr_sync_insn2 type)
12575 {
12576 switch (type)
12577 {
12578 case SYNC_INSN2_NOP:
12579 gcc_unreachable ();
12580 case SYNC_INSN2_AND:
12581 return "and\t%0,%1,%z2";
12582 case SYNC_INSN2_XOR:
12583 return "xor\t%0,%1,%z2";
12584 case SYNC_INSN2_NOT:
12585 return "nor\t%0,%1,%.";
12586 }
12587 gcc_unreachable ();
12588 }
12589
12590 /* OPERANDS are the operands to a sync loop instruction and INDEX is
12591 the value of one of the sync_* attributes. Return the operand
12592 referred to by the attribute, or DEFAULT_VALUE if the insn doesn't
12593 have the associated attribute. */
12594
12595 static rtx
12596 mips_get_sync_operand (rtx *operands, int index, rtx default_value)
12597 {
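/* A zero INDEX means the insn does not have the corresponding sync_*
attribute; positive values are 1-based operand numbers. */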
12598 if (index > 0)
12599 default_value = operands[index - 1];
12600 return default_value;
12601 }
12602
12603 /* INSN is a sync loop with operands OPERANDS. Build up a multi-insn
12604 sequence for it. */
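
/* In outline, the sequence built below looks like this; which parts
appear depends on the sync_* attributes read from INSN (a sketch
only, with the masking steps omitted):

sync # release barrier, if needed
1: ll OLDVAL,MEM
bne OLDVAL,REQUIRED_OLDVAL,2f # compare-and-swap only
<compute the new value into $AT>
sc $AT,MEM
beq $AT,$0,1b
<delay slot>
sync # acquire barrier, if needed
2: */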
12605
12606 static void
12607 mips_process_sync_loop (rtx insn, rtx *operands)
12608 {
12609 rtx at, mem, oldval, newval, inclusive_mask, exclusive_mask;
12610 rtx required_oldval, insn1_op2, tmp1, tmp2, tmp3, cmp;
12611 unsigned int tmp3_insn;
12612 enum attr_sync_insn1 insn1;
12613 enum attr_sync_insn2 insn2;
12614 bool is_64bit_p;
12615 int memmodel_attr;
12616 enum memmodel model;
12617
12618 /* Read an operand from the sync_WHAT attribute and store it in
12619 variable WHAT. DEFAULT is the default value if no attribute
12620 is specified. */
12621 #define READ_OPERAND(WHAT, DEFAULT) \
12622 WHAT = mips_get_sync_operand (operands, (int) get_attr_sync_##WHAT (insn), \
12623 DEFAULT)
12624
12625 /* Read the memory. */
12626 READ_OPERAND (mem, 0);
12627 gcc_assert (mem);
12628 is_64bit_p = (GET_MODE_BITSIZE (GET_MODE (mem)) == 64);
12629
12630 /* Read the other attributes. */
12631 at = gen_rtx_REG (GET_MODE (mem), AT_REGNUM);
12632 READ_OPERAND (oldval, at);
12633 READ_OPERAND (cmp, 0);
12634 READ_OPERAND (newval, at);
12635 READ_OPERAND (inclusive_mask, 0);
12636 READ_OPERAND (exclusive_mask, 0);
12637 READ_OPERAND (required_oldval, 0);
12638 READ_OPERAND (insn1_op2, 0);
12639 insn1 = get_attr_sync_insn1 (insn);
12640 insn2 = get_attr_sync_insn2 (insn);
12641
12642 /* Don't bother setting a CMP result that is never used. */
12643 if (cmp && find_reg_note (insn, REG_UNUSED, cmp))
12644 cmp = 0;
12645
12646 memmodel_attr = get_attr_sync_memmodel (insn);
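/* Sync_memmodel values 10 and 11 denote fixed memory models (see the
cases below); any other value is the number of the operand that
holds the memory model. */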
12647 switch (memmodel_attr)
12648 {
12649 case 10:
12650 model = MEMMODEL_ACQ_REL;
12651 break;
12652 case 11:
12653 model = MEMMODEL_ACQUIRE;
12654 break;
12655 default:
12656 model = (enum memmodel) INTVAL (operands[memmodel_attr]);
12657 }
12658
12659 mips_multi_start ();
12660
12661 /* Output the release side of the memory barrier. */
12662 if (need_atomic_barrier_p (model, true))
12663 {
12664 if (required_oldval == 0 && TARGET_OCTEON)
12665 {
12666 /* Octeon doesn't reorder reads, so a full barrier can be
12667 created by using SYNCW to order writes combined with the
12668 write from the following SC. When the SC successfully
12669 completes, we know that all preceding writes are also
12670 committed to the coherent memory system. It is possible
12671 for a single SYNCW to fail, but a pair of them will never
12672 fail, so we use two. */
12673 mips_multi_add_insn ("syncw", NULL);
12674 mips_multi_add_insn ("syncw", NULL);
12675 }
12676 else
12677 mips_multi_add_insn ("sync", NULL);
12678 }
12679
12680 /* Output the branch-back label. */
12681 mips_multi_add_label ("1:");
12682
12683 /* OLDVAL = *MEM. */
12684 mips_multi_add_insn (is_64bit_p ? "lld\t%0,%1" : "ll\t%0,%1",
12685 oldval, mem, NULL);
12686
12687 /* if ((OLDVAL & INCLUSIVE_MASK) != REQUIRED_OLDVAL) goto 2. */
12688 if (required_oldval)
12689 {
12690 if (inclusive_mask == 0)
12691 tmp1 = oldval;
12692 else
12693 {
12694 gcc_assert (oldval != at);
12695 mips_multi_add_insn ("and\t%0,%1,%2",
12696 at, oldval, inclusive_mask, NULL);
12697 tmp1 = at;
12698 }
12699 mips_multi_add_insn ("bne\t%0,%z1,2f", tmp1, required_oldval, NULL);
12700
12701 /* CMP = 0 [delay slot]. */
12702 if (cmp)
12703 mips_multi_add_insn ("li\t%0,0", cmp, NULL);
12704 }
12705
12706 /* $TMP1 = OLDVAL & EXCLUSIVE_MASK. */
12707 if (exclusive_mask == 0)
12708 tmp1 = const0_rtx;
12709 else
12710 {
12711 gcc_assert (oldval != at);
12712 mips_multi_add_insn ("and\t%0,%1,%z2",
12713 at, oldval, exclusive_mask, NULL);
12714 tmp1 = at;
12715 }
12716
12717 /* $TMP2 = INSN1 (OLDVAL, INSN1_OP2).
12718
12719 We can omit the move when $TMP1 is nonzero or INSN2 is not a nop,
12720 since we'll still emit at least one instruction in that case. */
12721 if (insn1 == SYNC_INSN1_MOVE
12722 && (tmp1 != const0_rtx || insn2 != SYNC_INSN2_NOP))
12723 tmp2 = insn1_op2;
12724 else
12725 {
12726 mips_multi_add_insn (mips_sync_insn1_template (insn1, is_64bit_p),
12727 newval, oldval, insn1_op2, NULL);
12728 tmp2 = newval;
12729 }
12730
12731 /* $TMP3 = INSN2 ($TMP2, INCLUSIVE_MASK). */
12732 if (insn2 == SYNC_INSN2_NOP)
12733 tmp3 = tmp2;
12734 else
12735 {
12736 mips_multi_add_insn (mips_sync_insn2_template (insn2),
12737 newval, tmp2, inclusive_mask, NULL);
12738 tmp3 = newval;
12739 }
12740 tmp3_insn = mips_multi_last_index ();
12741
12742 /* $AT = $TMP1 | $TMP3. */
12743 if (tmp1 == const0_rtx || tmp3 == const0_rtx)
12744 {
12745 mips_multi_set_operand (tmp3_insn, 0, at);
12746 tmp3 = at;
12747 }
12748 else
12749 {
12750 gcc_assert (tmp1 != tmp3);
12751 mips_multi_add_insn ("or\t%0,%1,%2", at, tmp1, tmp3, NULL);
12752 }
12753
12754 /* if (!commit (*MEM = $AT)) goto 1.
12755
12756 This will sometimes be a delayed branch; see the write code below
12757 for details. */
12758 mips_multi_add_insn (is_64bit_p ? "scd\t%0,%1" : "sc\t%0,%1", at, mem, NULL);
12759 mips_multi_add_insn ("beq%?\t%0,%.,1b", at, NULL);
12760
12761 /* if (INSN1 != MOVE && INSN1 != LI) NEWVAL = $TMP3 [delay slot]. */
12762 if (insn1 != SYNC_INSN1_MOVE && insn1 != SYNC_INSN1_LI && tmp3 != newval)
12763 {
12764 mips_multi_copy_insn (tmp3_insn);
12765 mips_multi_set_operand (mips_multi_last_index (), 0, newval);
12766 }
12767 else if (!(required_oldval && cmp))
12768 mips_multi_add_insn ("nop", NULL);
12769
12770 /* CMP = 1 -- either standalone or in a delay slot. */
12771 if (required_oldval && cmp)
12772 mips_multi_add_insn ("li\t%0,1", cmp, NULL);
12773
12774 /* Output the acquire side of the memory barrier. */
12775 if (TARGET_SYNC_AFTER_SC && need_atomic_barrier_p (model, false))
12776 mips_multi_add_insn ("sync", NULL);
12777
12778 /* Output the exit label, if needed. */
12779 if (required_oldval)
12780 mips_multi_add_label ("2:");
12781
12782 #undef READ_OPERAND
12783 }
12784
12785 /* Output and/or return the asm template for sync loop INSN, which has
12786 the operands given by OPERANDS. */
12787
12788 const char *
12789 mips_output_sync_loop (rtx insn, rtx *operands)
12790 {
12791 mips_process_sync_loop (insn, operands);
12792
12793 /* Use branch-likely instructions to work around the LL/SC R10000
12794 errata. */
12795 mips_branch_likely = TARGET_FIX_R10000;
12796
12797 mips_push_asm_switch (&mips_noreorder);
12798 mips_push_asm_switch (&mips_nomacro);
12799 mips_push_asm_switch (&mips_noat);
12800 mips_start_ll_sc_sync_block ();
12801
12802 mips_multi_write ();
12803
12804 mips_end_ll_sc_sync_block ();
12805 mips_pop_asm_switch (&mips_noat);
12806 mips_pop_asm_switch (&mips_nomacro);
12807 mips_pop_asm_switch (&mips_noreorder);
12808
12809 return "";
12810 }
12811
12812 /* Return the number of individual instructions in sync loop INSN,
12813 which has the operands given by OPERANDS. */
12814
12815 unsigned int
12816 mips_sync_loop_insns (rtx insn, rtx *operands)
12817 {
12818 mips_process_sync_loop (insn, operands);
12819 return mips_multi_num_insns;
12820 }
12821 \f
12822 /* Return the assembly code for DIV or DDIV instruction DIVISION, which has
12823 the operands given by OPERANDS. Add in a divide-by-zero check if needed.
12824
12825 When working around R4000 and R4400 errata, we need to make sure that
12826 the division is not immediately followed by a shift[1][2]. We also
12827 need to stop the division from being put into a branch delay slot[3].
12828 The easiest way to avoid both problems is to add a nop after the
12829 division. When a divide-by-zero check is needed, this nop can be
12830 used to fill the branch delay slot.
12831
12832 [1] If a double-word or a variable shift executes immediately
12833 after starting an integer division, the shift may give an
12834 incorrect result. See quotations of errata #16 and #28 from
12835 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
12836 in mips.md for details.
12837
12838 [2] A similar bug to [1] exists for all revisions of the
12839 R4000 and the R4400 when run in an MC configuration.
12840 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
12841
12842 "19. In this following sequence:
12843
12844 ddiv (or ddivu or div or divu)
12845 dsll32 (or dsrl32, dsra32)
12846
12847 if an MPT stall occurs, while the divide is slipping the cpu
12848 pipeline, then the following double shift would end up with an
12849 incorrect result.
12850
12851 Workaround: The compiler needs to avoid generating any
12852 sequence with divide followed by extended double shift."
12853
12854 This erratum is also present in "MIPS R4400MC Errata, Processor
12855 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
12856 & 3.0" as errata #10 and #4, respectively.
12857
12858 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
12859 (also valid for MIPS R4000MC processors):
12860
12861 "52. R4000SC: This bug does not apply for the R4000PC.
12862
12863 There are two flavors of this bug:
12864
12865 1) If the instruction just after divide takes an RF exception
12866 (tlb-refill, tlb-invalid) and gets an instruction cache
12867 miss (both primary and secondary) and the line which is
12868 currently in secondary cache at this index had the first
12869 data word, where the bits 5..2 are set, then R4000 would
12870 get a wrong result for the div.
12871
12872 ##1
12873 nop
12874 div r8, r9
12875 ------------------- # end-of page. -tlb-refill
12876 nop
12877 ##2
12878 nop
12879 div r8, r9
12880 ------------------- # end-of page. -tlb-invalid
12881 nop
12882
12883 2) If the divide is in the taken branch delay slot, where the
12884 target takes RF exception and gets an I-cache miss for the
12885 exception vector or where I-cache miss occurs for the
12886 target address, under the above mentioned scenarios, the
12887 div would get wrong results.
12888
12889 ##1
12890 j r2 # to next page mapped or unmapped
12891 div r8,r9 # this bug would be there as long
12892 # as there is an ICache miss and
12893 nop # the "data pattern" is present
12894
12895 ##2
12896 beq r0, r0, NextPage # to Next page
12897 div r8,r9
12898 nop
12899
12900 This bug is present for div, divu, ddiv, and ddivu
12901 instructions.
12902
12903 Workaround: For item 1), OS could make sure that the next page
12904 after the divide instruction is also mapped. For item 2), the
12905 compiler could make sure that the divide instruction is not in
12906 the branch delay slot."
12907
12908 These processors have PRId values of 0x00004220 and 0x00004300 for
12909 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
12910
12911 const char *
12912 mips_output_division (const char *division, rtx *operands)
12913 {
12914 const char *s;
12915
12916 s = division;
12917 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
12918 {
12919 output_asm_insn (s, operands);
12920 s = "nop";
12921 }
12922 if (TARGET_CHECK_ZERO_DIV)
12923 {
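/* Code 7 in the BREAK and TEQ instructions below is the conventional
MIPS divide-by-zero code (BRK_DIVZERO on Linux), which run-time
systems recognize and report as an arithmetic exception. */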
12924 if (TARGET_MIPS16)
12925 {
12926 output_asm_insn (s, operands);
12927 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
12928 }
12929 else if (GENERATE_DIVIDE_TRAPS)
12930 {
12931 /* Avoid long replay penalty on load miss by putting the trap before
12932 the divide. */
12933 if (TUNE_74K)
12934 output_asm_insn ("teq\t%2,%.,7", operands);
12935 else
12936 {
12937 output_asm_insn (s, operands);
12938 s = "teq\t%2,%.,7";
12939 }
12940 }
12941 else
12942 {
12943 output_asm_insn ("%(bne\t%2,%.,1f", operands);
12944 output_asm_insn (s, operands);
12945 s = "break\t7%)\n1:";
12946 }
12947 }
12948 return s;
12949 }
12950 \f
12951 /* Return true if IN_INSN is a multiply-add or multiply-subtract
12952 instruction and if OUT_INSN assigns to the accumulator operand. */
12953
12954 bool
12955 mips_linked_madd_p (rtx out_insn, rtx in_insn)
12956 {
12957 enum attr_accum_in accum_in;
12958 int accum_in_opnum;
12959 rtx accum_in_op;
12960
12961 if (recog_memoized (in_insn) < 0)
12962 return false;
12963
12964 accum_in = get_attr_accum_in (in_insn);
12965 if (accum_in == ACCUM_IN_NONE)
12966 return false;
12967
12968 accum_in_opnum = accum_in - ACCUM_IN_0;
12969
12970 extract_insn (in_insn);
12971 gcc_assert (accum_in_opnum < recog_data.n_operands);
12972 accum_in_op = recog_data.operand[accum_in_opnum];
12973
12974 return reg_set_p (accum_in_op, out_insn);
12975 }
12976
12977 /* True if the dependency between OUT_INSN and IN_INSN is on the store
12978 data rather than the address. We need this because the cprestore
12979 pattern is type "store", but is defined using an UNSPEC_VOLATILE,
12980 which causes the default routine to abort. We just return false
12981 for that case. */
12982
12983 bool
12984 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
12985 {
12986 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
12987 return false;
12988
12989 return !store_data_bypass_p (out_insn, in_insn);
12990 }
12991 \f
12992
12993 /* Variables and flags used in scheduler hooks when tuning for
12994 Loongson 2E/2F. */
12995 static struct
12996 {
12997 /* Variables to support Loongson 2E/2F round-robin [F]ALU1/2 dispatch
12998 strategy. */
12999
13000 /* If true, then the next ALU1/2 instruction will go to ALU1. */
13001 bool alu1_turn_p;
13002
13003 /* If true, then the next FALU1/2 instruction will go to FALU1. */
13004 bool falu1_turn_p;
13005
13006 /* Codes to query whether the [f]alu{1,2}_core units are subscribed. */
13007 int alu1_core_unit_code;
13008 int alu2_core_unit_code;
13009 int falu1_core_unit_code;
13010 int falu2_core_unit_code;
13011
13012 /* True if the current cycle has a multi instruction.
13013 This flag is used in mips_ls2_dfa_post_advance_cycle. */
13014 bool cycle_has_multi_p;
13015
13016 /* Instructions to subscribe ls2_[f]alu{1,2}_turn_enabled units.
13017 These are used in mips_ls2_dfa_post_advance_cycle to initialize
13018 DFA state.
13019 E.g., when alu1_turn_enabled_insn is issued, it makes the next
13020 ALU1/2 instruction go to ALU1. */
13021 rtx alu1_turn_enabled_insn;
13022 rtx alu2_turn_enabled_insn;
13023 rtx falu1_turn_enabled_insn;
13024 rtx falu2_turn_enabled_insn;
13025 } mips_ls2;
13026
13027 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
13028 dependencies have no cost, except on the 20Kc, where an output
13029 dependence is treated like an input dependence. */
13030
13031 static int
13032 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
13033 rtx dep ATTRIBUTE_UNUSED, int cost)
13034 {
13035 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
13036 && TUNE_20KC)
13037 return cost;
13038 if (REG_NOTE_KIND (link) != 0)
13039 return 0;
13040 return cost;
13041 }
13042
13043 /* Return the number of instructions that can be issued per cycle. */
13044
13045 static int
13046 mips_issue_rate (void)
13047 {
13048 switch (mips_tune)
13049 {
13050 case PROCESSOR_74KC:
13051 case PROCESSOR_74KF2_1:
13052 case PROCESSOR_74KF1_1:
13053 case PROCESSOR_74KF3_2:
13054 /* The 74k is not strictly a quad-issue CPU, but can be seen as one
13055 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
13056 but in reality only a maximum of 3 insns can be issued, as
13057 floating-point loads and stores also require a slot in the
13058 AGEN pipe. */
13059 case PROCESSOR_R10000:
13060 /* All R10K processors are quad-issue (being the first MIPS
13061 processors to support this feature). */
13062 return 4;
13063
13064 case PROCESSOR_20KC:
13065 case PROCESSOR_R4130:
13066 case PROCESSOR_R5400:
13067 case PROCESSOR_R5500:
13068 case PROCESSOR_R5900:
13069 case PROCESSOR_R7000:
13070 case PROCESSOR_R9000:
13071 case PROCESSOR_OCTEON:
13072 case PROCESSOR_OCTEON2:
13073 return 2;
13074
13075 case PROCESSOR_SB1:
13076 case PROCESSOR_SB1A:
13077 /* This is actually 4, but we get better performance if we claim 3.
13078 This is partly because of unwanted speculative code motion with the
13079 larger number, and partly because in most common cases we can't
13080 reach the theoretical max of 4. */
13081 return 3;
13082
13083 case PROCESSOR_LOONGSON_2E:
13084 case PROCESSOR_LOONGSON_2F:
13085 case PROCESSOR_LOONGSON_3A:
13086 return 4;
13087
13088 case PROCESSOR_XLP:
13089 return (reload_completed ? 4 : 3);
13090
13091 default:
13092 return 1;
13093 }
13094 }
13095
13096 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook for Loongson2. */
13097
13098 static void
13099 mips_ls2_init_dfa_post_cycle_insn (void)
13100 {
13101 start_sequence ();
13102 emit_insn (gen_ls2_alu1_turn_enabled_insn ());
13103 mips_ls2.alu1_turn_enabled_insn = get_insns ();
13104 end_sequence ();
13105
13106 start_sequence ();
13107 emit_insn (gen_ls2_alu2_turn_enabled_insn ());
13108 mips_ls2.alu2_turn_enabled_insn = get_insns ();
13109 end_sequence ();
13110
13111 start_sequence ();
13112 emit_insn (gen_ls2_falu1_turn_enabled_insn ());
13113 mips_ls2.falu1_turn_enabled_insn = get_insns ();
13114 end_sequence ();
13115
13116 start_sequence ();
13117 emit_insn (gen_ls2_falu2_turn_enabled_insn ());
13118 mips_ls2.falu2_turn_enabled_insn = get_insns ();
13119 end_sequence ();
13120
13121 mips_ls2.alu1_core_unit_code = get_cpu_unit_code ("ls2_alu1_core");
13122 mips_ls2.alu2_core_unit_code = get_cpu_unit_code ("ls2_alu2_core");
13123 mips_ls2.falu1_core_unit_code = get_cpu_unit_code ("ls2_falu1_core");
13124 mips_ls2.falu2_core_unit_code = get_cpu_unit_code ("ls2_falu2_core");
13125 }
13126
13127 /* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook.
13128 Init data used in mips_dfa_post_advance_cycle. */
13129
13130 static void
13131 mips_init_dfa_post_cycle_insn (void)
13132 {
13133 if (TUNE_LOONGSON_2EF)
13134 mips_ls2_init_dfa_post_cycle_insn ();
13135 }
13136
13137 /* Initialize STATE when scheduling for Loongson 2E/2F.
13138 Support round-robin dispatch scheme by enabling only one of
13139 ALU1/ALU2 and one of FALU1/FALU2 units for ALU1/2 and FALU1/2 instructions
13140 respectively. */
13141
13142 static void
13143 mips_ls2_dfa_post_advance_cycle (state_t state)
13144 {
13145 if (cpu_unit_reservation_p (state, mips_ls2.alu1_core_unit_code))
13146 {
13147 /* Though there are no non-pipelined ALU1 insns,
13148 we can get an instruction of type 'multi' before reload. */
13149 gcc_assert (mips_ls2.cycle_has_multi_p);
13150 mips_ls2.alu1_turn_p = false;
13151 }
13152
13153 mips_ls2.cycle_has_multi_p = false;
13154
13155 if (cpu_unit_reservation_p (state, mips_ls2.alu2_core_unit_code))
13156 /* We have a non-pipelined ALU instruction in the core, so
13157 adjust the round-robin counter. */
13158 mips_ls2.alu1_turn_p = true;
13159
13160 if (mips_ls2.alu1_turn_p)
13161 {
13162 if (state_transition (state, mips_ls2.alu1_turn_enabled_insn) >= 0)
13163 gcc_unreachable ();
13164 }
13165 else
13166 {
13167 if (state_transition (state, mips_ls2.alu2_turn_enabled_insn) >= 0)
13168 gcc_unreachable ();
13169 }
13170
13171 if (cpu_unit_reservation_p (state, mips_ls2.falu1_core_unit_code))
13172 {
13173 /* There are no non-pipelined FALU1 insns. */
13174 gcc_unreachable ();
13175 mips_ls2.falu1_turn_p = false;
13176 }
13177
13178 if (cpu_unit_reservation_p (state, mips_ls2.falu2_core_unit_code))
13179 /* We have a non-pipelined FALU instruction in the core, so
13180 adjust the round-robin counter. */
13181 mips_ls2.falu1_turn_p = true;
13182
13183 if (mips_ls2.falu1_turn_p)
13184 {
13185 if (state_transition (state, mips_ls2.falu1_turn_enabled_insn) >= 0)
13186 gcc_unreachable ();
13187 }
13188 else
13189 {
13190 if (state_transition (state, mips_ls2.falu2_turn_enabled_insn) >= 0)
13191 gcc_unreachable ();
13192 }
13193 }
13194
13195 /* Implement TARGET_SCHED_DFA_POST_ADVANCE_CYCLE.
13196 This hook is being called at the start of each cycle. */
13197
13198 static void
13199 mips_dfa_post_advance_cycle (void)
13200 {
13201 if (TUNE_LOONGSON_2EF)
13202 mips_ls2_dfa_post_advance_cycle (curr_state);
13203 }
13204
13205 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
13206 be as wide as the scheduling freedom in the DFA. */
13207
13208 static int
13209 mips_multipass_dfa_lookahead (void)
13210 {
13211 /* Can schedule up to 4 of the 6 function units in any one cycle. */
13212 if (TUNE_SB1)
13213 return 4;
13214
13215 if (TUNE_LOONGSON_2EF || TUNE_LOONGSON_3A)
13216 return 4;
13217
13218 if (TUNE_OCTEON)
13219 return 2;
13220
13221 return 0;
13222 }
13223 \f
13224 /* Remove the instruction at index LOWER from ready queue READY and
13225 reinsert it in front of the instruction at index HIGHER. LOWER must
13226 be <= HIGHER. */
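
/* (GCC's scheduler keeps the ready queue ordered with the next
instruction to issue at index NREADY - 1, so "in front of" here
means closer to the end of the array; compare vr4130_reorder
below.) */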
13227
13228 static void
13229 mips_promote_ready (rtx *ready, int lower, int higher)
13230 {
13231 rtx new_head;
13232 int i;
13233
13234 new_head = ready[lower];
13235 for (i = lower; i < higher; i++)
13236 ready[i] = ready[i + 1];
13237 ready[i] = new_head;
13238 }
13239
13240 /* If the priority of the instruction at POS2 in the ready queue READY
13241 is within LIMIT units of that of the instruction at POS1, swap the
13242 instructions if POS2 is not already less than POS1. */
13243
13244 static void
13245 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
13246 {
13247 if (pos1 < pos2
13248 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
13249 {
13250 rtx temp;
13251
13252 temp = ready[pos1];
13253 ready[pos1] = ready[pos2];
13254 ready[pos2] = temp;
13255 }
13256 }
13257 \f
13258 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
13259 that may clobber hi or lo. */
13260 static rtx mips_macc_chains_last_hilo;
13261
13262 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
13263 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
13264
13265 static void
13266 mips_macc_chains_record (rtx insn)
13267 {
13268 if (get_attr_may_clobber_hilo (insn))
13269 mips_macc_chains_last_hilo = insn;
13270 }
13271
13272 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
13273 has NREADY elements, looking for a multiply-add or multiply-subtract
13274 instruction that is cumulative with mips_macc_chains_last_hilo.
13275 If there is one, promote it ahead of anything else that might
13276 clobber hi or lo. */
13277
13278 static void
13279 mips_macc_chains_reorder (rtx *ready, int nready)
13280 {
13281 int i, j;
13282
13283 if (mips_macc_chains_last_hilo != 0)
13284 for (i = nready - 1; i >= 0; i--)
13285 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
13286 {
13287 for (j = nready - 1; j > i; j--)
13288 if (recog_memoized (ready[j]) >= 0
13289 && get_attr_may_clobber_hilo (ready[j]))
13290 {
13291 mips_promote_ready (ready, i, j);
13292 break;
13293 }
13294 break;
13295 }
13296 }
13297 \f
13298 /* The last instruction to be scheduled. */
13299 static rtx vr4130_last_insn;
13300
13301 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
13302 points to an rtx that is initially an instruction. Nullify the rtx
13303 if the instruction uses the value of register X. */
13304
13305 static void
13306 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
13307 void *data)
13308 {
13309 rtx *insn_ptr;
13310
13311 insn_ptr = (rtx *) data;
13312 if (REG_P (x)
13313 && *insn_ptr != 0
13314 && reg_referenced_p (x, PATTERN (*insn_ptr)))
13315 *insn_ptr = 0;
13316 }
13317
13318 /* Return true if there is true register dependence between vr4130_last_insn
13319 and INSN. */
13320
13321 static bool
13322 vr4130_true_reg_dependence_p (rtx insn)
13323 {
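/* note_stores invokes the callback for each register set by
vr4130_last_insn; the callback zeroes INSN if INSN reads that
register, so a null INSN afterwards indicates a true dependence. */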
13324 note_stores (PATTERN (vr4130_last_insn),
13325 vr4130_true_reg_dependence_p_1, &insn);
13326 return insn == 0;
13327 }
13328
13329 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
13330 the ready queue and that INSN2 is the instruction after it, return
13331 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
13332 in which INSN1 and INSN2 can probably issue in parallel, but for
13333 which (INSN2, INSN1) should be less sensitive to instruction
13334 alignment than (INSN1, INSN2). See 4130.md for more details. */
13335
13336 static bool
13337 vr4130_swap_insns_p (rtx insn1, rtx insn2)
13338 {
13339 sd_iterator_def sd_it;
13340 dep_t dep;
13341
13342 /* Check for the following case:
13343
13344 1) there is some other instruction X with an anti dependence on INSN1;
13345 2) X has a higher priority than INSN2; and
13346 3) X is an arithmetic instruction (and thus has no unit restrictions).
13347
13348 If INSN1 is the last instruction blocking X, it would be better to
13349 choose (INSN1, X) over (INSN2, INSN1). */
13350 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
13351 if (DEP_TYPE (dep) == REG_DEP_ANTI
13352 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
13353 && recog_memoized (DEP_CON (dep)) >= 0
13354 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
13355 return false;
13356
13357 if (vr4130_last_insn != 0
13358 && recog_memoized (insn1) >= 0
13359 && recog_memoized (insn2) >= 0)
13360 {
13361 /* See whether INSN1 and INSN2 use different execution units,
13362 or if they are both ALU-type instructions. If so, they can
13363 probably execute in parallel. */
13364 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
13365 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
13366 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
13367 {
13368 /* If only one of the instructions has a dependence on
13369 vr4130_last_insn, prefer to schedule the other one first. */
13370 bool dep1_p = vr4130_true_reg_dependence_p (insn1);
13371 bool dep2_p = vr4130_true_reg_dependence_p (insn2);
13372 if (dep1_p != dep2_p)
13373 return dep1_p;
13374
13375 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
13376 is not an ALU-type instruction and if INSN1 uses the same
13377 execution unit. (Note that if this condition holds, we already
13378 know that INSN2 uses a different execution unit.) */
13379 if (class1 != VR4130_CLASS_ALU
13380 && recog_memoized (vr4130_last_insn) >= 0
13381 && class1 == get_attr_vr4130_class (vr4130_last_insn))
13382 return true;
13383 }
13384 }
13385 return false;
13386 }
13387
13388 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
13389 queue with at least two instructions. Swap the first two if
13390 vr4130_swap_insns_p says that it could be worthwhile. */
13391
13392 static void
13393 vr4130_reorder (rtx *ready, int nready)
13394 {
13395 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
13396 mips_promote_ready (ready, nready - 2, nready - 1);
13397 }
13398 \f
13399 /* Record whether last 74k AGEN instruction was a load or store. */
13400 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
13401
13402 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
13403 resets it to the TYPE_UNKNOWN state. */
13404
13405 static void
13406 mips_74k_agen_init (rtx insn)
13407 {
13408 if (!insn || CALL_P (insn) || JUMP_P (insn))
13409 mips_last_74k_agen_insn = TYPE_UNKNOWN;
13410 else
13411 {
13412 enum attr_type type = get_attr_type (insn);
13413 if (type == TYPE_LOAD || type == TYPE_STORE)
13414 mips_last_74k_agen_insn = type;
13415 }
13416 }
13417
13418 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
13419 loads to be grouped together, and multiple stores to be grouped
13420 together. Swap things around in the ready queue to make this happen. */
13421
13422 static void
13423 mips_74k_agen_reorder (rtx *ready, int nready)
13424 {
13425 int i;
13426 int store_pos, load_pos;
13427
13428 store_pos = -1;
13429 load_pos = -1;
13430
13431 for (i = nready - 1; i >= 0; i--)
13432 {
13433 rtx insn = ready[i];
13434 if (USEFUL_INSN_P (insn))
13435 switch (get_attr_type (insn))
13436 {
13437 case TYPE_STORE:
13438 if (store_pos == -1)
13439 store_pos = i;
13440 break;
13441
13442 case TYPE_LOAD:
13443 if (load_pos == -1)
13444 load_pos = i;
13445 break;
13446
13447 default:
13448 break;
13449 }
13450 }
13451
13452 if (load_pos == -1 || store_pos == -1)
13453 return;
13454
13455 switch (mips_last_74k_agen_insn)
13456 {
13457 case TYPE_UNKNOWN:
13458 /* Prefer to schedule loads since they have a higher latency. */
13459 case TYPE_LOAD:
13460 /* Swap loads to the front of the queue. */
13461 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
13462 break;
13463 case TYPE_STORE:
13464 /* Swap stores to the front of the queue. */
13465 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
13466 break;
13467 default:
13468 break;
13469 }
13470 }
13471 \f
13472 /* Implement TARGET_SCHED_INIT. */
13473
13474 static void
13475 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
13476 int max_ready ATTRIBUTE_UNUSED)
13477 {
13478 mips_macc_chains_last_hilo = 0;
13479 vr4130_last_insn = 0;
13480 mips_74k_agen_init (NULL_RTX);
13481
13482 /* When scheduling for Loongson2, branch instructions go to ALU1,
13483 so a basic block is most likely to start with the round-robin
13484 counter pointing to ALU2. */
13485 mips_ls2.alu1_turn_p = false;
13486 mips_ls2.falu1_turn_p = true;
13487 }
13488
13489 /* Subroutine used by TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2. */
13490
13491 static void
13492 mips_sched_reorder_1 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
13493 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
13494 {
13495 if (!reload_completed
13496 && TUNE_MACC_CHAINS
13497 && *nreadyp > 0)
13498 mips_macc_chains_reorder (ready, *nreadyp);
13499
13500 if (reload_completed
13501 && TUNE_MIPS4130
13502 && !TARGET_VR4130_ALIGN
13503 && *nreadyp > 1)
13504 vr4130_reorder (ready, *nreadyp);
13505
13506 if (TUNE_74K)
13507 mips_74k_agen_reorder (ready, *nreadyp);
13508 }
13509
13510 /* Implement TARGET_SCHED_REORDER. */
13511
13512 static int
13513 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
13514 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
13515 {
13516 mips_sched_reorder_1 (file, verbose, ready, nreadyp, cycle);
13517 return mips_issue_rate ();
13518 }
13519
13520 /* Implement TARGET_SCHED_REORDER2. */
13521
13522 static int
13523 mips_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
13524 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
13525 {
13526 mips_sched_reorder_1 (file, verbose, ready, nreadyp, cycle);
13527 return cached_can_issue_more;
13528 }
13529
13530 /* Update round-robin counters for ALU1/2 and FALU1/2. */
13531
13532 static void
13533 mips_ls2_variable_issue (rtx insn)
13534 {
13535 if (mips_ls2.alu1_turn_p)
13536 {
13537 if (cpu_unit_reservation_p (curr_state, mips_ls2.alu1_core_unit_code))
13538 mips_ls2.alu1_turn_p = false;
13539 }
13540 else
13541 {
13542 if (cpu_unit_reservation_p (curr_state, mips_ls2.alu2_core_unit_code))
13543 mips_ls2.alu1_turn_p = true;
13544 }
13545
13546 if (mips_ls2.falu1_turn_p)
13547 {
13548 if (cpu_unit_reservation_p (curr_state, mips_ls2.falu1_core_unit_code))
13549 mips_ls2.falu1_turn_p = false;
13550 }
13551 else
13552 {
13553 if (cpu_unit_reservation_p (curr_state, mips_ls2.falu2_core_unit_code))
13554 mips_ls2.falu1_turn_p = true;
13555 }
13556
13557 if (recog_memoized (insn) >= 0)
13558 mips_ls2.cycle_has_multi_p |= (get_attr_type (insn) == TYPE_MULTI);
13559 }
13560
13561 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
13562
13563 static int
13564 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
13565 rtx insn, int more)
13566 {
13567 /* Ignore USEs and CLOBBERs; don't count them against the issue rate. */
13568 if (USEFUL_INSN_P (insn))
13569 {
13570 if (get_attr_type (insn) != TYPE_GHOST)
13571 more--;
13572 if (!reload_completed && TUNE_MACC_CHAINS)
13573 mips_macc_chains_record (insn);
13574 vr4130_last_insn = insn;
13575 if (TUNE_74K)
13576 mips_74k_agen_init (insn);
13577 else if (TUNE_LOONGSON_2EF)
13578 mips_ls2_variable_issue (insn);
13579 }
13580
13581 /* Instructions of type 'multi' should all be split before
13582 the second scheduling pass. */
13583 gcc_assert (!reload_completed
13584 || recog_memoized (insn) < 0
13585 || get_attr_type (insn) != TYPE_MULTI);
13586
13587 cached_can_issue_more = more;
13588 return more;
13589 }
13590 \f
13591 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
13592 return the first operand of the associated PREF or PREFX insn. */
13593
13594 rtx
13595 mips_prefetch_cookie (rtx write, rtx locality)
13596 {
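/* The cookie encodes the PREF hint field: WRITE is 0 for a load and
1 for a store, so WRITE + 4 gives hints 4/5 (load_streamed /
store_streamed) and WRITE + 6 gives hints 6/7 (load_retained /
store_retained). */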
13597 /* store_streamed / load_streamed. */
13598 if (INTVAL (locality) <= 0)
13599 return GEN_INT (INTVAL (write) + 4);
13600
13601 /* store / load. */
13602 if (INTVAL (locality) <= 2)
13603 return write;
13604
13605 /* store_retained / load_retained. */
13606 return GEN_INT (INTVAL (write) + 6);
13607 }
13608 \f
13609 /* Flags that indicate when a built-in function is available.
13610
13611 BUILTIN_AVAIL_NON_MIPS16
13612 The function is available on the current target, but only
13613 in non-MIPS16 mode. */
13614 #define BUILTIN_AVAIL_NON_MIPS16 1
13615
13616 /* Declare an availability predicate for built-in functions that
13617 require non-MIPS16 mode and also require COND to be true.
13618 NAME is the main part of the predicate's name. */
13619 #define AVAIL_NON_MIPS16(NAME, COND) \
13620 static unsigned int \
13621 mips_builtin_avail_##NAME (void) \
13622 { \
13623 return (COND) ? BUILTIN_AVAIL_NON_MIPS16 : 0; \
13624 }
13625
13626 /* This structure describes a single built-in function. */
13627 struct mips_builtin_description {
13628 /* The code of the main .md file instruction. See mips_builtin_type
13629 for more information. */
13630 enum insn_code icode;
13631
13632 /* The floating-point comparison code to use with ICODE, if any. */
13633 enum mips_fp_condition cond;
13634
13635 /* The name of the built-in function. */
13636 const char *name;
13637
13638 /* Specifies how the function should be expanded. */
13639 enum mips_builtin_type builtin_type;
13640
13641 /* The function's prototype. */
13642 enum mips_function_type function_type;
13643
13644 /* Whether the function is available. */
13645 unsigned int (*avail) (void);
13646 };
13647
13648 AVAIL_NON_MIPS16 (paired_single, TARGET_PAIRED_SINGLE_FLOAT)
13649 AVAIL_NON_MIPS16 (sb1_paired_single, TARGET_SB1 && TARGET_PAIRED_SINGLE_FLOAT)
13650 AVAIL_NON_MIPS16 (mips3d, TARGET_MIPS3D)
13651 AVAIL_NON_MIPS16 (dsp, TARGET_DSP)
13652 AVAIL_NON_MIPS16 (dspr2, TARGET_DSPR2)
13653 AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
13654 AVAIL_NON_MIPS16 (dsp_64, TARGET_64BIT && TARGET_DSP)
13655 AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
13656 AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_VECTORS)
13657 AVAIL_NON_MIPS16 (cache, TARGET_CACHE_BUILTIN)
13658
13659 /* Construct a mips_builtin_description from the given arguments.
13660
13661 INSN is the name of the associated instruction pattern, without the
13662 leading CODE_FOR_mips_.
13663
13664 CODE is the floating-point condition code associated with the
13665 function. It can be 'f' if the field is not applicable.
13666
13667 NAME is the name of the function itself, without the leading
13668 "__builtin_mips_".
13669
13670 BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields.
13671
13672 AVAIL is the name of the availability predicate, without the leading
13673 mips_builtin_avail_. */
13674 #define MIPS_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE, \
13675 FUNCTION_TYPE, AVAIL) \
13676 { CODE_FOR_mips_ ## INSN, MIPS_FP_COND_ ## COND, \
13677 "__builtin_mips_" NAME, BUILTIN_TYPE, FUNCTION_TYPE, \
13678 mips_builtin_avail_ ## AVAIL }
13679
13680 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function
13681 mapped to instruction CODE_FOR_mips_<INSN>. FUNCTION_TYPE and AVAIL
13682 are as for MIPS_BUILTIN. */
13683 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
13684 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
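
/* For example, DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, paired_single)
expands to:

{ CODE_FOR_mips_abs_ps, MIPS_FP_COND_f, "__builtin_mips_abs_ps",
MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF,
mips_builtin_avail_paired_single } */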
13685
13686 /* Define __builtin_mips_<INSN>_<COND>_{s,d} functions, both of which
13687 are subject to mips_builtin_avail_<AVAIL>. */
13688 #define CMP_SCALAR_BUILTINS(INSN, COND, AVAIL) \
13689 MIPS_BUILTIN (INSN ## _cond_s, COND, #INSN "_" #COND "_s", \
13690 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, AVAIL), \
13691 MIPS_BUILTIN (INSN ## _cond_d, COND, #INSN "_" #COND "_d", \
13692 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, AVAIL)
13693
13694 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
13695 The lower and upper forms are subject to mips_builtin_avail_<AVAIL>
13696 while the any and all forms are subject to mips_builtin_avail_mips3d. */
13697 #define CMP_PS_BUILTINS(INSN, COND, AVAIL) \
13698 MIPS_BUILTIN (INSN ## _cond_ps, COND, "any_" #INSN "_" #COND "_ps", \
13699 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, \
13700 mips3d), \
13701 MIPS_BUILTIN (INSN ## _cond_ps, COND, "all_" #INSN "_" #COND "_ps", \
13702 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, \
13703 mips3d), \
13704 MIPS_BUILTIN (INSN ## _cond_ps, COND, "lower_" #INSN "_" #COND "_ps", \
13705 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, \
13706 AVAIL), \
13707 MIPS_BUILTIN (INSN ## _cond_ps, COND, "upper_" #INSN "_" #COND "_ps", \
13708 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, \
13709 AVAIL)
13710
13711 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
13712 are subject to mips_builtin_avail_mips3d. */
13713 #define CMP_4S_BUILTINS(INSN, COND) \
13714 MIPS_BUILTIN (INSN ## _cond_4s, COND, "any_" #INSN "_" #COND "_4s", \
13715 MIPS_BUILTIN_CMP_ANY, \
13716 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d), \
13717 MIPS_BUILTIN (INSN ## _cond_4s, COND, "all_" #INSN "_" #COND "_4s", \
13718 MIPS_BUILTIN_CMP_ALL, \
13719 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d)
13720
13721 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
13722 instruction requires mips_builtin_avail_<AVAIL>. */
13723 #define MOVTF_BUILTINS(INSN, COND, AVAIL) \
13724 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movt_" #INSN "_" #COND "_ps", \
13725 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
13726 AVAIL), \
13727 MIPS_BUILTIN (INSN ## _cond_ps, COND, "movf_" #INSN "_" #COND "_ps", \
13728 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
13729 AVAIL)
13730
13731 /* Define all the built-in functions related to C.cond.fmt condition COND. */
13732 #define CMP_BUILTINS(COND) \
13733 MOVTF_BUILTINS (c, COND, paired_single), \
13734 MOVTF_BUILTINS (cabs, COND, mips3d), \
13735 CMP_SCALAR_BUILTINS (cabs, COND, mips3d), \
13736 CMP_PS_BUILTINS (c, COND, paired_single), \
13737 CMP_PS_BUILTINS (cabs, COND, mips3d), \
13738 CMP_4S_BUILTINS (c, COND), \
13739 CMP_4S_BUILTINS (cabs, COND)
13740
13741 /* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET
13742 function mapped to instruction CODE_FOR_mips_<INSN>. FUNCTION_TYPE
13743 and AVAIL are as for MIPS_BUILTIN. */
13744 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
13745 MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET, \
13746 FUNCTION_TYPE, AVAIL)
13747
13748 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
13749 branch instruction. AVAIL is as for MIPS_BUILTIN. */
13750 #define BPOSGE_BUILTIN(VALUE, AVAIL) \
13751 MIPS_BUILTIN (bposge, f, "bposge" #VALUE, \
13752 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, AVAIL)
13753
13754 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<FN_NAME>
13755 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
13756 builtin_description field. */
13757 #define LOONGSON_BUILTIN_ALIAS(INSN, FN_NAME, FUNCTION_TYPE) \
13758 { CODE_FOR_loongson_ ## INSN, MIPS_FP_COND_f, \
13759 "__builtin_loongson_" #FN_NAME, MIPS_BUILTIN_DIRECT, \
13760 FUNCTION_TYPE, mips_builtin_avail_loongson }
13761
13762 /* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<INSN>
13763 for instruction CODE_FOR_loongson_<INSN>. FUNCTION_TYPE is a
13764 builtin_description field. */
13765 #define LOONGSON_BUILTIN(INSN, FUNCTION_TYPE) \
13766 LOONGSON_BUILTIN_ALIAS (INSN, INSN, FUNCTION_TYPE)
13767
13768 /* Like LOONGSON_BUILTIN, but add _<SUFFIX> to the end of the function name.
13769 We use functions of this form when the same insn can be usefully applied
13770 to more than one datatype. */
13771 #define LOONGSON_BUILTIN_SUFFIX(INSN, SUFFIX, FUNCTION_TYPE) \
13772 LOONGSON_BUILTIN_ALIAS (INSN, INSN ## _ ## SUFFIX, FUNCTION_TYPE)
13773
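/* The .md patterns behind some of the builtins below have generic names;
these defines map the CODE_FOR_mips_* and CODE_FOR_loongson_* names
used in the table onto those patterns. */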
13774 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
13775 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
13776 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
13777 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
13778 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
13779 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
13780 #define CODE_FOR_mips_mult CODE_FOR_mulsidi3_32bit
13781 #define CODE_FOR_mips_multu CODE_FOR_umulsidi3_32bit
13782
13783 #define CODE_FOR_loongson_packsswh CODE_FOR_vec_pack_ssat_v2si
13784 #define CODE_FOR_loongson_packsshb CODE_FOR_vec_pack_ssat_v4hi
13785 #define CODE_FOR_loongson_packushb CODE_FOR_vec_pack_usat_v4hi
13786 #define CODE_FOR_loongson_paddw CODE_FOR_addv2si3
13787 #define CODE_FOR_loongson_paddh CODE_FOR_addv4hi3
13788 #define CODE_FOR_loongson_paddb CODE_FOR_addv8qi3
13789 #define CODE_FOR_loongson_paddsh CODE_FOR_ssaddv4hi3
13790 #define CODE_FOR_loongson_paddsb CODE_FOR_ssaddv8qi3
13791 #define CODE_FOR_loongson_paddush CODE_FOR_usaddv4hi3
13792 #define CODE_FOR_loongson_paddusb CODE_FOR_usaddv8qi3
13793 #define CODE_FOR_loongson_pmaxsh CODE_FOR_smaxv4hi3
13794 #define CODE_FOR_loongson_pmaxub CODE_FOR_umaxv8qi3
13795 #define CODE_FOR_loongson_pminsh CODE_FOR_sminv4hi3
13796 #define CODE_FOR_loongson_pminub CODE_FOR_uminv8qi3
13797 #define CODE_FOR_loongson_pmulhuh CODE_FOR_umulv4hi3_highpart
13798 #define CODE_FOR_loongson_pmulhh CODE_FOR_smulv4hi3_highpart
13799 #define CODE_FOR_loongson_pmullh CODE_FOR_mulv4hi3
13800 #define CODE_FOR_loongson_psllh CODE_FOR_ashlv4hi3
13801 #define CODE_FOR_loongson_psllw CODE_FOR_ashlv2si3
13802 #define CODE_FOR_loongson_psrlh CODE_FOR_lshrv4hi3
13803 #define CODE_FOR_loongson_psrlw CODE_FOR_lshrv2si3
13804 #define CODE_FOR_loongson_psrah CODE_FOR_ashrv4hi3
13805 #define CODE_FOR_loongson_psraw CODE_FOR_ashrv2si3
13806 #define CODE_FOR_loongson_psubw CODE_FOR_subv2si3
13807 #define CODE_FOR_loongson_psubh CODE_FOR_subv4hi3
13808 #define CODE_FOR_loongson_psubb CODE_FOR_subv8qi3
13809 #define CODE_FOR_loongson_psubsh CODE_FOR_sssubv4hi3
13810 #define CODE_FOR_loongson_psubsb CODE_FOR_sssubv8qi3
13811 #define CODE_FOR_loongson_psubush CODE_FOR_ussubv4hi3
13812 #define CODE_FOR_loongson_psubusb CODE_FOR_ussubv8qi3
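/* The #defines above remap built-in instruction names onto the generic
   .md pattern names that actually implement them; for example, the
   __builtin_loongson_paddw functions expand through CODE_FOR_addv2si3,
   the generic V2SI addition pattern.  */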
13813
13814 static const struct mips_builtin_description mips_builtins[] = {
13815 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
13816 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
13817 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
13818 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
13819 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, paired_single),
13820 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, paired_single),
13821 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, paired_single),
13822 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, paired_single),
13823
13824 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT, paired_single),
13825 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
13826 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
13827 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
13828 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, mips3d),
13829
13830 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, mips3d),
13831 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, mips3d),
13832 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
13833 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
13834 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
13835 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
13836
13837 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, mips3d),
13838 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, mips3d),
13839 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
13840 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
13841 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
13842 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
13843
13844 MIPS_FP_CONDITIONS (CMP_BUILTINS),
13845
13846 /* Built-in functions for the SB-1 processor. */
13847 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, sb1_paired_single),
13848
13849 /* Built-in functions for the DSP ASE (32-bit and 64-bit). */
13850 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
13851 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
13852 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
13853 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
13854 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
13855 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
13856 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
13857 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
13858 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
13859 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
13860 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, dsp),
13861 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, dsp),
13862 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, dsp),
13863 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, dsp),
13864 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, dsp),
13865 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, dsp),
13866 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
13867 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
13868 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
13869 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
13870 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, dsp),
13871 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, dsp),
13872 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
13873 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
13874 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
13875 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
13876 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
13877 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
13878 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
13879 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
13880 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
13881 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
13882 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
13883 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
13884 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
13885 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
13886 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
13887 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, dsp),
13888 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
13889 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
13890 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
13891 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
13892 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
13893 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, dsp),
13894 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, dsp),
13895 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, dsp),
13896 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, dsp),
13897 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
13898 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
13899 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
13900 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
13901 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
13902 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
13903 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
13904 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
13905 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
13906 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
13907 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
13908 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
13909 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, dsp),
13910 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, dsp),
13911 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_POINTER_SI, dsp),
13912 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_POINTER_SI, dsp),
13913 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_POINTER_SI, dsp),
13914 BPOSGE_BUILTIN (32, dsp),
13915
13916 /* The following are for the MIPS DSP ASE REV 2 (32-bit and 64-bit). */
13917 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, dspr2),
13918 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
13919 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
13920 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
13921 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
13922 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
13923 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
13924 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
13925 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
13926 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
13927 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
13928 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
13929 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, dspr2),
13930 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
13931 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, dspr2),
13932 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dspr2),
13933 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
13934 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
13935 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
13936 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
13937 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
13938 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, dspr2),
13939 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
13940 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
13941 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
13942 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
13943 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
13944 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
13945 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
13946 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
13947 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
13948 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
13949 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
13950 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
13951
13952 /* Built-in functions for the DSP ASE (32-bit only). */
13953 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
13954 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
13955 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
13956 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
13957 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
13958 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
13959 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
13960 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
13961 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
13962 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
13963 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
13964 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
13965 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
13966 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
13967 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
13968 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
13969 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, dsp_32),
13970 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, dsp_32),
13971 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, dsp_32),
13972 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, dsp_32),
13973 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, dsp_32),
13974 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
13975 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, dsp_32),
13976 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
13977 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, dsp_32),
13978 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, dsp_32),
13979 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, dsp_32),
13980
13981 /* Built-in functions for the DSP ASE (64-bit only). */
13982 DIRECT_BUILTIN (ldx, MIPS_DI_FTYPE_POINTER_SI, dsp_64),
13983
13984 /* The following are for the MIPS DSP ASE REV 2 (32-bit only). */
13985 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
13986 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
13987 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
13988 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
13989 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
13990 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
13991 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
13992 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
13993 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
13994
13995 /* Built-in functions for ST Microelectronics Loongson-2E/2F cores. */
13996 LOONGSON_BUILTIN (packsswh, MIPS_V4HI_FTYPE_V2SI_V2SI),
13997 LOONGSON_BUILTIN (packsshb, MIPS_V8QI_FTYPE_V4HI_V4HI),
13998 LOONGSON_BUILTIN (packushb, MIPS_UV8QI_FTYPE_UV4HI_UV4HI),
13999 LOONGSON_BUILTIN_SUFFIX (paddw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
14000 LOONGSON_BUILTIN_SUFFIX (paddh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
14001 LOONGSON_BUILTIN_SUFFIX (paddb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
14002 LOONGSON_BUILTIN_SUFFIX (paddw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
14003 LOONGSON_BUILTIN_SUFFIX (paddh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
14004 LOONGSON_BUILTIN_SUFFIX (paddb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
14005 LOONGSON_BUILTIN_SUFFIX (paddd, u, MIPS_UDI_FTYPE_UDI_UDI),
14006 LOONGSON_BUILTIN_SUFFIX (paddd, s, MIPS_DI_FTYPE_DI_DI),
14007 LOONGSON_BUILTIN (paddsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
14008 LOONGSON_BUILTIN (paddsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
14009 LOONGSON_BUILTIN (paddush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
14010 LOONGSON_BUILTIN (paddusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
14011 LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_ud, MIPS_UDI_FTYPE_UDI_UDI),
14012 LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_uw, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
14013 LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_uh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
14014 LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_ub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
14015 LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_sd, MIPS_DI_FTYPE_DI_DI),
14016 LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_sw, MIPS_V2SI_FTYPE_V2SI_V2SI),
14017 LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_sh, MIPS_V4HI_FTYPE_V4HI_V4HI),
14018 LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_sb, MIPS_V8QI_FTYPE_V8QI_V8QI),
14019 LOONGSON_BUILTIN (pavgh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
14020 LOONGSON_BUILTIN (pavgb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
14021 LOONGSON_BUILTIN_SUFFIX (pcmpeqw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
14022 LOONGSON_BUILTIN_SUFFIX (pcmpeqh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
14023 LOONGSON_BUILTIN_SUFFIX (pcmpeqb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
14024 LOONGSON_BUILTIN_SUFFIX (pcmpeqw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
14025 LOONGSON_BUILTIN_SUFFIX (pcmpeqh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
14026 LOONGSON_BUILTIN_SUFFIX (pcmpeqb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
14027 LOONGSON_BUILTIN_SUFFIX (pcmpgtw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
14028 LOONGSON_BUILTIN_SUFFIX (pcmpgth, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
14029 LOONGSON_BUILTIN_SUFFIX (pcmpgtb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
14030 LOONGSON_BUILTIN_SUFFIX (pcmpgtw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
14031 LOONGSON_BUILTIN_SUFFIX (pcmpgth, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
14032 LOONGSON_BUILTIN_SUFFIX (pcmpgtb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
14033 LOONGSON_BUILTIN_SUFFIX (pextrh, u, MIPS_UV4HI_FTYPE_UV4HI_USI),
14034 LOONGSON_BUILTIN_SUFFIX (pextrh, s, MIPS_V4HI_FTYPE_V4HI_USI),
14035 LOONGSON_BUILTIN_SUFFIX (pinsrh_0, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
14036 LOONGSON_BUILTIN_SUFFIX (pinsrh_1, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
14037 LOONGSON_BUILTIN_SUFFIX (pinsrh_2, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
14038 LOONGSON_BUILTIN_SUFFIX (pinsrh_3, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
14039 LOONGSON_BUILTIN_SUFFIX (pinsrh_0, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
14040 LOONGSON_BUILTIN_SUFFIX (pinsrh_1, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
14041 LOONGSON_BUILTIN_SUFFIX (pinsrh_2, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
14042 LOONGSON_BUILTIN_SUFFIX (pinsrh_3, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
14043 LOONGSON_BUILTIN (pmaddhw, MIPS_V2SI_FTYPE_V4HI_V4HI),
14044 LOONGSON_BUILTIN (pmaxsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
14045 LOONGSON_BUILTIN (pmaxub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
14046 LOONGSON_BUILTIN (pminsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
14047 LOONGSON_BUILTIN (pminub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
14048 LOONGSON_BUILTIN_SUFFIX (pmovmskb, u, MIPS_UV8QI_FTYPE_UV8QI),
14049 LOONGSON_BUILTIN_SUFFIX (pmovmskb, s, MIPS_V8QI_FTYPE_V8QI),
14050 LOONGSON_BUILTIN (pmulhuh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
14051 LOONGSON_BUILTIN (pmulhh, MIPS_V4HI_FTYPE_V4HI_V4HI),
14052 LOONGSON_BUILTIN (pmullh, MIPS_V4HI_FTYPE_V4HI_V4HI),
14053 LOONGSON_BUILTIN (pmuluw, MIPS_UDI_FTYPE_UV2SI_UV2SI),
14054 LOONGSON_BUILTIN (pasubub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
14055 LOONGSON_BUILTIN (biadd, MIPS_UV4HI_FTYPE_UV8QI),
14056 LOONGSON_BUILTIN (psadbh, MIPS_UV4HI_FTYPE_UV8QI_UV8QI),
14057 LOONGSON_BUILTIN_SUFFIX (pshufh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
14058 LOONGSON_BUILTIN_SUFFIX (pshufh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
14059 LOONGSON_BUILTIN_SUFFIX (psllh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
14060 LOONGSON_BUILTIN_SUFFIX (psllh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
14061 LOONGSON_BUILTIN_SUFFIX (psllw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
14062 LOONGSON_BUILTIN_SUFFIX (psllw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
14063 LOONGSON_BUILTIN_SUFFIX (psrah, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
14064 LOONGSON_BUILTIN_SUFFIX (psrah, s, MIPS_V4HI_FTYPE_V4HI_UQI),
14065 LOONGSON_BUILTIN_SUFFIX (psraw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
14066 LOONGSON_BUILTIN_SUFFIX (psraw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
14067 LOONGSON_BUILTIN_SUFFIX (psrlh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
14068 LOONGSON_BUILTIN_SUFFIX (psrlh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
14069 LOONGSON_BUILTIN_SUFFIX (psrlw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
14070 LOONGSON_BUILTIN_SUFFIX (psrlw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
14071 LOONGSON_BUILTIN_SUFFIX (psubw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
14072 LOONGSON_BUILTIN_SUFFIX (psubh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
14073 LOONGSON_BUILTIN_SUFFIX (psubb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
14074 LOONGSON_BUILTIN_SUFFIX (psubw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
14075 LOONGSON_BUILTIN_SUFFIX (psubh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
14076 LOONGSON_BUILTIN_SUFFIX (psubb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
14077 LOONGSON_BUILTIN_SUFFIX (psubd, u, MIPS_UDI_FTYPE_UDI_UDI),
14078 LOONGSON_BUILTIN_SUFFIX (psubd, s, MIPS_DI_FTYPE_DI_DI),
14079 LOONGSON_BUILTIN (psubsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
14080 LOONGSON_BUILTIN (psubsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
14081 LOONGSON_BUILTIN (psubush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
14082 LOONGSON_BUILTIN (psubusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
14083 LOONGSON_BUILTIN_SUFFIX (punpckhbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
14084 LOONGSON_BUILTIN_SUFFIX (punpckhhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
14085 LOONGSON_BUILTIN_SUFFIX (punpckhwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
14086 LOONGSON_BUILTIN_SUFFIX (punpckhbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
14087 LOONGSON_BUILTIN_SUFFIX (punpckhhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
14088 LOONGSON_BUILTIN_SUFFIX (punpckhwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
14089 LOONGSON_BUILTIN_SUFFIX (punpcklbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
14090 LOONGSON_BUILTIN_SUFFIX (punpcklhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
14091 LOONGSON_BUILTIN_SUFFIX (punpcklwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
14092 LOONGSON_BUILTIN_SUFFIX (punpcklbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
14093 LOONGSON_BUILTIN_SUFFIX (punpcklhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
14094 LOONGSON_BUILTIN_SUFFIX (punpcklwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
14095
14096 /* Sundry other built-in functions. */
14097 DIRECT_NO_TARGET_BUILTIN (cache, MIPS_VOID_FTYPE_SI_CVPOINTER, cache)
14098 };
14099
14100 /* mips_builtin_decls[I] is the function declaration for mips_builtins[I],
14101 or null if the function isn't defined on this target. */
14102 static GTY(()) tree mips_builtin_decls[ARRAY_SIZE (mips_builtins)];
14103
14104 /* MODE is a vector mode whose elements have type TYPE. Return the type
14105 of the vector itself. */
14106
14107 static tree
14108 mips_builtin_vector_type (tree type, enum machine_mode mode)
14109 {
14110 static tree types[2 * (int) MAX_MACHINE_MODE];
14111 int mode_index;
14112
14113 mode_index = (int) mode;
14114
14115 if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type))
14116 mode_index += MAX_MACHINE_MODE;
14117
14118 if (types[mode_index] == NULL_TREE)
14119 types[mode_index] = build_vector_type_for_mode (type, mode);
14120 return types[mode_index];
14121 }
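/* For example, MIPS_ATYPE_V4HI below is mips_builtin_vector_type
   (intHI_type_node, V4HImode); thanks to the cache above, all of its
   uses share a single V4HI type node.  */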
14122
14123 /* Return a type for 'const volatile void *'. */
14124
14125 static tree
14126 mips_build_cvpointer_type (void)
14127 {
14128 static tree cache;
14129
14130 if (cache == NULL_TREE)
14131 cache = build_pointer_type (build_qualified_type
14132 (void_type_node,
14133 TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE));
14134 return cache;
14135 }
14136
14137 /* Source-level argument types. */
14138 #define MIPS_ATYPE_VOID void_type_node
14139 #define MIPS_ATYPE_INT integer_type_node
14140 #define MIPS_ATYPE_POINTER ptr_type_node
14141 #define MIPS_ATYPE_CVPOINTER mips_build_cvpointer_type ()
14142
14143 /* Standard mode-based argument types. */
14144 #define MIPS_ATYPE_UQI unsigned_intQI_type_node
14145 #define MIPS_ATYPE_SI intSI_type_node
14146 #define MIPS_ATYPE_USI unsigned_intSI_type_node
14147 #define MIPS_ATYPE_DI intDI_type_node
14148 #define MIPS_ATYPE_UDI unsigned_intDI_type_node
14149 #define MIPS_ATYPE_SF float_type_node
14150 #define MIPS_ATYPE_DF double_type_node
14151
14152 /* Vector argument types. */
14153 #define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
14154 #define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
14155 #define MIPS_ATYPE_V2SI mips_builtin_vector_type (intSI_type_node, V2SImode)
14156 #define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
14157 #define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode)
14158 #define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode)
14159 #define MIPS_ATYPE_UV2SI \
14160 mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
14161 #define MIPS_ATYPE_UV4HI \
14162 mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
14163 #define MIPS_ATYPE_UV8QI \
14164 mips_builtin_vector_type (unsigned_intQI_type_node, V8QImode)
14165
14166 /* MIPS_FTYPE_ATYPESN takes the return-type code and the N argument-type
14167 codes of a MIPS_FTYPES-like prototype and lists the associated MIPS_ATYPEs. */
14168 #define MIPS_FTYPE_ATYPES1(A, B) \
14169 MIPS_ATYPE_##A, MIPS_ATYPE_##B
14170
14171 #define MIPS_FTYPE_ATYPES2(A, B, C) \
14172 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C
14173
14174 #define MIPS_FTYPE_ATYPES3(A, B, C, D) \
14175 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D
14176
14177 #define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
14178 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
14179 MIPS_ATYPE_##E
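/* For example, a DEF_MIPS_FTYPE (2, (SI, SI, SI)) entry (presumably the
   form behind MIPS_SI_FTYPE_SI_SI) makes the switch below expand
   MIPS_FTYPE_ATYPES2 (SI, SI, SI) into MIPS_ATYPE_SI, MIPS_ATYPE_SI,
   MIPS_ATYPE_SI: the return type followed by the two argument types.  */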
14180
14181 /* Return the function type associated with function prototype TYPE. */
14182
14183 static tree
14184 mips_build_function_type (enum mips_function_type type)
14185 {
14186 static tree types[(int) MIPS_MAX_FTYPE_MAX];
14187
14188 if (types[(int) type] == NULL_TREE)
14189 switch (type)
14190 {
14191 #define DEF_MIPS_FTYPE(NUM, ARGS) \
14192 case MIPS_FTYPE_NAME##NUM ARGS: \
14193 types[(int) type] \
14194 = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS, \
14195 NULL_TREE); \
14196 break;
14197 #include "config/mips/mips-ftypes.def"
14198 #undef DEF_MIPS_FTYPE
14199 default:
14200 gcc_unreachable ();
14201 }
14202
14203 return types[(int) type];
14204 }
14205
14206 /* Implement TARGET_INIT_BUILTINS. */
14207
14208 static void
14209 mips_init_builtins (void)
14210 {
14211 const struct mips_builtin_description *d;
14212 unsigned int i;
14213
14214 /* Iterate through all of the entries in mips_builtins, declaring each
14215 built-in function that is available on this target. */
14216 for (i = 0; i < ARRAY_SIZE (mips_builtins); i++)
14217 {
14218 d = &mips_builtins[i];
14219 if (d->avail ())
14220 mips_builtin_decls[i]
14221 = add_builtin_function (d->name,
14222 mips_build_function_type (d->function_type),
14223 i, BUILT_IN_MD, NULL, NULL);
14224 }
14225 }
14226
14227 /* Implement TARGET_BUILTIN_DECL. */
14228
14229 static tree
14230 mips_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
14231 {
14232 if (code >= ARRAY_SIZE (mips_builtins))
14233 return error_mark_node;
14234 return mips_builtin_decls[code];
14235 }
14236
14237 /* Take argument ARGNO from EXP's argument list and convert it into
14238 an expand operand. Store the operand in *OP. */
14239
14240 static void
14241 mips_prepare_builtin_arg (struct expand_operand *op, tree exp,
14242 unsigned int argno)
14243 {
14244 tree arg;
14245 rtx value;
14246
14247 arg = CALL_EXPR_ARG (exp, argno);
14248 value = expand_normal (arg);
14249 create_input_operand (op, value, TYPE_MODE (TREE_TYPE (arg)));
14250 }
14251
14252 /* Expand instruction ICODE as part of a built-in function sequence.
14253 Use the first NOPS elements of OPS as the instruction's operands.
14254 HAS_TARGET_P is true if operand 0 is a target; it is false if the
14255 instruction has no target.
14256
14257 Return the target rtx if HAS_TARGET_P, otherwise return const0_rtx. */
14258
14259 static rtx
14260 mips_expand_builtin_insn (enum insn_code icode, unsigned int nops,
14261 struct expand_operand *ops, bool has_target_p)
14262 {
14263 if (!maybe_expand_insn (icode, nops, ops))
14264 {
14265 error ("invalid argument to built-in function");
14266 return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx;
14267 }
14268 return has_target_p ? ops[0].value : const0_rtx;
14269 }
14270
14271 /* Expand a floating-point comparison for built-in function call EXP.
14272 The first NARGS arguments are the values to be compared. ICODE is
14273 the .md pattern that does the comparison and COND is the condition
14274 that is being tested. Return an rtx for the result. */
14275
14276 static rtx
14277 mips_expand_builtin_compare_1 (enum insn_code icode,
14278 enum mips_fp_condition cond,
14279 tree exp, int nargs)
14280 {
14281 struct expand_operand ops[MAX_RECOG_OPERANDS];
14282 rtx output;
14283 int opno, argno;
14284
14285 /* The instruction should have a target operand, an operand for each
14286 argument, and an operand for COND. */
14287 gcc_assert (nargs + 2 == insn_data[(int) icode].n_generator_args);
14288
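  /* For a two-argument comparison the operands are therefore filled in
     as: ops[0] = the FCC result, ops[1] and ops[2] = the values being
     compared, and ops[3] = the integer condition code COND.  */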
14289 output = mips_allocate_fcc (insn_data[(int) icode].operand[0].mode);
14290 opno = 0;
14291 create_fixed_operand (&ops[opno++], output);
14292 for (argno = 0; argno < nargs; argno++)
14293 mips_prepare_builtin_arg (&ops[opno++], exp, argno);
14294 create_integer_operand (&ops[opno++], (int) cond);
14295 return mips_expand_builtin_insn (icode, opno, ops, true);
14296 }
14297
14298 /* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
14299 HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
14300 and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
14301 suggests a good place to put the result. */
14302
14303 static rtx
14304 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
14305 bool has_target_p)
14306 {
14307 struct expand_operand ops[MAX_RECOG_OPERANDS];
14308 int opno, argno;
14309
14310 /* Map any target to operand 0. */
14311 opno = 0;
14312 if (has_target_p)
14313 create_output_operand (&ops[opno++], target, TYPE_MODE (TREE_TYPE (exp)));
14314
14315 /* Map the arguments to the other operands. */
14316 gcc_assert (opno + call_expr_nargs (exp)
14317 == insn_data[icode].n_generator_args);
14318 for (argno = 0; argno < call_expr_nargs (exp); argno++)
14319 mips_prepare_builtin_arg (&ops[opno++], exp, argno);
14320
14321 return mips_expand_builtin_insn (icode, opno, ops, has_target_p);
14322 }
14323
14324 /* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
14325 function; TYPE says which. EXP is the CALL_EXPR that calls the
14326 function, ICODE is the instruction that should be used to compare
14327 the first two arguments, and COND is the condition it should test.
14328 TARGET, if nonnull, suggests a good place to put the result. */
14329
14330 static rtx
14331 mips_expand_builtin_movtf (enum mips_builtin_type type,
14332 enum insn_code icode, enum mips_fp_condition cond,
14333 rtx target, tree exp)
14334 {
14335 struct expand_operand ops[4];
14336 rtx cmp_result;
14337
14338 cmp_result = mips_expand_builtin_compare_1 (icode, cond, exp, 2);
14339 create_output_operand (&ops[0], target, TYPE_MODE (TREE_TYPE (exp)));
14340 if (type == MIPS_BUILTIN_MOVT)
14341 {
14342 mips_prepare_builtin_arg (&ops[2], exp, 2);
14343 mips_prepare_builtin_arg (&ops[1], exp, 3);
14344 }
14345 else
14346 {
14347 mips_prepare_builtin_arg (&ops[1], exp, 2);
14348 mips_prepare_builtin_arg (&ops[2], exp, 3);
14349 }
14350 create_fixed_operand (&ops[3], cmp_result);
14351 return mips_expand_builtin_insn (CODE_FOR_mips_cond_move_tf_ps,
14352 4, ops, true);
14353 }
14354
14355 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
14356 into TARGET otherwise. Return TARGET. */
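/* In outline, the sequence emitted below is:

       target = value_if_false;
       if (condition) goto true_label;
       goto done_label;
     true_label:
       target = value_if_true;
     done_label:
       ...  */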
14357
14358 static rtx
14359 mips_builtin_branch_and_move (rtx condition, rtx target,
14360 rtx value_if_true, rtx value_if_false)
14361 {
14362 rtx true_label, done_label;
14363
14364 true_label = gen_label_rtx ();
14365 done_label = gen_label_rtx ();
14366
14367 /* First assume that CONDITION is false. */
14368 mips_emit_move (target, value_if_false);
14369
14370 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
14371 emit_jump_insn (gen_condjump (condition, true_label));
14372 emit_jump_insn (gen_jump (done_label));
14373 emit_barrier ();
14374
14375 /* Fix TARGET if CONDITION is true. */
14376 emit_label (true_label);
14377 mips_emit_move (target, value_if_true);
14378
14379 emit_label (done_label);
14380 return target;
14381 }
14382
14383 /* Expand a comparison built-in function of type BUILTIN_TYPE. EXP is
14384 the CALL_EXPR that calls the function, ICODE is the code of the
14385 comparison instruction, and COND is the condition it should test.
14386 TARGET, if nonnull, suggests a good place to put the boolean result. */
14387
14388 static rtx
14389 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
14390 enum insn_code icode, enum mips_fp_condition cond,
14391 rtx target, tree exp)
14392 {
14393 rtx offset, condition, cmp_result;
14394
14395 if (target == 0 || GET_MODE (target) != SImode)
14396 target = gen_reg_rtx (SImode);
14397 cmp_result = mips_expand_builtin_compare_1 (icode, cond, exp,
14398 call_expr_nargs (exp));
14399
14400 /* If the comparison sets more than one register, we define the result
14401 to be 0 if all registers are false and -1 if all registers are true.
14402 The value of the complete result is indeterminate otherwise. */
14403 switch (builtin_type)
14404 {
14405 case MIPS_BUILTIN_CMP_ALL:
14406 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
14407 return mips_builtin_branch_and_move (condition, target,
14408 const0_rtx, const1_rtx);
14409
14410 case MIPS_BUILTIN_CMP_UPPER:
14411 case MIPS_BUILTIN_CMP_LOWER:
14412 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
14413 condition = gen_single_cc (cmp_result, offset);
14414 return mips_builtin_branch_and_move (condition, target,
14415 const1_rtx, const0_rtx);
14416
14417 default:
14418 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
14419 return mips_builtin_branch_and_move (condition, target,
14420 const1_rtx, const0_rtx);
14421 }
14422 }
14423
14424 /* Expand a bposge built-in function of type BUILTIN_TYPE. TARGET,
14425 if nonnull, suggests a good place to put the boolean result. */
14426
14427 static rtx
14428 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
14429 {
14430 rtx condition, cmp_result;
14431 int cmp_value;
14432
14433 if (target == 0 || GET_MODE (target) != SImode)
14434 target = gen_reg_rtx (SImode);
14435
14436 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
14437
14438 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
14439 cmp_value = 32;
14440 else
14441 gcc_unreachable ();
14442
14443 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
14444 return mips_builtin_branch_and_move (condition, target,
14445 const1_rtx, const0_rtx);
14446 }
14447
14448 /* Implement TARGET_EXPAND_BUILTIN. */
14449
14450 static rtx
14451 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
14452 enum machine_mode mode, int ignore)
14453 {
14454 tree fndecl;
14455 unsigned int fcode, avail;
14456 const struct mips_builtin_description *d;
14457
14458 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14459 fcode = DECL_FUNCTION_CODE (fndecl);
14460 gcc_assert (fcode < ARRAY_SIZE (mips_builtins));
14461 d = &mips_builtins[fcode];
14462 avail = d->avail ();
14463 gcc_assert (avail != 0);
14464 if (TARGET_MIPS16)
14465 {
14466 error ("built-in function %qE not supported for MIPS16",
14467 DECL_NAME (fndecl));
14468 return ignore ? const0_rtx : CONST0_RTX (mode);
14469 }
14470 switch (d->builtin_type)
14471 {
14472 case MIPS_BUILTIN_DIRECT:
14473 return mips_expand_builtin_direct (d->icode, target, exp, true);
14474
14475 case MIPS_BUILTIN_DIRECT_NO_TARGET:
14476 return mips_expand_builtin_direct (d->icode, target, exp, false);
14477
14478 case MIPS_BUILTIN_MOVT:
14479 case MIPS_BUILTIN_MOVF:
14480 return mips_expand_builtin_movtf (d->builtin_type, d->icode,
14481 d->cond, target, exp);
14482
14483 case MIPS_BUILTIN_CMP_ANY:
14484 case MIPS_BUILTIN_CMP_ALL:
14485 case MIPS_BUILTIN_CMP_UPPER:
14486 case MIPS_BUILTIN_CMP_LOWER:
14487 case MIPS_BUILTIN_CMP_SINGLE:
14488 return mips_expand_builtin_compare (d->builtin_type, d->icode,
14489 d->cond, target, exp);
14490
14491 case MIPS_BUILTIN_BPOSGE32:
14492 return mips_expand_builtin_bposge (d->builtin_type, target);
14493 }
14494 gcc_unreachable ();
14495 }
14496 \f
14497 /* An entry in the MIPS16 constant pool. VALUE is the pool constant,
14498 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
14499 struct mips16_constant {
14500 struct mips16_constant *next;
14501 rtx value;
14502 rtx label;
14503 enum machine_mode mode;
14504 };
14505
14506 /* Information about an incomplete MIPS16 constant pool. FIRST is the
14507 first constant, HIGHEST_ADDRESS is the highest address that the first
14508 byte of the pool can have, and INSN_ADDRESS is the current instruction
14509 address. */
14510 struct mips16_constant_pool {
14511 struct mips16_constant *first;
14512 int highest_address;
14513 int insn_address;
14514 };
14515
14516 /* Add constant VALUE to POOL and return its label. MODE is the
14517 value's mode (used for CONST_INTs, etc.). */
14518
14519 static rtx
14520 mips16_add_constant (struct mips16_constant_pool *pool,
14521 rtx value, enum machine_mode mode)
14522 {
14523 struct mips16_constant **p, *c;
14524 bool first_of_size_p;
14525
14526 /* See whether the constant is already in the pool. If so, return the
14527 existing label, otherwise leave P pointing to the place where the
14528 constant should be added.
14529
14530 Keep the pool sorted in increasing order of mode size so that we can
14531 reduce the number of alignments needed. */
14532 first_of_size_p = true;
14533 for (p = &pool->first; *p != 0; p = &(*p)->next)
14534 {
14535 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
14536 return (*p)->label;
14537 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
14538 break;
14539 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
14540 first_of_size_p = false;
14541 }
14542
14543 /* In the worst case, the constant needed by the earliest instruction
14544 will end up at the end of the pool. The entire pool must then be
14545 accessible from that instruction.
14546
14547 When adding the first constant, set the pool's highest address to
14548 the address of the first out-of-range byte. Adjust this address
14549 downwards each time a new constant is added. */
14550 if (pool->first == 0)
14551 /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
14552 of the instruction with the lowest two bits clear. The base PC
14553 value for LDPC has the lowest three bits clear. Assume the worst
14554 case here; namely that the PC-relative instruction occupies the
14555 last 2 bytes in an aligned word. */
14556 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
14557 pool->highest_address -= GET_MODE_SIZE (mode);
14558 if (first_of_size_p)
14559 /* Take into account the worst possible padding due to alignment. */
14560 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
14561
14562 /* Create a new entry. */
14563 c = XNEW (struct mips16_constant);
14564 c->value = value;
14565 c->mode = mode;
14566 c->label = gen_label_rtx ();
14567 c->next = *p;
14568 *p = c;
14569
14570 return c->label;
14571 }
14572
14573 /* Output constant VALUE after instruction INSN and return the last
14574 instruction emitted. MODE is the mode of the constant. */
14575
14576 static rtx
14577 mips16_emit_constants_1 (enum machine_mode mode, rtx value, rtx insn)
14578 {
14579 if (SCALAR_INT_MODE_P (mode) || ALL_SCALAR_FIXED_POINT_MODE_P (mode))
14580 {
14581 rtx size = GEN_INT (GET_MODE_SIZE (mode));
14582 return emit_insn_after (gen_consttable_int (value, size), insn);
14583 }
14584
14585 if (SCALAR_FLOAT_MODE_P (mode))
14586 return emit_insn_after (gen_consttable_float (value), insn);
14587
14588 if (VECTOR_MODE_P (mode))
14589 {
14590 int i;
14591
14592 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
14593 insn = mips16_emit_constants_1 (GET_MODE_INNER (mode),
14594 CONST_VECTOR_ELT (value, i), insn);
14595 return insn;
14596 }
14597
14598 gcc_unreachable ();
14599 }
14600
14601 /* Dump out the constants in CONSTANTS after INSN. */
14602
14603 static void
14604 mips16_emit_constants (struct mips16_constant *constants, rtx insn)
14605 {
14606 struct mips16_constant *c, *next;
14607 int align;
14608
14609 align = 0;
14610 for (c = constants; c != NULL; c = next)
14611 {
14612 /* If necessary, increase the alignment of PC. */
14613 if (align < GET_MODE_SIZE (c->mode))
14614 {
14615 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
14616 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
14617 }
14618 align = GET_MODE_SIZE (c->mode);
14619
14620 insn = emit_label_after (c->label, insn);
14621 insn = mips16_emit_constants_1 (c->mode, c->value, insn);
14622
14623 next = c->next;
14624 free (c);
14625 }
14626
14627 emit_barrier_after (insn);
14628 }
14629
14630 /* Return the length of instruction INSN. */
14631
14632 static int
14633 mips16_insn_length (rtx insn)
14634 {
14635 if (JUMP_TABLE_DATA_P (insn))
14636 {
14637 rtx body = PATTERN (insn);
14638 if (GET_CODE (body) == ADDR_VEC)
14639 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
14640 else if (GET_CODE (body) == ADDR_DIFF_VEC)
14641 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
14642 else
14643 gcc_unreachable ();
14644 }
14645 return get_attr_length (insn);
14646 }
14647
14648 /* If *X is a symbolic constant that refers to the constant pool, add
14649 the constant to POOL and rewrite *X to use the constant's label. */
14650
14651 static void
14652 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
14653 {
14654 rtx base, offset, label;
14655
14656 split_const (*x, &base, &offset);
14657 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
14658 {
14659 label = mips16_add_constant (pool, copy_rtx (get_pool_constant (base)),
14660 get_pool_mode (base));
14661 base = gen_rtx_LABEL_REF (Pmode, label);
14662 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
14663 }
14664 }
14665
14666 /* This structure is used to communicate with mips16_rewrite_pool_refs.
14667 INSN is the instruction we're rewriting and POOL points to the current
14668 constant pool. */
14669 struct mips16_rewrite_pool_refs_info {
14670 rtx insn;
14671 struct mips16_constant_pool *pool;
14672 };
14673
14674 /* Rewrite *X so that constant pool references refer to the constant's
14675 label instead. DATA points to a mips16_rewrite_pool_refs_info
14676 structure. */
14677
14678 static int
14679 mips16_rewrite_pool_refs (rtx *x, void *data)
14680 {
14681 struct mips16_rewrite_pool_refs_info *info =
14682 (struct mips16_rewrite_pool_refs_info *) data;
14683
14684 if (force_to_mem_operand (*x, Pmode))
14685 {
14686 rtx mem = force_const_mem (GET_MODE (*x), *x);
14687 validate_change (info->insn, x, mem, false);
14688 }
14689
14690 if (MEM_P (*x))
14691 {
14692 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
14693 return -1;
14694 }
14695
14696 /* Don't rewrite the __mips16_rdwr symbol. */
14697 if (GET_CODE (*x) == UNSPEC && XINT (*x, 1) == UNSPEC_TLS_GET_TP)
14698 return -1;
14699
14700 if (TARGET_MIPS16_TEXT_LOADS)
14701 mips16_rewrite_pool_constant (info->pool, x);
14702
14703 return GET_CODE (*x) == CONST ? -1 : 0;
14704 }
14705
14706 /* Return whether CFG is used in mips_reorg. */
14707
14708 static bool
14709 mips_cfg_in_reorg (void)
14710 {
14711 return (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE
14712 || TARGET_RELAX_PIC_CALLS);
14713 }
14714
14715 /* Build MIPS16 constant pools. Split the instructions if SPLIT_P,
14716 otherwise assume that they are already split. */
14717
14718 static void
14719 mips16_lay_out_constants (bool split_p)
14720 {
14721 struct mips16_constant_pool pool;
14722 struct mips16_rewrite_pool_refs_info info;
14723 rtx insn, barrier;
14724
14725 if (!TARGET_MIPS16_PCREL_LOADS)
14726 return;
14727
14728 if (split_p)
14729 {
14730 if (mips_cfg_in_reorg ())
14731 split_all_insns ();
14732 else
14733 split_all_insns_noflow ();
14734 }
14735 barrier = 0;
14736 memset (&pool, 0, sizeof (pool));
14737 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14738 {
14739 /* Rewrite constant pool references in INSN. */
14740 if (USEFUL_INSN_P (insn))
14741 {
14742 info.insn = insn;
14743 info.pool = &pool;
14744 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
14745 }
14746
14747 pool.insn_address += mips16_insn_length (insn);
14748
14749 if (pool.first != NULL)
14750 {
14751 /* If there are no natural barriers between the first user of
14752 the pool and the highest acceptable address, we'll need to
14753 create a new instruction to jump around the constant pool.
14754 In the worst case, this instruction will be 4 bytes long.
14755
14756 If it's too late to do this transformation after INSN,
14757 do it immediately before INSN. */
14758 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
14759 {
14760 rtx label, jump;
14761
14762 label = gen_label_rtx ();
14763
14764 jump = emit_jump_insn_before (gen_jump (label), insn);
14765 JUMP_LABEL (jump) = label;
14766 LABEL_NUSES (label) = 1;
14767 barrier = emit_barrier_after (jump);
14768
14769 emit_label_after (label, barrier);
14770 pool.insn_address += 4;
14771 }
14772
14773 /* See whether the constant pool is now out of range of the first
14774 user. If so, output the constants after the previous barrier.
14775 Note that any instructions between BARRIER and INSN (inclusive)
14776 will use negative offsets to refer to the pool. */
14777 if (pool.insn_address > pool.highest_address)
14778 {
14779 mips16_emit_constants (pool.first, barrier);
14780 pool.first = NULL;
14781 barrier = 0;
14782 }
14783 else if (BARRIER_P (insn))
14784 barrier = insn;
14785 }
14786 }
14787 mips16_emit_constants (pool.first, get_last_insn ());
14788 }
14789 \f
14790 /* Return true if it is worth r10k_simplify_address's while replacing
14791 an address with X. We are looking for constants, and for addresses
14792 at a known offset from the incoming stack pointer. */
14793
14794 static bool
14795 r10k_simplified_address_p (rtx x)
14796 {
14797 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
14798 x = XEXP (x, 0);
14799 return x == virtual_incoming_args_rtx || CONSTANT_P (x);
14800 }
14801
14802 /* X is an expression that appears in INSN. Try to use the UD chains
14803 to simplify it, returning the simplified form on success and the
14804 original form otherwise. Replace the incoming value of $sp with
14805 virtual_incoming_args_rtx (which should never occur in X otherwise). */
14806
14807 static rtx
14808 r10k_simplify_address (rtx x, rtx insn)
14809 {
14810 rtx newx, op0, op1, set, def_insn, note;
14811 df_ref use, def;
14812 struct df_link *defs;
14813
14814 newx = NULL_RTX;
14815 if (UNARY_P (x))
14816 {
14817 op0 = r10k_simplify_address (XEXP (x, 0), insn);
14818 if (op0 != XEXP (x, 0))
14819 newx = simplify_gen_unary (GET_CODE (x), GET_MODE (x),
14820 op0, GET_MODE (XEXP (x, 0)));
14821 }
14822 else if (BINARY_P (x))
14823 {
14824 op0 = r10k_simplify_address (XEXP (x, 0), insn);
14825 op1 = r10k_simplify_address (XEXP (x, 1), insn);
14826 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
14827 newx = simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
14828 }
14829 else if (GET_CODE (x) == LO_SUM)
14830 {
14831 /* LO_SUMs can be offset from HIGHs, if we know they won't
14832 overflow. See mips_classify_address for the rationale behind
14833 the lax check. */
14834 op0 = r10k_simplify_address (XEXP (x, 0), insn);
14835 if (GET_CODE (op0) == HIGH)
14836 newx = XEXP (x, 1);
14837 }
14838 else if (REG_P (x))
14839 {
14840 /* Uses are recorded by regno_reg_rtx, not X itself. */
14841 use = df_find_use (insn, regno_reg_rtx[REGNO (x)]);
14842 gcc_assert (use);
14843 defs = DF_REF_CHAIN (use);
14844
14845 /* Require a single definition. */
14846 if (defs && defs->next == NULL)
14847 {
14848 def = defs->ref;
14849 if (DF_REF_IS_ARTIFICIAL (def))
14850 {
14851 /* Replace the incoming value of $sp with
14852 virtual_incoming_args_rtx. */
14853 if (x == stack_pointer_rtx
14854 && DF_REF_BB (def) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
14855 newx = virtual_incoming_args_rtx;
14856 }
14857 else if (dominated_by_p (CDI_DOMINATORS, DF_REF_BB (use),
14858 DF_REF_BB (def)))
14859 {
14860 /* Make sure that DEF_INSN is a single set of REG. */
14861 def_insn = DF_REF_INSN (def);
14862 if (NONJUMP_INSN_P (def_insn))
14863 {
14864 set = single_set (def_insn);
14865 if (set && rtx_equal_p (SET_DEST (set), x))
14866 {
14867 /* Prefer to use notes, since the def-use chains
14868 are often shorter. */
14869 note = find_reg_equal_equiv_note (def_insn);
14870 if (note)
14871 newx = XEXP (note, 0);
14872 else
14873 newx = SET_SRC (set);
14874 newx = r10k_simplify_address (newx, def_insn);
14875 }
14876 }
14877 }
14878 }
14879 }
14880 if (newx && r10k_simplified_address_p (newx))
14881 return newx;
14882 return x;
14883 }
14884
14885 /* Return true if ADDRESS is known to be an uncached address
14886 on R10K systems. */
14887
14888 static bool
14889 r10k_uncached_address_p (unsigned HOST_WIDE_INT address)
14890 {
14891 unsigned HOST_WIDE_INT upper;
14892
14893 /* Check for KSEG1. */
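  /* (KSEG1 covers 0xa0000000 to 0xbfffffff; CONST_INT addresses are
     sign-extended, so adding 0x60000000 wraps exactly that range
     around to [0, 0x20000000), which is what this check relies on.)  */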
14894 if (address + 0x60000000 < 0x20000000)
14895 return true;
14896
14897 /* Check for uncached XKPHYS addresses. */
14898 if (Pmode == DImode)
14899 {
14900 upper = (address >> 40) & 0xf9ffff;
14901 if (upper == 0x900000 || upper == 0xb80000)
14902 return true;
14903 }
14904 return false;
14905 }
14906
14907 /* Return true if we can prove that an access to address X in instruction
14908 INSN would be safe from R10K speculation. This X is a general
14909 expression; it might not be a legitimate address. */
14910
14911 static bool
14912 r10k_safe_address_p (rtx x, rtx insn)
14913 {
14914 rtx base, offset;
14915 HOST_WIDE_INT offset_val;
14916
14917 x = r10k_simplify_address (x, insn);
14918
14919 /* Check for references to the stack frame. It doesn't really matter
14920 how much of the frame has been allocated at INSN; -mr10k-cache-barrier
14921 allows us to assume that accesses to any part of the eventual frame
14922 are safe from speculation at any point in the function. */
14923 mips_split_plus (x, &base, &offset_val);
14924 if (base == virtual_incoming_args_rtx
14925 && offset_val >= -cfun->machine->frame.total_size
14926 && offset_val < cfun->machine->frame.args_size)
14927 return true;
14928
14929 /* Check for uncached addresses. */
14930 if (CONST_INT_P (x))
14931 return r10k_uncached_address_p (INTVAL (x));
14932
14933 /* Check for accesses to a static object. */
14934 split_const (x, &base, &offset);
14935 return offset_within_block_p (base, INTVAL (offset));
14936 }
14937
14938 /* Return true if a MEM with MEM_EXPR EXPR and MEM_OFFSET OFFSET is
14939 an in-range access to an automatic variable, or to an object with
14940 a link-time-constant address. */
14941
14942 static bool
14943 r10k_safe_mem_expr_p (tree expr, unsigned HOST_WIDE_INT offset)
14944 {
14945 HOST_WIDE_INT bitoffset, bitsize;
14946 tree inner, var_offset;
14947 enum machine_mode mode;
14948 int unsigned_p, volatile_p;
14949
14950 inner = get_inner_reference (expr, &bitsize, &bitoffset, &var_offset, &mode,
14951 &unsigned_p, &volatile_p, false);
14952 if (!DECL_P (inner) || !DECL_SIZE_UNIT (inner) || var_offset)
14953 return false;
14954
14955 offset += bitoffset / BITS_PER_UNIT;
14956 return offset < tree_to_uhwi (DECL_SIZE_UNIT (inner));
14957 }
14958
14959 /* A for_each_rtx callback for which DATA points to the instruction
14960 containing *X. Stop the search if we find a MEM that is not safe
14961 from R10K speculation. */
14962
14963 static int
14964 r10k_needs_protection_p_1 (rtx *loc, void *data)
14965 {
14966 rtx mem;
14967
14968 mem = *loc;
14969 if (!MEM_P (mem))
14970 return 0;
14971
14972 if (MEM_EXPR (mem)
14973 && MEM_OFFSET_KNOWN_P (mem)
14974 && r10k_safe_mem_expr_p (MEM_EXPR (mem), MEM_OFFSET (mem)))
14975 return -1;
14976
14977 if (r10k_safe_address_p (XEXP (mem, 0), (rtx) data))
14978 return -1;
14979
14980 return 1;
14981 }
14982
14983 /* A note_stores callback for which DATA points to an instruction pointer.
14984 If *DATA is nonnull, make it null if X contains a MEM that is not
14985 safe from R10K speculation. */
14986
14987 static void
14988 r10k_needs_protection_p_store (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
14989 void *data)
14990 {
14991 rtx *insn_ptr;
14992
14993 insn_ptr = (rtx *) data;
14994 if (*insn_ptr && for_each_rtx (&x, r10k_needs_protection_p_1, *insn_ptr))
14995 *insn_ptr = NULL_RTX;
14996 }
14997
14998 /* A for_each_rtx callback applied to the pattern of a CALL_INSN.
14999 Return nonzero if the call is not to a declared function. */
15000
15001 static int
15002 r10k_needs_protection_p_call (rtx *loc, void *data ATTRIBUTE_UNUSED)
15003 {
15004 rtx x;
15005
15006 x = *loc;
15007 if (!MEM_P (x))
15008 return 0;
15009
15010 x = XEXP (x, 0);
15011 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DECL (x))
15012 return -1;
15013
15014 return 1;
15015 }
15016
15017 /* Return true if instruction INSN needs to be protected by an R10K
15018 cache barrier. */
15019
15020 static bool
15021 r10k_needs_protection_p (rtx insn)
15022 {
15023 if (CALL_P (insn))
15024 return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_call, NULL);
15025
15026 if (mips_r10k_cache_barrier == R10K_CACHE_BARRIER_STORE)
15027 {
15028 note_stores (PATTERN (insn), r10k_needs_protection_p_store, &insn);
15029 return insn == NULL_RTX;
15030 }
15031
15032 return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_1, insn);
15033 }
15034
15035 /* Return true if BB is only reached by blocks in PROTECTED_BBS and if every
15036 edge is unconditional. */
15037
15038 static bool
15039 r10k_protected_bb_p (basic_block bb, sbitmap protected_bbs)
15040 {
15041 edge_iterator ei;
15042 edge e;
15043
15044 FOR_EACH_EDGE (e, ei, bb->preds)
15045 if (!single_succ_p (e->src)
15046 || !bitmap_bit_p (protected_bbs, e->src->index)
15047 || (e->flags & EDGE_COMPLEX) != 0)
15048 return false;
15049 return true;
15050 }
15051
15052 /* Implement -mr10k-cache-barrier= for the current function. */
15053
15054 static void
15055 r10k_insert_cache_barriers (void)
15056 {
15057 int *rev_post_order;
15058 unsigned int i, n;
15059 basic_block bb;
15060 sbitmap protected_bbs;
15061 rtx insn, end, unprotected_region;
15062
15063 if (TARGET_MIPS16)
15064 {
15065 sorry ("%qs does not support MIPS16 code", "-mr10k-cache-barrier");
15066 return;
15067 }
15068
15069 /* Calculate dominators. */
15070 calculate_dominance_info (CDI_DOMINATORS);
15071
15072 /* Bit X of PROTECTED_BBS is set if the last operation in basic block
15073 X is protected by a cache barrier. */
15074 protected_bbs = sbitmap_alloc (last_basic_block);
15075 bitmap_clear (protected_bbs);
15076
15077 /* Iterate over the basic blocks in reverse post-order. */
15078 rev_post_order = XNEWVEC (int, last_basic_block);
15079 n = pre_and_rev_post_order_compute (NULL, rev_post_order, false);
15080 for (i = 0; i < n; i++)
15081 {
15082 bb = BASIC_BLOCK_FOR_FN (cfun, rev_post_order[i]);
15083
15084 /* If this block is only reached by unconditional edges, and if the
15085 source of every edge is protected, the beginning of the block is
15086 also protected. */
15087 if (r10k_protected_bb_p (bb, protected_bbs))
15088 unprotected_region = NULL_RTX;
15089 else
15090 unprotected_region = pc_rtx;
15091 end = NEXT_INSN (BB_END (bb));
15092
15093 /* UNPROTECTED_REGION is:
15094
15095 - null if we are processing a protected region,
15096 - pc_rtx if we are processing an unprotected region but have
15097 not yet found the first instruction in it, and
15098 - the first instruction in an unprotected region otherwise. */
15099 for (insn = BB_HEAD (bb); insn != end; insn = NEXT_INSN (insn))
15100 {
15101 if (unprotected_region && USEFUL_INSN_P (insn))
15102 {
15103 if (recog_memoized (insn) == CODE_FOR_mips_cache)
15104 /* This CACHE instruction protects the following code. */
15105 unprotected_region = NULL_RTX;
15106 else
15107 {
15108 /* See if INSN is the first instruction in this
15109 unprotected region. */
15110 if (unprotected_region == pc_rtx)
15111 unprotected_region = insn;
15112
15113 /* See if INSN needs to be protected. If so,
15114 we must insert a cache barrier somewhere between
15115 PREV_INSN (UNPROTECTED_REGION) and INSN. It isn't
15116 clear which position is better performance-wise,
15117 but as a tie-breaker, we assume that it is better
15118 to allow delay slots to be back-filled where
15119 possible, and that it is better not to insert
15120 barriers in the middle of already-scheduled code.
15121 We therefore insert the barrier at the beginning
15122 of the region. */
15123 if (r10k_needs_protection_p (insn))
15124 {
15125 emit_insn_before (gen_r10k_cache_barrier (),
15126 unprotected_region);
15127 unprotected_region = NULL_RTX;
15128 }
15129 }
15130 }
15131
15132 if (CALL_P (insn))
15133 /* The called function is not required to protect the exit path.
15134 The code that follows a call is therefore unprotected. */
15135 unprotected_region = pc_rtx;
15136 }
15137
15138 /* Record whether the end of this block is protected. */
15139 if (unprotected_region == NULL_RTX)
15140 bitmap_set_bit (protected_bbs, bb->index);
15141 }
15142 XDELETEVEC (rev_post_order);
15143
15144 sbitmap_free (protected_bbs);
15145
15146 free_dominance_info (CDI_DOMINATORS);
15147 }
15148 \f
15149 /* If INSN is a call, return the underlying CALL expr. Return NULL_RTX
15150 otherwise. If INSN has two call rtx, then store the second one in
15151 SECOND_CALL. */
15152
15153 static rtx
15154 mips_call_expr_from_insn (rtx insn, rtx *second_call)
15155 {
15156 rtx x;
15157 rtx x2;
15158
15159 if (!CALL_P (insn))
15160 return NULL_RTX;
15161
15162 x = PATTERN (insn);
15163 if (GET_CODE (x) == PARALLEL)
15164 {
15165 /* Calls returning complex values have two CALL rtx. Look for the second
15166 one here, and return it via the SECOND_CALL arg. */
15167 x2 = XVECEXP (x, 0, 1);
15168 if (GET_CODE (x2) == SET)
15169 x2 = XEXP (x2, 1);
15170 if (GET_CODE (x2) == CALL)
15171 *second_call = x2;
15172
15173 x = XVECEXP (x, 0, 0);
15174 }
15175 if (GET_CODE (x) == SET)
15176 x = XEXP (x, 1);
15177 gcc_assert (GET_CODE (x) == CALL);
15178
15179 return x;
15180 }
15181
15182 /* REG is set in DEF. See if the definition is one of the ways we load a
15183 register with a symbol address for a mips_use_pic_fn_addr_reg_p call.
15184 If it is, return the symbol reference of the function, otherwise return
15185 NULL_RTX.
15186
15187 If RECURSE_P is true, use mips_find_pic_call_symbol to interpret
15188 the values of source registers, otherwise treat such registers as
15189 having an unknown value. */
15190
15191 static rtx
15192 mips_pic_call_symbol_from_set (df_ref def, rtx reg, bool recurse_p)
15193 {
15194 rtx def_insn, set;
15195
15196 if (DF_REF_IS_ARTIFICIAL (def))
15197 return NULL_RTX;
15198
15199 def_insn = DF_REF_INSN (def);
15200 set = single_set (def_insn);
15201 if (set && rtx_equal_p (SET_DEST (set), reg))
15202 {
15203 rtx note, src, symbol;
15204
15205 /* First see whether the source is a plain symbol. This is used
15206 when calling symbols that are not lazily bound. */
15207 src = SET_SRC (set);
15208 if (GET_CODE (src) == SYMBOL_REF)
15209 return src;
15210
15211 /* Handle %call16 references. */
15212 symbol = mips_strip_unspec_call (src);
15213 if (symbol)
15214 {
15215 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
15216 return symbol;
15217 }
15218
15219 /* If we have something more complicated, look for a
15220 REG_EQUAL or REG_EQUIV note. */
15221 note = find_reg_equal_equiv_note (def_insn);
15222 if (note && GET_CODE (XEXP (note, 0)) == SYMBOL_REF)
15223 return XEXP (note, 0);
15224
15225 /* Follow at most one simple register copy. Such copies are
15226 interesting in cases like:
15227
15228 for (...)
15229 {
15230 locally_binding_fn (...);
15231 }
15232
15233 and:
15234
15235 locally_binding_fn (...);
15236 ...
15237 locally_binding_fn (...);
15238
15239 where the load of locally_binding_fn can legitimately be
15240 hoisted or shared. However, we do not expect to see complex
15241 chains of copies, so a full worklist solution to the problem
15242 would probably be overkill. */
15243 if (recurse_p && REG_P (src))
15244 return mips_find_pic_call_symbol (def_insn, src, false);
15245 }
15246
15247 return NULL_RTX;
15248 }
15249
15250 /* Find the definition of the use of REG in INSN. See if the definition
15251 is one of the ways we load a register with a symbol address for a
15252 mips_use_pic_fn_addr_reg_p call. If it is, return the symbol reference
15253 of the function, otherwise return NULL_RTX. RECURSE_P is as for
15254 mips_pic_call_symbol_from_set. */
15255
15256 static rtx
15257 mips_find_pic_call_symbol (rtx insn, rtx reg, bool recurse_p)
15258 {
15259 df_ref use;
15260 struct df_link *defs;
15261 rtx symbol;
15262
15263 use = df_find_use (insn, regno_reg_rtx[REGNO (reg)]);
15264 if (!use)
15265 return NULL_RTX;
15266 defs = DF_REF_CHAIN (use);
15267 if (!defs)
15268 return NULL_RTX;
15269 symbol = mips_pic_call_symbol_from_set (defs->ref, reg, recurse_p);
15270 if (!symbol)
15271 return NULL_RTX;
15272
15273 /* If we have more than one definition, they need to be identical. */
15274 for (defs = defs->next; defs; defs = defs->next)
15275 {
15276 rtx other;
15277
15278 other = mips_pic_call_symbol_from_set (defs->ref, reg, recurse_p);
15279 if (!rtx_equal_p (symbol, other))
15280 return NULL_RTX;
15281 }
15282
15283 return symbol;
15284 }
15285
15286 /* Replace the args_size operand of the call expression CALL with a
15287 call-attribute UNSPEC recording both args_size and the function symbol SYMBOL. */
15288
15289 static void
15290 mips_annotate_pic_call_expr (rtx call, rtx symbol)
15291 {
15292 rtx args_size;
15293
15294 args_size = XEXP (call, 1);
15295 XEXP (call, 1) = gen_rtx_UNSPEC (GET_MODE (args_size),
15296 gen_rtvec (2, args_size, symbol),
15297 UNSPEC_CALL_ATTR);
15298 }
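
/* A hedged sketch of the transformation above (operand values invented):
   a call rtx such as

       (call (mem:SI (reg:SI 25)) (const_int 16))

   becomes

       (call (mem:SI (reg:SI 25))
             (unspec [(const_int 16)
                      (symbol_ref "foo")] UNSPEC_CALL_ATTR))

   so the original args_size value is still available as operand 0 of
   the UNSPEC and the symbol as operand 1.  */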
15299
15300 /* OPERANDS[ARGS_SIZE_OPNO] is the args_size operand of a CALL expression.
15301 See whether it holds the call attributes instead of the args_size
15302 argument. If so, set OPERANDS[ARGS_SIZE_OPNO] to the function symbol
15303 from the call attributes and return true. Also return false if
15304 ARGS_SIZE_OPNO is -1. */
15305
15306 bool
15307 mips_get_pic_call_symbol (rtx *operands, int args_size_opno)
15308 {
15309 rtx args_size, symbol;
15310
15311 if (!TARGET_RELAX_PIC_CALLS || args_size_opno == -1)
15312 return false;
15313
15314 args_size = operands[args_size_opno];
15315 if (GET_CODE (args_size) != UNSPEC)
15316 return false;
15317 gcc_assert (XINT (args_size, 1) == UNSPEC_CALL_ATTR);
15318
15319 symbol = XVECEXP (args_size, 0, 1);
15320 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
15321
15322 operands[args_size_opno] = symbol;
15323 return true;
15324 }
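
/* Usage sketch (hypothetical caller): for a call pattern whose operand 1
   is the args_size operand,

       if (mips_get_pic_call_symbol (operands, 1))
         ... operands[1] now holds the annotated SYMBOL_REF ...

   When the operand still holds a plain args_size value, or when
   TARGET_RELAX_PIC_CALLS is false, the function returns false and
   leaves the operand unchanged.  */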
15325
15326 /* Use DF to annotate PIC indirect calls with the function symbol they
15327 dispatch to. */
15328
15329 static void
15330 mips_annotate_pic_calls (void)
15331 {
15332 basic_block bb;
15333 rtx insn;
15334
15335 FOR_EACH_BB (bb)
15336 FOR_BB_INSNS (bb, insn)
15337 {
15338 rtx call, reg, symbol, second_call;
15339
15340 second_call = 0;
15341 call = mips_call_expr_from_insn (insn, &second_call);
15342 if (!call)
15343 continue;
15344 gcc_assert (MEM_P (XEXP (call, 0)));
15345 reg = XEXP (XEXP (call, 0), 0);
15346 if (!REG_P (reg))
15347 continue;
15348
15349 symbol = mips_find_pic_call_symbol (insn, reg, true);
15350 if (symbol)
15351 {
15352 mips_annotate_pic_call_expr (call, symbol);
15353 if (second_call)
15354 mips_annotate_pic_call_expr (second_call, symbol);
15355 }
15356 }
15357 }
15358 \f
15359 /* A temporary variable used by for_each_rtx callbacks, etc. */
15360 static rtx mips_sim_insn;
15361
15362 /* A structure representing the state of the processor pipeline.
15363 Used by the mips_sim_* family of functions. */
15364 struct mips_sim {
15365 /* The maximum number of instructions that can be issued in a cycle.
15366 (Caches mips_issue_rate.) */
15367 unsigned int issue_rate;
15368
15369 /* The current simulation time. */
15370 unsigned int time;
15371
15372 /* How many more instructions can be issued in the current cycle. */
15373 unsigned int insns_left;
15374
15375 /* LAST_SET[X].INSN is the last instruction to set register X.
15376 LAST_SET[X].TIME is the time at which that instruction was issued.
15377 INSN is null if no instruction has yet set register X. */
15378 struct {
15379 rtx insn;
15380 unsigned int time;
15381 } last_set[FIRST_PSEUDO_REGISTER];
15382
15383 /* The pipeline's current DFA state. */
15384 state_t dfa_state;
15385 };
15386
15387 /* Reset STATE to the initial simulation state. */
15388
15389 static void
15390 mips_sim_reset (struct mips_sim *state)
15391 {
15392 curr_state = state->dfa_state;
15393
15394 state->time = 0;
15395 state->insns_left = state->issue_rate;
15396 memset (&state->last_set, 0, sizeof (state->last_set));
15397 state_reset (curr_state);
15398
15399 targetm.sched.init (0, false, 0);
15400 advance_state (curr_state);
15401 }
15402
15403 /* Initialize STATE before its first use. DFA_STATE points to an
15404 allocated but uninitialized DFA state. */
15405
15406 static void
15407 mips_sim_init (struct mips_sim *state, state_t dfa_state)
15408 {
15409 if (targetm.sched.init_dfa_pre_cycle_insn)
15410 targetm.sched.init_dfa_pre_cycle_insn ();
15411
15412 if (targetm.sched.init_dfa_post_cycle_insn)
15413 targetm.sched.init_dfa_post_cycle_insn ();
15414
15415 state->issue_rate = mips_issue_rate ();
15416 state->dfa_state = dfa_state;
15417 mips_sim_reset (state);
15418 }
15419
15420 /* Advance STATE by one clock cycle. */
15421
15422 static void
15423 mips_sim_next_cycle (struct mips_sim *state)
15424 {
15425 curr_state = state->dfa_state;
15426
15427 state->time++;
15428 state->insns_left = state->issue_rate;
15429 advance_state (curr_state);
15430 }
15431
15432 /* Advance simulation state STATE until instruction INSN can read
15433 register REG. */
15434
15435 static void
15436 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
15437 {
15438 unsigned int regno, end_regno;
15439
15440 end_regno = END_REGNO (reg);
15441 for (regno = REGNO (reg); regno < end_regno; regno++)
15442 if (state->last_set[regno].insn != 0)
15443 {
15444 unsigned int t;
15445
15446 t = (state->last_set[regno].time
15447 + insn_latency (state->last_set[regno].insn, insn));
15448 while (state->time < t)
15449 mips_sim_next_cycle (state);
15450 }
15451 }
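
/* Worked example (register and times invented): if $5 was last set at
   time 10 by an instruction with a 3-cycle latency to INSN, the loop
   above advances STATE to time 13 before returning.  */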
15452
15453 /* A for_each_rtx callback. If *X is a register, advance simulation state
15454 DATA until mips_sim_insn can read the register's value. */
15455
15456 static int
15457 mips_sim_wait_regs_2 (rtx *x, void *data)
15458 {
15459 if (REG_P (*x))
15460 mips_sim_wait_reg ((struct mips_sim *) data, mips_sim_insn, *x);
15461 return 0;
15462 }
15463
15464 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
15465
15466 static void
15467 mips_sim_wait_regs_1 (rtx *x, void *data)
15468 {
15469 for_each_rtx (x, mips_sim_wait_regs_2, data);
15470 }
15471
15472 /* Advance simulation state STATE until all of INSN's register
15473 dependencies are satisfied. */
15474
15475 static void
15476 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
15477 {
15478 mips_sim_insn = insn;
15479 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
15480 }
15481
15482 /* Advance simulation state STATE until the units required by
15483 instruction INSN are available. */
15484
15485 static void
15486 mips_sim_wait_units (struct mips_sim *state, rtx insn)
15487 {
15488 state_t tmp_state;
15489
15490 tmp_state = alloca (state_size ());
15491 while (state->insns_left == 0
15492 || (memcpy (tmp_state, state->dfa_state, state_size ()),
15493 state_transition (tmp_state, insn) >= 0))
15494 mips_sim_next_cycle (state);
15495 }
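
/* Note: state_transition returns a negative value when the trial
   transition succeeds, i.e. when INSN could issue from the copied
   state, so the loop above keeps advancing cycles for as long as
   issue is impossible.  */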
15496
15497 /* Advance simulation state STATE until INSN is ready to issue. */
15498
15499 static void
15500 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
15501 {
15502 mips_sim_wait_regs (state, insn);
15503 mips_sim_wait_units (state, insn);
15504 }
15505
15506 /* mips_sim_insn has just set X. Update the LAST_SET array
15507 in simulation state DATA. */
15508
15509 static void
15510 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
15511 {
15512 struct mips_sim *state;
15513
15514 state = (struct mips_sim *) data;
15515 if (REG_P (x))
15516 {
15517 unsigned int regno, end_regno;
15518
15519 end_regno = END_REGNO (x);
15520 for (regno = REGNO (x); regno < end_regno; regno++)
15521 {
15522 state->last_set[regno].insn = mips_sim_insn;
15523 state->last_set[regno].time = state->time;
15524 }
15525 }
15526 }
15527
15528 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
15529 can issue immediately (i.e., that mips_sim_wait_insn has already
15530 been called). */
15531
15532 static void
15533 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
15534 {
15535 curr_state = state->dfa_state;
15536
15537 state_transition (curr_state, insn);
15538 state->insns_left = targetm.sched.variable_issue (0, false, insn,
15539 state->insns_left);
15540
15541 mips_sim_insn = insn;
15542 note_stores (PATTERN (insn), mips_sim_record_set, state);
15543 }
15544
15545 /* Simulate issuing a NOP in state STATE. */
15546
15547 static void
15548 mips_sim_issue_nop (struct mips_sim *state)
15549 {
15550 if (state->insns_left == 0)
15551 mips_sim_next_cycle (state);
15552 state->insns_left--;
15553 }
15554
15555 /* Update simulation state STATE so that it's ready to accept the instruction
15556 after INSN. INSN should be part of the main rtl chain, not a member of a
15557 SEQUENCE. */
15558
15559 static void
15560 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
15561 {
15562 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
15563 if (JUMP_P (insn))
15564 mips_sim_issue_nop (state);
15565
15566 switch (GET_CODE (SEQ_BEGIN (insn)))
15567 {
15568 case CODE_LABEL:
15569 case CALL_INSN:
15570 /* We can't predict the processor state after a call or label. */
15571 mips_sim_reset (state);
15572 break;
15573
15574 case JUMP_INSN:
15575 /* The delay slots of branch likely instructions are only executed
15576 when the branch is taken. Therefore, if the caller has simulated
15577 the delay slot instruction, STATE does not really reflect the state
15578 of the pipeline for the instruction after the delay slot. Also,
15579 branch likely instructions tend to incur a penalty when not taken,
15580 so there will probably be an extra delay between the branch and
15581 the instruction after the delay slot. */
15582 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
15583 mips_sim_reset (state);
15584 break;
15585
15586 default:
15587 break;
15588 }
15589 }
15590
15591 /* Use simulator state STATE to calculate the execution time of
15592 instruction sequence SEQ. */
15593
15594 static unsigned int
15595 mips_seq_time (struct mips_sim *state, rtx seq)
15596 {
15597 mips_sim_reset (state);
15598 for (rtx insn = seq; insn; insn = NEXT_INSN (insn))
15599 {
15600 mips_sim_wait_insn (state, insn);
15601 mips_sim_issue_insn (state, insn);
15602 }
15603 return state->time;
15604 }
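
/* A minimal usage sketch of the mips_sim_* machinery above (hypothetical;
   this helper is illustrative and is not called anywhere, and its name is
   invented for the example): measure the simulated issue time of the
   current function's instruction stream.  */

static unsigned int ATTRIBUTE_UNUSED
mips_sketch_seq_time (void)
{
  struct mips_sim state;
  unsigned int time;

  /* The DFA must be started before any mips_sim_* function is used
     and finished afterwards, as mips_set_tuning_info does below.  */
  dfa_start ();
  mips_sim_init (&state, alloca (state_size ()));
  time = mips_seq_time (&state, get_insns ());
  dfa_finish ();
  return time;
}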
15605 \f
15606 /* Return the execution-time cost of mips_tuning_info.fast_mult_zero_zero_p
15607 setting SETTING, using STATE to simulate instruction sequences. */
15608
15609 static unsigned int
15610 mips_mult_zero_zero_cost (struct mips_sim *state, bool setting)
15611 {
15612 mips_tuning_info.fast_mult_zero_zero_p = setting;
15613 start_sequence ();
15614
15615 enum machine_mode dword_mode = TARGET_64BIT ? TImode : DImode;
15616 rtx hilo = gen_rtx_REG (dword_mode, MD_REG_FIRST);
15617 mips_emit_move_or_split (hilo, const0_rtx, SPLIT_FOR_SPEED);
15618
15619 /* If the target provides maddsidi4 then a multiply-add is the most
15620 likely consumer of the result. Emit one to test for bypasses. */
15621 if (dword_mode == DImode && HAVE_maddsidi4)
15622 {
15623 rtx gpr = gen_rtx_REG (SImode, GP_REG_FIRST + 4);
15624 emit_insn (gen_maddsidi4 (hilo, gpr, gpr, hilo));
15625 }
15626
15627 unsigned int time = mips_seq_time (state, get_insns ());
15628 end_sequence ();
15629 return time;
15630 }
15631
15632 /* Check the relative speeds of "MULT $0,$0" and "MTLO $0; MTHI $0"
15633 and set up mips_tuning_info.fast_mult_zero_zero_p accordingly.
15634 Prefer MULT -- which is shorter -- in the event of a tie. */
15635
15636 static void
15637 mips_set_fast_mult_zero_zero_p (struct mips_sim *state)
15638 {
15639 if (TARGET_MIPS16)
15640 /* No MTLO or MTHI available. */
15641 mips_tuning_info.fast_mult_zero_zero_p = true;
15642 else
15643 {
15644 unsigned int true_time = mips_mult_zero_zero_cost (state, true);
15645 unsigned int false_time = mips_mult_zero_zero_cost (state, false);
15646 mips_tuning_info.fast_mult_zero_zero_p = (true_time <= false_time);
15647 }
15648 }
15649
15650 /* Set up costs based on the current architecture and tuning settings. */
15651
15652 static void
15653 mips_set_tuning_info (void)
15654 {
15655 if (mips_tuning_info.initialized_p
15656 && mips_tuning_info.arch == mips_arch
15657 && mips_tuning_info.tune == mips_tune
15658 && mips_tuning_info.mips16_p == TARGET_MIPS16)
15659 return;
15660
15661 mips_tuning_info.arch = mips_arch;
15662 mips_tuning_info.tune = mips_tune;
15663 mips_tuning_info.mips16_p = TARGET_MIPS16;
15664 mips_tuning_info.initialized_p = true;
15665
15666 dfa_start ();
15667
15668 struct mips_sim state;
15669 mips_sim_init (&state, alloca (state_size ()));
15670
15671 mips_set_fast_mult_zero_zero_p (&state);
15672
15673 dfa_finish ();
15674 }
15675
15676 /* Implement TARGET_EXPAND_TO_RTL_HOOK. */
15677
15678 static void
15679 mips_expand_to_rtl_hook (void)
15680 {
15681 /* We need to call this at a point where we can safely create sequences
15682 of instructions, so TARGET_OVERRIDE_OPTIONS is too early. We also
15683 need to call it at a point where the DFA infrastructure is not
15684 already in use, so we can't just call it lazily on demand.
15685
15686 At present, mips_tuning_info is only needed during post-expand
15687 RTL passes such as split_insns, so this hook should be early enough.
15688 We may need to move the call elsewhere if mips_tuning_info starts
15689 to be used for other things (such as rtx_costs, or expanders that
15690 could be called during gimple optimization). */
15691 mips_set_tuning_info ();
15692 }
15693 \f
15694 /* The VR4130 pipeline issues aligned pairs of instructions together,
15695 but it stalls the second instruction if it depends on the first.
15696 In order to cut down the amount of logic required, this dependence
15697 check is not based on a full instruction decode. Instead, any non-SPECIAL
15698 instruction is assumed to modify the register specified by bits 20-16
15699 (which is usually the "rt" field).
15700
15701 In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
15702 input, so we can end up with a false dependence between the branch
15703 and its delay slot. If this situation occurs in instruction INSN,
15704 try to avoid it by swapping rs and rt. */
15705
15706 static void
15707 vr4130_avoid_branch_rt_conflict (rtx insn)
15708 {
15709 rtx first, second;
15710
15711 first = SEQ_BEGIN (insn);
15712 second = SEQ_END (insn);
15713 if (JUMP_P (first)
15714 && NONJUMP_INSN_P (second)
15715 && GET_CODE (PATTERN (first)) == SET
15716 && GET_CODE (SET_DEST (PATTERN (first))) == PC
15717 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
15718 {
15719 /* Check for the right kind of condition. */
15720 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
15721 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
15722 && REG_P (XEXP (cond, 0))
15723 && REG_P (XEXP (cond, 1))
15724 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
15725 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
15726 {
15727 /* SECOND mentions the rt register but not the rs register. */
15728 rtx tmp = XEXP (cond, 0);
15729 XEXP (cond, 0) = XEXP (cond, 1);
15730 XEXP (cond, 1) = tmp;
15731 }
15732 }
15733 }
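
/* Worked example (registers invented): for "beq $4,$5,label" whose
   delay slot reads $5, the VR4130 assumes the branch writes its rt
   field ($5, bits 20-16) and stalls the pair.  Swapping the comparison
   operands gives "beq $5,$4,label", whose rt field is $4, removing the
   false dependence; EQ and NE are symmetric, so the branch itself is
   semantically unchanged.  */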
15734
15735 /* Implement -mvr4130-align. Go through each basic block and simulate the
15736 processor pipeline. If we find that a pair of instructions could execute
15737 in parallel, and the first of those instructions is not 8-byte aligned,
15738 insert a nop to make it aligned. */
15739
15740 static void
15741 vr4130_align_insns (void)
15742 {
15743 struct mips_sim state;
15744 rtx insn, subinsn, last, last2, next;
15745 bool aligned_p;
15746
15747 dfa_start ();
15748
15749 /* LAST is the last instruction before INSN to have a nonzero length.
15750 LAST2 is the last such instruction before LAST. */
15751 last = 0;
15752 last2 = 0;
15753
15754 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
15755 aligned_p = true;
15756
15757 mips_sim_init (&state, alloca (state_size ()));
15758 for (insn = get_insns (); insn != 0; insn = next)
15759 {
15760 unsigned int length;
15761
15762 next = NEXT_INSN (insn);
15763
15764 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
15765 This isn't really related to the alignment pass, but we do it on
15766 the fly to avoid a separate instruction walk. */
15767 vr4130_avoid_branch_rt_conflict (insn);
15768
15769 length = get_attr_length (insn);
15770 if (length > 0 && USEFUL_INSN_P (insn))
15771 FOR_EACH_SUBINSN (subinsn, insn)
15772 {
15773 mips_sim_wait_insn (&state, subinsn);
15774
15775 /* If we want this instruction to issue in parallel with the
15776 previous one, make sure that the previous instruction is
15777 aligned. There are several reasons why this isn't worthwhile
15778 when the second instruction is a call:
15779
15780 - Calls are less likely to be performance critical.
15781 - There's a good chance that the delay slot can execute
15782 in parallel with the call.
15783 - The return address would then be unaligned.
15784
15785 In general, if we're going to insert a nop between instructions
15786 X and Y, it's better to insert it immediately after X. That
15787 way, if the nop makes Y aligned, it will also align any labels
15788 between X and Y. */
15789 if (state.insns_left != state.issue_rate
15790 && !CALL_P (subinsn))
15791 {
15792 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
15793 {
15794 /* SUBINSN is the first instruction in INSN and INSN is
15795 aligned. We want to align the previous instruction
15796 instead, so insert a nop between LAST2 and LAST.
15797
15798 Note that LAST could be either a single instruction
15799 or a branch with a delay slot. In the latter case,
15800 LAST, like INSN, is already aligned, but the delay
15801 slot must have some extra delay that stops it from
15802 issuing at the same time as the branch. We therefore
15803 insert a nop before the branch in order to align its
15804 delay slot. */
15805 gcc_assert (last2);
15806 emit_insn_after (gen_nop (), last2);
15807 aligned_p = false;
15808 }
15809 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
15810 {
15811 /* SUBINSN is the delay slot of INSN, but INSN is
15812 currently unaligned. Insert a nop between
15813 LAST and INSN to align it. */
15814 gcc_assert (last);
15815 emit_insn_after (gen_nop (), last);
15816 aligned_p = true;
15817 }
15818 }
15819 mips_sim_issue_insn (&state, subinsn);
15820 }
15821 mips_sim_finish_insn (&state, insn);
15822
15823 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
15824 length = get_attr_length (insn);
15825 if (length > 0)
15826 {
15827 /* If the instruction is an asm statement or multi-instruction
15828 mips.md pattern, the length is only an estimate. Insert an
15829 8-byte alignment after it so that the following instructions
15830 can be handled correctly. */
15831 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
15832 && (recog_memoized (insn) < 0 || length >= 8))
15833 {
15834 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
15835 next = NEXT_INSN (next);
15836 mips_sim_next_cycle (&state);
15837 aligned_p = true;
15838 }
15839 else if (length & 4)
15840 aligned_p = !aligned_p;
15841 last2 = last;
15842 last = insn;
15843 }
15844
15845 /* See whether INSN is an aligned label. */
15846 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
15847 aligned_p = true;
15848 }
15849 dfa_finish ();
15850 }
15851 \f
15852 /* This structure records that the current function has a LO_SUM
15853 involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is
15854 the largest offset applied to BASE by all such LO_SUMs. */
15855 struct mips_lo_sum_offset {
15856 rtx base;
15857 HOST_WIDE_INT offset;
15858 };
15859
15860 /* Return a hash value for SYMBOL_REF or LABEL_REF BASE. */
15861
15862 static hashval_t
15863 mips_hash_base (rtx base)
15864 {
15865 int do_not_record_p;
15866
15867 return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false);
15868 }
15869
15870 /* Hashtable helpers. */
15871
15872 struct mips_lo_sum_offset_hasher : typed_free_remove <mips_lo_sum_offset>
15873 {
15874 typedef mips_lo_sum_offset value_type;
15875 typedef rtx_def compare_type;
15876 static inline hashval_t hash (const value_type *);
15877 static inline bool equal (const value_type *, const compare_type *);
15878 };
15879
15880 /* Hash-table callbacks for mips_lo_sum_offsets. */
15881
15882 inline hashval_t
15883 mips_lo_sum_offset_hasher::hash (const value_type *entry)
15884 {
15885 return mips_hash_base (entry->base);
15886 }
15887
15888 inline bool
15889 mips_lo_sum_offset_hasher::equal (const value_type *entry,
15890 const compare_type *value)
15891 {
15892 return rtx_equal_p (entry->base, value);
15893 }
15894
15895 typedef hash_table <mips_lo_sum_offset_hasher> mips_offset_table;
15896
15897 /* Look up symbolic constant X in HTAB, a hash table of mips_lo_sum_offsets.
15898 If OPTION is NO_INSERT, return true if X can be paired with a recorded
15899 LO_SUM; if OPTION is INSERT, record X in the table and return true. */
15900
15901 static bool
15902 mips_lo_sum_offset_lookup (mips_offset_table htab, rtx x,
15903 enum insert_option option)
15904 {
15905 rtx base, offset;
15906 mips_lo_sum_offset **slot;
15907 struct mips_lo_sum_offset *entry;
15908
15909 /* Split X into a base and offset. */
15910 split_const (x, &base, &offset);
15911 if (UNSPEC_ADDRESS_P (base))
15912 base = UNSPEC_ADDRESS (base);
15913
15914 /* Look up the base in the hash table. */
15915 slot = htab.find_slot_with_hash (base, mips_hash_base (base), option);
15916 if (slot == NULL)
15917 return false;
15918
15919 entry = (struct mips_lo_sum_offset *) *slot;
15920 if (option == INSERT)
15921 {
15922 if (entry == NULL)
15923 {
15924 entry = XNEW (struct mips_lo_sum_offset);
15925 entry->base = base;
15926 entry->offset = INTVAL (offset);
15927 *slot = entry;
15928 }
15929 else
15930 {
15931 if (INTVAL (offset) > entry->offset)
15932 entry->offset = INTVAL (offset);
15933 }
15934 }
15935 return INTVAL (offset) <= entry->offset;
15936 }
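
/* Worked example (offsets invented): after INSERT lookups for foo+0x4
   and foo+0x10, the entry for "foo" records offset 0x10.  A NO_INSERT
   lookup of foo+0x8 then returns true, while one of foo+0x14 returns
   false.  */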
15937
15938 /* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table.
15939 Record every LO_SUM in *LOC. */
15940
15941 static int
15942 mips_record_lo_sum (rtx *loc, void *data)
15943 {
15944 if (GET_CODE (*loc) == LO_SUM)
15945 mips_lo_sum_offset_lookup (*(mips_offset_table*) data,
15946 XEXP (*loc, 1), INSERT);
15947 return 0;
15948 }
15949
15950 /* Return true if INSN is a SET of an orphaned high-part relocation.
15951 HTAB is a hash table of mips_lo_sum_offsets that describes all the
15952 LO_SUMs in the current function. */
15953
15954 static bool
15955 mips_orphaned_high_part_p (mips_offset_table htab, rtx insn)
15956 {
15957 enum mips_symbol_type type;
15958 rtx x, set;
15959
15960 set = single_set (insn);
15961 if (set)
15962 {
15963 /* Check for %his. */
15964 x = SET_SRC (set);
15965 if (GET_CODE (x) == HIGH
15966 && absolute_symbolic_operand (XEXP (x, 0), VOIDmode))
15967 return !mips_lo_sum_offset_lookup (htab, XEXP (x, 0), NO_INSERT);
15968
15969 /* Check for local %gots (and %got_pages, which is redundant but OK). */
15970 if (GET_CODE (x) == UNSPEC
15971 && XINT (x, 1) == UNSPEC_LOAD_GOT
15972 && mips_symbolic_constant_p (XVECEXP (x, 0, 1),
15973 SYMBOL_CONTEXT_LEA, &type)
15974 && type == SYMBOL_GOTOFF_PAGE)
15975 return !mips_lo_sum_offset_lookup (htab, XVECEXP (x, 0, 1), NO_INSERT);
15976 }
15977 return false;
15978 }
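
/* Illustrative example: a %hi/%lo pair such as

       lui  $2,%hi(x)
       lw   $2,%lo(x)($2)

   records a LO_SUM for "x", so the HIGH set by the LUI is not orphaned.
   If a later pass deletes the LW but leaves the LUI, no matching LO_SUM
   is recorded and the function above flags the LUI for deletion.  */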
15979
15980 /* Subroutine of mips_reorg_process_insns. If there is a hazard between
15981 INSN and a previous instruction, avoid it by inserting nops after
15982 instruction AFTER.
15983
15984 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
15985 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
15986 before using the value of that register. *HILO_DELAY counts the
15987 number of instructions since the last hilo hazard (that is,
15988 the number of instructions since the last MFLO or MFHI).
15989
15990 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
15991 for the next instruction.
15992
15993 LO_REG is an rtx for the LO register, used in dependence checking. */
15994
15995 static void
15996 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
15997 rtx *delayed_reg, rtx lo_reg)
15998 {
15999 rtx pattern, set;
16000 int nops, ninsns;
16001
16002 pattern = PATTERN (insn);
16003
16004 /* Do not put the whole function in .set noreorder if it contains
16005 an asm statement. We don't know whether there will be hazards
16006 between the asm statement and the gcc-generated code. */
16007 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
16008 cfun->machine->all_noreorder_p = false;
16009
16010 /* Ignore zero-length instructions (barriers and the like). */
16011 ninsns = get_attr_length (insn) / 4;
16012 if (ninsns == 0)
16013 return;
16014
16015 /* Work out how many nops are needed. Note that we only care about
16016 registers that are explicitly mentioned in the instruction's pattern.
16017 It doesn't matter that calls use the argument registers or that they
16018 clobber hi and lo. */
16019 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
16020 nops = 2 - *hilo_delay;
16021 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
16022 nops = 1;
16023 else
16024 nops = 0;
16025
16026 /* Insert the nops between this instruction and the previous one.
16027 Each new nop takes us further from the last hilo hazard. */
16028 *hilo_delay += nops;
16029 while (nops-- > 0)
16030 emit_insn_after (gen_hazard_nop (), after);
16031
16032 /* Set up the state for the next instruction. */
16033 *hilo_delay += ninsns;
16034 *delayed_reg = 0;
16035 if (INSN_CODE (insn) >= 0)
16036 switch (get_attr_hazard (insn))
16037 {
16038 case HAZARD_NONE:
16039 break;
16040
16041 case HAZARD_HILO:
16042 *hilo_delay = 0;
16043 break;
16044
16045 case HAZARD_DELAY:
16046 set = single_set (insn);
16047 gcc_assert (set);
16048 *delayed_reg = SET_DEST (set);
16049 break;
16050 }
16051 }
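
/* Worked example: an MFLO sets *HILO_DELAY to 0 (HAZARD_HILO).  If the
   very next instruction writes LO, it needs 2 - 0 = 2 nops before it;
   with one unrelated instruction in between (*HILO_DELAY == 1), a
   single nop suffices.  */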
16052
16053 /* Go through the instruction stream and insert nops where necessary.
16054 Also delete any high-part relocations whose partnering low parts
16055 are now all dead. See if the whole function can then be put into
16056 .set noreorder and .set nomacro. */
16057
16058 static void
16059 mips_reorg_process_insns (void)
16060 {
16061 rtx insn, last_insn, subinsn, next_insn, lo_reg, delayed_reg;
16062 int hilo_delay;
16063 mips_offset_table htab;
16064
16065 /* Force all instructions to be split into their final form. */
16066 split_all_insns_noflow ();
16067
16068 /* Recalculate instruction lengths without taking nops into account. */
16069 cfun->machine->ignore_hazard_length_p = true;
16070 shorten_branches (get_insns ());
16071
16072 cfun->machine->all_noreorder_p = true;
16073
16074 /* We don't track MIPS16 PC-relative offsets closely enough to do
16075 a good job of ".set noreorder" code in MIPS16 mode. */
16076 if (TARGET_MIPS16)
16077 cfun->machine->all_noreorder_p = false;
16078
16079 /* Code that doesn't use explicit relocs can't be ".set nomacro". */
16080 if (!TARGET_EXPLICIT_RELOCS)
16081 cfun->machine->all_noreorder_p = false;
16082
16083 /* Profiled functions can't be all noreorder because the profiler
16084 support uses assembler macros. */
16085 if (crtl->profile)
16086 cfun->machine->all_noreorder_p = false;
16087
16088 /* Code compiled with -mfix-vr4120, -mfix-rm7000 or -mfix-24k can't be
16089 all noreorder because we rely on the assembler to work around some
16090 errata. The R5900 too has several bugs. */
16091 if (TARGET_FIX_VR4120
16092 || TARGET_FIX_RM7000
16093 || TARGET_FIX_24K
16094 || TARGET_MIPS5900)
16095 cfun->machine->all_noreorder_p = false;
16096
16097 /* The same is true for -mfix-vr4130 if we might generate MFLO or
16098 MFHI instructions. Note that we avoid using MFLO and MFHI if
16099 the VR4130 MACC and DMACC instructions are available instead;
16100 see the *mfhilo_{si,di}_macc patterns. */
16101 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
16102 cfun->machine->all_noreorder_p = false;
16103
16104 htab.create (37);
16105
16106 /* Make a first pass over the instructions, recording all the LO_SUMs. */
16107 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
16108 FOR_EACH_SUBINSN (subinsn, insn)
16109 if (USEFUL_INSN_P (subinsn))
16110 for_each_rtx (&PATTERN (subinsn), mips_record_lo_sum, &htab);
16111
16112 last_insn = 0;
16113 hilo_delay = 2;
16114 delayed_reg = 0;
16115 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
16116
16117 /* Make a second pass over the instructions. Delete orphaned
16118 high-part relocations or turn them into NOPs. Avoid hazards
16119 by inserting NOPs. */
16120 for (insn = get_insns (); insn != 0; insn = next_insn)
16121 {
16122 next_insn = NEXT_INSN (insn);
16123 if (USEFUL_INSN_P (insn))
16124 {
16125 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
16126 {
16127 /* If we find an orphaned high-part relocation in a delay
16128 slot, it's easier to turn that instruction into a NOP than
16129 to delete it. The delay slot will be a NOP either way. */
16130 FOR_EACH_SUBINSN (subinsn, insn)
16131 if (INSN_P (subinsn))
16132 {
16133 if (mips_orphaned_high_part_p (htab, subinsn))
16134 {
16135 PATTERN (subinsn) = gen_nop ();
16136 INSN_CODE (subinsn) = CODE_FOR_nop;
16137 }
16138 mips_avoid_hazard (last_insn, subinsn, &hilo_delay,
16139 &delayed_reg, lo_reg);
16140 }
16141 last_insn = insn;
16142 }
16143 else
16144 {
16145 /* INSN is a single instruction. Delete it if it's an
16146 orphaned high-part relocation. */
16147 if (mips_orphaned_high_part_p (htab, insn))
16148 delete_insn (insn);
16149 /* Also delete cache barriers if the last instruction
16150 was an annulled branch. INSN will not be speculatively
16151 executed. */
16152 else if (recog_memoized (insn) == CODE_FOR_r10k_cache_barrier
16153 && last_insn
16154 && JUMP_P (SEQ_BEGIN (last_insn))
16155 && INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (last_insn)))
16156 delete_insn (insn);
16157 else
16158 {
16159 mips_avoid_hazard (last_insn, insn, &hilo_delay,
16160 &delayed_reg, lo_reg);
16161 last_insn = insn;
16162 }
16163 }
16164 }
16165 }
16166
16167 htab.dispose ();
16168 }
16169
16170 /* Return true if the function has a long branch instruction. */
16171
16172 static bool
16173 mips_has_long_branch_p (void)
16174 {
16175 rtx insn, subinsn;
16176 int normal_length;
16177
16178 /* We need up-to-date instruction lengths. */
16179 shorten_branches (get_insns ());
16180
16181 /* Look for a branch that is longer than normal. The normal length for
16182 non-MIPS16 branches is 8, because the length includes the delay slot.
16183 It is 4 for MIPS16, because MIPS16 branches are extended instructions,
16184 but they have no delay slot. */
16185 normal_length = (TARGET_MIPS16 ? 4 : 8);
16186 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
16187 FOR_EACH_SUBINSN (subinsn, insn)
16188 if (JUMP_P (subinsn)
16189 && get_attr_length (subinsn) > normal_length
16190 && (any_condjump_p (subinsn) || any_uncondjump_p (subinsn)))
16191 return true;
16192
16193 return false;
16194 }
16195
16196 /* If we are using a GOT, but have not decided to use a global pointer yet,
16197 see whether we need one to implement long branches. Convert the ghost
16198 global-pointer instructions into real ones if so. */
16199
16200 static bool
16201 mips_expand_ghost_gp_insns (void)
16202 {
16203 /* Quick exit if we already know that we will or won't need a
16204 global pointer. */
16205 if (!TARGET_USE_GOT
16206 || cfun->machine->global_pointer == INVALID_REGNUM
16207 || mips_must_initialize_gp_p ())
16208 return false;
16209
16210 /* Run a full check for long branches. */
16211 if (!mips_has_long_branch_p ())
16212 return false;
16213
16214 /* We've now established that we need $gp. */
16215 cfun->machine->must_initialize_gp_p = true;
16216 split_all_insns_noflow ();
16217
16218 return true;
16219 }
16220
16221 /* Subroutine of mips_reorg to manage passes that require DF. */
16222
16223 static void
16224 mips_df_reorg (void)
16225 {
16226 /* Create def-use chains. */
16227 df_set_flags (DF_EQ_NOTES);
16228 df_chain_add_problem (DF_UD_CHAIN);
16229 df_analyze ();
16230
16231 if (TARGET_RELAX_PIC_CALLS)
16232 mips_annotate_pic_calls ();
16233
16234 if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE)
16235 r10k_insert_cache_barriers ();
16236
16237 df_finish_pass (false);
16238 }
16239
16240 /* Emit code to load LABEL_REF SRC into MIPS16 register DEST. This is
16241 called very late in mips_reorg, but the caller is required to run
16242 mips16_lay_out_constants on the result. */
16243
16244 static void
16245 mips16_load_branch_target (rtx dest, rtx src)
16246 {
16247 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
16248 {
16249 rtx page, low;
16250
16251 if (mips_cfun_has_cprestore_slot_p ())
16252 mips_emit_move (dest, mips_cprestore_slot (dest, true));
16253 else
16254 mips_emit_move (dest, pic_offset_table_rtx);
16255 page = mips_unspec_address (src, SYMBOL_GOTOFF_PAGE);
16256 low = mips_unspec_address (src, SYMBOL_GOT_PAGE_OFST);
16257 emit_insn (gen_rtx_SET (VOIDmode, dest,
16258 PMODE_INSN (gen_unspec_got, (dest, page))));
16259 emit_insn (gen_rtx_SET (VOIDmode, dest,
16260 gen_rtx_LO_SUM (Pmode, dest, low)));
16261 }
16262 else
16263 {
16264 src = mips_unspec_address (src, SYMBOL_ABSOLUTE);
16265 mips_emit_move (dest, src);
16266 }
16267 }
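
/* An illustrative sketch of the PIC case above as o32-style
   pseudo-assembly (the relocation spellings are indicative only):

       lw     $2,<cprestore slot>       # or: move $2,$28
       lw     $2,%got_page(target)($2)
       addiu  $2,$2,%got_ofst(target)  */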
16268
16269 /* If we're compiling a MIPS16 function, look for and split any long branches.
16270 This must be called after all other instruction modifications in
16271 mips_reorg. */
16272
16273 static void
16274 mips16_split_long_branches (void)
16275 {
16276 bool something_changed;
16277
16278 if (!TARGET_MIPS16)
16279 return;
16280
16281 /* Loop until the alignments for all targets are sufficient. */
16282 do
16283 {
16284 rtx insn;
16285
16286 shorten_branches (get_insns ());
16287 something_changed = false;
16288 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
16289 if (JUMP_P (insn)
16290 && get_attr_length (insn) > 4
16291 && (any_condjump_p (insn) || any_uncondjump_p (insn)))
16292 {
16293 rtx old_label, new_label, temp, saved_temp;
16294 rtx target, jump, jump_sequence;
16295
16296 start_sequence ();
16297
16298 /* Free up a MIPS16 register by saving it in $1. */
16299 saved_temp = gen_rtx_REG (Pmode, AT_REGNUM);
16300 temp = gen_rtx_REG (Pmode, GP_REG_FIRST + 2);
16301 emit_move_insn (saved_temp, temp);
16302
16303 /* Load the branch target into TEMP. */
16304 old_label = JUMP_LABEL (insn);
16305 target = gen_rtx_LABEL_REF (Pmode, old_label);
16306 mips16_load_branch_target (temp, target);
16307
16308 /* Jump to the target and restore the register's
16309 original value. */
16310 jump = emit_jump_insn (PMODE_INSN (gen_indirect_jump_and_restore,
16311 (temp, temp, saved_temp)));
16312 JUMP_LABEL (jump) = old_label;
16313 LABEL_NUSES (old_label)++;
16314
16315 /* Rewrite any symbolic references that are supposed to use
16316 a PC-relative constant pool. */
16317 mips16_lay_out_constants (false);
16318
16319 if (simplejump_p (insn))
16320 /* We're going to replace INSN with a longer form. */
16321 new_label = NULL_RTX;
16322 else
16323 {
16324 /* Create a branch-around label for the original
16325 instruction. */
16326 new_label = gen_label_rtx ();
16327 emit_label (new_label);
16328 }
16329
16330 jump_sequence = get_insns ();
16331 end_sequence ();
16332
16333 emit_insn_after (jump_sequence, insn);
16334 if (new_label)
16335 invert_jump (insn, new_label, false);
16336 else
16337 delete_insn (insn);
16338 something_changed = true;
16339 }
16340 }
16341 while (something_changed);
16342 }
16343
16344 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
16345
16346 static void
16347 mips_reorg (void)
16348 {
16349 /* Restore the BLOCK_FOR_INSN pointers, which are needed by DF. Also during
16350 insn splitting in mips16_lay_out_constants, DF insn info is only kept up
16351 to date if the CFG is available. */
16352 if (mips_cfg_in_reorg ())
16353 compute_bb_for_insn ();
16354 mips16_lay_out_constants (true);
16355 if (mips_cfg_in_reorg ())
16356 {
16357 mips_df_reorg ();
16358 free_bb_for_insn ();
16359 }
16360 }
16361
16362 /* We use a machine-specific pass to do a second machine-dependent reorg
16363 pass after delayed-branch scheduling. */
16364
16365 static unsigned int
16366 mips_machine_reorg2 (void)
16367 {
16368 mips_reorg_process_insns ();
16369 if (!TARGET_MIPS16
16370 && TARGET_EXPLICIT_RELOCS
16371 && TUNE_MIPS4130
16372 && TARGET_VR4130_ALIGN)
16373 vr4130_align_insns ();
16374 if (mips_expand_ghost_gp_insns ())
16375 /* The expansion could invalidate some of the VR4130 alignment
16376 optimizations, but this should be an extremely rare case anyhow. */
16377 mips_reorg_process_insns ();
16378 mips16_split_long_branches ();
16379 return 0;
16380 }
16381
16382 namespace {
16383
16384 const pass_data pass_data_mips_machine_reorg2 =
16385 {
16386 RTL_PASS, /* type */
16387 "mach2", /* name */
16388 OPTGROUP_NONE, /* optinfo_flags */
16389 false, /* has_gate */
16390 true, /* has_execute */
16391 TV_MACH_DEP, /* tv_id */
16392 0, /* properties_required */
16393 0, /* properties_provided */
16394 0, /* properties_destroyed */
16395 0, /* todo_flags_start */
16396 TODO_verify_rtl_sharing, /* todo_flags_finish */
16397 };
16398
16399 class pass_mips_machine_reorg2 : public rtl_opt_pass
16400 {
16401 public:
16402 pass_mips_machine_reorg2(gcc::context *ctxt)
16403 : rtl_opt_pass(pass_data_mips_machine_reorg2, ctxt)
16404 {}
16405
16406 /* opt_pass methods: */
16407 unsigned int execute () { return mips_machine_reorg2 (); }
16408
16409 }; // class pass_mips_machine_reorg2
16410
16411 } // anon namespace
16412
16413 rtl_opt_pass *
16414 make_pass_mips_machine_reorg2 (gcc::context *ctxt)
16415 {
16416 return new pass_mips_machine_reorg2 (ctxt);
16417 }
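
/* A hedged sketch of how this pass could be registered at start-up
   (e.g. from the option-override code): insert it after the first
   instance of the delayed-branch ("dbr") pass.  The function and
   variable names here are invented for the example.  */

static void ATTRIBUTE_UNUSED
mips_sketch_register_reorg2 (void)
{
  opt_pass *new_pass = make_pass_mips_machine_reorg2 (g);
  struct register_pass_info insert_pass_after_delayed_branch = {
    new_pass,                   /* pass */
    "dbr",                      /* reference_pass_name */
    1,                          /* ref_pass_instance_number */
    PASS_POS_INSERT_AFTER       /* pos_op */
  };
  register_pass (&insert_pass_after_delayed_branch);
}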
16418
16419 \f
16420 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
16421 in order to avoid duplicating too much logic from elsewhere. */
16422
16423 static void
16424 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
16425 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
16426 tree function)
16427 {
16428 rtx this_rtx, temp1, temp2, insn, fnaddr;
16429 bool use_sibcall_p;
16430
16431 /* Pretend to be a post-reload pass while generating rtl. */
16432 reload_completed = 1;
16433
16434 /* Mark the end of the (empty) prologue. */
16435 emit_note (NOTE_INSN_PROLOGUE_END);
16436
16437 /* Determine if we can use a sibcall to call FUNCTION directly. */
16438 fnaddr = XEXP (DECL_RTL (function), 0);
16439 use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
16440 && const_call_insn_operand (fnaddr, Pmode));
16441
16442 /* Determine if we need to load FNADDR from the GOT. */
16443 if (!use_sibcall_p
16444 && (mips_got_symbol_type_p
16445 (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))))
16446 {
16447 /* Pick a global pointer. Use a call-clobbered register if
16448 TARGET_CALL_SAVED_GP. */
16449 cfun->machine->global_pointer
16450 = TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
16451 cfun->machine->must_initialize_gp_p = true;
16452 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
16453
16454 /* Set up the global pointer for n32 or n64 abicalls. */
16455 mips_emit_loadgp ();
16456 }
16457
16458 /* We need two temporary registers in some cases. */
16459 temp1 = gen_rtx_REG (Pmode, 2);
16460 temp2 = gen_rtx_REG (Pmode, 3);
16461
16462 /* Find out which register contains the "this" pointer. */
16463 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
16464 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
16465 else
16466 this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
16467
16468 /* Add DELTA to THIS_RTX. */
16469 if (delta != 0)
16470 {
16471 rtx offset = GEN_INT (delta);
16472 if (!SMALL_OPERAND (delta))
16473 {
16474 mips_emit_move (temp1, offset);
16475 offset = temp1;
16476 }
16477 emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
16478 }
16479
16480 /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
16481 if (vcall_offset != 0)
16482 {
16483 rtx addr;
16484
16485 /* Set TEMP1 to *THIS_RTX. */
16486 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
16487
16488 /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
16489 addr = mips_add_offset (temp2, temp1, vcall_offset);
16490
16491 /* Load the offset and add it to THIS_RTX. */
16492 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
16493 emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
16494 }
16495
16496 /* Jump to the target function. Use a sibcall if direct jumps are
16497 allowed, otherwise load the address into a register first. */
16498 if (use_sibcall_p)
16499 {
16500 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
16501 SIBLING_CALL_P (insn) = 1;
16502 }
16503 else
16504 {
16505 /* This is messy. GAS treats "la $25,foo" as part of a call
16506 sequence and may allow a global "foo" to be lazily bound.
16507 The general move patterns therefore reject this combination.
16508
16509 In this context, lazy binding would actually be OK
16510 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
16511 TARGET_CALL_SAVED_GP; see mips_load_call_address.
16512 We must therefore load the address via a temporary
16513 register if mips_dangerous_for_la25_p.
16514
16515 If we jump to the temporary register rather than $25,
16516 the assembler can use the move insn to fill the jump's
16517 delay slot.
16518
16519 We can use the same technique for MIPS16 code, where $25
16520 is not a valid JR register. */
16521 if (TARGET_USE_PIC_FN_ADDR_REG
16522 && !TARGET_MIPS16
16523 && !mips_dangerous_for_la25_p (fnaddr))
16524 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
16525 mips_load_call_address (MIPS_CALL_SIBCALL, temp1, fnaddr);
16526
16527 if (TARGET_USE_PIC_FN_ADDR_REG
16528 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
16529 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
16530 emit_jump_insn (gen_indirect_jump (temp1));
16531 }
16532
16533 /* Run just enough of rest_of_compilation. This sequence was
16534 "borrowed" from alpha.c. */
16535 insn = get_insns ();
16536 split_all_insns_noflow ();
16537 mips16_lay_out_constants (true);
16538 shorten_branches (insn);
16539 final_start_function (insn, file, 1);
16540 final (insn, file, 1);
16541 final_end_function ();
16542
16543 /* Clean up the vars set above. Note that final_end_function resets
16544 the global pointer for us. */
16545 reload_completed = 0;
16546 }
16547 \f
16548
16549 /* The last argument passed to mips_set_compression_mode,
16550 or -1 if the function hasn't been called yet. */
16551 static unsigned int old_compression_mode = -1;
16552
16553 /* Set up the target-dependent global state for ISA mode COMPRESSION_MODE,
16554 which is 0 (standard encoding), MASK_MIPS16 or MASK_MICROMIPS. */
16555
16556 static void
16557 mips_set_compression_mode (unsigned int compression_mode)
16558 {
16560 if (compression_mode == old_compression_mode)
16561 return;
16562
16563 /* Restore base settings of various flags. */
16564 target_flags = mips_base_target_flags;
16565 flag_schedule_insns = mips_base_schedule_insns;
16566 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
16567 flag_move_loop_invariants = mips_base_move_loop_invariants;
16568 align_loops = mips_base_align_loops;
16569 align_jumps = mips_base_align_jumps;
16570 align_functions = mips_base_align_functions;
16571 target_flags &= ~(MASK_MIPS16 | MASK_MICROMIPS);
16572 target_flags |= compression_mode;
16573
16574 if (compression_mode & MASK_MIPS16)
16575 {
16576 /* Switch to MIPS16 mode. */
16577 target_flags |= MASK_MIPS16;
16578
16579 /* Turn off SYNCI if it was on, MIPS16 doesn't support it. */
16580 target_flags &= ~MASK_SYNCI;
16581
16582 /* Don't run the scheduler before reload, since it tends to
16583 increase register pressure. */
16584 flag_schedule_insns = 0;
16585
16586 /* Don't do hot/cold partitioning. mips16_lay_out_constants expects
16587 the whole function to be in a single section. */
16588 flag_reorder_blocks_and_partition = 0;
16589
16590 /* Don't move loop invariants, because it tends to increase
16591 register pressure. It also introduces an extra move in cases
16592 where the constant is the first operand in a two-operand binary
16593 instruction, or when it forms a register argument to a function
16594 call. */
16595 flag_move_loop_invariants = 0;
16596
16597 target_flags |= MASK_EXPLICIT_RELOCS;
16598
16599 /* Experiments suggest we get the best overall section-anchor
16600 results from using the range of an unextended LW or SW. Code
16601 that makes heavy use of byte or short accesses can do better
16602 with ranges of 0...31 and 0...63 respectively, but most code is
16603 sensitive to the range of LW and SW instead. */
16604 targetm.min_anchor_offset = 0;
16605 targetm.max_anchor_offset = 127;
16606
16607 targetm.const_anchor = 0;
16608
16609 /* MIPS16 has no BAL instruction. */
16610 target_flags &= ~MASK_RELAX_PIC_CALLS;
16611
16612 /* The R4000 errata don't apply to any known MIPS16 cores.
16613 It's simpler to make the R4000 fixes and MIPS16 mode
16614 mutually exclusive. */
16615 target_flags &= ~MASK_FIX_R4000;
16616
16617 if (flag_pic && !TARGET_OLDABI)
16618 sorry ("MIPS16 PIC for ABIs other than o32 and o64");
16619
16620 if (TARGET_XGOT)
16621 sorry ("MIPS16 -mxgot code");
16622
16623 if (TARGET_HARD_FLOAT_ABI && !TARGET_OLDABI)
16624 sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
16625 }
16626 else
16627 {
16628 /* Switch to microMIPS or the standard encoding. */
16629
16630 if (TARGET_MICROMIPS)
16631 /* Avoid branch likely. */
16632 target_flags &= ~MASK_BRANCHLIKELY;
16633
16634 /* Provide default values for align_* for 64-bit targets. */
16635 if (TARGET_64BIT)
16636 {
16637 if (align_loops == 0)
16638 align_loops = 8;
16639 if (align_jumps == 0)
16640 align_jumps = 8;
16641 if (align_functions == 0)
16642 align_functions = 8;
16643 }
16644
16645 targetm.min_anchor_offset = -32768;
16646 targetm.max_anchor_offset = 32767;
16647
16648 targetm.const_anchor = 0x8000;
16649 }
16650
16651 /* (Re)initialize MIPS target internals for new ISA. */
16652 mips_init_relocs ();
16653
16654 if (compression_mode & MASK_MIPS16)
16655 {
16656 if (!mips16_globals)
16657 mips16_globals = save_target_globals_default_opts ();
16658 else
16659 restore_target_globals (mips16_globals);
16660 }
16661 else
16662 restore_target_globals (&default_target_globals);
16663
16664 old_compression_mode = compression_mode;
16665 }
16666
16667 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
16668 function should use the MIPS16 or microMIPS ISA and switch modes
16669 accordingly. */
16670
16671 static void
16672 mips_set_current_function (tree fndecl)
16673 {
16674 mips_set_compression_mode (mips_get_compress_mode (fndecl));
16675 }
16676 \f
16677 /* Allocate a chunk of memory for per-function machine-dependent data. */
16678
16679 static struct machine_function *
16680 mips_init_machine_status (void)
16681 {
16682 return ggc_alloc_cleared_machine_function ();
16683 }
16684
16685 /* Return the processor associated with the given ISA level, or null
16686 if the ISA isn't valid. */
16687
16688 static const struct mips_cpu_info *
16689 mips_cpu_info_from_isa (int isa)
16690 {
16691 unsigned int i;
16692
16693 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
16694 if (mips_cpu_info_table[i].isa == isa)
16695 return mips_cpu_info_table + i;
16696
16697 return NULL;
16698 }
16699
16700 /* Return the mips_cpu_info entry determined by an option whose value
16701 is OPT. */
16702
16703 static const struct mips_cpu_info *
16704 mips_cpu_info_from_opt (int opt)
16705 {
16706 switch (opt)
16707 {
16708 case MIPS_ARCH_OPTION_FROM_ABI:
16709 /* 'from-abi' selects the most compatible architecture for the
16710 given ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit
16711 ABIs. For the EABIs, we have to decide whether we're using
16712 the 32-bit or 64-bit version. */
16713 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
16714 : ABI_NEEDS_64BIT_REGS ? 3
16715 : (TARGET_64BIT ? 3 : 1));
16716
16717 case MIPS_ARCH_OPTION_NATIVE:
16718 gcc_unreachable ();
16719
16720 default:
16721 return &mips_cpu_info_table[opt];
16722 }
16723 }
16724
16725 /* Return a default mips_cpu_info entry, given that no -march= option
16726 was explicitly specified. */
16727
16728 static const struct mips_cpu_info *
16729 mips_default_arch (void)
16730 {
16731 #if defined (MIPS_CPU_STRING_DEFAULT)
16732 unsigned int i;
16733 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
16734 if (strcmp (mips_cpu_info_table[i].name, MIPS_CPU_STRING_DEFAULT) == 0)
16735 return mips_cpu_info_table + i;
16736 gcc_unreachable ();
16737 #elif defined (MIPS_ISA_DEFAULT)
16738 return mips_cpu_info_from_isa (MIPS_ISA_DEFAULT);
16739 #else
16740 /* 'from-abi' makes a good default: you get whatever the ABI
16741 requires. */
16742 return mips_cpu_info_from_opt (MIPS_ARCH_OPTION_FROM_ABI);
16743 #endif
16744 }
16745
16746 /* Set up globals to generate code for the ISA or processor
16747 described by INFO. */
16748
16749 static void
16750 mips_set_architecture (const struct mips_cpu_info *info)
16751 {
16752 if (info != 0)
16753 {
16754 mips_arch_info = info;
16755 mips_arch = info->cpu;
16756 mips_isa = info->isa;
16757 }
16758 }
16759
16760 /* Likewise for tuning. */
16761
16762 static void
16763 mips_set_tune (const struct mips_cpu_info *info)
16764 {
16765 if (info != 0)
16766 {
16767 mips_tune_info = info;
16768 mips_tune = info->cpu;
16769 }
16770 }
16771
16772 /* Implement TARGET_OPTION_OVERRIDE. */
16773
16774 static void
16775 mips_option_override (void)
16776 {
16777 int i, start, regno, mode;
16778
16779 if (global_options_set.x_mips_isa_option)
16780 mips_isa_option_info = &mips_cpu_info_table[mips_isa_option];
16781
16782 #ifdef SUBTARGET_OVERRIDE_OPTIONS
16783 SUBTARGET_OVERRIDE_OPTIONS;
16784 #endif
16785
16786 /* MIPS16 and microMIPS cannot coexist. */
16787 if (TARGET_MICROMIPS && TARGET_MIPS16)
16788 error ("unsupported combination: %s", "-mips16 -mmicromips");
16789
16790 /* Save the base compression state and process flags as though we
16791 were generating uncompressed code. */
16792 mips_base_compression_flags = TARGET_COMPRESSION;
16793 target_flags &= ~TARGET_COMPRESSION;
16794
16795 /* -mno-float overrides -mhard-float and -msoft-float. */
16796 if (TARGET_NO_FLOAT)
16797 {
16798 target_flags |= MASK_SOFT_FLOAT_ABI;
16799 target_flags_explicit |= MASK_SOFT_FLOAT_ABI;
16800 }
16801
16802 if (TARGET_FLIP_MIPS16)
16803 TARGET_INTERLINK_COMPRESSED = 1;
16804
16805 /* Set the small data limit. */
16806 mips_small_data_threshold = (global_options_set.x_g_switch_value
16807 ? g_switch_value
16808 : MIPS_DEFAULT_GVALUE);
16809
16810 /* The following code determines the architecture and register size.
16811 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
16812 The GAS and GCC code should be kept in sync as much as possible. */
16813
16814 if (global_options_set.x_mips_arch_option)
16815 mips_set_architecture (mips_cpu_info_from_opt (mips_arch_option));
16816
16817 if (mips_isa_option_info != 0)
16818 {
16819 if (mips_arch_info == 0)
16820 mips_set_architecture (mips_isa_option_info);
16821 else if (mips_arch_info->isa != mips_isa_option_info->isa)
16822 error ("%<-%s%> conflicts with the other architecture options, "
16823 "which specify a %s processor",
16824 mips_isa_option_info->name,
16825 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
16826 }
16827
16828 if (mips_arch_info == 0)
16829 mips_set_architecture (mips_default_arch ());
16830
16831 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
16832 error ("%<-march=%s%> is not compatible with the selected ABI",
16833 mips_arch_info->name);
16834
16835 /* Optimize for mips_arch, unless -mtune selects a different processor. */
16836 if (global_options_set.x_mips_tune_option)
16837 mips_set_tune (mips_cpu_info_from_opt (mips_tune_option));
16838
16839 if (mips_tune_info == 0)
16840 mips_set_tune (mips_arch_info);
16841
16842 if ((target_flags_explicit & MASK_64BIT) != 0)
16843 {
16844 /* The user specified the size of the integer registers. Make sure
16845 it agrees with the ABI and ISA. */
16846 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
16847 error ("%<-mgp64%> used with a 32-bit processor");
16848 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
16849 error ("%<-mgp32%> used with a 64-bit ABI");
16850 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
16851 error ("%<-mgp64%> used with a 32-bit ABI");
16852 }
16853 else
16854 {
16855 /* Infer the integer register size from the ABI and processor.
16856 Restrict ourselves to 32-bit registers if that's all the
16857 processor has, or if the ABI cannot handle 64-bit registers. */
16858 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
16859 target_flags &= ~MASK_64BIT;
16860 else
16861 target_flags |= MASK_64BIT;
16862 }
16863
16864 if ((target_flags_explicit & MASK_FLOAT64) != 0)
16865 {
16866 if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
16867 error ("unsupported combination: %s", "-mfp64 -msingle-float");
16868 else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
16869 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
16870 else if (!TARGET_64BIT && TARGET_FLOAT64)
16871 {
16872 if (!ISA_HAS_MXHC1)
16873 error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
16874 " the target supports the mfhc1 and mthc1 instructions");
16875 else if (mips_abi != ABI_32)
16876 error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
16877 " the o32 ABI");
16878 }
16879 }
16880 else
16881 {
16882 /* -msingle-float selects 32-bit float registers. Otherwise the
16883 float registers should be the same size as the integer ones. */
16884 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
16885 target_flags |= MASK_FLOAT64;
16886 else
16887 target_flags &= ~MASK_FLOAT64;
16888 }
16889
16890 /* End of code shared with GAS. */
16891
16892 /* The R5900 FPU only supports single precision. */
16893 if (TARGET_MIPS5900 && TARGET_HARD_FLOAT_ABI && TARGET_DOUBLE_FLOAT)
16894 error ("unsupported combination: %s",
16895 "-march=r5900 -mhard-float -mdouble-float");
16896
16897 /* If a -mlong* option was given, check that it matches the ABI,
16898 otherwise infer the -mlong* setting from the other options. */
16899 if ((target_flags_explicit & MASK_LONG64) != 0)
16900 {
16901 if (TARGET_LONG64)
16902 {
16903 if (mips_abi == ABI_N32)
16904 error ("%qs is incompatible with %qs", "-mabi=n32", "-mlong64");
16905 else if (mips_abi == ABI_32)
16906 error ("%qs is incompatible with %qs", "-mabi=32", "-mlong64");
16907 else if (mips_abi == ABI_O64 && TARGET_ABICALLS)
16908 /* We have traditionally allowed non-abicalls code to use
16909 an LP64 form of o64. However, it would take a bit more
16910 effort to support the combination of 32-bit GOT entries
16911 and 64-bit pointers, so we treat the abicalls case as
16912 an error. */
16913 error ("the combination of %qs and %qs is incompatible with %qs",
16914 "-mabi=o64", "-mabicalls", "-mlong64");
16915 }
16916 else
16917 {
16918 if (mips_abi == ABI_64)
16919 error ("%qs is incompatible with %qs", "-mabi=64", "-mlong32");
16920 }
16921 }
16922 else
16923 {
16924 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
16925 target_flags |= MASK_LONG64;
16926 else
16927 target_flags &= ~MASK_LONG64;
16928 }
16929
16930 if (!TARGET_OLDABI)
16931 flag_pcc_struct_return = 0;
16932
16933 /* Decide which rtx_costs structure to use. */
16934 if (optimize_size)
16935 mips_cost = &mips_rtx_cost_optimize_size;
16936 else
16937 mips_cost = &mips_rtx_cost_data[mips_tune];
16938
16939 /* If the user hasn't specified a branch cost, use the processor's
16940 default. */
16941 if (mips_branch_cost == 0)
16942 mips_branch_cost = mips_cost->branch_cost;
16943
16944 /* If neither -mbranch-likely nor -mno-branch-likely was given
16945 on the command line, set MASK_BRANCHLIKELY based on the target
16946 architecture and tuning flags. Annulled delay slots are a
16947 size win, so we only consider the processor-specific tuning
16948 for !optimize_size. */
16949 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
16950 {
16951 if (ISA_HAS_BRANCHLIKELY
16952 && (optimize_size
16953 || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
16954 target_flags |= MASK_BRANCHLIKELY;
16955 else
16956 target_flags &= ~MASK_BRANCHLIKELY;
16957 }
16958 else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
16959 warning (0, "the %qs architecture does not support branch-likely"
16960 " instructions", mips_arch_info->name);
16961
16962   /* If the user hasn't specified -mimadd or -mno-imadd, set
16963      MASK_IMADD based on the target architecture and tuning
16964      flags.  */
16965   if ((target_flags_explicit & MASK_IMADD) == 0)
16966     {
16967       if (ISA_HAS_MADD_MSUB
16968           && (mips_tune_info->tune_flags & PTF_AVOID_IMADD) == 0)
16969 target_flags |= MASK_IMADD;
16970 else
16971 target_flags &= ~MASK_IMADD;
16972 }
16973 else if (TARGET_IMADD && !ISA_HAS_MADD_MSUB)
16974 warning (0, "the %qs architecture does not support madd or msub"
16975 " instructions", mips_arch_info->name);
16976
16977 /* The effect of -mabicalls isn't defined for the EABI. */
16978 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
16979 {
16980 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
16981 target_flags &= ~MASK_ABICALLS;
16982 }
16983
16984 /* PIC requires -mabicalls. */
16985 if (flag_pic)
16986 {
16987 if (mips_abi == ABI_EABI)
16988 error ("cannot generate position-independent code for %qs",
16989 "-mabi=eabi");
16990 else if (!TARGET_ABICALLS)
16991 error ("position-independent code requires %qs", "-mabicalls");
16992 }
16993
16994 if (TARGET_ABICALLS_PIC2)
16995 /* We need to set flag_pic for executables as well as DSOs
16996 because we may reference symbols that are not defined in
16997 the final executable. (MIPS does not use things like
16998 copy relocs, for example.)
16999
17000 There is a body of code that uses __PIC__ to distinguish
17001 between -mabicalls and -mno-abicalls code. The non-__PIC__
17002 variant is usually appropriate for TARGET_ABICALLS_PIC0, as
17003 long as any indirect jumps use $25. */
17004 flag_pic = 1;
17005
17006 /* -mvr4130-align is a "speed over size" optimization: it usually produces
17007 faster code, but at the expense of more nops. Enable it at -O3 and
17008 above. */
17009 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
17010 target_flags |= MASK_VR4130_ALIGN;
17011
17012 /* Prefer a call to memcpy over inline code when optimizing for size,
17013 though see MOVE_RATIO in mips.h. */
17014 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
17015 target_flags |= MASK_MEMCPY;
17016
17017 /* If we have a nonzero small-data limit, check that the -mgpopt
17018 setting is consistent with the other target flags. */
17019 if (mips_small_data_threshold > 0)
17020 {
17021 if (!TARGET_GPOPT)
17022 {
17023 if (!TARGET_EXPLICIT_RELOCS)
17024 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
17025
17026 TARGET_LOCAL_SDATA = false;
17027 TARGET_EXTERN_SDATA = false;
17028 }
17029 else
17030 {
17031 if (TARGET_VXWORKS_RTP)
17032 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
17033
17034 if (TARGET_ABICALLS)
17035 warning (0, "cannot use small-data accesses for %qs",
17036 "-mabicalls");
17037 }
17038 }
17039
17040 /* Pre-IEEE 754-2008 MIPS hardware has a quirky almost-IEEE format
17041 for all its floating point. */
17042 if (mips_nan != MIPS_IEEE_754_2008)
17043 {
17044 REAL_MODE_FORMAT (SFmode) = &mips_single_format;
17045 REAL_MODE_FORMAT (DFmode) = &mips_double_format;
17046 REAL_MODE_FORMAT (TFmode) = &mips_quad_format;
17047 }
17048
17049 /* Make sure that the user didn't turn off paired single support when
17050 MIPS-3D support is requested. */
17051 if (TARGET_MIPS3D
17052 && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
17053 && !TARGET_PAIRED_SINGLE_FLOAT)
17054 error ("%<-mips3d%> requires %<-mpaired-single%>");
17055
17056 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
17057 if (TARGET_MIPS3D)
17058 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
17059
17060 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
17061 and TARGET_HARD_FLOAT_ABI are both true. */
17062 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
17063 error ("%qs must be used with %qs",
17064 TARGET_MIPS3D ? "-mips3d" : "-mpaired-single",
17065 TARGET_HARD_FLOAT_ABI ? "-mfp64" : "-mhard-float");
17066
17067 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
17068 enabled. */
17069 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_HAS_PAIRED_SINGLE)
17070 warning (0, "the %qs architecture does not support paired-single"
17071 " instructions", mips_arch_info->name);
17072
17073 if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE
17074 && !TARGET_CACHE_BUILTIN)
17075 {
17076 error ("%qs requires a target that provides the %qs instruction",
17077 "-mr10k-cache-barrier", "cache");
17078 mips_r10k_cache_barrier = R10K_CACHE_BARRIER_NONE;
17079 }
17080
17081 /* If TARGET_DSPR2, enable MASK_DSP. */
17082 if (TARGET_DSPR2)
17083 target_flags |= MASK_DSP;
17084
17085 /* .eh_frame addresses should be the same width as a C pointer.
17086 Most MIPS ABIs support only one pointer size, so the assembler
17087 will usually know exactly how big an .eh_frame address is.
17088
17089 Unfortunately, this is not true of the 64-bit EABI. The ABI was
17090 originally defined to use 64-bit pointers (i.e. it is LP64), and
17091 this is still the default mode. However, we also support an n32-like
17092 ILP32 mode, which is selected by -mlong32. The problem is that the
17093 assembler has traditionally not had an -mlong option, so it has
17094 traditionally not known whether we're using the ILP32 or LP64 form.
17095
17096 As it happens, gas versions up to and including 2.19 use _32-bit_
17097 addresses for EABI64 .cfi_* directives. This is wrong for the
17098 default LP64 mode, so we can't use the directives by default.
17099 Moreover, since gas's current behavior is at odds with gcc's
17100 default behavior, it seems unwise to rely on future versions
17101 of gas behaving the same way. We therefore avoid using .cfi
17102 directives for -mlong32 as well. */
17103 if (mips_abi == ABI_EABI && TARGET_64BIT)
17104 flag_dwarf2_cfi_asm = 0;
17105
17106 /* .cfi_* directives generate a read-only section, so fall back on
17107 manual .eh_frame creation if we need the section to be writable. */
17108 if (TARGET_WRITABLE_EH_FRAME)
17109 flag_dwarf2_cfi_asm = 0;
17110
17111 mips_init_print_operand_punct ();
17112
17113   /* Set up the array that maps GCC register numbers to debug register
17114      numbers.  Ignore the special-purpose register numbers.  */
17115
17116 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
17117 {
17118 mips_dbx_regno[i] = IGNORED_DWARF_REGNUM;
17119 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
17120 mips_dwarf_regno[i] = i;
17121 else
17122 mips_dwarf_regno[i] = INVALID_REGNUM;
17123 }
17124
17125 start = GP_DBX_FIRST - GP_REG_FIRST;
17126 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
17127 mips_dbx_regno[i] = i + start;
17128
17129 start = FP_DBX_FIRST - FP_REG_FIRST;
17130 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
17131 mips_dbx_regno[i] = i + start;
17132
17133 /* Accumulator debug registers use big-endian ordering. */
17134 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
17135 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
17136 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
17137 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
17138 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
17139 {
17140 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
17141 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
17142 }
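  /* For example, on a little-endian target TARGET_LITTLE_ENDIAN is 1
     and TARGET_BIG_ENDIAN is 0, so the loop above records each DSP
     accumulator pair's DWARF numbers in swapped order; on big-endian
     targets the mapping is the identity.  */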
17143
17144 /* Set up mips_hard_regno_mode_ok. */
17145 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
17146 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
17147 mips_hard_regno_mode_ok[mode][regno]
17148 = mips_hard_regno_mode_ok_p (regno, (enum machine_mode) mode);
17149
17150 /* Function to allocate machine-dependent function status. */
17151 init_machine_status = &mips_init_machine_status;
17152
17153 /* Default to working around R4000 errata only if the processor
17154 was selected explicitly. */
17155 if ((target_flags_explicit & MASK_FIX_R4000) == 0
17156 && strcmp (mips_arch_info->name, "r4000") == 0)
17157 target_flags |= MASK_FIX_R4000;
17158
17159 /* Default to working around R4400 errata only if the processor
17160 was selected explicitly. */
17161 if ((target_flags_explicit & MASK_FIX_R4400) == 0
17162 && strcmp (mips_arch_info->name, "r4400") == 0)
17163 target_flags |= MASK_FIX_R4400;
17164
17165 /* Default to working around R10000 errata only if the processor
17166 was selected explicitly. */
17167 if ((target_flags_explicit & MASK_FIX_R10000) == 0
17168 && strcmp (mips_arch_info->name, "r10000") == 0)
17169 target_flags |= MASK_FIX_R10000;
17170
17171   /* Make sure that branch-likely instructions are available when using
17172 -mfix-r10000. The instructions are not available if either:
17173
17174 1. -mno-branch-likely was passed.
17175 2. The selected ISA does not support branch-likely and
17176 the command line does not include -mbranch-likely. */
17177 if (TARGET_FIX_R10000
17178 && ((target_flags_explicit & MASK_BRANCHLIKELY) == 0
17179 ? !ISA_HAS_BRANCHLIKELY
17180 : !TARGET_BRANCHLIKELY))
17181 sorry ("%qs requires branch-likely instructions", "-mfix-r10000");
17182
17183 if (TARGET_SYNCI && !ISA_HAS_SYNCI)
17184 {
17185 warning (0, "the %qs architecture does not support the synci "
17186 "instruction", mips_arch_info->name);
17187 target_flags &= ~MASK_SYNCI;
17188 }
17189
17190 /* Only optimize PIC indirect calls if they are actually required. */
17191 if (!TARGET_USE_GOT || !TARGET_EXPLICIT_RELOCS)
17192 target_flags &= ~MASK_RELAX_PIC_CALLS;
17193
17194 /* Save base state of options. */
17195 mips_base_target_flags = target_flags;
17196 mips_base_schedule_insns = flag_schedule_insns;
17197 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
17198 mips_base_move_loop_invariants = flag_move_loop_invariants;
17199 mips_base_align_loops = align_loops;
17200 mips_base_align_jumps = align_jumps;
17201 mips_base_align_functions = align_functions;
17202
17203 /* Now select the ISA mode.
17204
17205 Do all CPP-sensitive stuff in uncompressed mode; we'll switch modes
17206 later if required. */
17207 mips_set_compression_mode (0);
17208
17209   /* We register a second machine-specific reorg pass after delay slot
17210      filling.  Registering the pass must be done at startup, and it is
17211      convenient to do it here.  */
17212 opt_pass *new_pass = make_pass_mips_machine_reorg2 (g);
17213 struct register_pass_info insert_pass_mips_machine_reorg2 =
17214 {
17215 new_pass, /* pass */
17216 "dbr", /* reference_pass_name */
17217 1, /* ref_pass_instance_number */
17218       PASS_POS_INSERT_AFTER /* pos_op */
17219 };
17220 register_pass (&insert_pass_mips_machine_reorg2);
17221
17222 if (TARGET_HARD_FLOAT_ABI && TARGET_MIPS5900)
17223 REAL_MODE_FORMAT (SFmode) = &spu_single_format;
17224 }
17225
17226 /* Swap the register information for registers I and I + 1, which
17227 currently have the wrong endianness. Note that the registers'
17228 fixedness and call-clobberedness might have been set on the
17229 command line. */
17230
17231 static void
17232 mips_swap_registers (unsigned int i)
17233 {
17234 int tmpi;
17235 const char *tmps;
17236
17237 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
17238 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
17239
17240 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
17241 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
17242 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
17243 SWAP_STRING (reg_names[i], reg_names[i + 1]);
17244
17245 #undef SWAP_STRING
17246 #undef SWAP_INT
17247 }
17248
17249 /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
17250
17251 static void
17252 mips_conditional_register_usage (void)
17253 {
17255 if (ISA_HAS_DSP)
17256 {
17257 /* These DSP control register fields are global. */
17258 global_regs[CCDSP_PO_REGNUM] = 1;
17259 global_regs[CCDSP_SC_REGNUM] = 1;
17260 }
17261 else
17262 AND_COMPL_HARD_REG_SET (accessible_reg_set,
17263 reg_class_contents[(int) DSP_ACC_REGS]);
17264
17265 if (!TARGET_HARD_FLOAT)
17266 {
17267 AND_COMPL_HARD_REG_SET (accessible_reg_set,
17268 reg_class_contents[(int) FP_REGS]);
17269 AND_COMPL_HARD_REG_SET (accessible_reg_set,
17270 reg_class_contents[(int) ST_REGS]);
17271 }
17272 else if (!ISA_HAS_8CC)
17273 {
17274 /* We only have a single condition-code register. We implement
17275 this by fixing all the condition-code registers and generating
17276 RTL that refers directly to ST_REG_FIRST. */
17277 AND_COMPL_HARD_REG_SET (accessible_reg_set,
17278 reg_class_contents[(int) ST_REGS]);
17279 SET_HARD_REG_BIT (accessible_reg_set, FPSW_REGNUM);
17280 fixed_regs[FPSW_REGNUM] = call_used_regs[FPSW_REGNUM] = 1;
17281 }
17282 if (TARGET_MIPS16)
17283 {
17284 /* In MIPS16 mode, we prohibit the unused $s registers, since they
17285 are call-saved, and saving them via a MIPS16 register would
17286 probably waste more time than just reloading the value.
17287
17288 We permit the $t temporary registers when optimizing for speed
17289 but not when optimizing for space because using them results in
17290      code that is larger (but faster) than not using them.  We do
17291 allow $24 (t8) because it is used in CMP and CMPI instructions
17292 and $25 (t9) because it is used as the function call address in
17293 SVR4 PIC code. */
17294
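      /* Using o32 register names: $18-$23 are $s2-$s7, $26-$27 are
         $k0-$k1 and $30 is $fp/$s8; $8-$15 below are $t0-$t7.  */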
17295 fixed_regs[18] = call_used_regs[18] = 1;
17296 fixed_regs[19] = call_used_regs[19] = 1;
17297 fixed_regs[20] = call_used_regs[20] = 1;
17298 fixed_regs[21] = call_used_regs[21] = 1;
17299 fixed_regs[22] = call_used_regs[22] = 1;
17300 fixed_regs[23] = call_used_regs[23] = 1;
17301 fixed_regs[26] = call_used_regs[26] = 1;
17302 fixed_regs[27] = call_used_regs[27] = 1;
17303 fixed_regs[30] = call_used_regs[30] = 1;
17304 if (optimize_size)
17305 {
17306 fixed_regs[8] = call_used_regs[8] = 1;
17307 fixed_regs[9] = call_used_regs[9] = 1;
17308 fixed_regs[10] = call_used_regs[10] = 1;
17309 fixed_regs[11] = call_used_regs[11] = 1;
17310 fixed_regs[12] = call_used_regs[12] = 1;
17311 fixed_regs[13] = call_used_regs[13] = 1;
17312 fixed_regs[14] = call_used_regs[14] = 1;
17313 fixed_regs[15] = call_used_regs[15] = 1;
17314 }
17315
17316 /* Do not allow HI and LO to be treated as register operands.
17317 There are no MTHI or MTLO instructions (or any real need
17318 for them) and one-way registers cannot easily be reloaded. */
17319 AND_COMPL_HARD_REG_SET (operand_reg_set,
17320 reg_class_contents[(int) MD_REGS]);
17321 }
17322 /* $f20-$f23 are call-clobbered for n64. */
17323 if (mips_abi == ABI_64)
17324 {
17325 int regno;
17326 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
17327 call_really_used_regs[regno] = call_used_regs[regno] = 1;
17328 }
17329 /* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
17330 for n32. */
17331 if (mips_abi == ABI_N32)
17332 {
17333 int regno;
17334       for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno += 2)
17335 call_really_used_regs[regno] = call_used_regs[regno] = 1;
17336 }
17337 /* Make sure that double-register accumulator values are correctly
17338 ordered for the current endianness. */
17339 if (TARGET_LITTLE_ENDIAN)
17340 {
17341 unsigned int regno;
17342
17343 mips_swap_registers (MD_REG_FIRST);
17344 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
17345 mips_swap_registers (regno);
17346 }
17347 }
17348
17349 /* When generating MIPS16 code, we want to allocate $24 (T_REG) before
17350 other registers for instructions for which it is possible. This
17351 encourages the compiler to use CMP in cases where an XOR would
17352 require some register shuffling. */
17353
17354 void
17355 mips_order_regs_for_local_alloc (void)
17356 {
17357 int i;
17358
17359 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
17360 reg_alloc_order[i] = i;
17361
17362 if (TARGET_MIPS16)
17363 {
17364 /* It really doesn't matter where we put register 0, since it is
17365 a fixed register anyhow. */
17366 reg_alloc_order[0] = 24;
17367 reg_alloc_order[24] = 0;
17368 }
17369 }
17370
17371 /* Implement EH_USES. */
17372
17373 bool
17374 mips_eh_uses (unsigned int regno)
17375 {
17376 if (reload_completed && !TARGET_ABSOLUTE_JUMPS)
17377 {
17378 /* We need to force certain registers to be live in order to handle
17379 PIC long branches correctly. See mips_must_initialize_gp_p for
17380 details. */
17381 if (mips_cfun_has_cprestore_slot_p ())
17382 {
17383 if (regno == CPRESTORE_SLOT_REGNUM)
17384 return true;
17385 }
17386 else
17387 {
17388 if (cfun->machine->global_pointer == regno)
17389 return true;
17390 }
17391 }
17392
17393 return false;
17394 }
17395
17396 /* Implement EPILOGUE_USES. */
17397
17398 bool
17399 mips_epilogue_uses (unsigned int regno)
17400 {
17401 /* Say that the epilogue uses the return address register. Note that
17402 in the case of sibcalls, the values "used by the epilogue" are
17403 considered live at the start of the called function. */
17404 if (regno == RETURN_ADDR_REGNUM)
17405 return true;
17406
17407 /* If using a GOT, say that the epilogue also uses GOT_VERSION_REGNUM.
17408 See the comment above load_call<mode> for details. */
17409   if (TARGET_USE_GOT && regno == GOT_VERSION_REGNUM)
17410 return true;
17411
17412 /* An interrupt handler must preserve some registers that are
17413 ordinarily call-clobbered. */
17414 if (cfun->machine->interrupt_handler_p
17415 && mips_interrupt_extra_call_saved_reg_p (regno))
17416 return true;
17417
17418 return false;
17419 }
17420
17421 /* A for_each_rtx callback. Stop the search if *X is an AT register. */
17422
17423 static int
17424 mips_at_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
17425 {
17426 return REG_P (*x) && REGNO (*x) == AT_REGNUM;
17427 }
17428
17429 /* Return true if INSN needs to be wrapped in ".set noat".
17430 INSN has NOPERANDS operands, stored in OPVEC. */
17431
17432 static bool
17433 mips_need_noat_wrapper_p (rtx insn, rtx *opvec, int noperands)
17434 {
17435 int i;
17436
17437 if (recog_memoized (insn) >= 0)
17438 for (i = 0; i < noperands; i++)
17439 if (for_each_rtx (&opvec[i], mips_at_reg_p, NULL))
17440 return true;
17441 return false;
17442 }
17443
17444 /* Implement FINAL_PRESCAN_INSN. */
17445
17446 void
17447 mips_final_prescan_insn (rtx insn, rtx *opvec, int noperands)
17448 {
17449 if (mips_need_noat_wrapper_p (insn, opvec, noperands))
17450 mips_push_asm_switch (&mips_noat);
17451 }
17452
17453 /* Implement TARGET_ASM_FINAL_POSTSCAN_INSN. */
17454
17455 static void
17456 mips_final_postscan_insn (FILE *file ATTRIBUTE_UNUSED, rtx insn,
17457 rtx *opvec, int noperands)
17458 {
17459 if (mips_need_noat_wrapper_p (insn, opvec, noperands))
17460 mips_pop_asm_switch (&mips_noat);
17461 }
17462
17463 /* Return the function that is used to expand the <u>mulsidi3 pattern.
17464 EXT_CODE is the code of the extension used. Return NULL if widening
17465 multiplication shouldn't be used. */
17466
17467 mulsidi3_gen_fn
17468 mips_mulsidi3_gen_fn (enum rtx_code ext_code)
17469 {
17470 bool signed_p;
17471
17472 signed_p = ext_code == SIGN_EXTEND;
17473 if (TARGET_64BIT)
17474 {
17475 /* Don't use widening multiplication with MULT when we have DMUL. Even
17476 with the extension of its input operands DMUL is faster. Note that
17477 the extension is not needed for signed multiplication. In order to
17478 ensure that we always remove the redundant sign-extension in this
17479 case we still expand mulsidi3 for DMUL. */
17480 if (ISA_HAS_DMUL3)
17481 return signed_p ? gen_mulsidi3_64bit_dmul : NULL;
17482 if (TARGET_MIPS16)
17483 return (signed_p
17484 ? gen_mulsidi3_64bit_mips16
17485 : gen_umulsidi3_64bit_mips16);
17486 if (TARGET_FIX_R4000)
17487 return NULL;
17488 return signed_p ? gen_mulsidi3_64bit : gen_umulsidi3_64bit;
17489 }
17490 else
17491 {
17492 if (TARGET_MIPS16)
17493 return (signed_p
17494 ? gen_mulsidi3_32bit_mips16
17495 : gen_umulsidi3_32bit_mips16);
17496 if (TARGET_FIX_R4000 && !ISA_HAS_DSP)
17497 return signed_p ? gen_mulsidi3_32bit_r4000 : gen_umulsidi3_32bit_r4000;
17498 return signed_p ? gen_mulsidi3_32bit : gen_umulsidi3_32bit;
17499 }
17500 }
17501
17502 /* Return true if PATTERN matches the kind of instruction generated by
17503    umips_build_save_restore.  SAVE_P is true for stores, false for loads.  */
17504
17505 bool
17506 umips_save_restore_pattern_p (bool save_p, rtx pattern)
17507 {
17508 int n;
17509 unsigned int i;
17510 HOST_WIDE_INT first_offset = 0;
17511 rtx first_base = 0;
17512 unsigned int regmask = 0;
17513
17514 for (n = 0; n < XVECLEN (pattern, 0); n++)
17515 {
17516 rtx set, reg, mem, this_base;
17517 HOST_WIDE_INT this_offset;
17518
17519 /* Check that we have a SET. */
17520 set = XVECEXP (pattern, 0, n);
17521 if (GET_CODE (set) != SET)
17522 return false;
17523
17524 /* Check that the SET is a load (if restoring) or a store
17525 (if saving). */
17526 mem = save_p ? SET_DEST (set) : SET_SRC (set);
17527 if (!MEM_P (mem) || MEM_VOLATILE_P (mem))
17528 return false;
17529
17530 /* Check that the address is the sum of base and a possibly-zero
17531 constant offset. Determine if the offset is in range. */
17532 mips_split_plus (XEXP (mem, 0), &this_base, &this_offset);
17533 if (!REG_P (this_base))
17534 return false;
17535
17536 if (n == 0)
17537 {
17538 if (!UMIPS_12BIT_OFFSET_P (this_offset))
17539 return false;
17540 first_base = this_base;
17541 first_offset = this_offset;
17542 }
17543 else
17544 {
17545 /* Check that the save slots are consecutive. */
17546 if (REGNO (this_base) != REGNO (first_base)
17547 || this_offset != first_offset + UNITS_PER_WORD * n)
17548 return false;
17549 }
17550
17551 /* Check that SET's other operand is a register. */
17552 reg = save_p ? SET_SRC (set) : SET_DEST (set);
17553 if (!REG_P (reg))
17554 return false;
17555
17556 regmask |= 1 << REGNO (reg);
17557 }
17558
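  /* As an illustration, an SWM that saves $16, $17 and $31 gives
     REGMASK == (1 << 16) | (1 << 17) | (1 << 31) == 0x80030000; the
     pattern is valid only if that value appears in umips_swm_mask.  */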
17559 for (i = 0; i < ARRAY_SIZE (umips_swm_mask); i++)
17560 if (regmask == umips_swm_mask[i])
17561 return true;
17562
17563 return false;
17564 }
17565
17566 /* Return the assembly instruction for microMIPS LWM or SWM.
17567 SAVE_P and PATTERN are as for umips_save_restore_pattern_p. */
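/* For illustration, assuming the default "$N" register names: saving
   $16-$19 and $31 at offset 16 from $sp produces

        swm     $16-$19,$31,16($sp)  */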
17568
17569 const char *
17570 umips_output_save_restore (bool save_p, rtx pattern)
17571 {
17572 static char buffer[300];
17573 char *s;
17574 int n;
17575 HOST_WIDE_INT offset;
17576 rtx base, mem, set, last_set, last_reg;
17577
17578 /* Parse the pattern. */
17579 gcc_assert (umips_save_restore_pattern_p (save_p, pattern));
17580
17581 s = strcpy (buffer, save_p ? "swm\t" : "lwm\t");
17582 s += strlen (s);
17583 n = XVECLEN (pattern, 0);
17584
17585 set = XVECEXP (pattern, 0, 0);
17586 mem = save_p ? SET_DEST (set) : SET_SRC (set);
17587 mips_split_plus (XEXP (mem, 0), &base, &offset);
17588
17589 last_set = XVECEXP (pattern, 0, n - 1);
17590 last_reg = save_p ? SET_SRC (last_set) : SET_DEST (last_set);
17591
17592 if (REGNO (last_reg) == 31)
17593 n--;
17594
17595 gcc_assert (n <= 9);
17596 if (n == 0)
17597 ;
17598 else if (n == 1)
17599 s += sprintf (s, "%s,", reg_names[16]);
17600 else if (n < 9)
17601 s += sprintf (s, "%s-%s,", reg_names[16], reg_names[15 + n]);
17602 else if (n == 9)
17603 s += sprintf (s, "%s-%s,%s,", reg_names[16], reg_names[23],
17604 reg_names[30]);
17605
17606 if (REGNO (last_reg) == 31)
17607 s += sprintf (s, "%s,", reg_names[31]);
17608
17609   s += sprintf (s, "%d(%s)", (int) offset, reg_names[REGNO (base)]);
17610 return buffer;
17611 }
17612
17613 /* Return true if MEM1 and MEM2 use the same base register, and the
17614 offset of MEM2 equals the offset of MEM1 plus 4. FIRST_REG is the
17615 register into (from) which the contents of MEM1 will be loaded
17616 (stored), depending on the value of LOAD_P.
17617 SWAP_P is true when the 1st and 2nd instructions are swapped. */
17618
17619 static bool
17620 umips_load_store_pair_p_1 (bool load_p, bool swap_p,
17621 rtx first_reg, rtx mem1, rtx mem2)
17622 {
17623 rtx base1, base2;
17624 HOST_WIDE_INT offset1, offset2;
17625
17626 if (!MEM_P (mem1) || !MEM_P (mem2))
17627 return false;
17628
17629 mips_split_plus (XEXP (mem1, 0), &base1, &offset1);
17630 mips_split_plus (XEXP (mem2, 0), &base2, &offset2);
17631
17632 if (!REG_P (base1) || !rtx_equal_p (base1, base2))
17633 return false;
17634
17635 /* Avoid invalid load pair instructions. */
17636 if (load_p && REGNO (first_reg) == REGNO (base1))
17637 return false;
17638
17639 /* We must avoid this case for anti-dependence.
17640 Ex: lw $3, 4($3)
17641 lw $2, 0($3)
17642 first_reg is $2, but the base is $3. */
17643 if (load_p
17644 && swap_p
17645 && REGNO (first_reg) + 1 == REGNO (base1))
17646 return false;
17647
17648 if (offset2 != offset1 + 4)
17649 return false;
17650
17651 if (!UMIPS_12BIT_OFFSET_P (offset1))
17652 return false;
17653
17654 return true;
17655 }
17656
17657 /* OPERANDS describes the operands to a pair of SETs, in the order
17658 dest1, src1, dest2, src2. Return true if the operands can be used
17659 in an LWP or SWP instruction; LOAD_P says which. */
17660
17661 bool
17662 umips_load_store_pair_p (bool load_p, rtx *operands)
17663 {
17664 rtx reg1, reg2, mem1, mem2;
17665
17666 if (load_p)
17667 {
17668 reg1 = operands[0];
17669 reg2 = operands[2];
17670 mem1 = operands[1];
17671 mem2 = operands[3];
17672 }
17673 else
17674 {
17675 reg1 = operands[1];
17676 reg2 = operands[3];
17677 mem1 = operands[0];
17678 mem2 = operands[2];
17679 }
17680
17681 if (REGNO (reg2) == REGNO (reg1) + 1)
17682 return umips_load_store_pair_p_1 (load_p, false, reg1, mem1, mem2);
17683
17684 if (REGNO (reg1) == REGNO (reg2) + 1)
17685 return umips_load_store_pair_p_1 (load_p, true, reg2, mem2, mem1);
17686
17687 return false;
17688 }
17689
17690 /* Output the assembly instruction for a microMIPS LWP or SWP in which
17691 the first register is REG and the first memory slot is MEM.
17692 LOAD_P is true for LWP. */
17693
17694 static void
17695 umips_output_load_store_pair_1 (bool load_p, rtx reg, rtx mem)
17696 {
17697 rtx ops[] = {reg, mem};
17698
17699 if (load_p)
17700 output_asm_insn ("lwp\t%0,%1", ops);
17701 else
17702 output_asm_insn ("swp\t%0,%1", ops);
17703 }
17704
17705 /* Output the assembly instruction for a microMIPS LWP or SWP instruction.
17706 LOAD_P and OPERANDS are as for umips_load_store_pair_p. */
17707
17708 void
17709 umips_output_load_store_pair (bool load_p, rtx *operands)
17710 {
17711 rtx reg1, reg2, mem1, mem2;
17712 if (load_p)
17713 {
17714 reg1 = operands[0];
17715 reg2 = operands[2];
17716 mem1 = operands[1];
17717 mem2 = operands[3];
17718 }
17719 else
17720 {
17721 reg1 = operands[1];
17722 reg2 = operands[3];
17723 mem1 = operands[0];
17724 mem2 = operands[2];
17725 }
17726
17727 if (REGNO (reg2) == REGNO (reg1) + 1)
17728 {
17729 umips_output_load_store_pair_1 (load_p, reg1, mem1);
17730 return;
17731 }
17732
17733 gcc_assert (REGNO (reg1) == REGNO (reg2) + 1);
17734 umips_output_load_store_pair_1 (load_p, reg2, mem2);
17735 }
17736
17737 /* Return true if REG1 and REG2 match the criteria for a movep insn. */
17738
17739 bool
17740 umips_movep_target_p (rtx reg1, rtx reg2)
17741 {
17742 int regno1, regno2, pair;
17743 unsigned int i;
17744 static const int match[8] = {
17745 0x00000060, /* 5, 6 */
17746 0x000000a0, /* 5, 7 */
17747 0x000000c0, /* 6, 7 */
17748 0x00200010, /* 4, 21 */
17749 0x00400010, /* 4, 22 */
17750 0x00000030, /* 4, 5 */
17751 0x00000050, /* 4, 6 */
17752 0x00000090 /* 4, 7 */
17753 };
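  /* Each entry above is (1 << regno1) | (1 << regno2); for example,
     the $5/$6 pair encodes as (1 << 5) | (1 << 6) == 0x60.  */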
17754
17755 if (!REG_P (reg1) || !REG_P (reg2))
17756 return false;
17757
17758 regno1 = REGNO (reg1);
17759 regno2 = REGNO (reg2);
17760
17761 if (!GP_REG_P (regno1) || !GP_REG_P (regno2))
17762 return false;
17763
17764 pair = (1 << regno1) | (1 << regno2);
17765
17766 for (i = 0; i < ARRAY_SIZE (match); i++)
17767 if (pair == match[i])
17768 return true;
17769
17770 return false;
17771 }
17772 \f
17773 /* Return the size in bytes of the trampoline code, padded to
17774 TRAMPOLINE_ALIGNMENT bits. The static chain pointer and target
17775 function address immediately follow. */
17776
17777 int
17778 mips_trampoline_code_size (void)
17779 {
17780 if (TARGET_USE_PIC_FN_ADDR_REG)
17781 return 4 * 4;
17782 else if (ptr_mode == DImode)
17783 return 8 * 4;
17784 else if (ISA_HAS_LOAD_DELAY)
17785 return 6 * 4;
17786 else
17787 return 4 * 4;
17788 }
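/* For example, when ptr_mode == DImode and TARGET_USE_PIC_FN_ADDR_REG is
   false, the code block is 8 * 4 == 32 bytes, so the static chain pointer
   sits at offset 32 and the target function address at offset 40.  */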
17789
17790 /* Implement TARGET_TRAMPOLINE_INIT. */
17791
17792 static void
17793 mips_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
17794 {
17795 rtx addr, end_addr, high, low, opcode, mem;
17796 rtx trampoline[8];
17797 unsigned int i, j;
17798 HOST_WIDE_INT end_addr_offset, static_chain_offset, target_function_offset;
17799
17800 /* Work out the offsets of the pointers from the start of the
17801 trampoline code. */
17802 end_addr_offset = mips_trampoline_code_size ();
17803 static_chain_offset = end_addr_offset;
17804 target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
17805
17806 /* Get pointers to the beginning and end of the code block. */
17807 addr = force_reg (Pmode, XEXP (m_tramp, 0));
17808 end_addr = mips_force_binary (Pmode, PLUS, addr, GEN_INT (end_addr_offset));
17809
17810 #define OP(X) gen_int_mode (X, SImode)
17811
17812 /* Build up the code in TRAMPOLINE. */
17813 i = 0;
17814 if (TARGET_USE_PIC_FN_ADDR_REG)
17815 {
17816 /* $25 contains the address of the trampoline. Emit code of the form:
17817
17818 l[wd] $1, target_function_offset($25)
17819 l[wd] $static_chain, static_chain_offset($25)
17820 jr $1
17821 move $25,$1. */
17822 trampoline[i++] = OP (MIPS_LOAD_PTR (AT_REGNUM,
17823 target_function_offset,
17824 PIC_FUNCTION_ADDR_REGNUM));
17825 trampoline[i++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
17826 static_chain_offset,
17827 PIC_FUNCTION_ADDR_REGNUM));
17828 trampoline[i++] = OP (MIPS_JR (AT_REGNUM));
17829 trampoline[i++] = OP (MIPS_MOVE (PIC_FUNCTION_ADDR_REGNUM, AT_REGNUM));
17830 }
17831 else if (ptr_mode == DImode)
17832 {
17833 /* It's too cumbersome to create the full 64-bit address, so let's
17834 instead use:
17835
17836 move $1, $31
17837 bal 1f
17838 nop
17839 1: l[wd] $25, target_function_offset - 12($31)
17840 l[wd] $static_chain, static_chain_offset - 12($31)
17841 jr $25
17842 move $31, $1
17843
17844 where 12 is the offset of "1:" from the start of the code block. */
17845 trampoline[i++] = OP (MIPS_MOVE (AT_REGNUM, RETURN_ADDR_REGNUM));
17846 trampoline[i++] = OP (MIPS_BAL (1));
17847 trampoline[i++] = OP (MIPS_NOP);
17848 trampoline[i++] = OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM,
17849 target_function_offset - 12,
17850 RETURN_ADDR_REGNUM));
17851 trampoline[i++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
17852 static_chain_offset - 12,
17853 RETURN_ADDR_REGNUM));
17854 trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
17855 trampoline[i++] = OP (MIPS_MOVE (RETURN_ADDR_REGNUM, AT_REGNUM));
17856 }
17857 else
17858 {
17859 /* If the target has load delays, emit:
17860
17861 lui $1, %hi(end_addr)
17862 lw $25, %lo(end_addr + ...)($1)
17863 lw $static_chain, %lo(end_addr + ...)($1)
17864 jr $25
17865 nop
17866
17867 Otherwise emit:
17868
17869 lui $1, %hi(end_addr)
17870 lw $25, %lo(end_addr + ...)($1)
17871 jr $25
17872 lw $static_chain, %lo(end_addr + ...)($1). */
17873
17874 /* Split END_ADDR into %hi and %lo values. Trampolines are aligned
17875 to 64 bits, so the %lo value will have the bottom 3 bits clear. */
17876 high = expand_simple_binop (SImode, PLUS, end_addr, GEN_INT (0x8000),
17877 NULL, false, OPTAB_WIDEN);
17878 high = expand_simple_binop (SImode, LSHIFTRT, high, GEN_INT (16),
17879 NULL, false, OPTAB_WIDEN);
17880 low = convert_to_mode (SImode, gen_lowpart (HImode, end_addr), true);
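      /* A worked example of the split: if END_ADDR is 0x12348760, HIGH
         becomes (0x12348760 + 0x8000) >> 16 == 0x1235 and LOW is 0x8760;
         the LUI loads 0x12350000 and the loads' sign-extended 16-bit
         offsets (0x8760 acts as -0x78a0) bring the address back down to
         0x12348760.  */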
17881
17882 /* Emit the LUI. */
17883 opcode = OP (MIPS_LUI (AT_REGNUM, 0));
17884 trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, high,
17885 NULL, false, OPTAB_WIDEN);
17886
17887 /* Emit the load of the target function. */
17888 opcode = OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM,
17889 target_function_offset - end_addr_offset,
17890 AT_REGNUM));
17891 trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, low,
17892 NULL, false, OPTAB_WIDEN);
17893
17894 /* Emit the JR here, if we can. */
17895 if (!ISA_HAS_LOAD_DELAY)
17896 trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
17897
17898 /* Emit the load of the static chain register. */
17899 opcode = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
17900 static_chain_offset - end_addr_offset,
17901 AT_REGNUM));
17902 trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, low,
17903 NULL, false, OPTAB_WIDEN);
17904
17905 /* Emit the JR, if we couldn't above. */
17906 if (ISA_HAS_LOAD_DELAY)
17907 {
17908 trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
17909 trampoline[i++] = OP (MIPS_NOP);
17910 }
17911 }
17912
17913 #undef OP
17914
17915 /* Copy the trampoline code. Leave any padding uninitialized. */
17916 for (j = 0; j < i; j++)
17917 {
17918 mem = adjust_address (m_tramp, SImode, j * GET_MODE_SIZE (SImode));
17919 mips_emit_move (mem, trampoline[j]);
17920 }
17921
17922 /* Set up the static chain pointer field. */
17923 mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
17924 mips_emit_move (mem, chain_value);
17925
17926 /* Set up the target function field. */
17927 mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
17928 mips_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
17929
17930 /* Flush the code part of the trampoline. */
17931 emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
17932 emit_insn (gen_clear_cache (addr, end_addr));
17933 }
17934
17935 /* Implement FUNCTION_PROFILER. */
17936
17937 void
mips_function_profiler (FILE *file)
17938 {
17939 if (TARGET_MIPS16)
17940 sorry ("mips16 function profiling");
17941 if (TARGET_LONG_CALLS)
17942 {
17943 /* For TARGET_LONG_CALLS use $3 for the address of _mcount. */
17944 if (Pmode == DImode)
17945 fprintf (file, "\tdla\t%s,_mcount\n", reg_names[3]);
17946 else
17947 fprintf (file, "\tla\t%s,_mcount\n", reg_names[3]);
17948 }
17949 mips_push_asm_switch (&mips_noat);
17950 fprintf (file, "\tmove\t%s,%s\t\t# save current return address\n",
17951 reg_names[AT_REGNUM], reg_names[RETURN_ADDR_REGNUM]);
17952 /* _mcount treats $2 as the static chain register. */
17953 if (cfun->static_chain_decl != NULL)
17954 fprintf (file, "\tmove\t%s,%s\n", reg_names[2],
17955 reg_names[STATIC_CHAIN_REGNUM]);
17956 if (TARGET_MCOUNT_RA_ADDRESS)
17957 {
17958 /* If TARGET_MCOUNT_RA_ADDRESS load $12 with the address of the
17959 ra save location. */
17960 if (cfun->machine->frame.ra_fp_offset == 0)
17961 /* ra not saved, pass zero. */
17962 fprintf (file, "\tmove\t%s,%s\n", reg_names[12], reg_names[0]);
17963 else
17964 fprintf (file, "\t%s\t%s," HOST_WIDE_INT_PRINT_DEC "(%s)\n",
17965 Pmode == DImode ? "dla" : "la", reg_names[12],
17966 cfun->machine->frame.ra_fp_offset,
17967 reg_names[STACK_POINTER_REGNUM]);
17968 }
17969 if (!TARGET_NEWABI)
17970 fprintf (file,
17971 "\t%s\t%s,%s,%d\t\t# _mcount pops 2 words from stack\n",
17972 TARGET_64BIT ? "dsubu" : "subu",
17973 reg_names[STACK_POINTER_REGNUM],
17974 reg_names[STACK_POINTER_REGNUM],
17975 Pmode == DImode ? 16 : 8);
17976
17977 if (TARGET_LONG_CALLS)
17978 fprintf (file, "\tjalr\t%s\n", reg_names[3]);
17979 else
17980 fprintf (file, "\tjal\t_mcount\n");
17981 mips_pop_asm_switch (&mips_noat);
17982 /* _mcount treats $2 as the static chain register. */
17983 if (cfun->static_chain_decl != NULL)
17984 fprintf (file, "\tmove\t%s,%s\n", reg_names[STATIC_CHAIN_REGNUM],
17985 reg_names[2]);
17986 }
17987
17988 /* Implement TARGET_SHIFT_TRUNCATION_MASK. We want to keep the default
17989 behaviour of TARGET_SHIFT_TRUNCATION_MASK for non-vector modes even
17990 when TARGET_LOONGSON_VECTORS is true. */
17991
17992 static unsigned HOST_WIDE_INT
17993 mips_shift_truncation_mask (enum machine_mode mode)
17994 {
17995 if (TARGET_LOONGSON_VECTORS && VECTOR_MODE_P (mode))
17996 return 0;
17997
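  /* Scalar MIPS shifts use only the low log2(bitsize) bits of the
     shift amount, so the mask is e.g. 31 for SImode.  */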
17998 return GET_MODE_BITSIZE (mode) - 1;
17999 }
18000
18001 /* Implement TARGET_PREPARE_PCH_SAVE. */
18002
18003 static void
18004 mips_prepare_pch_save (void)
18005 {
18006 /* We are called in a context where the current MIPS16 vs. non-MIPS16
18007 setting should be irrelevant. The question then is: which setting
18008 makes most sense at load time?
18009
18010 The PCH is loaded before the first token is read. We should never
18011 have switched into MIPS16 mode by that point, and thus should not
18012 have populated mips16_globals. Nor can we load the entire contents
18013 of mips16_globals from the PCH file, because mips16_globals contains
18014 a combination of GGC and non-GGC data.
18015
18016      There is therefore no point in trying to save the GGC part of
18017 mips16_globals to the PCH file, or to preserve MIPS16ness across
18018 the PCH save and load. The loading compiler would not have access
18019 to the non-GGC parts of mips16_globals (either from the PCH file,
18020 or from a copy that the loading compiler generated itself) and would
18021 have to call target_reinit anyway.
18022
18023 It therefore seems best to switch back to non-MIPS16 mode at
18024 save time, and to ensure that mips16_globals remains null after
18025 a PCH load. */
18026 mips_set_compression_mode (0);
18027 mips16_globals = 0;
18028 }
18029 \f
18030 /* Generate or test for an insn that supports a constant permutation. */
18031
18032 #define MAX_VECT_LEN 8
18033
18034 struct expand_vec_perm_d
18035 {
18036 rtx target, op0, op1;
18037 unsigned char perm[MAX_VECT_LEN];
18038 enum machine_mode vmode;
18039 unsigned char nelt;
18040 bool one_vector_p;
18041 bool testing_p;
18042 };
18043
18044 /* Construct (set target (vec_select op0 (parallel perm))) and
18045 return true if that's a valid instruction in the active ISA. */
18046
18047 static bool
18048 mips_expand_vselect (rtx target, rtx op0,
18049 const unsigned char *perm, unsigned nelt)
18050 {
18051 rtx rperm[MAX_VECT_LEN], x;
18052 unsigned i;
18053
18054 for (i = 0; i < nelt; ++i)
18055 rperm[i] = GEN_INT (perm[i]);
18056
18057 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
18058 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
18059 x = gen_rtx_SET (VOIDmode, target, x);
18060
18061 x = emit_insn (x);
18062 if (recog_memoized (x) < 0)
18063 {
18064 remove_insn (x);
18065 return false;
18066 }
18067 return true;
18068 }
18069
18070 /* Similar, but generate a vec_concat from op0 and op1 as well. */
18071
18072 static bool
18073 mips_expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
18074 const unsigned char *perm, unsigned nelt)
18075 {
18076 enum machine_mode v2mode;
18077 rtx x;
18078
18079 v2mode = GET_MODE_2XWIDER_MODE (GET_MODE (op0));
18080 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
18081 return mips_expand_vselect (target, x, perm, nelt);
18082 }
18083
18084 /* Recognize patterns for even-odd extraction. */
18085
18086 static bool
18087 mips_expand_vpc_loongson_even_odd (struct expand_vec_perm_d *d)
18088 {
18089 unsigned i, odd, nelt = d->nelt;
18090 rtx t0, t1, t2, t3;
18091
18092 if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS))
18093 return false;
18094 /* Even-odd for V2SI/V2SFmode is matched by interleave directly. */
18095 if (nelt < 4)
18096 return false;
18097
18098 odd = d->perm[0];
18099 if (odd > 1)
18100 return false;
18101 for (i = 1; i < nelt; ++i)
18102 if (d->perm[i] != i * 2 + odd)
18103 return false;
18104
18105 if (d->testing_p)
18106 return true;
18107
18108 /* We need 2*log2(N)-1 operations to achieve odd/even with interleave. */
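  /* For V8QImode (N == 8) that is five interleaves; for V4HImode, three.  */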
18109 t0 = gen_reg_rtx (d->vmode);
18110 t1 = gen_reg_rtx (d->vmode);
18111 switch (d->vmode)
18112 {
18113 case V4HImode:
18114 emit_insn (gen_loongson_punpckhhw (t0, d->op0, d->op1));
18115 emit_insn (gen_loongson_punpcklhw (t1, d->op0, d->op1));
18116 if (odd)
18117 emit_insn (gen_loongson_punpckhhw (d->target, t1, t0));
18118 else
18119 emit_insn (gen_loongson_punpcklhw (d->target, t1, t0));
18120 break;
18121
18122 case V8QImode:
18123 t2 = gen_reg_rtx (d->vmode);
18124 t3 = gen_reg_rtx (d->vmode);
18125 emit_insn (gen_loongson_punpckhbh (t0, d->op0, d->op1));
18126 emit_insn (gen_loongson_punpcklbh (t1, d->op0, d->op1));
18127 emit_insn (gen_loongson_punpckhbh (t2, t1, t0));
18128 emit_insn (gen_loongson_punpcklbh (t3, t1, t0));
18129 if (odd)
18130 emit_insn (gen_loongson_punpckhbh (d->target, t3, t2));
18131 else
18132 emit_insn (gen_loongson_punpcklbh (d->target, t3, t2));
18133 break;
18134
18135 default:
18136 gcc_unreachable ();
18137 }
18138 return true;
18139 }
18140
18141 /* Recognize patterns for the Loongson PSHUFH instruction. */
18142
18143 static bool
18144 mips_expand_vpc_loongson_pshufh (struct expand_vec_perm_d *d)
18145 {
18146 unsigned i, mask;
18147 rtx rmask;
18148
18149 if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS))
18150 return false;
18151 if (d->vmode != V4HImode)
18152 return false;
18153 if (d->testing_p)
18154 return true;
18155
18156   /* Convert the selector into the packed 8-bit form for pshufh.
18157      Recall that Loongson is little-endian only, so no big-endian
18158      adjustment is required.  */
18159 for (i = mask = 0; i < 4; i++)
18160 mask |= (d->perm[i] & 3) << (i * 2);
18161 rmask = force_reg (SImode, GEN_INT (mask));
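  /* For example, the selector { 1, 0, 3, 2 } packs to
     1 | (0 << 2) | (3 << 4) | (2 << 6) == 0xb1.  */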
18162
18163 if (d->one_vector_p)
18164 emit_insn (gen_loongson_pshufh (d->target, d->op0, rmask));
18165 else
18166 {
18167 rtx t0, t1, x, merge, rmerge[4];
18168
18169 t0 = gen_reg_rtx (V4HImode);
18170 t1 = gen_reg_rtx (V4HImode);
18171 emit_insn (gen_loongson_pshufh (t1, d->op1, rmask));
18172 emit_insn (gen_loongson_pshufh (t0, d->op0, rmask));
18173
18174 for (i = 0; i < 4; ++i)
18175 rmerge[i] = (d->perm[i] & 4 ? constm1_rtx : const0_rtx);
18176 merge = gen_rtx_CONST_VECTOR (V4HImode, gen_rtvec_v (4, rmerge));
18177 merge = force_reg (V4HImode, merge);
18178
18179 x = gen_rtx_AND (V4HImode, merge, t1);
18180 emit_insn (gen_rtx_SET (VOIDmode, t1, x));
18181
18182 x = gen_rtx_NOT (V4HImode, merge);
18183 x = gen_rtx_AND (V4HImode, x, t0);
18184 emit_insn (gen_rtx_SET (VOIDmode, t0, x));
18185
18186 x = gen_rtx_IOR (V4HImode, t0, t1);
18187 emit_insn (gen_rtx_SET (VOIDmode, d->target, x));
18188 }
18189
18190 return true;
18191 }
18192
18193 /* Recognize broadcast patterns for the Loongson. */
18194
18195 static bool
18196 mips_expand_vpc_loongson_bcast (struct expand_vec_perm_d *d)
18197 {
18198 unsigned i, elt;
18199 rtx t0, t1;
18200
18201 if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS))
18202 return false;
18203 /* Note that we've already matched V2SI via punpck and V4HI via pshufh. */
18204 if (d->vmode != V8QImode)
18205 return false;
18206 if (!d->one_vector_p)
18207 return false;
18208
18209 elt = d->perm[0];
18210 for (i = 1; i < 8; ++i)
18211 if (d->perm[i] != elt)
18212 return false;
18213
18214 if (d->testing_p)
18215 return true;
18216
18217   /* With one interleave we put two copies of the desired element adjacent.  */
18218 t0 = gen_reg_rtx (V8QImode);
18219 if (elt < 4)
18220 emit_insn (gen_loongson_punpcklbh (t0, d->op0, d->op0));
18221 else
18222 emit_insn (gen_loongson_punpckhbh (t0, d->op0, d->op0));
18223
18224 /* Shuffle that one HImode element into all locations. */
18225 elt &= 3;
18226 elt *= 0x55;
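  /* Multiplying by 0x55 replicates the 2-bit index into all four
     selector fields; e.g. element 2 gives 2 * 0x55 == 0xaa.  */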
18227 t1 = gen_reg_rtx (V4HImode);
18228 emit_insn (gen_loongson_pshufh (t1, gen_lowpart (V4HImode, t0),
18229 force_reg (SImode, GEN_INT (elt))));
18230
18231 emit_move_insn (d->target, gen_lowpart (V8QImode, t1));
18232 return true;
18233 }
18234
18235 static bool
18236 mips_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
18237 {
18238 unsigned int i, nelt = d->nelt;
18239 unsigned char perm2[MAX_VECT_LEN];
18240
18241 if (d->one_vector_p)
18242 {
18243 /* Try interleave with alternating operands. */
18244       memcpy (perm2, d->perm, sizeof (perm2));
18245 for (i = 1; i < nelt; i += 2)
18246 perm2[i] += nelt;
18247 if (mips_expand_vselect_vconcat (d->target, d->op0, d->op1, perm2, nelt))
18248 return true;
18249 }
18250 else
18251 {
18252 if (mips_expand_vselect_vconcat (d->target, d->op0, d->op1,
18253 d->perm, nelt))
18254 return true;
18255
18256 /* Try again with swapped operands. */
18257 for (i = 0; i < nelt; ++i)
18258 perm2[i] = (d->perm[i] + nelt) & (2 * nelt - 1);
18259 if (mips_expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
18260 return true;
18261 }
18262
18263 if (mips_expand_vpc_loongson_even_odd (d))
18264 return true;
18265 if (mips_expand_vpc_loongson_pshufh (d))
18266 return true;
18267 if (mips_expand_vpc_loongson_bcast (d))
18268 return true;
18269 return false;
18270 }
18271
18272 /* Expand a vec_perm_const pattern. */
18273
18274 bool
18275 mips_expand_vec_perm_const (rtx operands[4])
18276 {
18277 struct expand_vec_perm_d d;
18278 int i, nelt, which;
18279 unsigned char orig_perm[MAX_VECT_LEN];
18280 rtx sel;
18281 bool ok;
18282
18283 d.target = operands[0];
18284 d.op0 = operands[1];
18285 d.op1 = operands[2];
18286 sel = operands[3];
18287
18288 d.vmode = GET_MODE (d.target);
18289 gcc_assert (VECTOR_MODE_P (d.vmode));
18290 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
18291 d.testing_p = false;
18292
18293 for (i = which = 0; i < nelt; ++i)
18294 {
18295 rtx e = XVECEXP (sel, 0, i);
18296 int ei = INTVAL (e) & (2 * nelt - 1);
18297 which |= (ei < nelt ? 1 : 2);
18298 orig_perm[i] = ei;
18299 }
18300 memcpy (d.perm, orig_perm, MAX_VECT_LEN);
18301
18302 switch (which)
18303 {
18304 default:
18305       gcc_unreachable ();
18306
18307 case 3:
18308 d.one_vector_p = false;
18309 if (!rtx_equal_p (d.op0, d.op1))
18310 break;
18311 /* FALLTHRU */
18312
18313 case 2:
18314 for (i = 0; i < nelt; ++i)
18315 d.perm[i] &= nelt - 1;
18316 d.op0 = d.op1;
18317 d.one_vector_p = true;
18318 break;
18319
18320 case 1:
18321 d.op1 = d.op0;
18322 d.one_vector_p = true;
18323 break;
18324 }
18325
18326 ok = mips_expand_vec_perm_const_1 (&d);
18327
18328 /* If we were given a two-vector permutation which just happened to
18329 have both input vectors equal, we folded this into a one-vector
18330 permutation. There are several loongson patterns that are matched
18331 via direct vec_select+vec_concat expansion, but we do not have
18332 support in mips_expand_vec_perm_const_1 to guess the adjustment
18333 that should be made for a single operand. Just try again with
18334 the original permutation. */
18335 if (!ok && which == 3)
18336 {
18337 d.op0 = operands[1];
18338 d.op1 = operands[2];
18339 d.one_vector_p = false;
18340 memcpy (d.perm, orig_perm, MAX_VECT_LEN);
18341 ok = mips_expand_vec_perm_const_1 (&d);
18342 }
18343
18344 return ok;
18345 }
18346
18347 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST_OK. */
18348
18349 static bool
18350 mips_vectorize_vec_perm_const_ok (enum machine_mode vmode,
18351 const unsigned char *sel)
18352 {
18353 struct expand_vec_perm_d d;
18354 unsigned int i, nelt, which;
18355 bool ret;
18356
18357 d.vmode = vmode;
18358 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
18359 d.testing_p = true;
18360 memcpy (d.perm, sel, nelt);
18361
18362 /* Categorize the set of elements in the selector. */
18363 for (i = which = 0; i < nelt; ++i)
18364 {
18365 unsigned char e = d.perm[i];
18366 gcc_assert (e < 2 * nelt);
18367 which |= (e < nelt ? 1 : 2);
18368 }
18369
18370 /* For all elements from second vector, fold the elements to first. */
18371 if (which == 2)
18372 for (i = 0; i < nelt; ++i)
18373 d.perm[i] -= nelt;
18374
18375 /* Check whether the mask can be applied to the vector type. */
18376 d.one_vector_p = (which != 3);
18377
18378 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
18379 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
18380 if (!d.one_vector_p)
18381 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
18382
18383 start_sequence ();
18384 ret = mips_expand_vec_perm_const_1 (&d);
18385 end_sequence ();
18386
18387 return ret;
18388 }
18389
18390 /* Expand an integral vector unpack operation. */
18391
18392 void
18393 mips_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p)
18394 {
18395 enum machine_mode imode = GET_MODE (operands[1]);
18396 rtx (*unpack) (rtx, rtx, rtx);
18397 rtx (*cmpgt) (rtx, rtx, rtx);
18398 rtx tmp, dest, zero;
18399
18400 switch (imode)
18401 {
18402 case V8QImode:
18403 if (high_p)
18404 unpack = gen_loongson_punpckhbh;
18405 else
18406 unpack = gen_loongson_punpcklbh;
18407 cmpgt = gen_loongson_pcmpgtb;
18408 break;
18409 case V4HImode:
18410 if (high_p)
18411 unpack = gen_loongson_punpckhhw;
18412 else
18413 unpack = gen_loongson_punpcklhw;
18414 cmpgt = gen_loongson_pcmpgth;
18415 break;
18416 default:
18417 gcc_unreachable ();
18418 }
18419
18420 zero = force_reg (imode, CONST0_RTX (imode));
18421 if (unsigned_p)
18422 tmp = zero;
18423 else
18424 {
18425 tmp = gen_reg_rtx (imode);
18426 emit_insn (cmpgt (tmp, zero, operands[1]));
18427 }
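  /* TMP now holds the high half of each widened element: zero for an
     unsigned extension, or the all-ones result of 0 > X for negative
     elements of a signed extension.  */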
18428
18429 dest = gen_reg_rtx (imode);
18430 emit_insn (unpack (dest, operands[1], tmp));
18431
18432 emit_move_insn (operands[0], gen_lowpart (GET_MODE (operands[0]), dest));
18433 }
18434
18435 /* A subroutine of mips_expand_vec_init, match constant vector elements. */
18436
18437 static inline bool
18438 mips_constant_elt_p (rtx x)
18439 {
18440 return CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE;
18441 }
18442
18443 /* A subroutine of mips_expand_vec_init, expand via broadcast. */
18444
18445 static void
18446 mips_expand_vi_broadcast (enum machine_mode vmode, rtx target, rtx elt)
18447 {
18448 struct expand_vec_perm_d d;
18449 rtx t1;
18450 bool ok;
18451
18452 if (elt != const0_rtx)
18453 elt = force_reg (GET_MODE_INNER (vmode), elt);
18454 if (REG_P (elt))
18455 elt = gen_lowpart (DImode, elt);
18456
18457 t1 = gen_reg_rtx (vmode);
18458 switch (vmode)
18459 {
18460 case V8QImode:
18461 emit_insn (gen_loongson_vec_init1_v8qi (t1, elt));
18462 break;
18463 case V4HImode:
18464 emit_insn (gen_loongson_vec_init1_v4hi (t1, elt));
18465 break;
18466 default:
18467 gcc_unreachable ();
18468 }
18469
18470 memset (&d, 0, sizeof (d));
18471 d.target = target;
18472 d.op0 = t1;
18473 d.op1 = t1;
18474 d.vmode = vmode;
18475 d.nelt = GET_MODE_NUNITS (vmode);
18476 d.one_vector_p = true;
18477
18478 ok = mips_expand_vec_perm_const_1 (&d);
18479 gcc_assert (ok);
18480 }
18481
18482 /* A subroutine of mips_expand_vec_init: replace the non-constant
18483    elements of VALS with zeros and copy the result to TARGET.  */
18484
18485 static void
18486 mips_expand_vi_constant (enum machine_mode vmode, unsigned nelt,
18487 rtx target, rtx vals)
18488 {
18489 rtvec vec = shallow_copy_rtvec (XVEC (vals, 0));
18490 unsigned i;
18491
18492 for (i = 0; i < nelt; ++i)
18493 {
18494 if (!mips_constant_elt_p (RTVEC_ELT (vec, i)))
18495 RTVEC_ELT (vec, i) = const0_rtx;
18496 }
18497
18498 emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, vec));
18499 }
18500
18501
18502 /* A subroutine of mips_expand_vec_init, expand via pinsrh. */
18503
18504 static void
18505 mips_expand_vi_loongson_one_pinsrh (rtx target, rtx vals, unsigned one_var)
18506 {
18507 mips_expand_vi_constant (V4HImode, 4, target, vals);
18508
18509 emit_insn (gen_vec_setv4hi (target, target, XVECEXP (vals, 0, one_var),
18510 GEN_INT (one_var)));
18511 }
18512
18513 /* A subroutine of mips_expand_vec_init, expand anything via memory. */
18514
18515 static void
18516 mips_expand_vi_general (enum machine_mode vmode, enum machine_mode imode,
18517 unsigned nelt, unsigned nvar, rtx target, rtx vals)
18518 {
18519 rtx mem = assign_stack_temp (vmode, GET_MODE_SIZE (vmode));
18520 unsigned int i, isize = GET_MODE_SIZE (imode);
18521
18522 if (nvar < nelt)
18523 mips_expand_vi_constant (vmode, nelt, mem, vals);
18524
18525 for (i = 0; i < nelt; ++i)
18526 {
18527 rtx x = XVECEXP (vals, 0, i);
18528 if (!mips_constant_elt_p (x))
18529 emit_move_insn (adjust_address (mem, imode, i * isize), x);
18530 }
18531
18532 emit_move_insn (target, mem);
18533 }
18534
18535 /* Expand a vector initialization. */
18536
18537 void
18538 mips_expand_vector_init (rtx target, rtx vals)
18539 {
18540 enum machine_mode vmode = GET_MODE (target);
18541 enum machine_mode imode = GET_MODE_INNER (vmode);
18542 unsigned i, nelt = GET_MODE_NUNITS (vmode);
18543 unsigned nvar = 0, one_var = -1u;
18544 bool all_same = true;
18545 rtx x;
18546
18547 for (i = 0; i < nelt; ++i)
18548 {
18549 x = XVECEXP (vals, 0, i);
18550 if (!mips_constant_elt_p (x))
18551 nvar++, one_var = i;
18552 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
18553 all_same = false;
18554 }
18555
18556 /* Load constants from the pool, or whatever's handy. */
18557 if (nvar == 0)
18558 {
18559 emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)));
18560 return;
18561 }
18562
18563 /* For two-part initialization, always use CONCAT. */
18564 if (nelt == 2)
18565 {
18566 rtx op0 = force_reg (imode, XVECEXP (vals, 0, 0));
18567 rtx op1 = force_reg (imode, XVECEXP (vals, 0, 1));
18568 x = gen_rtx_VEC_CONCAT (vmode, op0, op1);
18569 emit_insn (gen_rtx_SET (VOIDmode, target, x));
18570 return;
18571 }
18572
18573   /* Loongson is the only CPU whose vector modes have more than two elements.  */
18574 gcc_assert (TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS);
18575
18576 /* If all values are identical, broadcast the value. */
18577 if (all_same)
18578 {
18579 mips_expand_vi_broadcast (vmode, target, XVECEXP (vals, 0, 0));
18580 return;
18581 }
18582
18583   /* If just one V4HImode element is non-constant, use PINSRH.  */
18584 if (nvar == 1 && vmode == V4HImode)
18585 {
18586 mips_expand_vi_loongson_one_pinsrh (target, vals, one_var);
18587 return;
18588 }
18589
18590 mips_expand_vi_general (vmode, imode, nelt, nvar, target, vals);
18591 }
18592
18593 /* Expand a vector reduction. */
18594
18595 void
18596 mips_expand_vec_reduc (rtx target, rtx in, rtx (*gen)(rtx, rtx, rtx))
18597 {
18598 enum machine_mode vmode = GET_MODE (in);
18599 unsigned char perm2[2];
18600 rtx last, next, fold, x;
18601 bool ok;
18602
18603 last = in;
18604 fold = gen_reg_rtx (vmode);
18605 switch (vmode)
18606 {
18607 case V2SFmode:
18608 /* Use PUL/PLU to produce { L, H } op { H, L }.
18609 By reversing the pair order, rather than a pure interleave high,
18610 we avoid erroneous exceptional conditions that we might otherwise
18611 produce from the computation of H op H. */
18612 perm2[0] = 1;
18613 perm2[1] = 2;
18614 ok = mips_expand_vselect_vconcat (fold, last, last, perm2, 2);
18615 gcc_assert (ok);
18616 break;
18617
18618 case V2SImode:
18619 /* Use interleave to produce { H, L } op { H, H }. */
18620 emit_insn (gen_loongson_punpckhwd (fold, last, last));
18621 break;
18622
18623 case V4HImode:
18624 /* Perform the first reduction with interleave,
18625 and subsequent reductions with shifts. */
18626 emit_insn (gen_loongson_punpckhwd_hi (fold, last, last));
18627
18628 next = gen_reg_rtx (vmode);
18629 emit_insn (gen (next, last, fold));
18630 last = next;
18631
18632 fold = gen_reg_rtx (vmode);
18633 x = force_reg (SImode, GEN_INT (16));
18634 emit_insn (gen_vec_shr_v4hi (fold, last, x));
18635 break;
18636
18637 case V8QImode:
18638 emit_insn (gen_loongson_punpckhwd_qi (fold, last, last));
18639
18640 next = gen_reg_rtx (vmode);
18641 emit_insn (gen (next, last, fold));
18642 last = next;
18643
18644 fold = gen_reg_rtx (vmode);
18645 x = force_reg (SImode, GEN_INT (16));
18646 emit_insn (gen_vec_shr_v8qi (fold, last, x));
18647
18648 next = gen_reg_rtx (vmode);
18649 emit_insn (gen (next, last, fold));
18650 last = next;
18651
18652 fold = gen_reg_rtx (vmode);
18653 x = force_reg (SImode, GEN_INT (8));
18654 emit_insn (gen_vec_shr_v8qi (fold, last, x));
18655 break;
18656
18657 default:
18658 gcc_unreachable ();
18659 }
18660
18661 emit_insn (gen (target, last, fold));
18662 }

/* Expand a vector minimum/maximum.  CMP generates the elementwise
   comparison OP0 > OP1, producing an all-ones mask in each element
   for which it holds; MIN_P selects between minimum and maximum.  */

void
mips_expand_vec_minmax (rtx target, rtx op0, rtx op1,
                        rtx (*cmp) (rtx, rtx, rtx), bool min_p)
{
  enum machine_mode vmode = GET_MODE (target);
  rtx tc, t0, t1, x;

  tc = gen_reg_rtx (vmode);
  t0 = gen_reg_rtx (vmode);
  t1 = gen_reg_rtx (vmode);

  /* tc = (op0 > op1), elementwise.  */
  emit_insn (cmp (tc, op0, op1));

  x = gen_rtx_AND (vmode, tc, (min_p ? op1 : op0));
  emit_insn (gen_rtx_SET (VOIDmode, t0, x));

  x = gen_rtx_NOT (vmode, tc);
  x = gen_rtx_AND (vmode, x, (min_p ? op0 : op1));
  emit_insn (gen_rtx_SET (VOIDmode, t1, x));

  x = gen_rtx_IOR (vmode, t0, t1);
  emit_insn (gen_rtx_SET (VOIDmode, target, x));
}
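
/* The sequence above is a branchless bitwise select.  As a scalar
   analogue (for exposition only; MASK plays the role of one element
   of TC):

     unsigned mask = (a > b) ? ~0u : 0u;
     unsigned vmax = (mask & a) | (~mask & b);
     unsigned vmin = (mask & b) | (~mask & a);  */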

/* Implement TARGET_CASE_VALUES_THRESHOLD.  */

unsigned int
mips_case_values_threshold (void)
{
  /* In MIPS16 mode using a larger case threshold generates smaller code.  */
  if (TARGET_MIPS16 && optimize_size)
    return 10;
  else
    return default_case_values_threshold ();
}
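
/* The value returned above is the smallest number of case labels for
   which a jump table is preferred over a tree of conditional branches,
   so (on my reading of the generic hook, not stated in the original)
   returning 10 makes MIPS16 -Os code expand switches with fewer than
   ten cases as compare-and-branch chains.  */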
\f
/* Initialize the GCC target structure.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mips_option_override

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mips_legitimize_address

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
#undef TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT mips_sched_init
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER mips_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 mips_sched_reorder2
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mips_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE mips_issue_rate
#undef TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN
#define TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN mips_init_dfa_post_cycle_insn
#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE mips_dfa_post_advance_cycle
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  mips_multipass_dfa_lookahead
#undef TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P
#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \
  mips_small_register_classes_for_mode_p

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall

#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
#undef TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P mips_can_inline_p
#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION mips_set_current_function

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mips_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mips_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mips_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mips_address_cost

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P mips_in_small_data_p

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mips_reorg

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mips_preferred_reload_class

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK mips_expand_to_rtl_hook
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mips_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
#undef TARGET_ASM_CODE_END
#define TARGET_ASM_CODE_END mips_code_end

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS mips_init_libfuncs

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mips_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mips_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P mips_function_value_regno_p
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mips_return_in_memory
#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB mips_return_in_msb

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND mips_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS mips_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P mips_print_operand_punct_valid_p

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES mips_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mips_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mips_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY mips_function_arg_boundary

#undef TARGET_MODE_REP_EXTENDED
#define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE mips_preferred_simd_mode

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS mips_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL mips_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN mips_expand_builtin

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mips_legitimate_constant_p

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mips_encode_section_info

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE mips_attribute_table
/* All our function attributes are related to how out-of-line copies should
   be compiled or called.  They don't in themselves prevent inlining.  */
#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry

#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
#define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes

#ifdef HAVE_AS_DTPRELWORD
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
#endif
#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span

#undef TARGET_ASM_FINAL_POSTSCAN_INSN
#define TARGET_ASM_FINAL_POSTSCAN_INSN mips_final_postscan_insn

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P mips_legitimate_address_p

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED mips_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE mips_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mips_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mips_trampoline_init

#undef TARGET_ASM_OUTPUT_SOURCE_FILENAME
#define TARGET_ASM_OUTPUT_SOURCE_FILENAME mips_output_filename

#undef TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK mips_shift_truncation_mask

#undef TARGET_PREPARE_PCH_SAVE
#define TARGET_PREPARE_PCH_SAVE mips_prepare_pch_save

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK mips_vectorize_vec_perm_const_ok

#undef TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mips_case_values_threshold

struct gcc_target targetm = TARGET_INITIALIZER;
\f
#include "gt-mips.h"