/* Output routines for GCC for Renesas / SuperH SH.
   Copyright (C) 1993-2016 Free Software Foundation, Inc.
   Contributed by Steve Chamberlain (sac@cygnus.com).
   Improved by Jim Wilson (wilson@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include <sstream>

#include "config.h"
#define INCLUDE_VECTOR
#include "system.h"
#include "coretypes.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "insn-attr.h"
#include "langhooks.h"
#include "sched-int.h"
#include "tm-constrs.h"
#include "tree-pass.h"

/* This file should be included last.  */
#include "target-def.h"

int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;

#define CONST_OK_FOR_ADD(size) CONST_OK_FOR_I08 (size)
#define GEN_MOV (*(gen_movsi))
#define GEN_ADD3 (*(gen_addsi3))
#define GEN_SUB3 (*(gen_subsi3))

/* Used to simplify the logic below.  Find the attributes wherever
   they may be.  */
#define SH_ATTRIBUTES(decl) \
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
		  : DECL_ATTRIBUTES (decl) \
		  ? (DECL_ATTRIBUTES (decl)) \
		  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
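
/* Illustrative usage sketch (assumed, not taken from the surrounding
   code): for a type the macro yields TYPE_ATTRIBUTES directly; for a
   decl it prefers the decl's own attributes and falls back to those of
   its type, so one lookup covers both placements, e.g.

     tree attrs = SH_ATTRIBUTES (current_function_decl);
     if (lookup_attribute ("interrupt_handler", attrs) != NULL_TREE)
       ...  */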

/* Set to 1 by expand_prologue() when the function is an interrupt handler.  */
int current_function_interrupt;

tree sh_deferred_function_attributes;
tree *sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;

/* Global variables for machine-dependent things.  */

/* Which cpu are we scheduling for.  */
enum processor_type sh_cpu;

/* Definitions used in ready queue reordering for first scheduling pass.  */

/* Reg weights arrays for modes SFmode and SImode, indexed by insn LUID.  */
static short *regmode_weight[2];

/* Total SFmode and SImode weights of scheduled insns.  */
static int curr_regmode_pressure[2];

/* Number of r0 life regions.  */
static int r0_life_regions;

/* If true, skip cycles for Q -> R movement.  */
static int skip_cycles = 0;

/* Cached value of can_issue_more.  This is cached in sh_variable_issue hook
   and returned from sh_reorder2.  */
static short cached_can_issue_more;

/* Unique number for UNSPEC_BBR pattern.  */
static unsigned int unspec_bbr_uid = 1;

/* Provides the class number of the smallest class containing
   reg number.  */
enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  FP0_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  NO_REGS, GENERAL_REGS, PR_REGS, T_REGS,
  MAC_REGS, MAC_REGS, FPUL_REGS, FPSCR_REGS,
  GENERAL_REGS, GENERAL_REGS,
};

char sh_register_names[FIRST_PSEUDO_REGISTER] \
  [MAX_REGISTER_NAME_LENGTH + 1] = SH_REGISTER_NAMES_INITIALIZER;

char sh_additional_register_names[ADDREGNAMES_SIZE] \
  [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1]
  = SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER;

int assembler_dialect;

static void split_branches (rtx_insn *);
static int branch_dest (rtx);
static void print_slot (rtx_sequence *);
static rtx_code_label *add_constant (rtx, machine_mode, rtx);
static void dump_table (rtx_insn *, rtx_insn *);
static bool broken_move (rtx_insn *);
static bool mova_p (rtx_insn *);
static rtx_insn *find_barrier (int, rtx_insn *, rtx_insn *);
static bool noncall_uses_reg (rtx, rtx_insn *, rtx *);
static rtx_insn *gen_block_redirect (rtx_insn *, int, int);
static void sh_reorg (void);
static void sh_option_override (void);
static void sh_override_options_after_change (void);
static void output_stack_adjust (int, rtx, int, HARD_REG_SET *, bool);
static rtx_insn *frame_insn (rtx);
static rtx push (int);
static void pop (int);
static void push_regs (HARD_REG_SET *, int);
static int calc_live_regs (HARD_REG_SET *);
static HOST_WIDE_INT rounded_frame_size (int);
static bool sh_frame_pointer_required (void);
static void sh_emit_mode_set (int, int, int, HARD_REG_SET);
static int sh_mode_needed (int, rtx_insn *);
static int sh_mode_after (int, int, rtx_insn *);
static int sh_mode_entry (int);
static int sh_mode_exit (int);
static int sh_mode_priority (int entity, int n);

static rtx mark_constant_pool_use (rtx);
static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree,
						   int, bool *);
static tree sh_handle_resbank_handler_attribute (tree *, tree,
						 tree, int, bool *);
static tree sh2a_handle_function_vector_handler_attribute (tree *, tree,
							   tree, int, bool *);
static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *);
static void sh_print_operand (FILE *, rtx, int);
static void sh_print_operand_address (FILE *, machine_mode, rtx);
static bool sh_print_operand_punct_valid_p (unsigned char code);
static bool sh_asm_output_addr_const_extra (FILE *file, rtx x);
static void sh_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void sh_insert_attributes (tree, tree *);
static const char *sh_check_pch_target_flags (int);
static int sh_register_move_cost (machine_mode, reg_class_t, reg_class_t);
static int sh_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static int sh_issue_rate (void);
static int sh_dfa_new_cycle (FILE *, int, rtx_insn *, int, int, int *sort_p);
static short find_set_regmode_weight (rtx, machine_mode);
static short find_insn_regmode_weight (rtx, machine_mode);
static void find_regmode_weight (basic_block, machine_mode);
static int find_r0_life_regions (basic_block);
static void sh_md_init_global (FILE *, int, int);
static void sh_md_finish_global (FILE *, int);
static int rank_for_reorder (const void *, const void *);
static void swap_reorder (rtx_insn **, int);
static void ready_reorder (rtx_insn **, int);
static bool high_pressure (machine_mode);
static int sh_reorder (FILE *, int, rtx_insn **, int *, int);
static int sh_reorder2 (FILE *, int, rtx_insn **, int *, int);
static void sh_md_init (FILE *, int, int);
static int sh_variable_issue (FILE *, int, rtx_insn *, int);

static bool sh_function_ok_for_sibcall (tree, tree);

static bool sh_can_follow_jump (const rtx_insn *, const rtx_insn *);
static reg_class_t sh_target_reg_class (void);
static bool sh_optimize_target_register_callee_saved (bool);
static bool sh_ms_bitfield_layout_p (const_tree);

static void sh_init_builtins (void);
static tree sh_builtin_decl (unsigned, bool);
static rtx sh_expand_builtin (tree, rtx, rtx, machine_mode, int);
static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				HOST_WIDE_INT, tree);
static void sh_file_start (void);
static bool sh_assemble_integer (rtx, unsigned int, int);
static bool flow_dependent_p (rtx, rtx);
static void flow_dependent_p_1 (rtx, const_rtx, void *);
static int shiftcosts (rtx);
static int and_xor_ior_costs (rtx, int);
static int addsubcosts (rtx);
static int multcosts (rtx);
static bool unspec_caller_rtx_p (rtx);
static bool sh_cannot_copy_insn_p (rtx_insn *);
static bool sh_cannot_force_const_mem_p (machine_mode, rtx);
static bool sh_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int sh_address_cost (rtx, machine_mode, addr_space_t, bool);
static int sh_pr_n_sets (void);
static rtx sh_allocate_initial_value (rtx);
static reg_class_t sh_preferred_reload_class (rtx, reg_class_t);
static reg_class_t sh_secondary_reload (bool, rtx, reg_class_t,
					machine_mode,
					struct secondary_reload_info *);
static bool sh_legitimate_address_p (machine_mode, rtx, bool);
static rtx sh_legitimize_address (rtx, rtx, machine_mode);
static rtx sh_delegitimize_address (rtx);
static bool sh_cannot_substitute_mem_equiv_p (rtx);
static bool sh_legitimize_address_displacement (rtx *, rtx *, machine_mode);
static int scavenge_reg (HARD_REG_SET *s);
struct save_schedule_s;

static rtx sh_struct_value_rtx (tree, int);
static rtx sh_function_value (const_tree, const_tree, bool);
static bool sh_function_value_regno_p (const unsigned int);
static rtx sh_libcall_value (machine_mode, const_rtx);
static bool sh_return_in_memory (const_tree, const_tree);
static rtx sh_builtin_saveregs (void);
static void sh_setup_incoming_varargs (cumulative_args_t, machine_mode,
				       tree, int *, int);
static bool sh_strict_argument_naming (cumulative_args_t);
static bool sh_pretend_outgoing_varargs_named (cumulative_args_t);
static void sh_atomic_assign_expand_fenv (tree *, tree *, tree *);
static tree sh_build_builtin_va_list (void);
static void sh_va_start (tree, rtx);
static tree sh_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool sh_promote_prototypes (const_tree);
static machine_mode sh_promote_function_mode (const_tree type,
					      machine_mode,
					      int *punsignedp,
					      const_tree funtype,
					      int for_return);
static bool sh_pass_by_reference (cumulative_args_t, machine_mode,
				  const_tree, bool);
static bool sh_callee_copies (cumulative_args_t, machine_mode,
			      const_tree, bool);
static int sh_arg_partial_bytes (cumulative_args_t, machine_mode,
				 tree, bool);
static void sh_function_arg_advance (cumulative_args_t, machine_mode,
				     const_tree, bool);
static rtx sh_function_arg (cumulative_args_t, machine_mode,
			    const_tree, bool);
static int sh_dwarf_calling_convention (const_tree);
static void sh_encode_section_info (tree, rtx, int);
static bool sh2a_function_vector_p (tree);
static void sh_trampoline_init (rtx, tree, rtx);
static rtx sh_trampoline_adjust_address (rtx);
static void sh_conditional_register_usage (void);
static bool sh_legitimate_constant_p (machine_mode, rtx);
static int mov_insn_size (machine_mode, bool);
static int mov_insn_alignment_mask (machine_mode, bool);
static bool sh_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT,
					       unsigned int,
					       enum by_pieces_operation,
					       bool);
static bool sequence_insn_p (rtx_insn *);
static void sh_canonicalize_comparison (int *, rtx *, rtx *, bool);
static void sh_canonicalize_comparison (enum rtx_code &, rtx &, rtx &,
					machine_mode, bool);
static bool sh_legitimate_combined_insn (rtx_insn *insn);

static bool sh_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2);

static void sh_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;

static const struct attribute_spec sh_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "interrupt_handler", 0, 0, true, false, false,
    sh_handle_interrupt_handler_attribute, false },
  { "sp_switch", 1, 1, true, false, false,
    sh_handle_sp_switch_attribute, false },
  { "trap_exit", 1, 1, true, false, false,
    sh_handle_trap_exit_attribute, false },
  { "renesas", 0, 0, false, true, false,
    sh_handle_renesas_attribute, false },
  { "trapa_handler", 0, 0, true, false, false,
    sh_handle_interrupt_handler_attribute, false },
  { "nosave_low_regs", 0, 0, true, false, false,
    sh_handle_interrupt_handler_attribute, false },
  { "resbank", 0, 0, true, false, false,
    sh_handle_resbank_handler_attribute, false },
  { "function_vector", 1, 1, true, false, false,
    sh2a_handle_function_vector_handler_attribute, false },
  { NULL, 0, 0, false, false, false, NULL, false }
};

/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sh_attribute_table

/* The next two are used for debug info when compiling with -gdwarf.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t"

/* These are NULLed out on non-SH5 in TARGET_OPTION_OVERRIDE.  */
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaquad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sh_option_override

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE \
  sh_override_options_after_change

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND sh_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS sh_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sh_print_operand_punct_valid_p
#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA sh_asm_output_addr_const_extra

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sh_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
  hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START sh_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sh_assemble_integer

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sh_register_move_cost

#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES sh_insert_attributes

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sh_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sh_issue_rate

/* The next 5 hooks have been implemented for reenabling sched1.  With the
   help of these macros we are limiting the movement of insns in sched1 to
   reduce the register pressure.  The overall idea is to keep count of SImode
   and SFmode regs required by already scheduled insns.  When these counts
   cross some threshold values, give priority to insns that free registers.
   The insn that frees registers is most likely to be the insn with lowest
   LUID (original insn order); but such an insn might be there in the stalled
   queue (Q) instead of the ready queue (R).  To solve this, we skip cycles
   up to a max of 8 cycles so that such insns may move from Q -> R.

   The description of the hooks is as below:

   TARGET_SCHED_INIT_GLOBAL: Added a new target hook in the generic
   scheduler; it is called inside the sched_init function just after
   find_insn_reg_weights function call.  It is used to calculate the SImode
   and SFmode weights of insns of basic blocks; much similar to what
   find_insn_reg_weights does.
   TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.

   TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
   indicated by TARGET_SCHED_REORDER2; doing this may move insns from
   Q -> R.

   TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is
   high, reorder the ready queue so that the insn with lowest LUID will be
   issued first.

   TARGET_SCHED_REORDER2: If the register pressure is high, indicate to
   TARGET_SCHED_DFA_NEW_CYCLE to skip cycles.

   TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it
   can be returned from TARGET_SCHED_REORDER2.

   TARGET_SCHED_INIT: Reset the register pressure counting variables.  */

#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL sh_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue

#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER sh_reorder

#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 sh_reorder2

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sh_md_init

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sh_delegitimize_address

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sh_legitimize_address

#undef TARGET_CAN_FOLLOW_JUMP
#define TARGET_CAN_FOLLOW_JUMP sh_can_follow_jump
#undef TARGET_BRANCH_TARGET_REGISTER_CLASS
#define TARGET_BRANCH_TARGET_REGISTER_CLASS sh_target_reg_class
#undef TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED
#define TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED \
  sh_optimize_target_register_callee_saved

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P sh_ms_bitfield_layout_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sh_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL sh_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sh_expand_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sh_function_ok_for_sibcall

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P sh_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sh_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST sh_address_cost
#undef TARGET_ALLOCATE_INITIAL_VALUE
#define TARGET_ALLOCATE_INITIAL_VALUE sh_allocate_initial_value

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG sh_reorg

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN sh_dwarf_register_span

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sh_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sh_function_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sh_function_value_regno_p
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sh_libcall_value
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sh_return_in_memory

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sh_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES sh_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sh_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sh_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sh_function_arg_advance

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sh_atomic_assign_expand_fenv

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sh_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p

#undef TARGET_CHECK_PCH_TARGET_FLAGS
#define TARGET_CHECK_PCH_TARGET_FLAGS sh_check_pch_target_flags

#undef TARGET_DWARF_CALLING_CONVENTION
#define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sh_frame_pointer_required

#undef TARGET_MODE_EMIT
#define TARGET_MODE_EMIT sh_emit_mode_set

#undef TARGET_MODE_NEEDED
#define TARGET_MODE_NEEDED sh_mode_needed

#undef TARGET_MODE_AFTER
#define TARGET_MODE_AFTER sh_mode_after

#undef TARGET_MODE_ENTRY
#define TARGET_MODE_ENTRY sh_mode_entry

#undef TARGET_MODE_EXIT
#define TARGET_MODE_EXIT sh_mode_exit

#undef TARGET_MODE_PRIORITY
#define TARGET_MODE_PRIORITY sh_mode_priority

/* Return regmode weight for insn.  */
#define INSN_REGMODE_WEIGHT(INSN, MODE)\
  regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]

/* Return current register pressure for regmode.  */
#define CURR_REGMODE_PRESSURE(MODE)\
  curr_regmode_pressure[((MODE) == SImode) ? 0 : 1]
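
/* A minimal sketch of how the two macros above feed the sched1 hooks
   (illustrative only; 'insn' stands for any insn being scheduled):

     CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
     if (high_pressure (SImode))
       ...reorder the ready queue / ask the DFA to skip cycles...

   The actual threshold test lives in the high_pressure() predicate
   declared above.  */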

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO sh_encode_section_info

#undef TARGET_LRA_P
#define TARGET_LRA_P sh_lra_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sh_secondary_reload

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sh_preferred_reload_class

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sh_conditional_register_usage

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sh_legitimate_address_p

#undef TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P
#define TARGET_CANNOT_SUBSTITUTE_MEM_EQUIV_P sh_cannot_substitute_mem_equiv_p

#undef TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT
#define TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT \
  sh_legitimize_address_displacement

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sh_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS sh_trampoline_adjust_address

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P sh_legitimate_constant_p

#undef TARGET_CANONICALIZE_COMPARISON
#define TARGET_CANONICALIZE_COMPARISON sh_canonicalize_comparison

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN sh_legitimate_combined_insn

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS sh_fixed_condition_code_regs

#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
  sh_use_by_pieces_infrastructure_p

/* Machine-specific symbol_ref flags.  */
#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)

/* The tas.b instruction sets the 7th bit in the byte, i.e. 0x80.  This value
   is used by optabs.c atomic op expansion code as well as in sync.md.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 0x80
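
/* Schematic illustration of why the trueval is 0x80 rather than 1 (the
   asm below is hand-written, not compiler output):

     tas.b  @r4        ! T = (byte at @r4 == 0); byte |= 0x80
     bf     .Lbusy     ! T == 0 -> the lock byte was already non-zero

   Since tas.b always stores 0x80 into the tested byte, 0x80 is the value
   a successful __atomic_test_and_set leaves behind.  */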

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sh_cannot_force_const_mem_p

struct gcc_target targetm = TARGET_INITIALIZER;

/* Information on the currently selected atomic model.
   This is initialized in sh_option_override.  */
static sh_atomic_model selected_atomic_model_;

const sh_atomic_model&
selected_atomic_model (void)
{
  return selected_atomic_model_;
}

static sh_atomic_model
parse_validate_atomic_model_option (const char* str)
{
  const char* model_names[sh_atomic_model::num_models];
  model_names[sh_atomic_model::none] = "none";
  model_names[sh_atomic_model::soft_gusa] = "soft-gusa";
  model_names[sh_atomic_model::hard_llcs] = "hard-llcs";
  model_names[sh_atomic_model::soft_tcb] = "soft-tcb";
  model_names[sh_atomic_model::soft_imask] = "soft-imask";

  const char* model_cdef_names[sh_atomic_model::num_models];
  model_cdef_names[sh_atomic_model::none] = "NONE";
  model_cdef_names[sh_atomic_model::soft_gusa] = "SOFT_GUSA";
  model_cdef_names[sh_atomic_model::hard_llcs] = "HARD_LLCS";
  model_cdef_names[sh_atomic_model::soft_tcb] = "SOFT_TCB";
  model_cdef_names[sh_atomic_model::soft_imask] = "SOFT_IMASK";

  sh_atomic_model ret;
  ret.type = sh_atomic_model::none;
  ret.name = model_names[sh_atomic_model::none];
  ret.cdef_name = model_cdef_names[sh_atomic_model::none];
  ret.strict = false;
  ret.tcb_gbr_offset = -1;

  /* Handle empty string as 'none'.  */
  if (str == NULL || *str == '\0')
    return ret;

#define err_ret(...) do { error (__VA_ARGS__); return ret; } while (0)

  std::vector<std::string> tokens;
  for (std::stringstream ss (str); ss.good (); )
    {
      tokens.push_back (std::string ());
      std::getline (ss, tokens.back (), ',');
    }

  if (tokens.empty ())
    err_ret ("invalid atomic model option");

  /* The first token must be the atomic model name.  */
  {
    for (size_t i = 0; i < sh_atomic_model::num_models; ++i)
      if (tokens.front () == model_names[i])
	{
	  ret.type = (sh_atomic_model::enum_type)i;
	  ret.name = model_names[i];
	  ret.cdef_name = model_cdef_names[i];
	  goto got_mode_name;
	}

    err_ret ("invalid atomic model name \"%s\"", tokens.front ().c_str ());
got_mode_name:;
  }

  /* Go through the remaining tokens.  */
  for (size_t i = 1; i < tokens.size (); ++i)
    {
      if (tokens[i] == "strict")
	ret.strict = true;
      else if (tokens[i].find ("gbr-offset=") == 0)
	{
	  std::string offset_str = tokens[i].substr (strlen ("gbr-offset="));
	  ret.tcb_gbr_offset = integral_argument (offset_str.c_str ());
	  if (offset_str.empty () || ret.tcb_gbr_offset == -1)
	    err_ret ("could not parse gbr-offset value \"%s\" in atomic model "
		     "option", offset_str.c_str ());
	}
      else
	err_ret ("unknown parameter \"%s\" in atomic model option",
		 tokens[i].c_str ());
    }

  /* Check that the selection makes sense.  */
  if (ret.type == sh_atomic_model::soft_gusa && !TARGET_SH3)
    err_ret ("atomic model %s is only available on SH3 and SH4 targets",
	     ret.name);

  if (ret.type == sh_atomic_model::hard_llcs && !TARGET_SH4A)
    err_ret ("atomic model %s is only available on SH4A targets", ret.name);

  if (ret.type == sh_atomic_model::soft_tcb && ret.tcb_gbr_offset == -1)
    err_ret ("atomic model %s requires gbr-offset parameter", ret.name);

  if (ret.type == sh_atomic_model::soft_tcb
      && (ret.tcb_gbr_offset < 0 || ret.tcb_gbr_offset > 1020
	  || (ret.tcb_gbr_offset & 3) != 0))
    err_ret ("invalid gbr-offset value \"%d\" for atomic model %s; it must be "
	     "a multiple of 4 in the range 0-1020", ret.tcb_gbr_offset,
	     ret.name);

  if (ret.type == sh_atomic_model::soft_imask && TARGET_USERMODE)
    err_ret ("cannot use atomic model %s in user mode", ret.name);

  return ret;

#undef err_ret
}
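
/* Some option strings the parser above accepts (illustrative):

     -matomic-model=soft-gusa
     -matomic-model=soft-tcb,gbr-offset=128
     -matomic-model=hard-llcs,strict

   The model name must be the first token; "strict" and "gbr-offset=<n>"
   are the only extra parameters, and the gbr offset must be a multiple
   of 4 in the range 0-1020.  */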

/* Register SH specific RTL passes.  */
extern opt_pass* make_pass_sh_treg_combine (gcc::context* ctx, bool split_insns,
					    const char* name);
extern opt_pass* make_pass_sh_optimize_sett_clrt (gcc::context* ctx,
						  const char* name);
static void
register_sh_passes (void)
{
/* Running the sh_treg_combine pass after ce1 generates better code when
   comparisons are combined and reg-reg moves are introduced, because
   reg-reg moves will be eliminated afterwards.  However, there are quite
   some cases where combine will be unable to fold comparison related insns,
   thus for now don't do it.
   register_pass (make_pass_sh_treg_combine (g, false, "sh_treg_combine1"),
		  PASS_POS_INSERT_AFTER, "ce1", 1);
*/

  /* Run sh_treg_combine pass after combine but before register allocation.  */
  register_pass (make_pass_sh_treg_combine (g, true, "sh_treg_combine2"),
		 PASS_POS_INSERT_AFTER, "split1", 1);

  /* Run sh_treg_combine pass after register allocation and basic block
     reordering as this sometimes creates new opportunities.  */
  register_pass (make_pass_sh_treg_combine (g, true, "sh_treg_combine3"),
		 PASS_POS_INSERT_AFTER, "split4", 1);

  /* Optimize sett and clrt insns, by e.g. removing them if the T bit value
     is known after a conditional branch.
     This must be done after basic blocks and branch conditions have
     stabilized and won't be changed by further passes.  */
  register_pass (make_pass_sh_optimize_sett_clrt (g, "sh_optimize_sett_clrt"),
		 PASS_POS_INSERT_BEFORE, "sched2", 1);
}

/* Implement TARGET_OPTION_OVERRIDE macro.  Validate and override
   various options, and do some machine dependent initialization.  */
static void
sh_option_override (void)
{
  int regno;

  SUBTARGET_OVERRIDE_OPTIONS;
  if (optimize > 1 && !optimize_size)
    target_flags |= MASK_SAVE_ALL_TARGET_REGS;

  /* Set default values of TARGET_CBRANCHDI4 and TARGET_CMPEQDI_T.  */
  TARGET_CBRANCHDI4 = 1;
  TARGET_CMPEQDI_T = 0;

  sh_cpu = PROCESSOR_SH1;
  assembler_dialect = 0;
  if (TARGET_SH2)
    sh_cpu = PROCESSOR_SH2;
  if (TARGET_SH2E)
    sh_cpu = PROCESSOR_SH2E;
  if (TARGET_SH2A)
    sh_cpu = PROCESSOR_SH2A;
  if (TARGET_SH3)
    sh_cpu = PROCESSOR_SH3;
  if (TARGET_SH3E)
    sh_cpu = PROCESSOR_SH3E;
  if (TARGET_SH4)
    {
      assembler_dialect = 1;
      sh_cpu = PROCESSOR_SH4;
    }
  if (TARGET_SH4A)
    {
      assembler_dialect = 1;
      sh_cpu = PROCESSOR_SH4A;
    }

  /* Only the sh64-elf assembler fully supports .quad properly.  */
  targetm.asm_out.aligned_op.di = NULL;
  targetm.asm_out.unaligned_op.di = NULL;

  /* User/privileged mode is supported only on SH3* and SH4*.
     Disable it for everything else.  */
  if (!TARGET_SH3 && TARGET_USERMODE)
    TARGET_USERMODE = false;

  if (! strcmp (sh_div_str, "call-div1"))
    sh_div_strategy = SH_DIV_CALL_DIV1;
  else if (! strcmp (sh_div_str, "call-fp") && TARGET_FPU_ANY)
    sh_div_strategy = SH_DIV_CALL_FP;
  else if (! strcmp (sh_div_str, "call-table") && TARGET_DYNSHIFT)
    sh_div_strategy = SH_DIV_CALL_TABLE;
  else
    {
      /* Pick one that makes most sense for the target in general.
	 It is not much good to use different functions depending on -Os,
	 since then we'll end up with two different functions when some of
	 the code is compiled for size, and some for speed.  */

      /* SH4 tends to emphasize speed.  */
      if (TARGET_HARD_SH4)
	sh_div_strategy = SH_DIV_CALL_TABLE;
      /* These have their own way of doing things.  */
      else if (TARGET_SH2A)
	sh_div_strategy = SH_DIV_INTRINSIC;
      /* SH1 .. SH3 cores often go into small-footprint systems, so
	 default to the smallest implementation available.  */
      else
	sh_div_strategy = SH_DIV_CALL_DIV1;
    }

  if (sh_divsi3_libfunc[0])
    ; /* User supplied - leave it alone.  */
  else if (TARGET_DIVIDE_CALL_FP)
    sh_divsi3_libfunc = "__sdivsi3_i4";
  else if (TARGET_DIVIDE_CALL_TABLE)
    sh_divsi3_libfunc = "__sdivsi3_i4i";
  else
    sh_divsi3_libfunc = "__sdivsi3";

  if (sh_branch_cost == -1)
    {
      /* The SH1 does not have delay slots, hence we get a pipeline stall
	 at every branch.  The SH4 is superscalar, so the single delay slot
	 is not sufficient to keep both pipelines filled.
	 In any case, set the default branch cost to '2', as it results in
	 slightly overall smaller code and also enables some if conversions
	 that are required for matching special T bit related insns.  */
      sh_branch_cost = 2;
    }

  /* Set -mzdcbranch for SH4 / SH4A if not otherwise specified by the user.  */
  if (! global_options_set.x_TARGET_ZDCBRANCH && TARGET_HARD_SH4)
    TARGET_ZDCBRANCH = 1;

  /* FDPIC code is a special form of PIC, and the vast majority of code
     generation constraints that apply to PIC also apply to FDPIC, so we
     set flag_pic to avoid the need to check TARGET_FDPIC everywhere
     flag_pic is checked. */
  if (TARGET_FDPIC && !flag_pic)
    flag_pic = 2;

  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (! VALID_REGISTER_P (regno))
      sh_register_names[regno][0] = '\0';

  for (regno = 0; regno < ADDREGNAMES_SIZE; regno++)
    if (! VALID_REGISTER_P (ADDREGNAMES_REGNO (regno)))
      sh_additional_register_names[regno][0] = '\0';

  if (flag_pic && ! TARGET_PREFERGOT)
    flag_no_function_cse = 1;

  if (targetm.small_register_classes_for_mode_p (VOIDmode))
    {
      /* Never run scheduling before reload, since that can
	 break global alloc, and generates slower code anyway due
	 to the pressure on R0.  */
      /* Enable sched1 for SH4 if the user explicitly requests.
	 When sched1 is enabled, the ready queue will be reordered by
	 the target hooks if pressure is high.  We can not do this for
	 PIC, SH3 and lower as they give spill failures for R0.  */
      if (!TARGET_HARD_SH4 || flag_pic)
	flag_schedule_insns = 0;
      /* ??? Current exception handling places basic block boundaries
	 after call_insns.  It causes the high pressure on R0 and gives
	 spill failures for R0 in reload.  See PR 22553 and the thread
	 on gcc-patches
	 <http://gcc.gnu.org/ml/gcc-patches/2005-10/msg00816.html>.  */
      else if (flag_exceptions)
	{
	  if (flag_schedule_insns && global_options_set.x_flag_schedule_insns)
	    warning (0, "ignoring -fschedule-insns because of exception "
			"handling bug");
	  flag_schedule_insns = 0;
	}
      else if (flag_schedule_insns
	       && !global_options_set.x_flag_schedule_insns)
	flag_schedule_insns = 0;
    }

  /* Unwind info is not correct around the CFG unless either a frame
     pointer is present or M_A_O_A is set.  Fixing this requires rewriting
     unwind info generation to be aware of the CFG and propagating states
     around edges.  */
  if ((flag_unwind_tables || flag_asynchronous_unwind_tables
       || flag_exceptions || flag_non_call_exceptions)
      && flag_omit_frame_pointer && !TARGET_ACCUMULATE_OUTGOING_ARGS)
    {
      warning (0, "unwind tables currently require either a frame pointer "
	       "or -maccumulate-outgoing-args for correctness");
      TARGET_ACCUMULATE_OUTGOING_ARGS = 1;
    }

  if (flag_unsafe_math_optimizations)
    {
      /* Enable fsca insn for SH4A if not otherwise specified by the user.  */
      if (global_options_set.x_TARGET_FSCA == 0 && TARGET_SH4A_FP)
	TARGET_FSCA = 1;

      /* Enable fsrra insn for SH4A if not otherwise specified by the user.  */
      if (global_options_set.x_TARGET_FSRRA == 0 && TARGET_SH4A_FP)
	TARGET_FSRRA = 1;
    }

  /* Allow fsrra insn only if -funsafe-math-optimizations and
     -ffinite-math-only is enabled.  */
  TARGET_FSRRA = TARGET_FSRRA
		 && flag_unsafe_math_optimizations
		 && flag_finite_math_only;

  /* If the -mieee option was not explicitly set by the user, turn it on
     unless -ffinite-math-only was specified.  See also PR 33135.  */
  if (! global_options_set.x_TARGET_IEEE)
    TARGET_IEEE = ! flag_finite_math_only;

  if (sh_fixed_range_str)
    sh_fix_range (sh_fixed_range_str);

  /* This target defaults to strict volatile bitfields.  */
  if (flag_strict_volatile_bitfields < 0 && abi_version_at_least (2))
    flag_strict_volatile_bitfields = 1;

  sh_override_options_after_change ();

  /* Parse atomic model option and make sure it is valid for the current
     target CPU.  */
  selected_atomic_model_
    = parse_validate_atomic_model_option (sh_atomic_model_str);

  register_sh_passes ();
}

/* Implement targetm.override_options_after_change.  */
static void
sh_override_options_after_change (void)
{
  /* Adjust loop, jump and function alignment values (in bytes), if those
     were not specified by the user using -falign-loops, -falign-jumps
     and -falign-functions options.
     32 bit alignment is better for speed, because instructions can be
     fetched as a pair from a longword boundary.  For size use 16 bit
     alignment to get more compact code.
     Aligning all jumps increases the code size, even if it might
     result in slightly faster code.  Thus, it is set to the smallest
     alignment possible if not specified by the user.  */
  if (align_loops == 0)
    align_loops = optimize_size ? 2 : 4;

  if (align_jumps == 0)
    align_jumps = 2;
  else if (align_jumps < 2)
    align_jumps = 2;

  if (align_functions == 0)
    align_functions = optimize_size ? 2 : 4;

  /* The linker relaxation code breaks when a function contains
     alignments that are larger than that at the start of a
     compilation unit.  */
  if (TARGET_RELAX)
    {
      int min_align = align_loops > align_jumps ? align_loops : align_jumps;

      /* Also take possible .long constants / mova tables into account.  */
      if (min_align < 4)
	min_align = 4;
      if (align_functions < min_align)
	align_functions = min_align;
    }
}

/* Print the operand address in x to the stream.  */
static void
sh_print_operand_address (FILE *stream, machine_mode /*mode*/, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
      fprintf (stream, "@%s", reg_names[true_regnum (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "@(%d,%s)", (int) INTVAL (index),
		     reg_names[true_regnum (base)]);
	    break;

	  case REG:
	  case SUBREG:
	    {
	      int base_num = true_regnum (base);
	      int index_num = true_regnum (index);

	      fprintf (stream, "@(r0,%s)",
		       reg_names[MAX (base_num, index_num)]);
	      break;
	    }

	  default:
	    gcc_unreachable ();
	  }
      }
      break;

    case PRE_DEC:
      fprintf (stream, "@-%s", reg_names[true_regnum (XEXP (x, 0))]);
      break;

    case POST_INC:
      fprintf (stream, "@%s+", reg_names[true_regnum (XEXP (x, 0))]);
      break;

    default:
      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);
      break;
    }
}

/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   '.'  print a .s if insn needs delay slot
   ','  print LOCAL_LABEL_PREFIX
   '@'  print trap, rte or rts depending upon pragma interruptness
   '#'  output a nop if there is nothing to put in the delay slot
   '''  print likelihood suffix (/u for unlikely).
   '>'  print branch target if -fverbose-asm
   'O'  print a constant without the #
   'R'  print the LSW of a dp value - changes if in little endian
   'S'  print the MSW of a dp value - changes if in little endian
   'T'  print the next word of a dp value - same as 'R' in big endian mode.
   'M'  print .b / .w / .l / .s / .d suffix if operand is a MEM.
   'N'  print 'r63' if the operand is (const_int 0).
   'd'  print a V2SF reg as dN instead of fpN.
   'm'  print a pair `base,offset' or `base,index', for LD and ST.
   'U'  Likewise for {LD,ST}{HI,LO}.
   'V'  print the position of a single bit set.
   'W'  print the position of a single bit cleared.
   't'  print a memory address which is a register.
   'u'  prints the lowest 16 bits of CONST_INT, as an unsigned value.
   'o'  output an operator.  */
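
/* As a schematic example of how these codes appear in insn templates:
   an indirect jump template along the lines of "jmp @%0%#" uses '#' to
   emit a nop when the delay slot could not be filled (the template shown
   here is illustrative; see sh.md for the real ones).  */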

static void
sh_print_operand (FILE *stream, rtx x, int code)
{
  int regno;
  machine_mode mode;

  switch (code)
    {
      tree trapa_attr;

    case '.':
      if (final_sequence
	  && ! INSN_ANNULLED_BRANCH_P (final_sequence->insn (0))
	  && get_attr_length (final_sequence->insn (1)))
	fprintf (stream, ASSEMBLER_DIALECT ? "/s" : ".s");
      break;
    case ',':
      fprintf (stream, "%s", LOCAL_LABEL_PREFIX);
      break;
    case '@':
      trapa_attr = lookup_attribute ("trap_exit",
				     DECL_ATTRIBUTES (current_function_decl));
      if (trapa_attr)
	fprintf (stream, "trapa #%ld",
		 (long) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (trapa_attr))));
      else if (sh_cfun_interrupt_handler_p ())
	{
	  if (sh_cfun_resbank_handler_p ())
	    fprintf (stream, "resbank\n");
	  fprintf (stream, "rte");
	}
      else
	fprintf (stream, "rts");
      break;
    case '#':
      /* Output a nop if there's nothing in the delay slot.  */
      if (dbr_sequence_length () == 0)
	fprintf (stream, "\n\tnop");
      break;
    case '\'':
      {
	rtx note = find_reg_note (current_output_insn, REG_BR_PROB, 0);

	if (note && XINT (note, 0) * 2 < REG_BR_PROB_BASE)
	  fputs ("/u", stream);
	break;
      }
    case '>':
      if (flag_verbose_asm && JUMP_LABEL (current_output_insn))
	{
	  fputs ("\t! target: ", stream);
	  output_addr_const (stream, JUMP_LABEL (current_output_insn));
	}
      break;
    case 'O':
      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);
      break;
    /* N.B.: %R / %S / %T adjust memory addresses by four.
       While they can be used to access 64 bit parts of a larger value
       held in general purpose registers, that won't work with memory -
       neither for fp registers, since the frxx names are used.  */
    case 'R':
      if (REG_P (x) || GET_CODE (x) == SUBREG)
	{
	  regno = true_regnum (x);
	  regno += FP_REGISTER_P (regno) ? 1 : SH_REG_LSW_OFFSET;
	  fputs (reg_names[regno], (stream));
	}
      else if (MEM_P (x))
	{
	  x = adjust_address (x, SImode, 4 * SH_REG_LSW_OFFSET);
	  sh_print_operand_address (stream, GET_MODE (x), XEXP (x, 0));
	}
      else
	{
	  rtx sub = NULL_RTX;

	  mode = GET_MODE (x);
	  if (mode == VOIDmode)
	    mode = DImode;
	  if (GET_MODE_SIZE (mode) >= 8)
	    sub = simplify_subreg (SImode, x, mode, 4 * SH_REG_LSW_OFFSET);
	  if (sub)
	    sh_print_operand (stream, sub, 0);
	  else
	    output_operand_lossage ("invalid operand to %%R");
	}
      break;
    case 'S':
      if (REG_P (x) || GET_CODE (x) == SUBREG)
	{
	  regno = true_regnum (x);
	  regno += FP_REGISTER_P (regno) ? 0 : SH_REG_MSW_OFFSET;
	  fputs (reg_names[regno], (stream));
	}
      else if (MEM_P (x))
	{
	  x = adjust_address (x, SImode, 4 * SH_REG_MSW_OFFSET);
	  sh_print_operand_address (stream, GET_MODE (x), XEXP (x, 0));
	}
      else
	{
	  rtx sub = NULL_RTX;

	  mode = GET_MODE (x);
	  if (mode == VOIDmode)
	    mode = DImode;
	  if (GET_MODE_SIZE (mode) >= 8)
	    sub = simplify_subreg (SImode, x, mode, 4 * SH_REG_MSW_OFFSET);
	  if (sub)
	    sh_print_operand (stream, sub, 0);
	  else
	    output_operand_lossage ("invalid operand to %%S");
	}
      break;
    case 'T':
      /* Next word of a double.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  {
	    machine_mode mode = GET_MODE (x);
	    if (GET_CODE (XEXP (x, 0)) != PRE_DEC
		&& GET_CODE (XEXP (x, 0)) != POST_INC)
	      x = adjust_address (x, SImode, 4);
	    sh_print_operand_address (stream, mode, XEXP (x, 0));
	  }
	  break;
	default:
	  break;
	}
      break;

    case 't':
      gcc_assert (MEM_P (x));
      x = XEXP (x, 0);
      switch (GET_CODE (x))
	{
	case REG:
	case SUBREG:
	  sh_print_operand (stream, x, 0);
	  break;
	default:
	  break;
	}
      break;

    case 'o':
      switch (GET_CODE (x))
	{
	case PLUS:  fputs ("add", stream); break;
	case MINUS: fputs ("sub", stream); break;
	case MULT:  fputs ("mul", stream); break;
	case DIV:   fputs ("div", stream); break;
	case EQ:    fputs ("eq",  stream); break;
	case NE:    fputs ("ne",  stream); break;
	case GT:  case LT:  fputs ("gt",  stream); break;
	case GE:  case LE:  fputs ("ge",  stream); break;
	case GTU: case LTU: fputs ("gtu", stream); break;
	case GEU: case LEU: fputs ("geu", stream); break;
	default:
	  break;
	}
      break;
    case 'M':
      if (MEM_P (x))
	{
	  switch (GET_MODE (x))
	    {
	    case QImode: fputs (".b", stream); break;
	    case HImode: fputs (".w", stream); break;
	    case SImode: fputs (".l", stream); break;
	    case SFmode: fputs (".s", stream); break;
	    case DFmode: fputs (".d", stream); break;
	    default: gcc_unreachable ();
	    }
	}
      break;

    case 'm':
      gcc_assert (MEM_P (x));
      x = XEXP (x, 0);
      /* Fall through.  */
    case 'U':
      switch (GET_CODE (x))
	{
	case REG:
	case SUBREG:
	  sh_print_operand (stream, x, 0);
	  fputs (", 0", stream);
	  break;

	case PLUS:
	  sh_print_operand (stream, XEXP (x, 0), 0);
	  fputs (", ", stream);
	  sh_print_operand (stream, XEXP (x, 1), 0);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    case 'V':
      {
	int num = exact_log2 (INTVAL (x));
	gcc_assert (num >= 0);
	fprintf (stream, "#%d", num);
      }
      break;

    case 'W':
      {
	int num = exact_log2 (~INTVAL (x));
	gcc_assert (num >= 0);
	fprintf (stream, "#%d", num);
      }
      break;

    case 'd':
      gcc_assert (REG_P (x) && GET_MODE (x) == V2SFmode);

      fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
      break;

    case 'N':
      if (x == CONST0_RTX (GET_MODE (x)))
	{
	  fprintf ((stream), "r63");
	  break;
	}
      goto default_output;
    case 'u':
      if (CONST_INT_P (x))
	{
	  fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));
	  break;
	}
      /* Fall through.  */

    default_output:
    default:
      regno = 0;
      mode = GET_MODE (x);

      switch (GET_CODE (x))
	{
	case TRUNCATE:
	  {
	    rtx inner = XEXP (x, 0);
	    int offset = 0;
	    machine_mode inner_mode;

	    /* We might see SUBREGs with vector mode registers inside.  */
	    if (GET_CODE (inner) == SUBREG
		&& (GET_MODE_SIZE (GET_MODE (inner))
		    == GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
		&& subreg_lowpart_p (inner))
	      inner = SUBREG_REG (inner);
	    if (CONST_INT_P (inner))
	      {
		x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x)));
		goto default_output;
	      }
	    inner_mode = GET_MODE (inner);
	    if (GET_CODE (inner) == SUBREG
		&& (GET_MODE_SIZE (GET_MODE (inner))
		    < GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
		&& REG_P (SUBREG_REG (inner)))
	      {
		offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)),
					      GET_MODE (SUBREG_REG (inner)),
					      SUBREG_BYTE (inner),
					      GET_MODE (inner));
		inner = SUBREG_REG (inner);
	      }
	    if (!REG_P (inner) || GET_MODE_SIZE (inner_mode) > 8)
	      abort ();
	    /* Floating point register pairs are always big endian;
	       general purpose registers are 64 bit wide.  */
	    regno = REGNO (inner);
	    regno = (HARD_REGNO_NREGS (regno, inner_mode)
		     - HARD_REGNO_NREGS (regno, mode))
		    + offset;
	    x = inner;
	    goto reg;
	  }
	case SIGN_EXTEND:
	  x = XEXP (x, 0);
	  goto reg;
	case SUBREG:
	  gcc_assert (SUBREG_BYTE (x) == 0
		      && REG_P (SUBREG_REG (x)));

	  x = SUBREG_REG (x);
	  /* Fall through.  */

	reg:
	case REG:
	  regno += REGNO (x);
	  if (FP_REGISTER_P (regno)
	      && mode == V16SFmode)
	    fprintf ((stream), "mtrx%s", reg_names[regno] + 2);
	  else if (FP_REGISTER_P (REGNO (x))
		   && mode == V4SFmode)
	    fprintf ((stream), "fv%s", reg_names[regno] + 2);
	  else if (REG_P (x)
		   && mode == V2SFmode)
	    fprintf ((stream), "fp%s", reg_names[regno] + 2);
	  else if (FP_REGISTER_P (REGNO (x))
		   && GET_MODE_SIZE (mode) > 4)
	    fprintf ((stream), "d%s", reg_names[regno] + 1);
	  else
	    fputs (reg_names[regno], (stream));
	  break;

	case MEM:
	  output_address (GET_MODE (x), XEXP (x, 0));
	  break;

	default:
	  fputc ('#', stream);
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}

static bool
sh_print_operand_punct_valid_p (unsigned char code)
{
  return (code == '.' || code == '#' || code == '@' || code == ','
	  || code == '$' || code == '\'' || code == '>');
}

/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.  */
static bool
sh_asm_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    {
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	  /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  break;
	case UNSPEC_GOT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOT", file);
	  break;
	case UNSPEC_GOTOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTOFF", file);
	  break;
	case UNSPEC_PLT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@PLT", file);
	  break;
	case UNSPEC_GOTPLT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTPLT", file);
	  break;
	case UNSPEC_PCREL:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@PCREL", file);
	  break;
	case UNSPEC_DTPOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@DTPOFF", file);
	  break;
	case UNSPEC_GOTTPOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTTPOFF", file);
	  break;
	case UNSPEC_TPOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@TPOFF", file);
	  break;
	case UNSPEC_CALLER:
	  {
	    char name[32];
	    /* LPCS stands for Label for PIC Call Site.  */
	    targetm.asm_out.generate_internal_label (name, "LPCS",
						     INTVAL (XVECEXP (x, 0, 0)));
	    assemble_name (file, name);
	  }
	  break;
	case UNSPEC_SYMOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputc ('-', file);
	  if (GET_CODE (XVECEXP (x, 0, 1)) == CONST)
	    {
	      fputc ('(', file);
	      output_addr_const (file, XVECEXP (x, 0, 1));
	      fputc (')', file);
	    }
	  else
	    output_addr_const (file, XVECEXP (x, 0, 1));
	  break;
	case UNSPEC_PCREL_SYMOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("-(", file);
	  output_addr_const (file, XVECEXP (x, 0, 1));
	  fputs ("-.)", file);
	  break;
	case UNSPEC_GOTFUNCDESC:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTFUNCDESC", file);
	  break;
	case UNSPEC_GOTOFFFUNCDESC:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTOFFFUNCDESC", file);
	  break;
	default:
	  return false;
	}
      return true;
    }
  else
    return false;
}

/* Encode symbol attributes of a SYMBOL_REF into its
   SYMBOL_REF_FLAGS.  */
static void
sh_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (TREE_CODE (decl) == FUNCTION_DECL
      && sh2a_function_vector_p (decl) && TARGET_SH2A)
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FUNCVEC_FUNCTION;
}

/* Prepare operands for a move define_expand; specifically, one of the
   operands must be in a register.  */
void
prepare_move_operands (rtx operands[], machine_mode mode)
{
  if ((mode == SImode || mode == DImode)
      && flag_pic
      && ! ((mode == Pmode || mode == ptr_mode)
	    && tls_symbolic_operand (operands[1], Pmode) != TLS_MODEL_NONE))
    {
      rtx temp;
      if (SYMBOLIC_CONST_P (operands[1]))
	{
	  if (MEM_P (operands[0]))
	    operands[1] = force_reg (Pmode, operands[1]);
	  else
	    {
	      temp = (!can_create_pseudo_p ()
		      ? operands[0]
		      : gen_reg_rtx (Pmode));
	      operands[1] = legitimize_pic_address (operands[1], mode, temp);
	    }
	}
      else if (GET_CODE (operands[1]) == CONST
	       && GET_CODE (XEXP (operands[1], 0)) == PLUS
	       && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
	{
	  temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
	  temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
					 mode, temp);
	  operands[1] = expand_binop (mode, add_optab, temp,
				      XEXP (XEXP (operands[1], 0), 1),
				      (!can_create_pseudo_p ()
				       ? temp
				       : gen_reg_rtx (Pmode)),
				      0, OPTAB_LIB_WIDEN);
	}
    }

  if (! reload_in_progress && ! reload_completed)
    {
      /* Copy the source to a register if both operands aren't registers.  */
      if (! register_operand (operands[0], mode)
	  && ! register_operand (operands[1], mode))
	operands[1] = copy_to_mode_reg (mode, operands[1]);

      if (MEM_P (operands[0]) && ! memory_operand (operands[0], mode))
	{
	  /* This is like change_address_1 (operands[0], mode, 0, 1) ,
	     except that we can't use that function because it is static.  */
	  rtx new_rtx = change_address (operands[0], mode, 0);
	  MEM_COPY_ATTRIBUTES (new_rtx, operands[0]);
	  operands[0] = new_rtx;
	}

      /* This case can happen while generating code to move the result
	 of a library call to the target.  Reject `st r0,@(rX,rY)' because
	 reload will fail to find a spill register for rX, since r0 is already
	 being used for the source.  */
      else if (refers_to_regno_p (R0_REG, operands[1])
	       && MEM_P (operands[0])
	       && GET_CODE (XEXP (operands[0], 0)) == PLUS
	       && REG_P (XEXP (XEXP (operands[0], 0), 1)))
	operands[1] = copy_to_mode_reg (mode, operands[1]);

      /* When the displacement addressing is used, RA will assign r0 to
	 the pseudo register operand for the QI/HImode load/store.
	 This tends to make a long live range for R0 and might cause
	 anomalous register spills in some case with LRA.  See PR
	 target/55212.
	 We split possible load/store to two move insns via r0 so as to
	 shorten R0 live range.  It will make some codes worse but will
	 win on average for LRA.
	 Also when base+index addressing is used and the index term is
	 a subreg, LRA assumes that more hard registers can be available
	 in some situation.  It isn't the case for SH in the problematic
	 case.  We can pre-allocate R0 for that index term to avoid
	 the issue.  See PR target/66591.  */
      else if (sh_lra_p ()
	       && ! TARGET_SH2A
	       && ((REG_P (operands[0]) && MEM_P (operands[1]))
		   || (REG_P (operands[1]) && MEM_P (operands[0]))))
	{
	  bool load_p = REG_P (operands[0]);
	  rtx reg = operands[load_p ? 0 : 1];
	  rtx adr = XEXP (operands[load_p ? 1 : 0], 0);

	  if ((mode == QImode || mode == HImode)
	      && REGNO (reg) >= FIRST_PSEUDO_REGISTER
	      && GET_CODE (adr) == PLUS
	      && REG_P (XEXP (adr, 0))
	      && (REGNO (XEXP (adr, 0)) >= FIRST_PSEUDO_REGISTER)
	      && CONST_INT_P (XEXP (adr, 1))
	      && INTVAL (XEXP (adr, 1)) != 0
	      && sh_legitimate_index_p (mode, XEXP (adr, 1), false, true))
	    {
	      rtx r0_rtx = gen_rtx_REG (mode, R0_REG);
	      emit_move_insn (r0_rtx, operands[1]);
	      operands[1] = r0_rtx;
	    }
	  if (REGNO (reg) >= FIRST_PSEUDO_REGISTER
	      && GET_CODE (adr) == PLUS
	      && REG_P (XEXP (adr, 0))
	      && (REGNO (XEXP (adr, 0)) >= FIRST_PSEUDO_REGISTER)
	      && SUBREG_P (XEXP (adr, 1))
	      && REG_P (SUBREG_REG (XEXP (adr, 1))))
	    {
	      rtx r0_rtx = gen_rtx_REG (GET_MODE (XEXP (adr, 1)), R0_REG);
	      emit_move_insn (r0_rtx, XEXP (adr, 1));
	      XEXP (adr, 1) = r0_rtx;
	    }
	}
    }

  if (mode == Pmode || mode == ptr_mode)
    {
      rtx op0, op1, opc;
      enum tls_model tls_kind;

      op0 = operands[0];
      op1 = operands[1];
      if (GET_CODE (op1) == CONST
	  && GET_CODE (XEXP (op1, 0)) == PLUS
	  && (tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode)
	      != TLS_MODEL_NONE))
	{
	  opc = XEXP (XEXP (op1, 0), 1);
	  op1 = XEXP (XEXP (op1, 0), 0);
	}
      else
	opc = NULL_RTX;

      if (! reload_in_progress && ! reload_completed
	  && (tls_kind = tls_symbolic_operand (op1, Pmode)) != TLS_MODEL_NONE)
	{
	  rtx tga_op1, tga_ret, tmp, tmp2;

	  if (! flag_pic
	      && (tls_kind == TLS_MODEL_GLOBAL_DYNAMIC
		  || tls_kind == TLS_MODEL_LOCAL_DYNAMIC
		  || tls_kind == TLS_MODEL_INITIAL_EXEC))
	    {
	      static int got_labelno;
	      /* Don't schedule insns for getting GOT address when
		 the first scheduling is enabled, to avoid spill
		 failures for R0.  */
	      if (flag_schedule_insns)
		emit_insn (gen_blockage ());
	      emit_insn (gen_GOTaddr2picreg (GEN_INT (++got_labelno)));
	      emit_use (gen_rtx_REG (SImode, PIC_REG));
	      if (flag_schedule_insns)
		emit_insn (gen_blockage ());
	    }

	  switch (tls_kind)
	    {
	    case TLS_MODEL_GLOBAL_DYNAMIC:
	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
	      if (TARGET_FDPIC)
		emit_move_insn (gen_rtx_REG (Pmode, PIC_REG),
				sh_get_fdpic_reg_initial_val ());
	      emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
	      tmp = gen_reg_rtx (Pmode);
	      emit_move_insn (tmp, tga_ret);
	      op1 = tmp;
	      break;

	    case TLS_MODEL_LOCAL_DYNAMIC:
	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
	      if (TARGET_FDPIC)
		emit_move_insn (gen_rtx_REG (Pmode, PIC_REG),
				sh_get_fdpic_reg_initial_val ());
	      emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));

	      tmp = gen_reg_rtx (Pmode);
	      emit_move_insn (tmp, tga_ret);

	      if (register_operand (op0, Pmode))
		tmp2 = op0;
	      else
		tmp2 = gen_reg_rtx (Pmode);

	      emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));
	      op1 = tmp2;
	      break;

	    case TLS_MODEL_INITIAL_EXEC:
	      tga_op1 = !can_create_pseudo_p () ? op0 : gen_reg_rtx (Pmode);
	      tmp = gen_sym2GOTTPOFF (op1);
	      if (TARGET_FDPIC)
		emit_move_insn (gen_rtx_REG (Pmode, PIC_REG),
				sh_get_fdpic_reg_initial_val ());
	      emit_insn (gen_tls_initial_exec (tga_op1, tmp));
	      op1 = tga_op1;
	      break;

	    case TLS_MODEL_LOCAL_EXEC:
	      tmp2 = gen_reg_rtx (Pmode);
	      emit_insn (gen_store_gbr (tmp2));
	      tmp = gen_reg_rtx (Pmode);
	      emit_insn (gen_symTPOFF2reg (tmp, op1));

	      if (register_operand (op0, Pmode))
		op1 = op0;
	      else
		op1 = gen_reg_rtx (Pmode);

	      emit_insn (gen_addsi3 (op1, tmp, tmp2));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  if (opc)
	    emit_insn (gen_addsi3 (op1, op1, force_reg (SImode, opc)));
	  operands[1] = op1;
	}
    }

  if (SH_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
    {
      rtx base, offset;
      split_const (operands[1], &base, &offset);

      if (GET_CODE (base) == SYMBOL_REF
	  && !offset_within_block_p (base, INTVAL (offset)))
	{
	  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx (mode) : operands[0];
	  emit_move_insn (tmp, base);
	  if (!arith_operand (offset, mode))
	    offset = force_reg (mode, offset);
	  emit_insn (gen_add3_insn (operands[0], tmp, offset));
	}
    }
}

/* Implement the canonicalize_comparison target hook for the combine
   pass.  For the target hook this function is invoked via
   sh_canonicalize_comparison.  This function is also re-used to
   canonicalize comparisons in cbranch pattern expanders.  */
static void
sh_canonicalize_comparison (enum rtx_code& cmp, rtx& op0, rtx& op1,
			    machine_mode mode,
			    bool op0_preserve_value)
{
  /* When invoked from within the combine pass the mode is not specified,
     so try to get it from one of the operands.  */
  if (mode == VOIDmode)
    mode = GET_MODE (op0);
  if (mode == VOIDmode)
    mode = GET_MODE (op1);

  // We need to have a mode to do something useful here.
  if (mode == VOIDmode)
    return;

  // Currently, we don't deal with floats here.
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    return;

  // Make sure that the constant operand is the second operand.
  if (CONST_INT_P (op0) && !CONST_INT_P (op1))
    {
      if (op0_preserve_value)
	return;

      std::swap (op0, op1);
      cmp = swap_condition (cmp);
    }

  if (CONST_INT_P (op1))
    {
      /* Try to adjust the constant operand in such a way that available
	 comparison insns can be utilized better and the constant can be
	 loaded with a 'mov #imm,Rm' insn.  This avoids a load from the
	 constant pool.  */
      const HOST_WIDE_INT val = INTVAL (op1);

      /* x > -1 --> x >= 0
	 x > 0xFFFFFF7F --> x >= 0xFFFFFF80
	 x <= -1 --> x < 0
	 x <= 0xFFFFFF7F --> x < 0xFFFFFF80  */
      if ((val == -1 || val == -0x81) && (cmp == GT || cmp == LE))
	{
	  cmp = cmp == GT ? GE : LT;
	  op1 = gen_int_mode (val + 1, mode);
	}

      /* x >= 1 --> x > 0
	 x >= 0x80 --> x > 0x7F
	 x < 1 --> x <= 0
	 x < 0x80 --> x <= 0x7F  */
      else if ((val == 1 || val == 0x80) && (cmp == GE || cmp == LT))
	{
	  cmp = cmp == GE ? GT : LE;
	  op1 = gen_int_mode (val - 1, mode);
	}

      /* unsigned x >= 1 --> x != 0
	 unsigned x < 1 --> x == 0  */
      else if (val == 1 && (cmp == GEU || cmp == LTU))
	{
	  cmp = cmp == GEU ? NE : EQ;
	  op1 = CONST0_RTX (mode);
	}

      /* unsigned x >= 0x80 --> unsigned x > 0x7F
	 unsigned x < 0x80 --> unsigned x <= 0x7F  */
      else if (val == 0x80 && (cmp == GEU || cmp == LTU))
	{
	  cmp = cmp == GEU ? GTU : LEU;
	  op1 = gen_int_mode (val - 1, mode);
	}

      /* unsigned x > 0 --> x != 0
	 unsigned x <= 0 --> x == 0  */
      else if (val == 0 && (cmp == GTU || cmp == LEU))
	cmp = cmp == GTU ? NE : EQ;

      /* unsigned x > 0x7FFFFFFF --> signed x < 0
	 unsigned x <= 0x7FFFFFFF --> signed x >= 0  */
      else if (mode == SImode && (cmp == GTU || cmp == LEU)
	       && val == 0x7FFFFFFF)
	{
	  cmp = cmp == GTU ? LT : GE;
	  op1 = CONST0_RTX (mode);
	}

      /* unsigned x >= 0x80000000 --> signed x < 0
	 unsigned x < 0x80000000 --> signed x >= 0  */
      else if (mode == SImode && (cmp == GEU || cmp == LTU)
	       && (unsigned HOST_WIDE_INT)val
		  == ((unsigned HOST_WIDE_INT)0x7FFFFFFF + 1))
	{
	  cmp = cmp == GEU ? LT : GE;
	  op1 = CONST0_RTX (mode);
	}
    }
}
1893 /* This function implements the canonicalize_comparison target hook.
1894 This wrapper around the internally used sh_canonicalize_comparison
1895 function is needed to do the enum rtx_code <-> int conversion.
1896 Target hooks cannot use enum rtx_code in its definition. */
1898 sh_canonicalize_comparison (int *code
, rtx
*op0
, rtx
*op1
,
1899 bool op0_preserve_value
)
1901 enum rtx_code tmp_code
= (enum rtx_code
)*code
;
1902 sh_canonicalize_comparison (tmp_code
, *op0
, *op1
,
1903 VOIDmode
, op0_preserve_value
);
1904 *code
= (int)tmp_code
;
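
/* A few of the rewrites above expressed as SH insns (SImode register
   operand x; illustrative only):

     x > -1          becomes  x >= 0  -> single 'cmp/pz' insn
     x >= 1          becomes  x > 0   -> single 'cmp/pl' insn
     unsigned x < 1  becomes  x == 0  -> single 'tst' insn

   In each case the new constant is also trivially encodable, so no
   constant pool load is needed for it.  */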
/* This function implements the legitimate_combined_insn target hook,
   which the combine pass uses to early reject combined insns, before
   it tries to recog the insn and determine its cost.  */
static bool
sh_legitimate_combined_insn (rtx_insn* insn)
{
  /* Reject combinations of memory loads and zero extensions, as these
     interfere with other combine patterns such as zero extracts and bit
     tests.  The SH2A movu.{b|w} insns are formed later in the
     'sh_optimize_extu_exts' pass after combine/split1.  */
  rtx p = PATTERN (insn);
  if (GET_CODE (p) == SET
      && REG_P (XEXP (p, 0)) && GET_MODE (XEXP (p, 0)) == SImode
      && GET_CODE (XEXP (p, 1)) == ZERO_EXTEND
      && MEM_P (XEXP (XEXP (p, 1), 0)))
    return false;

  return true;
}

static bool
sh_fixed_condition_code_regs (unsigned int* p1, unsigned int* p2)
{
  *p1 = T_REG;
  *p2 = INVALID_REGNUM;
  return true;
}
enum rtx_code
prepare_cbranch_operands (rtx *operands, machine_mode mode,
			  enum rtx_code comparison)
{
  /* The scratch reg is only available when this is invoked from within
     the cbranchdi4_i splitter, through expand_cbranchdi4.  */
  rtx scratch = NULL_RTX;

  if (comparison == LAST_AND_UNUSED_RTX_CODE)
    comparison = GET_CODE (operands[0]);
  else
    scratch = operands[4];

  sh_canonicalize_comparison (comparison, operands[1], operands[2],
			      mode, false);

  /* Notice that this function is also invoked after reload by
     the cbranchdi4_i pattern, through expand_cbranchdi4.  */
  rtx op1 = operands[1];

  if (can_create_pseudo_p ())
    operands[1] = force_reg (mode, op1);
  /* When we are handling DImode comparisons, we want to keep constants so
     that we can optimize the component comparisons; however, memory loads
     are better issued as a whole so that they can be scheduled well.
     SImode equality comparisons allow I08 constants, but only when they
     compare r0.  Hence, if operands[1] has to be loaded from somewhere else
     into a register, that register might as well be r0, and we allow the
     constant.  If it is already in a register, this is likely to be
     allocated to a different hard register, thus we load the constant into
     a register unless it is zero.  */
  if (!REG_P (operands[2])
      && (!CONST_INT_P (operands[2])
	  || (mode == SImode && operands[2] != CONST0_RTX (SImode)
	      && ((comparison != EQ && comparison != NE)
		  || (REG_P (op1) && REGNO (op1) != R0_REG)
		  || !satisfies_constraint_I08 (operands[2])))))
    {
      if (scratch && GET_MODE (scratch) == mode)
	{
	  emit_move_insn (scratch, operands[2]);
	  operands[2] = scratch;
	}
      else if (can_create_pseudo_p ())
	operands[2] = force_reg (mode, operands[2]);
    }
  return comparison;
}

void
expand_cbranchsi4 (rtx *operands, enum rtx_code comparison, int probability)
{
  rtx (*branch_expander) (rtx) = gen_branch_true;

  comparison = prepare_cbranch_operands (operands, SImode, comparison);
  switch (comparison)
    {
    case NE: case LT: case LE: case LTU: case LEU:
      comparison = reverse_condition (comparison);
      branch_expander = gen_branch_false;
      break;
    default: ;
    }
  emit_insn (gen_rtx_SET (get_t_reg_rtx (),
			  gen_rtx_fmt_ee (comparison, SImode,
					  operands[1], operands[2])));
  rtx_insn *jump = emit_jump_insn (branch_expander (operands[3]));
  if (probability >= 0)
    add_int_reg_note (jump, REG_BR_PROB, probability);
}

/* ??? How should we distribute probabilities when more than one branch
   is generated.  So far we only have some ad-hoc observations:
   - If the operands are random, they are likely to differ in both parts.
   - If comparing items in a hash chain, the operands are random or equal;
     operation should be EQ or NE.
   - If items are searched in an ordered tree from the root, we can expect
     the highpart to be unequal about half of the time; operation should be
     an inequality comparison, operands non-constant, and overall probability
     about 50%.  Likewise for quicksort.
   - Range checks will be often made against constants.  Even if we assume for
     simplicity an even distribution of the non-constant operand over a
     sub-range here, the same probability could be generated with differently
     wide sub-ranges - as long as the ratio of the part of the subrange that
     is before the threshold to the part that comes after the threshold stays
     the same.  Thus, we can't really tell anything here;
     assuming random distribution is at least simple.  */
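
/* For example, 'if (x < y)' has no direct SH compare insn; the code above
   reverses LT to GE and flips the branch polarity, yielding roughly

       cmp/ge  ry,rx   ! T = (rx >= ry), signed
       bf      .L      ! taken when T == 0, i.e. when x < y

   (Sketch with made-up register names.)  */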
bool
expand_cbranchdi4 (rtx *operands, enum rtx_code comparison)
{
  enum rtx_code msw_taken, msw_skip, lsw_taken;
  rtx_code_label *skip_label = NULL;
  rtx op1h, op1l, op2h, op2l;
  int num_branches;
  int prob, rev_prob;
  int msw_taken_prob = -1, msw_skip_prob = -1, lsw_taken_prob = -1;
  rtx scratch = operands[4];

  comparison = prepare_cbranch_operands (operands, DImode, comparison);
  op1h = gen_highpart_mode (SImode, DImode, operands[1]);
  op2h = gen_highpart_mode (SImode, DImode, operands[2]);
  op1l = gen_lowpart (SImode, operands[1]);
  op2l = gen_lowpart (SImode, operands[2]);
  msw_taken = msw_skip = lsw_taken = LAST_AND_UNUSED_RTX_CODE;
  prob = split_branch_probability;
  rev_prob = REG_BR_PROB_BASE - prob;
  switch (comparison)
    {
      /* ??? Should we use the cmpeqdi_t pattern for equality comparisons?
	 That costs 1 cycle more when the first branch can be predicted taken,
	 but saves us mispredicts because only one branch needs prediction.
	 It also enables generating the cmpeqdi_t-1 pattern.  */
    case EQ:
      if (TARGET_CMPEQDI_T)
	{
	  emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
	  emit_jump_insn (gen_branch_true (operands[3]));
	  return true;
	}
      msw_skip = NE;
      lsw_taken = EQ;
      if (prob >= 0)
	{
	  // If we had more precision, we'd use rev_prob - (rev_prob >> 32) .
	  msw_skip_prob = rev_prob;
	  if (REG_BR_PROB_BASE <= 65535)
	    lsw_taken_prob = prob ? REG_BR_PROB_BASE : 0;
	  else
	    lsw_taken_prob
	      = (prob
		 ? (REG_BR_PROB_BASE
		    - ((gcov_type) REG_BR_PROB_BASE * rev_prob
		       / ((gcov_type) prob << 32)))
		 : 0);
	}
      break;
    case NE:
      if (TARGET_CMPEQDI_T)
	{
	  emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
	  emit_jump_insn (gen_branch_false (operands[3]));
	  return true;
	}
      msw_taken = NE;
      msw_taken_prob = prob;
      lsw_taken = NE;
      lsw_taken_prob = 0;
      break;
    case GTU: case GT:
      msw_taken = comparison;
      if (CONST_INT_P (op2l) && INTVAL (op2l) == -1)
	break;
      if (comparison != GTU || op2h != CONST0_RTX (SImode))
	msw_skip = swap_condition (msw_taken);
      lsw_taken = GTU;
      break;
    case GEU: case GE:
      if (op2l == CONST0_RTX (SImode))
	msw_taken = comparison;
      else
	{
	  msw_taken = comparison == GE ? GT : GTU;
	  msw_skip = swap_condition (msw_taken);
	  lsw_taken = GEU;
	}
      break;
    case LTU: case LT:
      msw_taken = comparison;
      if (op2l == CONST0_RTX (SImode))
	break;
      msw_skip = swap_condition (msw_taken);
      lsw_taken = LTU;
      break;
    case LEU: case LE:
      if (CONST_INT_P (op2l) && INTVAL (op2l) == -1)
	msw_taken = comparison;
      else
	{
	  lsw_taken = LEU;
	  if (comparison == LE)
	    msw_taken = LT;
	  else if (op2h != CONST0_RTX (SImode))
	    msw_taken = LTU;
	  else
	    {
	      msw_skip = swap_condition (LTU);
	      break;
	    }
	  msw_skip = swap_condition (msw_taken);
	}
      break;
    default: return false;
    }
  num_branches = ((msw_taken != LAST_AND_UNUSED_RTX_CODE)
		  + (msw_skip != LAST_AND_UNUSED_RTX_CODE)
		  + (lsw_taken != LAST_AND_UNUSED_RTX_CODE));
  if (comparison != EQ && comparison != NE && num_branches > 1)
    {
      if (!CONSTANT_P (operands[2])
	  && prob >= (int) (REG_BR_PROB_BASE * 3 / 8U)
	  && prob <= (int) (REG_BR_PROB_BASE * 5 / 8U))
	{
	  msw_taken_prob = prob / 2U;
	  msw_skip_prob
	    = REG_BR_PROB_BASE * rev_prob / (REG_BR_PROB_BASE + rev_prob);
	  lsw_taken_prob = prob;
	}
      else
	{
	  msw_taken_prob = prob;
	  msw_skip_prob = REG_BR_PROB_BASE;
	  /* ??? If we have a constant op2h, should we use that when
	     calculating lsw_taken_prob?  */
	  lsw_taken_prob = prob;
	}
    }
  operands[1] = op1h;
  operands[2] = op2h;
  operands[4] = NULL_RTX;
  if (reload_completed
      && ! arith_reg_or_0_operand (op2h, SImode)
      && (true_regnum (op1h) || (comparison != EQ && comparison != NE))
      && (msw_taken != LAST_AND_UNUSED_RTX_CODE
	  || msw_skip != LAST_AND_UNUSED_RTX_CODE))
    {
      emit_move_insn (scratch, operands[2]);
      operands[2] = scratch;
    }
  if (msw_taken != LAST_AND_UNUSED_RTX_CODE)
    expand_cbranchsi4 (operands, msw_taken, msw_taken_prob);
  if (msw_skip != LAST_AND_UNUSED_RTX_CODE)
    {
      rtx taken_label = operands[3];

      /* Operands were possibly modified, but msw_skip doesn't expect this.
	 Always use the original ones.  */
      if (msw_taken != LAST_AND_UNUSED_RTX_CODE)
	{
	  operands[1] = op1h;
	  operands[2] = op2h;
	  if (reload_completed
	      && ! arith_reg_or_0_operand (op2h, SImode)
	      && (true_regnum (op1h) || (comparison != EQ && comparison != NE)))
	    {
	      emit_move_insn (scratch, operands[2]);
	      operands[2] = scratch;
	    }
	}

      operands[3] = skip_label = gen_label_rtx ();
      expand_cbranchsi4 (operands, msw_skip, msw_skip_prob);
      operands[3] = taken_label;
    }
  operands[1] = op1l;
  operands[2] = op2l;
  if (lsw_taken != LAST_AND_UNUSED_RTX_CODE)
    {
      if (reload_completed
	  && ! arith_reg_or_0_operand (op2l, SImode)
	  && (true_regnum (op1l) || (lsw_taken != EQ && lsw_taken != NE)))
	{
	  emit_move_insn (scratch, operands[2]);
	  operands[2] = scratch;
	}
      expand_cbranchsi4 (operands, lsw_taken, lsw_taken_prob);
    }
  if (msw_skip != LAST_AND_UNUSED_RTX_CODE)
    emit_label (skip_label);
  return true;
}
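
/* As an illustration, a signed DImode 'a > b' with non-trivial low parts
   is decomposed by the code above into three SImode branches:

       if (a.hi >  b.hi) goto taken;   -- msw_taken = GT
       if (a.hi <  b.hi) goto skip;    -- msw_skip  = swap_condition (GT)
       if (a.lo >u b.lo) goto taken;   -- lsw_taken = GTU
     skip:

   with the low-part comparison done unsigned.  */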
/* Given an operand, return 1 if the evaluated operand plugged into an
   if_then_else will result in a branch_true, 0 if branch_false, or
   -1 if neither nor applies.  The truth table goes like this:

       op   | cmpval |  code  | result
   ---------+--------+--------+--------------------
     T (0)  |   0    | EQ (1) | 0 = 0 ^ (0 == 1)
     T (0)  |   1    | EQ (1) | 1 = 0 ^ (1 == 1)
     T (0)  |   0    | NE (0) | 1 = 0 ^ (0 == 0)
     T (0)  |   1    | NE (0) | 0 = 0 ^ (1 == 0)
    !T (1)  |   0    | EQ (1) | 1 = 1 ^ (0 == 1)
    !T (1)  |   1    | EQ (1) | 0 = 1 ^ (1 == 1)
    !T (1)  |   0    | NE (0) | 0 = 1 ^ (0 == 0)
    !T (1)  |   1    | NE (0) | 1 = 1 ^ (1 == 0)  */
int
sh_eval_treg_value (rtx op)
{
  if (t_reg_operand (op, GET_MODE (op)))
    return 1;
  if (negt_reg_operand (op, GET_MODE (op)))
    return 0;

  rtx_code code = GET_CODE (op);
  if ((code != EQ && code != NE) || !CONST_INT_P (XEXP (op, 1)))
    return -1;

  int cmpop = code == EQ ? 1 : 0;
  int cmpval = INTVAL (XEXP (op, 1));
  if (cmpval != 0 && cmpval != 1)
    return -1;

  int t;
  if (t_reg_operand (XEXP (op, 0), GET_MODE (XEXP (op, 0))))
    t = 0;
  else if (negt_reg_operand (XEXP (op, 0), GET_MODE (XEXP (op, 0))))
    t = 1;
  else
    return -1;

  return t ^ (cmpval == cmpop);
}

/* Emit INSN, possibly in a PARALLEL with an USE/CLOBBER of FPSCR bits in case
   of floating-point comparisons.  */
static void
sh_emit_set_t_insn (rtx insn, machine_mode mode)
{
  if (TARGET_FPU_ANY && GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (insn) != PARALLEL)
    {
      insn = gen_rtx_PARALLEL (VOIDmode,
	  gen_rtvec (3, insn,
	      gen_rtx_CLOBBER (VOIDmode,
			       gen_rtx_REG (SImode, FPSCR_STAT_REG)),
	      gen_rtx_USE (VOIDmode,
			   gen_rtx_REG (SImode, FPSCR_MODES_REG))));
    }
  emit_insn (insn);
}
/* Prepare the operands for an scc instruction; make sure that the
   compare has been done and the result is in T_REG.  */
void
sh_emit_scc_to_t (enum rtx_code code, rtx op0, rtx op1)
{
  rtx t_reg = get_t_reg_rtx ();
  enum rtx_code oldcode = code;

  /* First need a compare insn.  */
  switch (code)
    {
    case NE:
      /* It isn't possible to handle this case.  */
      gcc_unreachable ();
    case LT:
      code = GT;
      break;
    case LE:
      code = GE;
      break;
    case LTU:
      code = GTU;
      break;
    case LEU:
      code = GEU;
      break;
    default:
      break;
    }
  if (code != oldcode)
    std::swap (op0, op1);

  machine_mode mode = GET_MODE (op0);
  if (mode == VOIDmode)
    mode = GET_MODE (op1);

  op0 = force_reg (mode, op0);
  if ((code != EQ && code != NE
       && (op1 != const0_rtx
	   || code == GTU || code == GEU || code == LTU || code == LEU))
      || (mode == DImode && op1 != const0_rtx)
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
    op1 = force_reg (mode, op1);

  sh_emit_set_t_insn (gen_rtx_SET (t_reg,
				   gen_rtx_fmt_ee (code, SImode, op0, op1)),
		      mode);
}

/* Called from the md file, set up the operands of a compare instruction.  */
void
sh_emit_compare_and_branch (rtx *operands, machine_mode mode)
{
  enum rtx_code code = GET_CODE (operands[0]);
  enum rtx_code branch_code;
  rtx op0 = operands[1];
  rtx op1 = operands[2];
  rtx insn;
  bool need_ccmpeq = false;

  if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      op0 = force_reg (mode, op0);
      op1 = force_reg (mode, op1);
    }
  else if (code != EQ || mode == DImode)
    {
      /* Force args into regs, since we can't use constants here.  */
      op0 = force_reg (mode, op0);
      if (op1 != const0_rtx || code == GTU || code == GEU)
	op1 = force_reg (mode, op1);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      if (code == LT
	  || (code == LE && TARGET_IEEE && TARGET_SH2E)
	  || (code == GE && !(TARGET_IEEE && TARGET_SH2E)))
	{
	  std::swap (op0, op1);
	  code = swap_condition (code);
	}

      /* GE becomes fcmp/gt+fcmp/eq, for SH2E and TARGET_IEEE only.  */
      if (code == GE)
	{
	  gcc_assert (TARGET_IEEE && TARGET_SH2E);
	  need_ccmpeq = true;
	  code = GT;
	}

      /* Now we can have EQ, NE, GT, LE.  NE and LE are then transformed
	 to EQ/GT respectively.  */
      gcc_assert (code == EQ || code == GT || code == NE || code == LE);
    }

  switch (code)
    {
    case EQ:
    case GT:
    case GE:
    case GTU:
    case GEU:
      branch_code = code;
      break;
    case NE:
    case LT:
    case LE:
    case LTU:
    case LEU:
      branch_code = reverse_condition (code);
      break;
    default:
      gcc_unreachable ();
    }

  insn = gen_rtx_SET (get_t_reg_rtx (),
		      gen_rtx_fmt_ee (branch_code, SImode, op0, op1));

  sh_emit_set_t_insn (insn, mode);
  if (need_ccmpeq)
    sh_emit_set_t_insn (gen_ieee_ccmpeqsf_t (op0, op1), mode);

  if (branch_code == code)
    emit_jump_insn (gen_branch_true (operands[3]));
  else
    emit_jump_insn (gen_branch_false (operands[3]));
}

void
sh_emit_compare_and_set (rtx *operands, machine_mode mode)
{
  enum rtx_code code = GET_CODE (operands[1]);
  rtx op0 = operands[2];
  rtx op1 = operands[3];
  rtx_code_label *lab = NULL;
  bool invert = false;

  op0 = force_reg (mode, op0);
  if ((code != EQ && code != NE
       && (op1 != const0_rtx
	   || code == GTU || code == GEU || code == LTU || code == LEU))
      || (mode == DImode && op1 != const0_rtx)
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
    op1 = force_reg (mode, op1);

  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      if (code == LT || code == LE)
	{
	  std::swap (op0, op1);
	  code = swap_condition (code);
	}
      if (code == GE)
	{
	  if (TARGET_IEEE)
	    {
	      lab = gen_label_rtx ();
	      sh_emit_scc_to_t (EQ, op0, op1);
	      emit_jump_insn (gen_branch_true (lab));
	      code = GT;
	    }
	  else
	    {
	      code = LT;
	      invert = true;
	    }
	}
    }

  if (code == NE)
    {
      code = EQ;
      invert = true;
    }

  sh_emit_scc_to_t (code, op0, op1);
  if (lab)
    emit_label (lab);
  if (invert)
    emit_insn (gen_movnegt (operands[0], get_t_reg_rtx ()));
  else
    emit_move_insn (operands[0], get_t_reg_rtx ());
}
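
/* Note for the invert case above: SH cannot store '!T' directly for scc,
   so e.g. 'r0 = (x != y)' is computed as EQ into T and then inverted via
   the movnegt pattern; a plausible expansion is

       cmp/eq  ry,rx
       mov     #-1,r1
       negc    r1,r0    ! r0 = 0 - (-1) - T = 1 - T

   The exact insns are determined by the movnegt pattern in sh.md; this is
   only a sketch.  */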
/* Functions to output assembly code.  */

/* Return a sequence of instructions to perform DI or DF move.

   Since the SH cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */
const char *
output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		   machine_mode mode)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (MEM_P (dst)
      && GET_CODE (XEXP (dst, 0)) == PRE_DEC)
    return     "mov.l	%T1,%0"	"\n"
	   "	mov.l	%1,%0";

  if (register_operand (dst, mode)
      && register_operand (src, mode))
    {
      if (REGNO (src) == MACH_REG)
	return         "sts	mach,%S0" "\n"
	       "	sts	macl,%R0";

      /* When mov.d r1,r2 do r2->r3 then r1->r2;
	 when mov.d r1,r0 do r1->r0 then r2->r1.  */
      if (REGNO (src) + 1 == REGNO (dst))
	return         "mov	%T1,%T0" "\n"
	       "	mov	%1,%0";
      else
	return         "mov	%1,%0" "\n"
	       "	mov	%T1,%T0";
    }
  else if (CONST_INT_P (src))
    {
      if (INTVAL (src) < 0)
	output_asm_insn ("mov	#-1,%S0", operands);
      else
	output_asm_insn ("mov	#0,%S0", operands);

      return "mov	%1,%R0";
    }
  else if (MEM_P (src))
    {
      int ptrreg = -1;
      int dreg = REGNO (dst);
      rtx inside = XEXP (src, 0);

      switch (GET_CODE (inside))
	{
	case REG:
	  ptrreg = REGNO (inside);
	  break;

	case SUBREG:
	  ptrreg = subreg_regno (inside);
	  break;

	case PLUS:
	  ptrreg = REGNO (XEXP (inside, 0));
	  /* ??? A r0+REG address shouldn't be possible here, because it isn't
	     an offsettable address.  Unfortunately, offsettable addresses use
	     QImode to check the offset, and a QImode offsettable address
	     requires r0 for the other operand, which is not currently
	     supported, so we can't use the 'o' constraint.
	     Thus we must check for and handle r0+REG addresses here.
	     We punt for now, since this is likely very rare.  */
	  gcc_assert (!REG_P (XEXP (inside, 1)));
	  break;

	case LABEL_REF:
	  return       "mov.l	%1,%0" "\n"
		 "	mov.l	%1+4,%T0";

	case POST_INC:
	  return       "mov.l	%1,%0" "\n"
		 "	mov.l	%1,%T0";

	default:
	  gcc_unreachable ();
	}

      /* Work out the safe way to copy.  Copy into the second half first.  */
      if (dreg == ptrreg)
	return         "mov.l	%T1,%T0" "\n"
	       "	mov.l	%1,%0";
    }

  return       "mov.l	%1,%0" "\n"
	 "	mov.l	%T1,%T0";
}

/* Print an instruction which would have gone into a delay slot after
   another instruction, but couldn't because the other instruction expanded
   into a sequence where putting the slot insn at the end wouldn't work.  */
static void
print_slot (rtx_sequence *seq)
{
  final_scan_insn (seq->insn (1), asm_out_file, optimize, 1, NULL);

  seq->insn (1)->set_deleted ();
}
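
/* Example of the overlap rule in output_movedouble: moving the pair
   {r1,r2} into {r2,r3} (REGNO (src) + 1 == REGNO (dst)) emits

       mov     r2,r3    ! copy the overlapping half first
       mov     r1,r2

   whereas the other order would clobber r2 before it is read.  */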
const char *
output_far_jump (rtx_insn *insn, rtx op)
{
  struct { rtx lab, reg, op; } this_jmp;
  rtx_code_label *braf_base_lab = NULL;
  const char *jump;
  int far;
  int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
  rtx_insn *prev;

  this_jmp.lab = gen_label_rtx ();

  if (TARGET_SH2
      && offset >= -32764
      && offset - get_attr_length (insn) <= 32766
      && ! CROSSING_JUMP_P (insn))
    {
      far = 0;
      jump =   "mov.w	%O0,%1" "\n"
	     "	braf	%1";
    }
  else
    {
      far = 1;
      if (flag_pic)
	{
	  if (TARGET_SH2)
	    jump =   "mov.l	%O0,%1" "\n"
		   "	braf	%1";
	  else
	    jump =   "mov.l	r0,@-r15" "\n"
		   "	mova	%O0,r0" "\n"
		   "	mov.l	@r0,%1" "\n"
		   "	add	r0,%1" "\n"
		   "	mov.l	@r15+,r0" "\n"
		   "	jmp	@%1";
	}
      else
	jump =   "mov.l	%O0,%1" "\n"
	       "	jmp	@%1";
    }
  /* If we have a scratch register available, use it.  */
  if (NONJUMP_INSN_P ((prev = prev_nonnote_insn (insn)))
      && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
    {
      this_jmp.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
      if (REGNO (this_jmp.reg) == R0_REG && flag_pic && ! TARGET_SH2)
	jump =   "mov.l	r1,@-r15" "\n"
	       "	mova	%O0,r0" "\n"
	       "	mov.l	@r0,r1" "\n"
	       "	add	r1,r0" "\n"
	       "	mov.l	@r15+,r1" "\n"
	       "	jmp	@r0";
      output_asm_insn (jump, &this_jmp.lab);
      if (dbr_sequence_length ())
	print_slot (final_sequence);
      else
	output_asm_insn ("nop", 0);
    }
  else
    {
      /* Output the delay slot insn first if any.  */
      if (dbr_sequence_length ())
	print_slot (final_sequence);

      this_jmp.reg = gen_rtx_REG (SImode, 13);
      output_asm_insn ("mov.l	r13,@-r15", 0);
      output_asm_insn (jump, &this_jmp.lab);
      output_asm_insn ("mov.l	@r15+,r13", 0);
    }
  if (far && flag_pic && TARGET_SH2)
    {
      braf_base_lab = gen_label_rtx ();
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (braf_base_lab));
    }
  if (far)
    output_asm_insn (".align	2", 0);
  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this_jmp.lab));
  this_jmp.op = op;
  if (far && flag_pic)
    {
      if (TARGET_SH2)
	this_jmp.lab = braf_base_lab;
      output_asm_insn (".long	%O2-%O0", &this_jmp.lab);
    }
  else
    output_asm_insn (far ? ".long	%O2" : ".word %O2-%O0", &this_jmp.lab);
  return "";
}
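
/* For the common non-PIC far jump the output above amounts to something
   like (label name made up; r13 is the fallback when no scratch reg was
   allocated):

       mov.l   r13,@-r15    ! save r13
       mov.l   .L1,r13      ! load the target address from the pool
       jmp     @r13
       mov.l   @r15+,r13    ! restore; sits in the jmp delay slot
       .align  2
   .L1:
       .long   <target>

   where the trailing '.long' is the '%O2' constant emitted at the end of
   this function.  */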
/* Local label counter, used for constants in the pool and inside
   pattern branches.  */
static int lf = 100;

/* Output code for ordinary branches.  */
const char *
output_branch (int logic, rtx_insn *insn, rtx *operands)
{
  switch (get_attr_length (insn))
    {
    case 6:
      /* This can happen if filling the delay slot has caused a forward
	 branch to exceed its range (we could reverse it, but only
	 when we know we won't overextend other branches; this should
	 best be handled by relaxation).
	 It can also happen when other condbranches hoist delay slot insn
	 from their destination, thus leading to code size increase.
	 But the branch will still be in the range -4092..+4098 bytes.  */
      if (! TARGET_RELAX)
	{
	  int label = lf++;
	  /* The call to print_slot will clobber the operands.  */
	  rtx op0 = operands[0];

	  /* If the instruction in the delay slot is annulled (true), then
	     there is no delay slot where we can put it now.  The only safe
	     place for it is after the label.  final will do that by default.  */
	  if (final_sequence
	      && ! INSN_ANNULLED_BRANCH_P (final_sequence->insn (0))
	      && get_attr_length (final_sequence->insn (1)))
	    {
	      asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t",
			   ASSEMBLER_DIALECT ? "/" : ".", label);
	      print_slot (final_sequence);
	    }
	  else
	    asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);

	  output_asm_insn ("bra\t%l0", &op0);
	  fprintf (asm_out_file, "\tnop\n");
	  (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);

	  return "";
	}
      /* FALLTHRU */
      /* When relaxing, handle this like a short branch.  The linker
	 will fix it up if it still doesn't fit after relaxation.  */
    case 2:
      return logic ? "bt%.\t%l0" : "bf%.\t%l0";

      /* These are for SH2e, in which we have to account for the
	 extra nop because of the hardware bug in annulled branches.  */
    case 8:
      if (! TARGET_RELAX)
	{
	  int label = lf++;

	  gcc_assert (!final_sequence
		      || !(INSN_ANNULLED_BRANCH_P
			   (XVECEXP (final_sequence, 0, 0))));
	  asm_fprintf (asm_out_file, "b%s%ss\t%LLF%d\n",
		       logic ? "f" : "t",
		       ASSEMBLER_DIALECT ? "/" : ".", label);
	  fprintf (asm_out_file, "\tnop\n");
	  output_asm_insn ("bra\t%l0", operands);
	  fprintf (asm_out_file, "\tnop\n");
	  (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);

	  return "";
	}
      /* When relaxing, fall through.  */
    case 4:
      {
	char buffer[10];

	sprintf (buffer, "b%s%ss\t%%l0",
		 logic ? "t" : "f",
		 ASSEMBLER_DIALECT ? "/" : ".");
	output_asm_insn (buffer, &operands[0]);
	return "nop";
      }

    default:
      /* There should be no longer branches now - that would
	 indicate that something has destroyed the branches set
	 up in machine_dependent_reorg.  */
      gcc_unreachable ();
    }
}
/* Output a code sequence for INSN using TEMPL with OPERANDS; but before,
   fill in operands 9 as a label to the successor insn.
   We try to use jump threading where possible.
   IF CODE matches the comparison in the IF_THEN_ELSE of a following jump,
   we assume the jump is taken.  I.e. EQ means follow jmp and bf, NE means
   follow jmp and bt, if the address is in range.  */
const char *
output_branchy_insn (enum rtx_code code, const char *templ,
		     rtx_insn *insn, rtx *operands)
{
  rtx_insn *next_insn = NEXT_INSN (insn);

  if (next_insn && JUMP_P (next_insn) && condjump_p (next_insn))
    {
      rtx src = SET_SRC (PATTERN (next_insn));
      if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
	{
	  /* Following branch not taken */
	  rtx_code_label *lab = gen_label_rtx ();
	  emit_label_after (lab, next_insn);
	  INSN_ADDRESSES_NEW (lab,
			      INSN_ADDRESSES (INSN_UID (next_insn))
			      + get_attr_length (next_insn));
	  operands[9] = lab;
	  return templ;
	}
      else
	{
	  int offset = (branch_dest (next_insn)
			- INSN_ADDRESSES (INSN_UID (next_insn)) + 4);
	  if (offset >= -252 && offset <= 258)
	    {
	      if (GET_CODE (src) == IF_THEN_ELSE)
		/* branch_true */
		src = XEXP (src, 1);
	      operands[9] = src;
	      return templ;
	    }
	}
    }
  rtx_code_label *lab = gen_label_rtx ();
  emit_label_after (lab, insn);
  INSN_ADDRESSES_NEW (lab,
		      INSN_ADDRESSES (INSN_UID (insn))
		      + get_attr_length (insn));
  operands[9] = lab;
  return templ;
}

const char *
output_ieee_ccmpeq (rtx_insn *insn, rtx *operands)
{
  return output_branchy_insn (NE,      "bt	%l9" "\n"
				  "	fcmp/eq	%1,%0",
			      insn, operands);
}
/* Output the start of the assembler file.  */
static void
sh_file_start (void)
{
  default_file_start ();

  if (TARGET_ELF)
    /* We need to show the text section with the proper
       attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
       emits it without attributes in TEXT_SECTION_ASM_OP, else GAS
       will complain.  We can teach GAS specifically about the
       default attributes for our choice of text section, but
       then we would have to change GAS again if/when we change
       the text section name.  */
    fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);

  /* Switch to the data section so that the coffsem symbol
     isn't in the text section.  */
  switch_to_section (data_section);

  if (TARGET_LITTLE_ENDIAN)
    fputs ("\t.little\n", asm_out_file);
}

/* Implementation of TARGET_ASM_INTEGER for SH.  Pointers to functions
   need to be output as pointers to function descriptors for
   FDPIC.  */
static bool
sh_assemble_integer (rtx value, unsigned int size, int aligned_p)
{
  if (TARGET_FDPIC && size == UNITS_PER_WORD
      && GET_CODE (value) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (value))
    {
      fputs ("\t.long\t", asm_out_file);
      output_addr_const (asm_out_file, value);
      fputs ("@FUNCDESC\n", asm_out_file);
      return true;
    }
  return default_assemble_integer (value, size, aligned_p);
}

/* Check if PAT includes UNSPEC_CALLER unspec pattern.  */
static bool
unspec_caller_rtx_p (rtx pat)
{
  rtx base, offset;
  split_const (pat, &base, &offset);

  if (GET_CODE (base) == UNSPEC)
    {
      if (XINT (base, 1) == UNSPEC_CALLER)
	return true;
      for (int i = 0; i < XVECLEN (base, 0); i++)
	if (unspec_caller_rtx_p (XVECEXP (base, 0, i)))
	  return true;
    }
  return false;
}
/* Indicate that INSN cannot be duplicated.  This is true for insn
   that generates a unique label.  */
static bool
sh_cannot_copy_insn_p (rtx_insn *insn)
{
  if (!reload_completed || !flag_pic)
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;
  if (asm_noperands (insn) >= 0)
    return false;

  rtx pat = PATTERN (insn);

  if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == USE)
    return false;

  if (TARGET_FDPIC && GET_CODE (pat) == PARALLEL)
    {
      rtx t = XVECEXP (pat, 0, XVECLEN (pat, 0) - 1);
      if (GET_CODE (t) == USE && unspec_caller_rtx_p (XEXP (t, 0)))
	return true;
    }

  if (GET_CODE (pat) != SET)
    return false;
  pat = SET_SRC (pat);

  if (unspec_caller_rtx_p (pat))
    return true;

  return false;
}
/* Number of instructions used to make an arithmetic right shift by N.  */
static const char ashiftrt_insns[] =
  { 0,1,2,3,4,5,8,8,8,8,8,8,8,8,8,8,2,3,4,5,8,8,8,8,8,8,8,8,8,8,8,2};

/* Description of a logical left or right shift, when expanded to a sequence
   of 1/2/8/16 shifts.
   Notice that one bit right shifts clobber the T bit.  One bit left shifts
   are done with an 'add Rn,Rm' insn and thus do not clobber the T bit.  */
enum
{
  ASHL_CLOBBERS_T = 1 << 0,
  LSHR_CLOBBERS_T = 1 << 1
};

struct ashl_lshr_sequence
{
  char insn_count;
  signed char amount[6];
  char clobbers_t;
};

static const struct ashl_lshr_sequence ashl_lshr_seq[32] =
{
  { 0, { 0 },		    0 },		// 0
  { 1, { 1 },		    LSHR_CLOBBERS_T },
  { 1, { 2 },		    0 },
  { 2, { 2, 1 },	    LSHR_CLOBBERS_T },
  { 2, { 2, 2 },	    0 },		// 4
  { 3, { 2, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 2, 2, 2 },	    0 },
  { 4, { 2, 2, 1, 2 },	    LSHR_CLOBBERS_T },
  { 1, { 8 },		    0 },		// 8
  { 2, { 8, 1 },	    LSHR_CLOBBERS_T },
  { 2, { 8, 2 },	    0 },
  { 3, { 8, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 8, 2, 2 },	    0 },		// 12
  { 4, { 8, 2, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 8, -2, 8 },	    0 },
  { 3, { 8, -1, 8 },	    ASHL_CLOBBERS_T },
  { 1, { 16 },		    0 },		// 16
  { 2, { 16, 1 },	    LSHR_CLOBBERS_T },
  { 2, { 16, 2 },	    0 },
  { 3, { 16, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 16, 2, 2 },	    0 },		// 20
  { 4, { 16, 2, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 16, -2, 8 },	    0 },
  { 3, { 16, -1, 8 },	    ASHL_CLOBBERS_T },
  { 2, { 16, 8 },	    0 },		// 24
  { 3, { 16, 1, 8 },	    LSHR_CLOBBERS_T },
  { 3, { 16, 8, 2 },	    0 },
  { 4, { 16, 8, 1, 2 },	    LSHR_CLOBBERS_T },
  { 4, { 16, 8, 2, 2 },	    0 },		// 28
  { 4, { 16, -1, -2, 16 },  ASHL_CLOBBERS_T },
  { 3, { 16, -2, 16 },	    0 },

  /* For a right shift by 31 a 2 insn shll-movt sequence can be used.
     For a left shift by 31 a 2 insn and-rotl sequences can be used.
     However, the shift-and combiner code needs this entry here to be in
     terms of real shift insns.  */
  { 3, { 16, -1, 16 },	    ASHL_CLOBBERS_T }
};

/* Individual shift amounts for shift amounts < 16, up to three highmost
   bits might be clobbered.  This is typically used when combined with some
   kind of sign or zero extension.  */
static const struct ashl_lshr_sequence ext_ashl_lshr_seq[32] =
{
  { 0, { 0 },		    0 },		// 0
  { 1, { 1 },		    LSHR_CLOBBERS_T },
  { 1, { 2 },		    0 },
  { 2, { 2, 1 },	    LSHR_CLOBBERS_T },
  { 2, { 2, 2 },	    0 },		// 4
  { 3, { 2, 1, 2 },	    LSHR_CLOBBERS_T },
  { 2, { 8, -2 },	    0 },
  { 2, { 8, -1 },	    ASHL_CLOBBERS_T },
  { 1, { 8 },		    0 },		// 8
  { 2, { 8, 1 },	    LSHR_CLOBBERS_T },
  { 2, { 8, 2 },	    0 },
  { 3, { 8, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 8, 2, 2 },	    0 },		// 12
  { 3, { 16, -2, -1 },	    ASHL_CLOBBERS_T },
  { 2, { 16, -2 },	    0 },
  { 2, { 16, -1 },	    ASHL_CLOBBERS_T },
  { 1, { 16 },		    0 },		// 16
  { 2, { 16, 1 },	    LSHR_CLOBBERS_T },
  { 2, { 16, 2 },	    0 },
  { 3, { 16, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 16, 2, 2 },	    0 },		// 20
  { 4, { 16, 2, 1, 2 },	    LSHR_CLOBBERS_T },
  { 3, { 16, -2, 8 },	    0 },
  { 3, { 16, -1, 8 },	    ASHL_CLOBBERS_T },
  { 2, { 16, 8 },	    0 },		// 24
  { 3, { 16, 1, 8 },	    LSHR_CLOBBERS_T },
  { 3, { 16, 8, 2 },	    0 },
  { 4, { 16, 8, 1, 2 },	    LSHR_CLOBBERS_T },
  { 4, { 16, 8, 2, 2 },	    0 },		// 28
  { 4, { 16, -1, -2, 16 },  ASHL_CLOBBERS_T },
  { 3, { 16, -2, 16 },	    0 },
  { 3, { 16, -1, 16 },	    ASHL_CLOBBERS_T }
};
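
/* Reading these tables: ashl_lshr_seq[6] = { 3, { 2, 2, 2 }, 0 } says a
   shift by 6 is three 2-bit shifts (shll2 x 3 left, or shlr2 x 3 right),
   costing 3 insns and leaving T alone.  Negative amounts, as in
   { 3, { 8, -2, 8 }, 0 } for a shift by 14, mean a correcting shift in
   the opposite direction: 8 one way, 2 back, 8 again.  */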
/* Return true if a shift left consisting of 1/2/8/16 shift instructions
   will clobber the T bit.  */
bool
sh_ashlsi_clobbers_t_reg_p (rtx shift_amount)
{
  gcc_assert (CONST_INT_P (shift_amount));

  const int shift_amount_i = INTVAL (shift_amount) & 31;

  /* Special case for shift count of 31: use and-rotl sequence.  */
  if (shift_amount_i == 31)
    return true;

  return (ashl_lshr_seq[shift_amount_i].clobbers_t
	  & ASHL_CLOBBERS_T) != 0;
}

/* Return true if a logical right shift consisting of 1/2/8/16 shift
   instructions will clobber the T bit.  */
bool
sh_lshrsi_clobbers_t_reg_p (rtx shift_amount)
{
  gcc_assert (CONST_INT_P (shift_amount));

  /* For right shifts the constant might be negative.  */
  const int shift_amount_i = std::abs (INTVAL (shift_amount)) & 31;

  /* Special case for shift count of 31: use shll-movt sequence.  */
  if (shift_amount_i == 31)
    return true;

  return (ashl_lshr_seq[shift_amount_i].clobbers_t
	  & LSHR_CLOBBERS_T) != 0;
}

/* Return true if it is potentially beneficial to use a dynamic shift
   instruction (shad / shar) instead of a combination of 1/2/8/16
   shift instructions for the specified shift count.
   If dynamic shifts are not available, always return false.  */
bool
sh_dynamicalize_shift_p (rtx count)
{
  gcc_assert (CONST_INT_P (count));

  /* For right shifts the constant might be negative.  */
  const int shift_amount_i = std::abs (INTVAL (count)) & 31;
  int insn_count;

  /* For left and right shifts, there are shorter 2 insn sequences for
     shift amounts of 31.  */
  if (shift_amount_i == 31)
    insn_count = 2;
  else
    insn_count = ashl_lshr_seq[shift_amount_i].insn_count;

  return TARGET_DYNSHIFT && (insn_count > 1 + SH_DYNAMIC_SHIFT_COST);
}
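
/* Example: a constant shift by 20 needs ashl_lshr_seq[20].insn_count == 3
   insns (16 + 2 + 2).  Assuming SH_DYNAMIC_SHIFT_COST is 1 on a core with
   shad/shar, 3 > 1 + 1, so the dynamic form

       mov     #20,rM
       shad    rM,rN

   is preferred; a shift by 16 (a single insn) never is.  */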
/* Assuming we have a value that has been sign-extended by at least one bit,
   can we use the ext_shift_amounts with the last shift turned to an
   arithmetic shift to shift it by N without data loss, and quicker than by
   other means?  */
#define EXT_SHIFT_SIGNED(n) (((n) | 8) == 15)

/* Return the cost of a shift.  */
static inline int
shiftcosts (rtx x)
{
  if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
    {
      if (GET_MODE (x) == DImode
	  && CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) == 1)
	return 2;

      /* Everything else is invalid, because there is no pattern for it.  */
      return -1;
    }
  /* If shift by a non constant, then this will be expensive.  */
  if (!CONST_INT_P (XEXP (x, 1)))
    return SH_DYNAMIC_SHIFT_COST;

  /* Otherwise, return the true cost in instructions.  Cope with out of range
     shift counts more or less arbitrarily.  */
  int value = INTVAL (XEXP (x, 1)) & 31;

  if (GET_CODE (x) == ASHIFTRT)
    {
      int cost = ashiftrt_insns[value];
      /* If dynamic shifts are available and profitable in this case, then we
	 put the constant in a reg and use shad.  */
      if (cost > 1 + SH_DYNAMIC_SHIFT_COST)
	cost = 1 + SH_DYNAMIC_SHIFT_COST;
      return cost;
    }
  else
    return ashl_lshr_seq[value].insn_count;
}

/* Return the cost of an AND/XOR/IOR operation.  */
static inline int
and_xor_ior_costs (rtx x, int code)
{
  /* On SH1-4 we have only max. SImode operations.
     Double the cost for modes > SImode.  */
  const int cost_scale = GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD
			 ? 2 : 1;

  /* A logical operation with two registers is a single cycle
     insn.  */
  if (!CONST_INT_P (XEXP (x, 1)))
    return 1 * cost_scale;

  int i = INTVAL (XEXP (x, 1));

  /* These constants are single cycle extu.[bw] instructions.  */
  if ((i == 0xff || i == 0xffff) && code == AND)
    return 1 * cost_scale;

  /* Constants that can be used in an instruction as an immediate are
     a single cycle, but this requires r0, so make it a little more
     expensive.  */
  if (CONST_OK_FOR_K08 (i))
    return 2 * cost_scale;

  /* Constants that can be loaded with a mov immediate need one more cycle.
     This case is probably unnecessary.  */
  if (CONST_OK_FOR_I08 (i))
    return 2 * cost_scale;

  /* Any other constant requires an additional 2 cycle pc-relative load.
     This case is probably unnecessary.  */
  return 3 * cost_scale;
}
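
/* Illustrative SImode costs from the function above (cost_scale == 1):

     and #0xff,...    -> extu.b Rm,Rn                      cost 1
     and #0x3f,...    -> and #imm,r0 (K08, ties up r0)     cost 2
     and #0x1234,...  -> mov.l @(disp,pc),Rm; and Rm,Rn    cost 3

   The constants here are arbitrary representatives of the ranges tested.  */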
/* Return the cost of an addition or a subtraction.  */
static inline int
addsubcosts (rtx x)
{
  if (GET_MODE (x) == SImode)
    {
      /* The addc or subc patterns will eventually become one or two
	 instructions.  Below are some costs for some of the patterns
	 which combine would reject because the costs of the individual
	 insns in the patterns are lower.

	 FIXME: It would be much easier if we had something like insn cost
	 attributes and the cost calculation machinery used those attributes
	 in the first place.  This would eliminate redundant recog-like C
	 code to calculate costs of complex patterns.  */
      rtx op0 = XEXP (x, 0);
      rtx op1 = XEXP (x, 1);

      if (GET_CODE (x) == PLUS)
	{
	  if (GET_CODE (op0) == AND
	      && XEXP (op0, 1) == const1_rtx
	      && (GET_CODE (op1) == PLUS
		  || (GET_CODE (op1) == MULT && XEXP (op1, 1) == const2_rtx)))
	    return 1;

	  if (GET_CODE (op0) == MULT && XEXP (op0, 1) == const2_rtx
	      && GET_CODE (op1) == LSHIFTRT
	      && CONST_INT_P (XEXP (op1, 1)) && INTVAL (XEXP (op1, 1)) == 31)
	    return 1;
	}
      /* Let's assume that adding the result of an insns that stores into
	 the T bit is cheap.  */
      if (treg_set_expr (op1, SImode))
	return 1;
      if (treg_set_expr (op0, SImode))
	return 1;
    }

  /* On SH1-4 we have only max. SImode operations.
     Double the cost for modes > SImode.  */
  const int cost_scale = GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD
			 ? 2 : 1;

  /* Adding a register is a single cycle insn.  */
  if (REG_P (XEXP (x, 1))
      || GET_CODE (XEXP (x, 1)) == SUBREG)
    return 1 * cost_scale;

  /* Likewise for small constants.  */
  if (CONST_INT_P (XEXP (x, 1))
      && CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
    return 1 * cost_scale;

  /* Any other constant requires a 2 cycle pc-relative load plus an
     addition.  */
  return 3 * cost_scale;
}

/* Return the cost of a multiply.  */
static inline int
multcosts (rtx x ATTRIBUTE_UNUSED)
{
  if (sh_multcost >= 0)
    return sh_multcost;

  if (TARGET_SH2)
    {
      /* We have a mul insn, so we can never take more than the mul and the
	 read of the mac reg, but count more because of the latency and extra
	 reg usage.  */
      if (optimize_size)
	return 2;
      return 3;
    }

  /* If we're aiming at small code, then just count the number of
     insns in a multiply call sequence.  */
  if (optimize_size)
    return 5;

  /* Otherwise count all the insns in the routine we'd be calling too.  */
  return 20;
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */
static bool
sh_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
	      int opno ATTRIBUTE_UNUSED,
	      int *total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
      /* The lower-subreg pass decides whether to split multi-word regs
	 into individual regs by looking at the cost for a SET of certain
	 modes with the following patterns:
	   (set (reg) (reg))
	   (set (reg) (const_int 0))
	 On machines that support vector-move operations a multi-word move
	 is the same cost as individual reg move.  On SH there is no
	 vector-move, so we have to provide the correct cost in the number
	 of move insns to load/store the reg of the mode in question.  */
    case SET:
      if (register_operand (SET_DEST (x), VOIDmode)
	  && (register_operand (SET_SRC (x), VOIDmode)
	      || satisfies_constraint_Z (SET_SRC (x))))
	{
	  const machine_mode mode = GET_MODE (SET_DEST (x));
	  *total = COSTS_N_INSNS (GET_MODE_SIZE (mode)
				  / mov_insn_size (mode, TARGET_SH2A));
	  return true;
	}
      return false;

    /* The cost of a mem access is mainly the cost of the address mode.  */
    case MEM:
      *total = sh_address_cost (XEXP (x, 0), GET_MODE (x), MEM_ADDR_SPACE (x),
				true);
      return true;

    case IF_THEN_ELSE:
      /* This case is required for the if_then_else negc pattern.  */
      if (treg_set_expr (XEXP (x, 0), SImode))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      return false;

    /* Zero extracts of single bits are usually combine patterns for the
       tst insns.  */
    case ZERO_EXTRACT:
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && arith_reg_operand (XEXP (XEXP (x, 0), 0), VOIDmode)
	  && XEXP (x, 1) == const1_rtx
	  && CONST_INT_P (XEXP (x, 2))
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  /* Check that the xor constaint overlaps with the extracted bit.  */
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) & (1LL << INTVAL (XEXP (x, 2)))))
	{
	  *total = 1; //COSTS_N_INSNS (1);
	  return true;
	}
      return false;

    /* The cost of a sign or zero extend depends on whether the source is a
       reg or a mem.  In case of a mem take the address into acount.  */
    case SIGN_EXTEND:
      if (arith_reg_operand (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      if (MEM_P (XEXP (x, 0)))
	{
	  *total = sh_address_cost (XEXP (XEXP (x, 0), 0),
				    GET_MODE (XEXP (x, 0)),
				    MEM_ADDR_SPACE (XEXP (x, 0)), true);
	  return true;
	}
      return false;

    case ZERO_EXTEND:
      if (arith_reg_operand (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      else if (TARGET_SH2A && MEM_P (XEXP (x, 0))
	       && (GET_MODE (XEXP (x, 0)) == QImode
		   || GET_MODE (XEXP (x, 0)) == HImode))
	{
	  /* Handle SH2A's movu.b and movu.w insn.  */
	  *total = sh_address_cost (XEXP (XEXP (x, 0), 0),
				    GET_MODE (XEXP (x, 0)),
				    MEM_ADDR_SPACE (XEXP (x, 0)), true);
	  return true;
	}
      return false;

    /* mems for SFmode and DFmode can be inside a parallel due to
       the way the fpscr is handled.  */
    case PARALLEL:
      for (int i = 0; i < XVECLEN (x, 0); i++)
	{
	  rtx xx = XVECEXP (x, 0, i);
	  if (GET_CODE (xx) == SET && MEM_P (XEXP (xx, 0)))
	    {
	      *total = sh_address_cost (XEXP (XEXP (xx, 0), 0),
					GET_MODE (XEXP (xx, 0)),
					MEM_ADDR_SPACE (XEXP (xx, 0)), true);
	      return true;
	    }
	  if (GET_CODE (xx) == SET && MEM_P (XEXP (xx, 1)))
	    {
	      *total = sh_address_cost (XEXP (XEXP (xx, 1), 0),
					GET_MODE (XEXP (xx, 1)),
					MEM_ADDR_SPACE (XEXP (xx, 1)), true);
	      return true;
	    }
	}

      if (sh_1el_vec (x, VOIDmode))
	*total = outer_code != SET;
      else if (sh_rep_vec (x, VOIDmode))
	*total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
		  + (outer_code != SET));
      else
	*total = COSTS_N_INSNS (3) + (outer_code != SET);
      return true;

    case CONST_INT:
      if (CONST_OK_FOR_I08 (INTVAL (x)))
	*total = 0;
      else if ((outer_code == AND || outer_code == IOR || outer_code == XOR)
	       && CONST_OK_FOR_K08 (INTVAL (x)))
	*total = 1;
      /* prepare_cmp_insn will force costly constants int registers before
	 the cbranch[sd]i4 patterns can see them, so preserve potentially
	 interesting ones not covered by I08 above.  */
      else if (outer_code == COMPARE
	       && ((unsigned HOST_WIDE_INT) INTVAL (x)
		    == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
		   || INTVAL (x) == 0x7fffffff
		   || INTVAL (x) == 0x80 || INTVAL (x) == -0x81))
	*total = 1;
      else
	*total = 8;
      return true;

    case EQ:
      /* An and with a constant compared against zero is
	 most likely going to be a TST #imm, R0 instruction.  */
      if (XEXP (x, 1) == const0_rtx
	  && ((GET_CODE (XEXP (x, 0)) == AND
	       || (SUBREG_P (XEXP (x, 0))
		   && GET_CODE (SUBREG_REG (XEXP (x, 0))) == AND))
	      || GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT))
	{
	  *total = 1;
	  return true;
	}

      else if (XEXP (x, 1) == const0_rtx
	       && GET_CODE (XEXP (x, 0)) == AND
	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == ASHIFT
	       && arith_reg_operand (XEXP (XEXP (XEXP (x, 0), 0), 0), SImode)
	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1)))
	{
	  *total = 1;
	  return true;
	}
      return false;

    case SMIN:
    case SMAX:
      /* This is most likely a clips.b or clips.w insn that is being made up
	 by combine.  */
      if (TARGET_SH2A
	  && (GET_CODE (XEXP (x, 0)) == SMAX || GET_CODE (XEXP (x, 0)) == SMIN)
	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
	  && REG_P (XEXP (XEXP (x, 0), 0))
	  && CONST_INT_P (XEXP (x, 1)))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      return false;

    case CONST_DOUBLE:
      /* prepare_cmp_insn will force costly constants int registers before
	 the cbranchdi4 pattern can see them, so preserve potentially
	 interesting ones.  */
      if (outer_code == COMPARE && GET_MODE (x) == DImode)
	*total = 1;
      else
	*total = 10;
      return true;

    case CONST_VECTOR:
      /* FIXME: This looks broken.  Only the last statement has any effect.
	 Probably this could be folded with the PARALLEL case?  */
      if (x == CONST0_RTX (GET_MODE (x)))
	*total = 0;
      else if (sh_1el_vec (x, VOIDmode))
	*total = outer_code != SET;
      if (sh_rep_vec (x, VOIDmode))
	*total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
		  + (outer_code != SET));
      *total = COSTS_N_INSNS (3) + (outer_code != SET);
      return true;

    case PLUS:
    case MINUS:
      *total = COSTS_N_INSNS (addsubcosts (x));
      return true;

    case AND:
      /* Check for (and (not (reg)) (const_int 1)) which is a tst insn.  */
      if (GET_CODE (XEXP (x, 0)) == NOT && XEXP (x, 1) == const1_rtx)
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      /* Fall through.  */

    case XOR:
    case IOR:
      *total = COSTS_N_INSNS (and_xor_ior_costs (x, code));
      return true;

    case MULT:
      *total = COSTS_N_INSNS (multcosts (x));
      return true;

    case LT:
    case GE:
      /* div0s sign comparison.  */
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && REG_P ((XEXP (XEXP (x, 0), 0)))
	  && REG_P ((XEXP (XEXP (x, 0), 1)))
	  && satisfies_constraint_Z (XEXP (x, 1)))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      return false;

    case LSHIFTRT:
      /* div0s sign comparison.  */
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && REG_P ((XEXP (XEXP (x, 0), 0)))
	  && REG_P ((XEXP (XEXP (x, 0), 1)))
	  && CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) == 31)
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      /* Fall through to shiftcosts.  */
    case ASHIFT:
    case ASHIFTRT:
      {
	int cost = shiftcosts (x);
	if (cost < 0)
	  return false;
	*total = COSTS_N_INSNS (cost);
	return true;
      }

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (20);
      return true;

    default:
      return false;
    }
}
/* Determine the size of the fundamental move insn that will be used
   for the specified mode.  */
static inline int
mov_insn_size (machine_mode mode, bool consider_sh2a)
{
  const int mode_sz = GET_MODE_SIZE (mode);

  if ((consider_sh2a && TARGET_SH2A_DOUBLE && mode == DFmode)
      || (TARGET_FMOVD && mode == DFmode))
    return mode_sz;
  else
    {
      /* The max. available mode for actual move insns is SImode.
	 Larger accesses will be split into multiple loads/stores.  */
      const int max_mov_sz = GET_MODE_SIZE (SImode);
      return mode_sz >= max_mov_sz ? max_mov_sz : mode_sz;
    }
}

/* Determine the maximum possible displacement for a move insn for the
   specified mode.  */
int
sh_max_mov_insn_displacement (machine_mode mode, bool consider_sh2a)
{
  /* The 4 byte displacement move insns are the same as the 2 byte
     versions but take a 12 bit displacement.  All we need to do is to
     scale the max. displacement value accordingly.  */
  const int disp_scale = consider_sh2a ? (4095 / 15) : 1;

  /* SH2A supports FPU move insns with 12 bit displacements.
     Other variants do not support any kind of displacements for
     FPU move insns.  */
  if (! consider_sh2a && TARGET_FPU_ANY && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return 0;
  else
    {
      const int mov_insn_sz = mov_insn_size (mode, consider_sh2a);
      const int mode_sz = GET_MODE_SIZE (mode);
      int r = 15 * mov_insn_sz * disp_scale;

      /* If the mov insn will be split into multiple loads/stores, the
	 maximum possible displacement is a bit smaller.  */
      if (mode_sz > mov_insn_sz)
	r -= mode_sz - mov_insn_sz;
      return r;
    }
}

/* Determine the alignment mask for a move insn of the
   specified mode.  */
static inline int
mov_insn_alignment_mask (machine_mode mode, bool consider_sh2a)
{
  const int mov_insn_sz = mov_insn_size (mode, consider_sh2a);
  return mov_insn_sz > 0 ? (mov_insn_sz - 1) : 0;
}

/* Return the displacement value of a displacement address.  */
int
sh_disp_addr_displacement (rtx x)
{
  gcc_assert (satisfies_constraint_Sdd (x));
  return INTVAL (XEXP (XEXP (x, 0), 1));
}
/* Compute the cost of an address.  */
static int
sh_address_cost (rtx x, machine_mode mode,
		 addr_space_t as ATTRIBUTE_UNUSED, bool speed ATTRIBUTE_UNUSED)
{
  /* 'GBR + 0'.  Account one more because of R0 restriction.  */
  if (REG_P (x) && REGNO (x) == GBR_REG)
    return 2;

  /* Simple reg, post-inc, pre-dec addressing.  */
  if (REG_P (x) || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
    return 1;

  /* 'reg + disp' addressing.  */
  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)))
    {
      /* 'GBR + disp'.  Account one more because of R0 restriction.  */
      if (REGNO (XEXP (x, 0)) == GBR_REG
	  && gbr_displacement (XEXP (x, 1), mode))
	return 2;

      const HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));

      if (offset == 0)
	return 1;

      /* The displacement would fit into a 2 byte move insn.
	 HImode and QImode loads/stores with displacement put pressure on
	 R0 which will most likely require another reg copy.  Thus account
	 a higher cost for that.  */
      if (offset > 0 && offset <= sh_max_mov_insn_displacement (mode, false))
	return (mode == HImode || mode == QImode) ? 2 : 1;

      /* The displacement would fit into a 4 byte move insn (SH2A).  */
      if (TARGET_SH2A
	  && offset > 0 && offset <= sh_max_mov_insn_displacement (mode, true))
	return 2;

      /* The displacement is probably out of range and will require extra
	 calculations.  */
      return 3;
    }

  /* 'reg + reg' addressing.  Account a slightly higher cost because of
     increased pressure on R0.  */
  if (GET_CODE (x) == PLUS && ! CONSTANT_P (XEXP (x, 1)))
    return 3;

  /* Not sure what it is - probably expensive.  */
  return 10;
}
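
/* Some resulting address costs (SImode unless noted):

     @rN, @rN+, @-rN     -> 1       @(4,rN)        -> 1
     @(2,rN) in HImode   -> 2       (R0 pressure)
     @(r0,rN)            -> 3       (reg + reg)
     @GBR / @(disp,GBR)  -> 2       (R0 restriction)

   Unrecognized forms are assumed expensive (10).  */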
/* Code to expand a shift.  */
static void
gen_ashift (int type, int n, rtx reg)
{
  rtx n_rtx;

  /* Negative values here come from the shift_amounts array.  */
  if (n < 0)
    {
      if (type == ASHIFT)
	type = LSHIFTRT;
      else
	type = ASHIFT;
      n = -n;
    }

  n_rtx = GEN_INT (n);
  gcc_assert (satisfies_constraint_P27 (n_rtx));

  switch (type)
    {
    case ASHIFTRT:
      emit_insn (gen_ashrsi3_k (reg, reg, n_rtx));
      break;
    case LSHIFTRT:
      if (n == 1)
	emit_insn (gen_shlr (reg, reg));
      else
	emit_insn (gen_lshrsi3_k (reg, reg, n_rtx));
      break;
    case ASHIFT:
      emit_insn (gen_ashlsi3_k (reg, reg, n_rtx));
      break;
    default:
      gcc_unreachable ();
    }
}

/* Code to expand a HImode shift.  */
static void
gen_ashift_hi (int type, int n, rtx reg)
{
  /* Negative values here come from the shift_amounts array.  */
  if (n < 0)
    {
      if (type == ASHIFT)
	type = LSHIFTRT;
      else
	type = ASHIFT;
      n = -n;
    }

  switch (type)
    {
    case ASHIFTRT:
    case LSHIFTRT:
      /* We don't have HImode right shift operations because using the
	 ordinary 32 bit shift instructions for that doesn't generate proper
	 zero/sign extension.
	 gen_ashift_hi is only called in contexts where we know that the
	 sign extension works out correctly.  */
      {
	int offset = 0;
	if (GET_CODE (reg) == SUBREG)
	  {
	    offset = SUBREG_BYTE (reg);
	    reg = SUBREG_REG (reg);
	  }
	gen_ashift (type, n, gen_rtx_SUBREG (SImode, reg, offset));
	break;
      }
    case ASHIFT:
      emit_insn (gen_ashlhi3_k (reg, reg, GEN_INT (n)));
      break;
    }
}

/* Output RTL to split a constant shift into its component SH constant
   shift instructions.  */
void
gen_shifty_op (int code, rtx *operands)
{
  int value = INTVAL (operands[2]);
  int max, i;

  /* Truncate the shift count in case it is out of bounds.  */
  value = value & 31;

  if (value == 31)
    {
      if (code == LSHIFTRT)
	{
	  emit_insn (gen_rotlsi3_1 (operands[0], operands[0]));
	  emit_insn (gen_movt (operands[0], get_t_reg_rtx ()));
	  return;
	}
      else if (code == ASHIFT)
	{
	  /* There is a two instruction sequence for 31 bit left shifts,
	     but it requires r0.  */
	  if (REG_P (operands[0]) && REGNO (operands[0]) == 0)
	    {
	      emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
	      emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
	      return;
	    }
	}
    }
  else if (value == 0)
    {
      /* This can happen even when optimizing, if there were subregs before
	 reload.  Don't output a nop here, as this is never optimized away;
	 use a no-op move instead.  */
      emit_insn (gen_rtx_SET (operands[0], operands[0]));
      return;
    }

  max = ashl_lshr_seq[value].insn_count;
  for (i = 0; i < max; i++)
    gen_ashift (code, ashl_lshr_seq[value].amount[i], operands[0]);
}
/* Same as gen_shifty_op, but optimized for values where the topmost bits
   don't matter.  */
void
gen_shifty_hi_op (int code, rtx *operands)
{
  int value = INTVAL (operands[2]);
  int max, i;
  void (*gen_fun) (int, int, rtx);

  /* This operation is used by and_shl for SImode values with a few
     high bits known to be cleared.  */
  value &= 31;
  if (value == 0)
    {
      emit_insn (gen_nop ());
      return;
    }

  gen_fun = GET_MODE (operands[0]) == HImode ? gen_ashift_hi : gen_ashift;
  if (code == ASHIFT)
    {
      max = ext_ashl_lshr_seq[value].insn_count;
      for (i = 0; i < max; i++)
	gen_fun (code, ext_ashl_lshr_seq[value].amount[i], operands[0]);
    }
  else
    /* When shifting right, emit the shifts in reverse order, so that
       solitary negative values come first.  */
    for (i = ext_ashl_lshr_seq[value].insn_count - 1; i >= 0; i--)
      gen_fun (code, ext_ashl_lshr_seq[value].amount[i], operands[0]);
}
/* Output RTL for an arithmetic right shift.
   ??? Rewrite to use super-optimizer sequences.  */
bool
expand_ashiftrt (rtx *operands)
{
  rtx wrk;
  char func[18];
  int value;

  if (TARGET_DYNSHIFT)
    {
      if (!CONST_INT_P (operands[2]))
	{
	  rtx count = copy_to_mode_reg (SImode, operands[2]);
	  emit_insn (gen_negsi2 (count, count));
	  emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
	  return true;
	}
      else if (ashiftrt_insns[INTVAL (operands[2]) & 31]
	       > 1 + SH_DYNAMIC_SHIFT_COST)
	{
	  rtx count
	    = force_reg (SImode, GEN_INT (- (INTVAL (operands[2]) & 31)));
	  emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
	  return true;
	}
    }
  if (!CONST_INT_P (operands[2]))
    return false;

  value = INTVAL (operands[2]) & 31;

  if (value == 31)
    {
      /* If we are called from abs expansion, arrange things so that we
	 can use a single MT instruction that doesn't clobber the source,
	 if LICM can hoist out the load of the constant zero.  */
      if (currently_expanding_to_rtl)
	{
	  emit_insn (gen_cmpgtsi_t (force_reg (SImode, CONST0_RTX (SImode)),
				    operands[1]));
	  emit_insn (gen_mov_neg_si_t (operands[0], get_t_reg_rtx ()));
	  return true;
	}
      emit_insn (gen_ashrsi2_31 (operands[0], operands[1]));
      return true;
    }
  else if (value >= 16 && value <= 19)
    {
      wrk = gen_reg_rtx (SImode);
      emit_insn (gen_ashrsi2_16 (wrk, operands[1]));
      value -= 16;
      while (value--)
	gen_ashift (ASHIFTRT, 1, wrk);
      emit_move_insn (operands[0], wrk);
      return true;
    }
  /* Expand a short sequence inline, longer call a magic routine.  */
  else if (value <= 5)
    {
      wrk = gen_reg_rtx (SImode);
      emit_move_insn (wrk, operands[1]);
      while (value--)
	gen_ashift (ASHIFTRT, 1, wrk);
      emit_move_insn (operands[0], wrk);
      return true;
    }

  wrk = gen_reg_rtx (Pmode);

  /* Load the value into an arg reg and call a helper.  */
  emit_move_insn (gen_rtx_REG (SImode, 4), operands[1]);
  sprintf (func, "__ashiftrt_r4_%d", value);
  rtx lab = function_symbol (wrk, func, SFUNC_STATIC).lab;
  emit_insn (gen_ashrsi3_n (GEN_INT (value), wrk, lab));
  emit_move_insn (operands[0], gen_rtx_REG (SImode, 4));
  return true;
}
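
/* E.g. without dynamic shifts an ashrsi3 by 3 expands inline to three
   single-bit 'shar' steps on a scratch register, while a shift by 10 is
   emitted as a call to the __ashiftrt_r4_10 helper with the value passed
   and returned in r4, exactly as constructed above.  */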
/* Try to find a good way to implement the combiner pattern
  [(set (match_operand:SI 0 "register_operand" "r")
	(and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
			   (match_operand:SI 2 "const_int_operand" "n"))
		(match_operand:SI 3 "const_int_operand" "n"))) .
  LEFT_RTX is operand 2 in the above pattern, and MASK_RTX is operand 3.
  return 0 for simple right / left or left/right shift combination.
  return 1 for a combination of shifts with zero_extend.
  return 2 for a combination of shifts with an AND that needs r0.
  return 3 for a combination of shifts with an AND that needs an extra
    scratch register, when the three highmost bits of the AND mask are clear.
  return 4 for a combination of shifts with an AND that needs an extra
    scratch register, when any of the three highmost bits of the AND mask
    is set.
  If ATTRP is set, store an initial right shift width in ATTRP[0],
  and the instruction length in ATTRP[1] .  These values are not valid
  when returning 0.
  When ATTRP is set and returning 1, ATTRP[2] gets set to the index into
  shift_amounts for the last shift value that is to be used before the
  sign extend.  */
int
shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
{
  unsigned HOST_WIDE_INT mask, lsb, mask2, lsb2;
  int left = INTVAL (left_rtx), right;
  int best = 0;
  int cost, best_cost = 10000;
  int best_right = 0, best_len = 0;
  int i;
  int can_ext;

  if (left < 0 || left > 31)
    return 0;
  if (CONST_INT_P (mask_rtx))
    mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
  else
    mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
  /* Can this be expressed as a right shift / left shift pair?  */
  lsb = ((mask ^ (mask - 1)) >> 1) + 1;
  right = exact_log2 (lsb);
  mask2 = ~(mask + lsb - 1);
  lsb2 = ((mask2 ^ (mask2 - 1)) >> 1) + 1;
  /* mask has no zeroes but trailing zeroes <==> ! mask2 */
  if (! mask2)
    best_cost = ashl_lshr_seq[right].insn_count
		+ ashl_lshr_seq[right + left].insn_count;
  /* mask has no trailing zeroes <==> ! right */
  else if (! right && mask2 == ~(lsb2 - 1))
    {
      int late_right = exact_log2 (lsb2);
      best_cost = ashl_lshr_seq[left + late_right].insn_count
		  + ashl_lshr_seq[late_right].insn_count;
    }
  /* Try to use zero extend.  */
  if (mask2 == ~(lsb2 - 1))
    {
      int width, first;

      for (width = 8; width <= 16; width += 8)
	{
	  /* Can we zero-extend right away?  */
	  if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
	    {
	      cost = 1 + ext_ashl_lshr_seq[right].insn_count
		       + ext_ashl_lshr_seq[left + right].insn_count;
	      if (cost < best_cost)
		{
		  best = 1;
		  best_cost = cost;
		  best_right = right;
		  best_len = cost;
		  if (attrp)
		    attrp[2] = -1;
		}
	      continue;
	    }
	  /* ??? Could try to put zero extend into initial right shift,
	     or even shift a bit left before the right shift.  */
	  /* Determine value of first part of left shift, to get to the
	     zero extend cut-off point.  */
	  first = width - exact_log2 (lsb2) + right;
	  if (first >= 0 && right + left - first >= 0)
	    {
	      cost = ext_ashl_lshr_seq[right].insn_count
		     + ext_ashl_lshr_seq[first].insn_count + 1
		     + ext_ashl_lshr_seq[right + left - first].insn_count;

	      if (cost < best_cost)
		{
		  best = 1;
		  best_cost = cost;
		  best_right = right;
		  best_len = cost;
		  if (attrp)
		    attrp[2] = first;
		}
	    }
	}
    }
  /* Try to use r0 AND pattern */
  for (i = 0; i <= 2; i++)
    {
      if (i > right)
	break;
      if (! CONST_OK_FOR_K08 (mask >> i))
	continue;
      cost = (i != 0) + 2 + ext_ashl_lshr_seq[left + i].insn_count;
      if (cost < best_cost)
	{
	  best = 2;
	  best_cost = cost;
	  best_right = i;
	  best_len = cost - 1;
	}
    }
  /* Try to use a scratch register to hold the AND operand.  */
  can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
  for (i = 0; i <= 2; i++)
    {
      if (i > right)
	break;
      cost = (i != 0) + (CONST_OK_FOR_I08 (mask >> i) ? 2 : 3)
	     + (can_ext
		? ext_ashl_lshr_seq
		: ashl_lshr_seq)[left + i].insn_count;
      if (cost < best_cost)
	{
	  best = 4 - can_ext;
	  best_cost = cost;
	  best_right = i;
	  best_len = cost - 1 - ! CONST_OK_FOR_I08 (mask >> i);
	}
    }

  if (attrp)
    {
      attrp[0] = best_right;
      attrp[1] = best_len;
    }
  return best;
}
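
/* Worked example: for (x << 1) & 0x1FE the function sees left = 1 and,
   after shifting the mask right by 'left', mask = 0xFF.  The zero-extend
   variant then wins: extu.b plus a single 1-bit left shift, i.e. kind 1
   with a length of 2 insns, beating the 3-insn shift/shift
   alternatives.  */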
/* This is used in length attributes of the unnamed instructions
   corresponding to shl_and_kind return values of 1 and 2.  */
int
shl_and_length (rtx insn)
{
  rtx set_src, left_rtx, mask_rtx;
  int attributes[3];

  set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
  left_rtx = XEXP (XEXP (set_src, 0), 1);
  mask_rtx = XEXP (set_src, 1);
  shl_and_kind (left_rtx, mask_rtx, attributes);
  return attributes[1];
}

/* This is used in length attribute of the and_shl_scratch instruction.  */
int
shl_and_scr_length (rtx insn)
{
  rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
  int len = ashl_lshr_seq[INTVAL (XEXP (set_src, 1)) & 31].insn_count;
  rtx op = XEXP (set_src, 0);
  len += ashl_lshr_seq[INTVAL (XEXP (op, 1)) & 31].insn_count + 1;
  op = XEXP (XEXP (op, 0), 0);
  return len + ashl_lshr_seq[INTVAL (XEXP (op, 1)) & 31].insn_count;
}
/* Generate rtl for instructions for which shl_and_kind advised a particular
   method of generating them, i.e. returned zero.  */
bool
gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
{
  int attributes[3];
  unsigned HOST_WIDE_INT mask;
  int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
  int right, total_shift;
  void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;

  right = attributes[0];
  total_shift = INTVAL (left_rtx) + right;
  mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> total_shift;
  switch (kind)
    {
    default:
      return true;
    case 1:
      {
        int first = attributes[2];
        rtx operands[3];

        if (first < 0)
          {
            emit_insn ((mask << right) <= 0xff
                       ? gen_zero_extendqisi2 (dest,
                                               gen_lowpart (QImode, source))
                       : gen_zero_extendhisi2 (dest,
                                               gen_lowpart (HImode, source)));
            source = dest;
          }
        if (source != dest)
          emit_insn (gen_movsi (dest, source));
        operands[0] = dest;
        if (right)
          {
            operands[2] = GEN_INT (right);
            gen_shifty_hi_op (LSHIFTRT, operands);
          }
        if (first > 0)
          {
            operands[2] = GEN_INT (first);
            gen_shifty_hi_op (ASHIFT, operands);
            total_shift -= first;
            mask <<= first;
          }
        if (first >= 0)
          emit_insn (mask <= 0xff
                     ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
                     : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
        if (total_shift > 0)
          {
            operands[2] = GEN_INT (total_shift);
            gen_shifty_hi_op (ASHIFT, operands);
          }
        break;
      }
    case 4:
      shift_gen_fun = gen_shifty_op;
      /* FALLTHRU */
    case 3:
      /* If the topmost bit that matters is set, set the topmost bits
         that don't matter.  This way, we might be able to get a shorter
         signed constant.  */
      if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
        mask |= (HOST_WIDE_INT) ((HOST_WIDE_INT_M1U) << (31 - total_shift));
      /* FALLTHRU */
    case 2:
      /* Don't expand fine-grained when combining, because that will
         make the pattern fail.  */
      if (currently_expanding_to_rtl
          || reload_in_progress || reload_completed)
        {
          rtx operands[3];

          /* Cases 3 and 4 should be handled by this split
             only while combining  */
          gcc_assert (kind <= 2);
          if (right)
            {
              emit_insn (gen_lshrsi3 (dest, source, GEN_INT (right)));
              source = dest;
            }
          emit_insn (gen_andsi3 (dest, source, GEN_INT (mask)));
          if (total_shift)
            {
              operands[0] = dest;
              operands[2] = GEN_INT (total_shift);
              shift_gen_fun (ASHIFT, operands);
            }
          break;
        }
      else
        {
          int neg = 0;

          if (kind != 4 && total_shift < 16)
            {
              neg = -ext_ashl_lshr_seq[total_shift].amount[1];
              if (neg > 0)
                neg -= ext_ashl_lshr_seq[total_shift].amount[2];
              else
                neg = 0;
            }
          emit_insn (gen_and_shl_scratch (dest, source,
                                          GEN_INT (right),
                                          GEN_INT (mask),
                                          GEN_INT (total_shift + neg),
                                          GEN_INT (neg)));
          emit_insn (gen_movsi (dest, dest));
          break;
        }
    }
  return false;
}
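/* The expected call sites are the corresponding splitters in sh.md,
   roughly of the shape
     if (gen_shl_and (operands[0], operands[2], operands[3], operands[1]))
       FAIL;
   where a true return tells the splitter that no special sequence
   applies.  (Sketch of the caller; see sh.md for the real patterns.)  */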
/* Try to find a good way to implement the combiner pattern
  [(set (match_operand:SI 0 "register_operand" "=r")
        (sign_extract:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
                                    (match_operand:SI 2 "const_int_operand" "n")
                         (match_operand:SI 3 "const_int_operand" "n")
                         (const_int 0)))
   (clobber (reg:SI T_REG))]
  LEFT_RTX is operand 2 in the above pattern, and SIZE_RTX is operand 3.
  return 0 for simple left / right shift combination.
  return 1 for left shift / 8 bit sign extend / left shift.
  return 2 for left shift / 16 bit sign extend / left shift.
  return 3 for left shift / 8 bit sign extend / shift / sign extend.
  return 4 for left shift / 16 bit sign extend / shift / sign extend.
  return 5 for left shift / 16 bit sign extend / right shift.
  return 6 for < 8 bit sign extend / left shift.
  return 7 for < 8 bit sign extend / left shift / single right shift.
  If COSTP is nonzero, assign the calculated cost to *COSTP.  */
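/* For example, with LEFT = 15 and SIZE = 20 (insize = 5), a return
   value of 2 means: shift left by 16 - 5 = 11, sign extend the low
   16 bits, then shift left by 20 - 16 = 4, which is how gen_shl_sext
   below expands kind 2.  (Illustrative values only.)  */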
int
shl_sext_kind (rtx left_rtx, rtx size_rtx, int *costp)
{
  int left, size, insize, ext;
  int cost = 0, best_cost;
  int kind;

  left = INTVAL (left_rtx);
  size = INTVAL (size_rtx);
  insize = size - left;
  gcc_assert (insize > 0);
  /* Default to left / right shift.  */
  kind = 0;
  best_cost = ashl_lshr_seq[32 - insize].insn_count
              + ashl_lshr_seq[32 - size].insn_count;
  if (size <= 16)
    {
      /* 16 bit shift / sign extend / 16 bit shift */
      cost = ashl_lshr_seq[16 - insize].insn_count + 1
             + ashl_lshr_seq[16 - size].insn_count;
      /* If ashiftrt_insns[16 - size] is 8, this choice will be overridden
         below, by alternative 3 or something even better.  */
      if (cost < best_cost)
        {
          kind = 5;
          best_cost = cost;
        }
    }
  /* Try a plain sign extend between two shifts.  */
  for (ext = 16; ext >= insize; ext -= 8)
    {
      if (ext <= size)
        {
          cost = ext_ashl_lshr_seq[ext - insize].insn_count + 1
                 + ashl_lshr_seq[size - ext].insn_count;
          if (cost < best_cost)
            {
              kind = ext / (unsigned) 8;
              best_cost = cost;
            }
        }
      /* Check if we can do a sloppy shift with a final signed shift
         restoring the sign.  */
      if (EXT_SHIFT_SIGNED (size - ext))
        cost = ext_ashl_lshr_seq[ext - insize].insn_count
               + ext_ashl_lshr_seq[size - ext].insn_count + 1;
      /* If not, maybe it's still cheaper to do the second shift sloppy,
         and do a final sign extend?  */
      else if (size <= 16)
        cost = ext_ashl_lshr_seq[ext - insize].insn_count + 1
               + ext_ashl_lshr_seq[size > ext ? size - ext
                                              : ext - size].insn_count
               + 1;
      else
        continue;
      if (cost < best_cost)
        {
          kind = ext / (unsigned) 8 + 2;
          best_cost = cost;
        }
    }
  /* Check if we can sign extend in r0 */
  if (insize < 8)
    {
      cost = 3 + ashl_lshr_seq[left].insn_count;
      if (cost < best_cost)
        {
          kind = 6;
          best_cost = cost;
        }
      /* Try the same with a final signed shift.  */
      if (left < 31)
        {
          cost = 3 + ext_ashl_lshr_seq[left + 1].insn_count + 1;
          if (cost < best_cost)
            {
              kind = 7;
              best_cost = cost;
            }
        }
    }
  if (TARGET_DYNSHIFT)
    {
      /* Try to use a dynamic shift.  */
      cost = ashl_lshr_seq[32 - insize].insn_count + 1 + SH_DYNAMIC_SHIFT_COST;
      if (cost < best_cost)
        {
          kind = 0;
          best_cost = cost;
        }
    }
  if (costp)
    *costp = best_cost;
  return kind;
}
/* Function to be used in the length attribute of the instructions
   implementing this pattern.  */
int
shl_sext_length (rtx insn)
{
  rtx set_src, left_rtx, size_rtx;
  int cost;

  set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
  left_rtx = XEXP (XEXP (set_src, 0), 1);
  size_rtx = XEXP (set_src, 1);
  shl_sext_kind (left_rtx, size_rtx, &cost);
  return cost;
}
/* Generate rtl for this pattern.  */
bool
gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
{
  int kind;
  int left, size, insize, cost;
  rtx operands[3];

  kind = shl_sext_kind (left_rtx, size_rtx, &cost);
  left = INTVAL (left_rtx);
  size = INTVAL (size_rtx);
  insize = size - left;
  switch (kind)
    {
    case 1:
    case 2:
    case 3:
    case 4:
      {
        int ext = kind & 1 ? 8 : 16;
        int shift2 = size - ext;

        /* Don't expand fine-grained when combining, because that will
           make the pattern fail.  */
        if (! currently_expanding_to_rtl
            && ! reload_in_progress && ! reload_completed)
          {
            emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
            emit_insn (gen_movsi (dest, source));
            break;
          }
        if (dest != source)
          emit_insn (gen_movsi (dest, source));
        operands[0] = dest;
        if (ext - insize)
          {
            operands[2] = GEN_INT (ext - insize);
            gen_shifty_hi_op (ASHIFT, operands);
          }
        emit_insn (kind & 1
                   ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
                   : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
        if (kind <= 2)
          {
            if (shift2)
              {
                operands[2] = GEN_INT (shift2);
                gen_shifty_op (ASHIFT, operands);
              }
          }
        else
          {
            if (shift2 > 0)
              {
                if (EXT_SHIFT_SIGNED (shift2))
                  {
                    operands[2] = GEN_INT (shift2 + 1);
                    gen_shifty_op (ASHIFT, operands);
                    operands[2] = const1_rtx;
                    gen_shifty_op (ASHIFTRT, operands);
                    break;
                  }
                operands[2] = GEN_INT (shift2);
                gen_shifty_hi_op (ASHIFT, operands);
              }
            else if (shift2)
              {
                operands[2] = GEN_INT (-shift2);
                gen_shifty_hi_op (LSHIFTRT, operands);
              }
            emit_insn (size <= 8
                       ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
                       : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
          }
        break;
      }
    case 5:
      {
        int i = 16 - size;

        if (! currently_expanding_to_rtl
            && ! reload_in_progress && ! reload_completed)
          emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
        else
          {
            operands[0] = dest;
            operands[2] = GEN_INT (16 - insize);
            gen_shifty_hi_op (ASHIFT, operands);
            emit_insn (gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
          }
        /* Don't use gen_ashrsi3 because it generates new pseudos.  */
        while (--i >= 0)
          gen_ashift (ASHIFTRT, 1, dest);
        break;
      }
    case 6:
    case 7:
      /* Don't expand fine-grained when combining, because that will
         make the pattern fail.  */
      if (! currently_expanding_to_rtl
          && ! reload_in_progress && ! reload_completed)
        {
          emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
          emit_insn (gen_movsi (dest, source));
          break;
        }
      emit_insn (gen_andsi3 (dest, source, GEN_INT ((1 << insize) - 1)));
      emit_insn (gen_xorsi3 (dest, dest, GEN_INT (1 << (insize - 1))));
      emit_insn (gen_addsi3 (dest, dest,
                             GEN_INT (HOST_WIDE_INT_M1U << (insize - 1))));
      operands[0] = dest;
      operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
      gen_shifty_op (ASHIFT, operands);
      if (kind == 7)
        emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
      break;
    default:
      return true;
    }
  return false;
}
typedef struct label_ref_list_d
{
  rtx_code_label *label;
  struct label_ref_list_d *next;
} *label_ref_list_t;

static object_allocator<label_ref_list_d> label_ref_list_d_pool
  ("label references list");
/* The SH cannot load a large constant into a register, constants have to
   come from a pc relative load.  The reference of a pc relative load
   instruction must be less than 1k in front of the instruction.  This
   means that we often have to dump a constant inside a function, and
   generate code to branch around it.

   It is important to minimize this, since the branches will slow things
   down and make things bigger.

   Worst case code looks like:

   mov.l L1,rn
   bra   L2
   nop
   align
   L1:   .long value
   L2:
   ..

   mov.l L3,rn
   bra   L4
   nop
   align
   L3:   .long value
   L4:
   ..

   We fix this by performing a scan before scheduling, which notices which
   instructions need to have their operands fetched from the constant table
   and builds the table.

   The algorithm is:

   scan, find an instruction which needs a pcrel move.  Look forward, find the
   last barrier which is within MAX_COUNT bytes of the requirement.
   If there isn't one, make one.  Process all the instructions between
   the find and the barrier.

   In the above example, we can tell that L3 is within 1k of L1, so
   the first move can be shrunk from the 3 insn+constant sequence into
   just 1 insn, and the constant moved to L3 to make:

   mov.l L1,rn
   ..
   mov.l L3,rn
   bra   L4
   nop
   align
   L3:.long value
   L4:.long value

   Then the second move becomes the target for the shortening process.  */
typedef struct
{
  rtx value;                    /* Value in table.  */
  rtx_code_label *label;        /* Label of value.  */
  label_ref_list_t wend;        /* End of window.  */
  machine_mode mode;            /* Mode of value.  */

  /* True if this constant is accessed as part of a post-increment
     sequence.  Note that HImode constants are never accessed in this way.  */
  bool part_of_sequence_p;
} pool_node;
/* The maximum number of constants that can fit into one pool, since
   constants in the range 0..510 are at least 2 bytes long, and in the
   range from there to 1018 at least 4 bytes.  */
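/* That is, at most 510 / 2 = 255 two-byte entries plus
   (1018 - 510) / 4 = 127 four-byte ones; 372 presumably stays
   conservatively below that 382 total.  */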
#define MAX_POOL_SIZE 372
static pool_node pool_vector[MAX_POOL_SIZE];
static int pool_size;
static rtx_code_label *pool_window_label;
static int pool_window_last;

static int max_labelno_before_reorg;

/* ??? If we need a constant in HImode which is the truncated value of a
   constant we need in SImode, we could combine the two entries thus saving
   two bytes.  Is this common enough to be worth the effort of implementing
   this?  */

/* ??? This stuff should be done at the same time that we shorten branches.
   As it is now, we must assume that all branches are the maximum size, and
   this causes us to almost always output constant pools sooner than
   necessary.  */
/* Add a constant to the pool and return its label.  */
static rtx_code_label *
add_constant (rtx x, machine_mode mode, rtx last_value)
{
  rtx_code_label *lab, *new_rtx;
  label_ref_list_t ref, newref;
  int i;

  /* First see if we've already got it.  */
  for (i = 0; i < pool_size; i++)
    {
      if (x->code == pool_vector[i].value->code
          && mode == pool_vector[i].mode)
        {
          if (x->code == CODE_LABEL)
            {
              if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
                continue;
            }
          if (rtx_equal_p (x, pool_vector[i].value))
            {
              lab = new_rtx = 0;
              if (! last_value
                  || ! i
                  || ! rtx_equal_p (last_value, pool_vector[i-1].value))
                {
                  new_rtx = gen_label_rtx ();
                  LABEL_REFS (new_rtx) = pool_vector[i].label;
                  pool_vector[i].label = lab = new_rtx;
                }
              if (lab && pool_window_label)
                {
                  newref = label_ref_list_d_pool.allocate ();
                  newref->label = pool_window_label;
                  ref = pool_vector[pool_window_last].wend;
                  newref->next = ref;
                  pool_vector[pool_window_last].wend = newref;
                }
              if (new_rtx)
                pool_window_label = new_rtx;
              pool_window_last = i;
              return lab;
            }
        }
    }

  /* Need a new one.  */
  pool_vector[pool_size].value = x;
  if (last_value && rtx_equal_p (last_value, pool_vector[pool_size - 1].value))
    {
      lab = 0;
      pool_vector[pool_size - 1].part_of_sequence_p = true;
    }
  else
    lab = gen_label_rtx ();
  pool_vector[pool_size].mode = mode;
  pool_vector[pool_size].label = lab;
  pool_vector[pool_size].wend = NULL;
  pool_vector[pool_size].part_of_sequence_p = (lab == 0);
  if (lab && pool_window_label)
    {
      newref = label_ref_list_d_pool.allocate ();
      newref->label = pool_window_label;
      ref = pool_vector[pool_window_last].wend;
      newref->next = ref;
      pool_vector[pool_window_last].wend = newref;
    }
  if (lab)
    pool_window_label = lab;
  pool_window_last = pool_size;
  pool_size++;
  return lab;
}
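/* A typical use, as in the pcload fixup in sh_reorg below:

     lab = add_constant (src, mode, 0);
     newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
     newsrc = gen_const_mem (mode, newsrc);

   i.e. the constant SRC is replaced by a pc-relative load from its
   pool entry's label.  */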
/* Output the literal table.  START, if nonzero, is the first instruction
   this table is needed for, and also indicates that there is at least one
   casesi_worker_2 instruction; we have to emit the operand3 labels from
   these insns at a 4-byte aligned position.  BARRIER is the barrier
   after which we are to place the table.  */
static void
dump_table (rtx_insn *start, rtx_insn *barrier)
{
  rtx_insn *scan = barrier;
  int i;
  bool need_align = true;
  rtx lab;
  label_ref_list_t ref;
  bool have_df = false;

  /* Do two passes, first time dump out the HI sized constants.  */

  for (i = 0; i < pool_size; i++)
    {
      pool_node *p = &pool_vector[i];

      if (p->mode == HImode)
        {
          if (need_align)
            {
              scan = emit_insn_after (gen_align_2 (), scan);
              need_align = false;
            }
          for (lab = p->label; lab; lab = LABEL_REFS (lab))
            scan = emit_label_after (lab, scan);
          scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
                                  scan);
          for (ref = p->wend; ref; ref = ref->next)
            {
              lab = ref->label;
              scan = emit_insn_after (gen_consttable_window_end (lab), scan);
            }
        }
      else if (p->mode == DFmode)
        have_df = true;
    }

  need_align = true;

  if (start)
    {
      scan = emit_insn_after (gen_align_4 (), scan);
      need_align = false;
      for (; start != barrier; start = NEXT_INSN (start))
        if (NONJUMP_INSN_P (start)
            && recog_memoized (start) == CODE_FOR_casesi_worker_2)
          {
            rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
            rtx lab = XEXP (XVECEXP (src, 0, 3), 0);

            scan = emit_label_after (lab, scan);
          }
    }
  if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
    {
      rtx_insn *align_insn = NULL;

      scan = emit_label_after (gen_label_rtx (), scan);
      scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
      need_align = false;

      for (i = 0; i < pool_size; i++)
        {
          pool_node *p = &pool_vector[i];

          switch (p->mode)
            {
            case HImode:
              break;
            case SImode:
            case SFmode:
              if (align_insn && !p->part_of_sequence_p)
                {
                  for (lab = p->label; lab; lab = LABEL_REFS (lab))
                    emit_label_before (lab, align_insn);
                  emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
                                    align_insn);
                  for (ref = p->wend; ref; ref = ref->next)
                    {
                      lab = ref->label;
                      emit_insn_before (gen_consttable_window_end (lab),
                                        align_insn);
                    }
                  delete_insn (align_insn);
                  align_insn = NULL;
                  continue;
                }
              else
                {
                  for (lab = p->label; lab; lab = LABEL_REFS (lab))
                    scan = emit_label_after (lab, scan);
                  scan = emit_insn_after (gen_consttable_4 (p->value,
                                                            const0_rtx), scan);
                  need_align = ! need_align;
                }
              break;
            case DFmode:
              if (need_align)
                {
                  scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
                  align_insn = scan;
                  need_align = false;
                }
              /* FALLTHRU */
            case DImode:
              for (lab = p->label; lab; lab = LABEL_REFS (lab))
                scan = emit_label_after (lab, scan);
              scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
                                      scan);
              break;
            default:
              gcc_unreachable ();
            }

          if (p->mode != HImode)
            {
              for (ref = p->wend; ref; ref = ref->next)
                {
                  lab = ref->label;
                  scan = emit_insn_after (gen_consttable_window_end (lab),
                                          scan);
                }
            }
        }

      pool_size = 0;
    }

  for (i = 0; i < pool_size; i++)
    {
      pool_node *p = &pool_vector[i];

      switch (p->mode)
        {
        case HImode:
          break;
        case SImode:
        case SFmode:
          if (need_align)
            {
              need_align = false;
              scan = emit_label_after (gen_label_rtx (), scan);
              scan = emit_insn_after (gen_align_4 (), scan);
            }
          for (lab = p->label; lab; lab = LABEL_REFS (lab))
            scan = emit_label_after (lab, scan);
          scan = emit_insn_after (gen_consttable_4 (p->value, const0_rtx),
                                  scan);
          break;
        case DFmode:
        case DImode:
          if (need_align)
            {
              need_align = false;
              scan = emit_label_after (gen_label_rtx (), scan);
              scan = emit_insn_after (gen_align_4 (), scan);
            }
          for (lab = p->label; lab; lab = LABEL_REFS (lab))
            scan = emit_label_after (lab, scan);
          scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
                                  scan);
          break;
        default:
          gcc_unreachable ();
        }

      if (p->mode != HImode)
        {
          for (ref = p->wend; ref; ref = ref->next)
            {
              lab = ref->label;
              scan = emit_insn_after (gen_consttable_window_end (lab), scan);
            }
        }
    }

  scan = emit_insn_after (gen_consttable_end (), scan);
  scan = emit_barrier_after (scan);
  pool_size = 0;
  pool_window_label = NULL;
  pool_window_last = 0;
}
#define MOVA_LABELREF(mova) XVECEXP (SET_SRC (PATTERN (mova)), 0, 0)

/* Nonzero if the insn is a move instruction which needs to be fixed.  */

/* ??? For DImode/DFmode moves, we don't need to fix it if each half of the
   CONST_DOUBLE input value is CONST_OK_FOR_I08.  For an SFmode move, we don't
   need to fix it if the input value is CONST_OK_FOR_I08.  */
static bool
broken_move (rtx_insn *insn)
{
  if (NONJUMP_INSN_P (insn))
    {
      rtx pat = PATTERN (insn);
      if (GET_CODE (pat) == PARALLEL)
        pat = XVECEXP (pat, 0, 0);
      if (GET_CODE (pat) == SET
          /* We can load any 8-bit value if we don't care what the high
             order bits end up as.  */
          && GET_MODE (SET_DEST (pat)) != QImode
          && (CONSTANT_P (SET_SRC (pat))
              || (GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
                  && XINT (SET_SRC (pat), 1) == UNSPECV_SP_SWITCH_B)
              /* Match mova_const.  */
              || (GET_CODE (SET_SRC (pat)) == UNSPEC
                  && XINT (SET_SRC (pat), 1) == UNSPEC_MOVA
                  && GET_CODE (XVECEXP (SET_SRC (pat), 0, 0)) == CONST))
          && ! (TARGET_SH2E
                && GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
                && (fp_zero_operand (SET_SRC (pat))
                    || fp_one_operand (SET_SRC (pat)))
                /* In general we don't know the current setting of fpscr, so
                   disable fldi.
                   There is an exception if this was a register-register move
                   before reload - and hence it was ascertained that we have
                   single precision setting - and in a post-reload optimization
                   we changed this to do a constant load.  In that case
                   we don't have an r0 clobber, hence we must use fldi.  */
                && (TARGET_FMOVD
                    || (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
                        == SCRATCH))
                && REG_P (SET_DEST (pat))
                && FP_REGISTER_P (REGNO (SET_DEST (pat))))
          && ! (TARGET_SH2A
                && GET_MODE (SET_DEST (pat)) == SImode
                && (satisfies_constraint_I20 (SET_SRC (pat))
                    || satisfies_constraint_I28 (SET_SRC (pat))))
          && ! satisfies_constraint_I08 (SET_SRC (pat)))
        return true;
    }

  return false;
}
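/* For instance, (set (reg:SI 1) (const_int 4660)) is broken in this
   sense: 4660 (0x1234) does not satisfy I08, so the move must be
   turned into a pc-relative load from the constant pool.
   (Illustrative example.)  */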
/* Return true if the specified insn is a mova insn.  */
static bool
mova_p (rtx_insn *insn)
{
  return (NONJUMP_INSN_P (insn)
          && GET_CODE (PATTERN (insn)) == SET
          && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
          && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
          /* Don't match mova_const.  */
          && GET_CODE (MOVA_LABELREF (insn)) == LABEL_REF);
}
/* Fix up a mova from a switch that went out of range.  */
static void
fixup_mova (rtx_insn *mova)
{
  PUT_MODE (XEXP (MOVA_LABELREF (mova), 0), QImode);
  if (! flag_pic)
    {
      SET_SRC (PATTERN (mova)) = MOVA_LABELREF (mova);
      INSN_CODE (mova) = -1;
    }
  else
    {
      rtx_insn *worker = mova;
      rtx_code_label *lab = gen_label_rtx ();
      rtx wpat, wpat0, wpat1, wsrc, target, base, diff;

      do
        {
          worker = NEXT_INSN (worker);
          gcc_assert (worker
                      && !LABEL_P (worker)
                      && !JUMP_P (worker));
        } while (NOTE_P (worker)
                 || recog_memoized (worker) != CODE_FOR_casesi_worker_1);
      wpat = PATTERN (worker);
      wpat0 = XVECEXP (wpat, 0, 0);
      wpat1 = XVECEXP (wpat, 0, 1);
      wsrc = SET_SRC (wpat0);
      PATTERN (worker) = (gen_casesi_worker_2
                          (SET_DEST (wpat0), XVECEXP (wsrc, 0, 1),
                           XEXP (XVECEXP (wsrc, 0, 2), 0), lab,
                           XEXP (wpat1, 0)));
      INSN_CODE (worker) = -1;
      target = XVECEXP (SET_SRC (PATTERN (mova)), 0, 0);
      base = gen_rtx_LABEL_REF (Pmode, lab);
      diff = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, target, base), UNSPEC_SYMOFF);
      SET_SRC (PATTERN (mova)) = gen_rtx_CONST (Pmode, diff);
      INSN_CODE (mova) = -1;
    }
}
/* NEW_MOVA is a mova we've just encountered while scanning forward.  Update
   *num_mova, and check if the new mova is not nested within the first one.
   Return 0 if *first_mova was replaced, 1 if new_mova was replaced,
   2 if new_mova has been assigned to *first_mova, -1 otherwise.  */
static int
untangle_mova (int *num_mova, rtx_insn **first_mova, rtx_insn *new_mova)
{
  int n_addr = 0;       /* Initialization to shut up spurious warning.  */
  int f_target, n_target = 0;   /* Likewise.  */

  if (optimize)
    {
      /* If NEW_MOVA has no address yet, it will be handled later.  */
      if (INSN_ADDRESSES_SIZE() <= (unsigned) INSN_UID (new_mova))
        return -1;

      n_addr = INSN_ADDRESSES (INSN_UID (new_mova));
      n_target = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (new_mova), 0)));
      if (n_addr > n_target || n_addr + 1022 < n_target)
        {
          /* Change the mova into a load.
             broken_move will then return true for it.  */
          fixup_mova (new_mova);
          return 1;
        }
    }
  if (!(*num_mova)++)
    {
      *first_mova = new_mova;
      return 2;
    }
  if (!optimize
      || ((f_target
           = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (*first_mova), 0))))
          >= n_target))
    return -1;

  (*num_mova)--;
  if (f_target - INSN_ADDRESSES (INSN_UID (*first_mova))
      > n_target - n_addr)
    {
      /* Change the mova into a load.  */
      fixup_mova (*first_mova);
      return 0;
    }
  else
    {
      fixup_mova (new_mova);
      return 1;
    }
}
/* Find the last barrier from insn FROM which is close enough to hold the
   constant pool.  If we can't find one, then create one near the end of
   the range.  */
static rtx_insn *
find_barrier (int num_mova, rtx_insn *mova, rtx_insn *from)
{
  int count_si = 0;
  int count_hi = 0;
  int found_hi = 0;
  int found_si = 0;
  int hi_align = 2;
  int si_align = 2;
  int leading_mova = num_mova;
  rtx_insn *barrier_before_mova = NULL;
  rtx_insn *found_barrier = NULL;
  rtx_insn *good_barrier = NULL;
  int si_limit;
  int hi_limit;
  rtx_insn *orig = from;
  rtx_insn *last_got = NULL;
  rtx_insn *last_symoff = NULL;

  /* For HImode: range is 510, add 4 because pc counts from address of
     second instruction after this one, subtract 2 for the jump instruction
     that we may need to emit before the table, subtract 2 for the instruction
     that fills the jump delay slot (in very rare cases, reorg will take an
     instruction from after the constant pool or will leave the delay slot
     empty).  This gives 510.
     For SImode: range is 1020, add 4 because pc counts from address of
     second instruction after this one, subtract 2 in case pc is 2 byte
     aligned, subtract 2 for the jump instruction that we may need to emit
     before the table, subtract 2 for the instruction that fills the jump
     delay slot.  This gives 1018.  */
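  /* In other words: 510 + 4 - 2 - 2 = 510 and
     1020 + 4 - 2 - 2 - 2 = 1018, matching the limits set below.  */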
  /* The branch will always be shortened now that the reference address for
     forward branches is the successor address, thus we need no longer make
     adjustments to the [sh]i_limit for -O0.  */

  si_limit = 1018;
  hi_limit = 510;

  while (from && count_si < si_limit && count_hi < hi_limit)
    {
      int inc = get_attr_length (from);
      int new_align = 1;

      /* If this is a label that existed at the time of the compute_alignments
         call, determine the alignment.  N.B.  When find_barrier recurses for
         an out-of-reach mova, we might see labels at the start of previously
         inserted constant tables.  */
      if (LABEL_P (from)
          && CODE_LABEL_NUMBER (from) <= max_labelno_before_reorg)
        {
          if (optimize)
            new_align = 1 << label_to_alignment (from);
          else if (BARRIER_P (prev_nonnote_insn (from)))
            new_align = 1 << barrier_align (from);
          else
            new_align = 1;
          inc = 0;
        }
      /* In case we are scanning a constant table because of recursion, check
         for explicit alignments.  If the table is long, we might be forced
         to emit the new table in front of it; the length of the alignment
         might be the last straw.  */
      else if (NONJUMP_INSN_P (from)
               && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
               && XINT (PATTERN (from), 1) == UNSPECV_ALIGN)
        new_align = INTVAL (XVECEXP (PATTERN (from), 0, 0));
      /* When we find the end of a constant table, paste the new constant
         at the end.  That is better than putting it in front because
         this way, we don't need extra alignment for adding a 4-byte-aligned
         mov(a) label to a 2/4 or 8/4 byte aligned table.  */
      else if (NONJUMP_INSN_P (from)
               && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
               && XINT (PATTERN (from), 1) == UNSPECV_CONST_END)
        return from;

      if (BARRIER_P (from))
        {
          rtx_insn *next;

          found_barrier = from;

          /* If we are at the end of the function, or in front of an alignment
             instruction, we need not insert an extra alignment.  We prefer
             this kind of barrier.  */
          if (barrier_align (from) > 2)
            good_barrier = from;

          /* If we are at the end of a hot/cold block, dump the constants
             here.  */
          next = NEXT_INSN (from);
          if (next
              && NOTE_P (next)
              && NOTE_KIND (next) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
            break;
        }

      if (broken_move (from))
        {
          rtx pat, src, dst;
          machine_mode mode;

          pat = PATTERN (from);
          if (GET_CODE (pat) == PARALLEL)
            pat = XVECEXP (pat, 0, 0);
          src = SET_SRC (pat);
          dst = SET_DEST (pat);
          mode = GET_MODE (dst);

          /* A GOT pc-relative setting comes in a pair of
             mova       .L8,r0
             mov.l      .L8,r12
             instructions (plus add r0,r12).
             Remember if we see one without the other.  */
          if (GET_CODE (src) == UNSPEC && PIC_ADDR_P (XVECEXP (src, 0, 0)))
            last_got = last_got ? NULL : from;
          else if (PIC_ADDR_P (src))
            last_got = last_got ? NULL : from;

          /* We must explicitly check the mode, because sometimes the
             front end will generate code to load unsigned constants into
             HImode targets without properly sign extending them.  */
          if (mode == HImode
              || (mode == SImode && satisfies_constraint_I16 (src)
                  && REGNO (dst) != FPUL_REG))
            {
              found_hi += 2;
              /* We put the short constants before the long constants, so
                 we must count the length of short constants in the range
                 for the long constants.  */
              /* ??? This isn't optimal, but is easy to do.  */
              si_limit -= 2;
            }
          else
            {
              /* We dump DF/DI constants before SF/SI ones, because
                 the limit is the same, but the alignment requirements
                 are higher.  We may waste up to 4 additional bytes
                 for alignment, and the DF/DI constant may have
                 another SF/SI constant placed before it.  */
              while (si_align > 2 && found_si + si_align - 2 > count_si)
                si_align >>= 1;
              if (found_si > count_si)
                count_si = found_si;
              found_si += GET_MODE_SIZE (mode);
              if (num_mova)
                si_limit -= GET_MODE_SIZE (mode);
            }
        }

      if (mova_p (from))
        {
          switch (untangle_mova (&num_mova, &mova, from))
            {
            case 1:
              if (flag_pic)
                {
                  rtx src = SET_SRC (PATTERN (from));
                  if (GET_CODE (src) == CONST
                      && GET_CODE (XEXP (src, 0)) == UNSPEC
                      && XINT (XEXP (src, 0), 1) == UNSPEC_SYMOFF)
                    last_symoff = from;
                }
              break;
            case 0:
              return find_barrier (0, 0, mova);
            case 2:
              {
                leading_mova = 0;
                barrier_before_mova
                  = good_barrier ? good_barrier : found_barrier;
              }
            default:
              break;
            }
          if (found_si > count_si)
            count_si = found_si;
        }
      else if (JUMP_TABLE_DATA_P (from)
               && GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC)
        {
          if ((num_mova > 1 && GET_MODE (prev_nonnote_insn (from)) == VOIDmode)
              || (num_mova
                  && (prev_nonnote_insn (from)
                      == XEXP (MOVA_LABELREF (mova), 0))))
            num_mova--;
          if (barrier_align (next_real_insn (from)) == align_jumps_log)
            {
              /* We have just passed the barrier in front of the
                 ADDR_DIFF_VEC, which is stored in found_barrier.  Since
                 the ADDR_DIFF_VEC is accessed as data, just like our pool
                 constants, this is a good opportunity to accommodate what
                 we have gathered so far.
                 If we waited any longer, we could end up at a barrier in
                 front of code, which gives worse cache usage for separated
                 instruction / data caches.  */
              good_barrier = found_barrier;
              break;
            }
          else
            {
              rtx body = PATTERN (from);
              inc = XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body));
            }
        }
      /* For the SH1, we generate alignments even after jumps-around-jumps.  */
      else if (JUMP_P (from)
               && ! TARGET_SH2
               && ! optimize_size)
        new_align = 4;

      /* There is a possibility that a bf is transformed into a bf/s by the
         delay slot scheduler.  */
      if (JUMP_P (from)
          && get_attr_type (from) == TYPE_CBRANCH
          && ! sequence_insn_p (from))
        inc += 2;

      if (found_si)
        {
          count_si += inc;
          if (new_align > si_align)
            {
              si_limit -= (count_si - 1) & (new_align - si_align);
              si_align = new_align;
            }
          count_si = (count_si + new_align - 1) & -new_align;
        }
      if (found_hi)
        {
          count_hi += inc;
          if (new_align > hi_align)
            {
              hi_limit -= (count_hi - 1) & (new_align - hi_align);
              hi_align = new_align;
            }
          count_hi = (count_hi + new_align - 1) & -new_align;
        }
      from = NEXT_INSN (from);
    }

  if (num_mova)
    {
      if (leading_mova)
        {
          /* Try as we might, the leading mova is out of range.  Change
             it into a load (which will become a pcload) and retry.  */
          fixup_mova (mova);
          return find_barrier (0, 0, mova);
        }
      else
        {
          /* Insert the constant pool table before the mova instruction,
             to prevent the mova label reference from going out of range.  */
          from = mova;
          good_barrier = found_barrier = barrier_before_mova;
        }
    }

  if (found_barrier)
    {
      if (good_barrier && next_real_insn (found_barrier))
        found_barrier = good_barrier;
    }
  else
    {
      /* We didn't find a barrier in time to dump our stuff,
         so we'll make one.  */
      rtx_code_label *label = gen_label_rtx ();

      /* Don't emit a constant table in the middle of insns for
         casesi_worker_2.  This is a bit overkill but is enough
         because casesi_worker_2 wouldn't appear so frequently.  */
      if (last_symoff)
        from = last_symoff;

      /* If we exceeded the range, then we must back up over the last
         instruction we looked at.  Otherwise, we just need to undo the
         NEXT_INSN at the end of the loop.  */
      if (PREV_INSN (from) != orig
          && (count_hi > hi_limit || count_si > si_limit))
        from = PREV_INSN (PREV_INSN (from));
      else
        from = PREV_INSN (from);

      /* Don't emit a constant table in the middle of global pointer setting,
         since that would move the addressing base GOT into another table.
         We need the first mov instruction before the _GLOBAL_OFFSET_TABLE_
         in the pool anyway, so just move up the whole constant pool.

         However, avoid doing so when the last single GOT mov is the starting
         insn itself.  Going past above the start insn would create a negative
         offset, causing errors.  */
      if (last_got && last_got != orig)
        from = PREV_INSN (last_got);

      /* Don't insert the constant pool table at the position which
         may be the landing pad.  */
      else if (flag_exceptions
               && CALL_P (from)
               && find_reg_note (from, REG_EH_REGION, NULL_RTX))
        from = PREV_INSN (from);

      /* Walk back to be just before any jump or label.
         Putting it before a label reduces the number of times the branch
         around the constant pool table will be hit.  Putting it before
         a jump makes it more likely that the bra delay slot will be
         filled.  */
      while (NOTE_P (from) || JUMP_P (from)
             || LABEL_P (from))
        from = PREV_INSN (from);

      /* Make sure we do not split between a call and its corresponding
         CALL_ARG_LOCATION note.  */
      if (CALL_P (from))
        {
          rtx_insn *next = NEXT_INSN (from);
          if (next && NOTE_P (next)
              && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
            from = next;
        }

      from = emit_jump_insn_after (gen_jump (label), from);
      JUMP_LABEL (from) = label;
      LABEL_NUSES (label) = 1;
      found_barrier = emit_barrier_after (from);
      emit_label_after (label, found_barrier);
    }

  return found_barrier;
}
/* If the instruction INSN is implemented by a special function, and we can
   positively find the register that is used to call the sfunc, and this
   register is not used anywhere else in this instruction - except as the
   destination of a set, return this register; else, return 0.  */
static rtx
sfunc_uses_reg (rtx_insn *insn)
{
  int i;
  rtx pattern, part, reg_part, reg;

  if (!NONJUMP_INSN_P (insn))
    return NULL_RTX;
  pattern = PATTERN (insn);
  if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
    return NULL_RTX;

  for (reg_part = NULL_RTX, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
    {
      part = XVECEXP (pattern, 0, i);
      if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
        reg_part = part;
    }
  if (! reg_part)
    return NULL_RTX;
  reg = XEXP (reg_part, 0);
  for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
    {
      part = XVECEXP (pattern, 0, i);
      if (part == reg_part || GET_CODE (part) == CLOBBER)
        continue;
      if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
                                  && REG_P (SET_DEST (part)))
                                 ? SET_SRC (part) : part)))
        return NULL_RTX;
    }
  return reg;
}
/* See if the only way in which INSN uses REG is by calling it, or by
   setting it while calling it.  Set *SET to a SET rtx if the register
   is set by INSN.  */
static bool
noncall_uses_reg (rtx reg, rtx_insn *insn, rtx *set)
{
  rtx pattern, reg2;

  *set = NULL_RTX;

  reg2 = sfunc_uses_reg (insn);
  if (reg2 && REGNO (reg2) == REGNO (reg))
    {
      pattern = single_set (insn);
      if (pattern
          && REG_P (SET_DEST (pattern))
          && REGNO (reg) == REGNO (SET_DEST (pattern)))
        *set = pattern;
      return false;
    }
  if (!CALL_P (insn))
    {
      /* We don't use rtx_equal_p because we don't care if the mode is
         different.  */
      pattern = single_set (insn);
      if (pattern
          && REG_P (SET_DEST (pattern))
          && REGNO (reg) == REGNO (SET_DEST (pattern)))
        {
          rtx par, part;
          int i;

          *set = pattern;
          par = PATTERN (insn);
          if (GET_CODE (par) == PARALLEL)
            for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
              {
                part = XVECEXP (par, 0, i);
                if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
                  return true;
              }
          return reg_mentioned_p (reg, SET_SRC (pattern));
        }

      return true;
    }

  pattern = PATTERN (insn);

  if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
        if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
          return true;
      pattern = XVECEXP (pattern, 0, 0);
    }

  if (GET_CODE (pattern) == SET)
    {
      if (reg_mentioned_p (reg, SET_DEST (pattern)))
        {
          /* We don't use rtx_equal_p, because we don't care if the
             mode is different.  */
          if (!REG_P (SET_DEST (pattern))
              || REGNO (reg) != REGNO (SET_DEST (pattern)))
            return true;

          *set = pattern;
        }

      pattern = SET_SRC (pattern);
    }

  if (GET_CODE (pattern) != CALL
      || !MEM_P (XEXP (pattern, 0))
      || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
    return true;

  return false;
}
/* Given X, a pattern of an insn or a part of it, return a mask of used
   general registers.  Bits 0..15 mean that the respective registers
   are used as inputs in the instruction.  Bits 16..31 mean that the
   registers 0..15, respectively, are used as outputs, or are clobbered.
   IS_DEST should be set to 16 if X is the destination of a SET, else to 0.  */
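/* For example, (set (reg:SI 1) (plus:SI (reg:SI 1) (reg:SI 4)))
   yields 0x20012: bits 1 and 4 because r1 and r4 are inputs, and
   bit 17 because r1 is also written.  (Illustrative only.)  */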
static int
regs_used (rtx x, int is_dest)
{
  enum rtx_code code;
  const char *fmt;
  int used = 0;
  int i;

  if (! x)
    return used;
  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      if (REGNO (x) < 16)
        return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
                << (REGNO (x) + is_dest));
      return 0;
    case SUBREG:
      {
        rtx y = SUBREG_REG (x);

        if (!REG_P (y))
          break;
        if (REGNO (y) < 16)
          return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
                  << (REGNO (y)
                      + subreg_regno_offset (REGNO (y),
                                             GET_MODE (y),
                                             SUBREG_BYTE (x),
                                             GET_MODE (x)) + is_dest));
        return 0;
      }
    case SET:
      return regs_used (SET_SRC (x), 0) | regs_used (SET_DEST (x), 16);
    case CALL:
      /* If there was a return value, it must have been indicated with USE.  */
      return 0x00ffff00;
    case CLOBBER:
      is_dest = 1;
      break;
    case MEM:
      is_dest = 0;
      break;
    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            used |= regs_used (XVECEXP (x, i, j), is_dest);
        }
      else if (fmt[i] == 'e')
        used |= regs_used (XEXP (x, i), is_dest);
    }
  return used;
}
/* Create an instruction that prevents redirection of a conditional branch
   to the destination of the JUMP with address ADDR.
   If the branch needs to be implemented as an indirect jump, try to find
   a scratch register for it.
   If NEED_BLOCK is 0, don't do anything unless we need a scratch register.
   If any preceding insn that doesn't fit into a delay slot is good enough,
   pass 1.  Pass 2 if a definite blocking insn is needed.
   -1 is used internally to avoid deep recursion.
   If a blocking instruction is made or recognized, return it.  */
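/* E.g. gen_far_branch below passes NEED_BLOCK = 2, as in
   gen_block_redirect (jump, bp->address += 2, 2), to force a definite
   blocking insn in front of a newly split far branch.  */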
static rtx_insn *
gen_block_redirect (rtx_insn *jump, int addr, int need_block)
{
  int dead = 0;
  rtx_insn *prev = prev_nonnote_insn (jump);
  rtx dest;

  /* First, check if we already have an instruction that satisfies our need.  */
  if (prev && NONJUMP_INSN_P (prev) && ! prev->deleted ())
    {
      if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
        return prev;
      if (GET_CODE (PATTERN (prev)) == USE
          || GET_CODE (PATTERN (prev)) == CLOBBER
          || get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
        prev = jump;
      else if ((need_block &= ~1) < 0)
        return prev;
      else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
        need_block = 0;
    }
  if (GET_CODE (PATTERN (jump)) == RETURN)
    {
      if (! need_block)
        return prev;
      /* Reorg even does nasty things with return insns that cause branches
         to go out of range - see find_end_label and callers.  */
      return emit_insn_before (gen_block_branch_redirect (const0_rtx), jump);
    }
  /* We can't use JUMP_LABEL here because it might be undefined
     when not optimizing.  */
  dest = XEXP (SET_SRC (PATTERN (jump)), 0);
  /* If the branch is out of range, try to find a scratch register for it.  */
  if (optimize
      && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
          > 4092 + 4098))
    {
      rtx_insn *scan;
      /* Don't look for the stack pointer as a scratch register,
         it would cause trouble if an interrupt occurred.  */
      unsigned attempt = 0x7fff, used;
      int jump_left = flag_expensive_optimizations + 1;

      /* It is likely that the most recent eligible instruction is wanted for
         the delay slot.  Therefore, find out which registers it uses, and
         try to avoid using them.  */
      for (scan = jump; (scan = PREV_INSN (scan)); )
        {
          enum rtx_code code;

          if (scan->deleted ())
            continue;
          code = GET_CODE (scan);
          if (code == CODE_LABEL || code == JUMP_INSN)
            break;
          if (code == INSN
              && GET_CODE (PATTERN (scan)) != USE
              && GET_CODE (PATTERN (scan)) != CLOBBER
              && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
            {
              attempt &= ~regs_used (PATTERN (scan), 0);
              break;
            }
        }
      for (used = dead = 0, scan = JUMP_LABEL_AS_INSN (jump);
           (scan = NEXT_INSN (scan)); )
        {
          enum rtx_code code;

          if (scan->deleted ())
            continue;
          code = GET_CODE (scan);
          if (INSN_P (scan))
            {
              used |= regs_used (PATTERN (scan), 0);
              if (code == CALL_INSN)
                used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0);
              dead |= (used >> 16) & ~used;
              if (dead & attempt)
                {
                  dead &= attempt;
                  break;
                }
              if (code == JUMP_INSN)
                {
                  if (jump_left-- && simplejump_p (scan))
                    scan = JUMP_LABEL_AS_INSN (scan);
                  else
                    break;
                }
            }
        }
      /* Mask out the stack pointer again, in case it was
         the only 'free' register we have found.  */
      dead &= 0x7fff;
    }
  /* If the immediate destination is still in range, check for possible
     threading with a jump beyond the delay slot insn.
     Don't check if we are called recursively; the jump has been or will be
     checked in a different invocation then.  */
  else if (optimize && need_block >= 0)
    {
      rtx_insn *next = next_active_insn (next_active_insn (dest));
      if (next && JUMP_P (next)
          && GET_CODE (PATTERN (next)) == SET
          && recog_memoized (next) == CODE_FOR_jump_compact)
        {
          dest = JUMP_LABEL (next);
          if (dest
              && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
                  > 4092 + 4098))
            gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), -1);
        }
    }

  if (dead)
    {
      rtx reg = gen_rtx_REG (SImode, exact_log2 (dead & -dead));

      /* It would be nice if we could convert the jump into an indirect
         jump / far branch right now, and thus exposing all constituent
         instructions to further optimization.  However, reorg uses
         simplejump_p to determine if there is an unconditional jump where
         it should try to schedule instructions from the target of the
         branch; simplejump_p fails for indirect jumps even if they have
         a JUMP_LABEL.  */
      rtx_insn *insn = emit_insn_before (gen_indirect_jump_scratch
                                         (reg, GEN_INT (unspec_bbr_uid++)),
                                         jump);
      /* ??? We would like this to have the scope of the jump, but that
         scope will change when a delay slot insn of an inner scope is added.
         Hence, after delay slot scheduling, we'll have to expect
         NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
         the jump.  */

      INSN_LOCATION (insn) = INSN_LOCATION (jump);
      INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
      return insn;
    }
  else if (need_block)
    /* We can't use JUMP_LABEL here because it might be undefined
       when not optimizing.  */
    return emit_insn_before (gen_block_branch_redirect
                             (GEN_INT (unspec_bbr_uid++)),
                             jump);
  return prev;
}
#define CONDJUMP_MIN -252
#define CONDJUMP_MAX 262

struct far_branch
{
  /* A label (to be placed) in front of the jump
     that jumps to our ultimate destination.  */
  rtx_insn *near_label;
  /* Where we are going to insert it if we cannot move the jump any farther,
     or the jump itself if we have picked up an existing jump.  */
  rtx_insn *insert_place;
  /* The ultimate destination.  */
  rtx_insn *far_label;
  struct far_branch *prev;
  /* If the branch has already been created, its address;
     else the address of its first prospective user.  */
  int address;
};

static void gen_far_branch (struct far_branch *);
enum mdep_reorg_phase_e mdep_reorg_phase;
static void
gen_far_branch (struct far_branch *bp)
{
  rtx_insn *insn = bp->insert_place;
  rtx_jump_insn *jump;
  rtx_code_label *label = gen_label_rtx ();
  bool ok;

  emit_label_after (label, insn);
  if (bp->far_label)
    {
      jump = emit_jump_insn_after (gen_jump (bp->far_label), insn);
      LABEL_NUSES (bp->far_label)++;
    }
  else
    jump = emit_jump_insn_after (gen_return (), insn);

  /* Emit a barrier so that reorg knows that any following instructions
     are not reachable via a fall-through path.
     But don't do this when not optimizing, since we wouldn't suppress the
     alignment for the barrier then, and could end up with out-of-range
     pc-relative loads.  */
  if (optimize)
    emit_barrier_after (jump);
  emit_label_after (bp->near_label, insn);

  if (bp->far_label)
    JUMP_LABEL (jump) = bp->far_label;
  else
    {
      rtx pat = PATTERN (jump);
      gcc_assert (ANY_RETURN_P (pat));
      JUMP_LABEL (jump) = pat;
    }

  ok = invert_jump (as_a <rtx_jump_insn *> (insn), label, 1);
  gcc_assert (ok);

  /* If we are branching around a jump (rather than a return), prevent
     reorg from using an insn from the jump target as the delay slot insn -
     when reorg did this, it pessimized code (we rather hide the delay slot)
     and it could cause branches to go out of range.  */
  if (bp->far_label)
    (emit_insn_after
     (gen_stuff_delay_slot
      (GEN_INT (unspec_bbr_uid++),
       GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
      insn));
  /* Prevent reorg from undoing our splits.  */
  gen_block_redirect (jump, bp->address += 2, 2);
}
/* Fix up ADDR_DIFF_VECs.  */
void
fixup_addr_diff_vecs (rtx_insn *first)
{
  rtx_insn *insn;

  for (insn = first; insn; insn = NEXT_INSN (insn))
    {
      rtx vec_lab, pat, prevpat, x, braf_label;
      rtx_insn *prev;

      if (! JUMP_TABLE_DATA_P (insn)
          || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
        continue;
      pat = PATTERN (insn);
      vec_lab = XEXP (XEXP (pat, 0), 0);

      /* Search the matching casesi_jump_2.  */
      for (prev = as_a <rtx_insn *> (vec_lab); ; prev = PREV_INSN (prev))
        {
          if (!JUMP_P (prev))
            continue;
          prevpat = PATTERN (prev);
          if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
            continue;
          x = XVECEXP (prevpat, 0, 1);
          if (GET_CODE (x) != USE)
            continue;
          x = XEXP (x, 0);
          if (GET_CODE (x) == LABEL_REF && XEXP (x, 0) == vec_lab)
            break;
        }
      /* FIXME: This is a bug in the optimizer, but it seems harmless
         to just avoid panicking.  */
      if (!prev)
        continue;

      /* Emit the reference label of the braf where it belongs, right after
         the casesi_jump_2 (i.e. braf).  */
      braf_label = XEXP (XEXP (SET_SRC (XVECEXP (prevpat, 0, 0)), 1), 0);
      emit_label_after (braf_label, prev);

      /* Fix up the ADDR_DIFF_VEC to be relative
         to the reference address of the braf.  */
      XEXP (XEXP (pat, 0), 0) = braf_label;
    }
}
/* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
   a barrier.  Return the base 2 logarithm of the desired alignment.  */
int
barrier_align (rtx_insn *barrier_or_label)
{
  rtx_insn *next;
  rtx pat;

  if (! barrier_or_label)
    return 0;

  if (LABEL_P (barrier_or_label)
      && NEXT_INSN (barrier_or_label)
      && JUMP_TABLE_DATA_P (NEXT_INSN (barrier_or_label)))
    return 2;

  if (BARRIER_P (barrier_or_label)
      && PREV_INSN (barrier_or_label)
      && JUMP_TABLE_DATA_P (PREV_INSN (barrier_or_label)))
    {
      pat = PATTERN (PREV_INSN (barrier_or_label));
      /* If this is a very small table, we want to keep the alignment after
         the table to the minimum for proper code alignment.  */
      return ((optimize_size
               || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
                   <= (unsigned) 1 << (CACHE_LOG - 2)))
              ? 1 : align_jumps_log);
    }

  next = next_active_insn (barrier_or_label);

  if (! next)
    return 0;

  pat = PATTERN (next);

  if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == UNSPECV_ALIGN)
    /* This is a barrier in front of a constant table.  */
    return 0;

  if (optimize_size)
    return 0;

  if (! TARGET_SH2 || ! optimize)
    return align_jumps_log;

  /* When fixing up pcloads, a constant table might be inserted just before
     the basic block that ends with the barrier.  Thus, we can't trust the
     instruction lengths before that.  */
  if (mdep_reorg_phase > SH_FIXUP_PCLOAD)
    {
      /* Check if there is an immediately preceding branch to the insn beyond
         the barrier.  We must weight the cost of discarding useful information
         from the current cache line when executing this branch and there is
         an alignment, against that of fetching unneeded insn in front of the
         branch target when there is no alignment.  */

      /* There are two delay_slot cases to consider.  One is the simple case
         where the preceding branch is to the insn beyond the barrier (simple
         delay slot filling), and the other is where the preceding branch has
         a delay slot that is a duplicate of the insn after the barrier
         (fill_eager_delay_slots) and the branch is to the insn after the insn
         after the barrier.  */

      int slot, credit;
      bool jump_to_next = false;

      /* Skip to the insn before the JUMP_INSN before the barrier under
         investigation.  */
      rtx_insn *prev = prev_real_insn (prev_active_insn (barrier_or_label));

      for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
           credit >= 0 && prev && NONJUMP_INSN_P (prev);
           prev = prev_real_insn (prev))
        {
          jump_to_next = false;
          if (GET_CODE (PATTERN (prev)) == USE
              || GET_CODE (PATTERN (prev)) == CLOBBER)
            continue;
          if (rtx_sequence *prev_seq = dyn_cast <rtx_sequence *> (PATTERN (prev)))
            {
              prev = prev_seq->insn (1);
              if (INSN_UID (prev) == INSN_UID (next))
                {
                  /* Delay slot was filled with insn at jump target.  */
                  jump_to_next = true;
                  continue;
                }
            }

          if (slot
              && get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
            slot = 0;
          credit -= get_attr_length (prev);
        }
      if (prev && jump_to_label_p (prev))
        {
          rtx_insn *x;
          if (jump_to_next
              || next_real_insn (JUMP_LABEL (prev)) == next
              /* If relax_delay_slots() decides NEXT was redundant
                 with some previous instruction, it will have
                 redirected PREV's jump to the following insn.  */
              || JUMP_LABEL (prev) == next_nonnote_insn (next)
              /* There is no upper bound on redundant instructions
                 that might have been skipped, but we must not put an
                 alignment where none had been before.  */
              || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
                  (INSN_P (x)
                   && (INSN_CODE (x) == CODE_FOR_block_branch_redirect
                       || INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
                       || INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
            {
              rtx pat = PATTERN (prev);
              if (GET_CODE (pat) == PARALLEL)
                pat = XVECEXP (pat, 0, 0);
              if (credit - slot >= (GET_CODE (SET_SRC (pat)) == PC ? 2 : 0))
                return 0;
            }
        }
    }

  return align_jumps_log;
}
/* If we are inside a phony loop, almost any kind of label can turn up as the
   first one in the loop.  Aligning a braf label causes incorrect switch
   destination addresses; we can detect braf labels because they are
   followed by a BARRIER.
   Applying loop alignment to small constant or switch tables is a waste
   of space, so we suppress this too.  */
int
sh_loop_align (rtx_insn *label)
{
  rtx_insn *next = label;

  if (! optimize || optimize_size)
    return 0;

  do
    next = next_nonnote_insn (next);
  while (next && LABEL_P (next));

  if (! next
      || ! INSN_P (next)
      || recog_memoized (next) == CODE_FOR_consttable_2)
    return 0;

  return align_loops_log;
}
/* Do a final pass over the function, just before delayed branch
   scheduling.  */
static void
sh_reorg (void)
{
  rtx_insn *first, *insn, *mova = NULL;
  int num_mova;
  rtx r0_rtx = gen_rtx_REG (Pmode, 0);
  rtx r0_inc_rtx = gen_rtx_POST_INC (Pmode, r0_rtx);

  first = get_insns ();
  max_labelno_before_reorg = max_label_num ();

  /* We must split call insns before introducing `mova's.  If we're
     optimizing, they'll have already been split.  Otherwise, make
     sure we don't split them too late.  */
  if (! optimize)
    split_all_insns_noflow ();

  /* If relaxing, generate pseudo-ops to associate function calls with
     the symbols they call.  It does no harm to not generate these
     pseudo-ops.  However, when we can generate them, it enables the
     linker to potentially relax the jsr to a bsr, and eliminate the
     register load and, possibly, the constant pool entry.  */

  mdep_reorg_phase = SH_INSERT_USES_LABELS;
  if (TARGET_RELAX)
    {
      /* Remove all REG_LABEL_OPERAND notes.  We want to use them for our
         own purposes.  This works because none of the remaining passes
         need to look at them.

         ??? But it may break in the future.  We should use a machine
         dependent REG_NOTE, or some other approach entirely.  */
      for (insn = first; insn; insn = NEXT_INSN (insn))
        {
          if (INSN_P (insn))
            {
              rtx note;

              while ((note = find_reg_note (insn, REG_LABEL_OPERAND,
                                            NULL_RTX)) != 0)
                remove_note (insn, note);
            }
        }

      for (insn = first; insn; insn = NEXT_INSN (insn))
        {
          rtx pattern, reg, set, dies;
          rtx_code_label *label;
          rtx_insn *link, *scan;
          int rescan = 0, foundinsn = 0;

          if (CALL_P (insn))
            {
              pattern = PATTERN (insn);

              if (GET_CODE (pattern) == PARALLEL)
                pattern = XVECEXP (pattern, 0, 0);
              if (GET_CODE (pattern) == SET)
                pattern = SET_SRC (pattern);

              if (GET_CODE (pattern) != CALL
                  || !MEM_P (XEXP (pattern, 0)))
                continue;

              reg = XEXP (XEXP (pattern, 0), 0);
            }
          else
            {
              reg = sfunc_uses_reg (insn);
              if (! reg)
                continue;
            }

          if (! REG_P (reg))
            continue;

          /* Try scanning backward to find where the register is set.  */
          link = NULL;
          for (scan = PREV_INSN (insn);
               scan && !LABEL_P (scan);
               scan = PREV_INSN (scan))
            {
              if (! INSN_P (scan))
                continue;

              if (! reg_mentioned_p (reg, scan))
                continue;

              if (noncall_uses_reg (reg, scan, &set))
                break;

              if (set)
                {
                  link = scan;
                  break;
                }
            }

          if (! link)
            continue;

          /* The register is set at LINK.  */

          /* We can only optimize the function call if the register is
             being set to a symbol.  In theory, we could sometimes
             optimize calls to a constant location, but the assembler
             and linker do not support that at present.  */
          if (GET_CODE (SET_SRC (set)) != SYMBOL_REF
              && GET_CODE (SET_SRC (set)) != LABEL_REF)
            continue;

          /* Scan forward from LINK to the place where REG dies, and
             make sure that the only insns which use REG are
             themselves function calls.  */

          /* ??? This doesn't work for call targets that were allocated
             by reload, since there may not be a REG_DEAD note for the
             register.  */

          dies = NULL_RTX;
          for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
            {
              rtx scanset;

              /* Don't try to trace forward past a CODE_LABEL if we haven't
                 seen INSN yet.  Ordinarily, we will only find the setting insn
                 if it is in the same basic block.  However,
                 cross-jumping can insert code labels in between the load and
                 the call, and can result in situations where a single call
                 insn may have two targets depending on where we came from.  */

              if (LABEL_P (scan) && ! foundinsn)
                break;

              if (! INSN_P (scan))
                continue;

              /* Don't try to trace forward past a JUMP.  To optimize
                 safely, we would have to check that all the
                 instructions at the jump destination did not use REG.  */

              if (JUMP_P (scan))
                break;

              if (! reg_mentioned_p (reg, scan))
                continue;

              if (noncall_uses_reg (reg, scan, &scanset))
                break;

              if (scan == insn)
                foundinsn = 1;

              if (scan != insn
                  && (CALL_P (scan) || sfunc_uses_reg (scan)))
                {
                  /* There is a function call to this register other
                     than the one we are checking.  If we optimize
                     this call, we need to rescan again below.  */
                  rescan = 1;
                }

              /* ??? We shouldn't have to worry about SCANSET here.
                 We should just be able to check for a REG_DEAD note
                 on a function call.  However, the REG_DEAD notes are
                 apparently not dependable around libcalls; c-torture
                 execute/920501-2 is a test case.  If SCANSET is set,
                 then this insn sets the register, so it must have
                 died earlier.  Unfortunately, this will only handle
                 the cases in which the register is, in fact, set in a
                 later insn.  */

              /* ??? We shouldn't have to use FOUNDINSN here.
                 This dates back to when we used LOG_LINKS to find
                 the most recent insn which sets the register.  */

              if (foundinsn
                  && (scanset
                      || find_reg_note (scan, REG_DEAD, reg)))
                {
                  dies = scan;
                  break;
                }
            }

          if (! dies)
            {
              /* Either there was a branch, or some insn used REG
                 other than as a function call address.  */
              continue;
            }

          /* Create a code label, and put it in a REG_LABEL_OPERAND note
             on the insn which sets the register, and on each call insn
             which uses the register.  In final_prescan_insn we look for
             the REG_LABEL_OPERAND notes, and output the appropriate label
             or pseudo-op.  */

          label = gen_label_rtx ();
          add_reg_note (link, REG_LABEL_OPERAND, label);
          add_reg_note (insn, REG_LABEL_OPERAND, label);
          if (rescan)
            {
              scan = link;
              do
                {
                  rtx reg2;

                  scan = NEXT_INSN (scan);
                  if (scan != insn
                      && ((CALL_P (scan)
                           && reg_mentioned_p (reg, scan))
                          || ((reg2 = sfunc_uses_reg (scan))
                              && REGNO (reg2) == REGNO (reg))))
                    add_reg_note (scan, REG_LABEL_OPERAND, label);
                }
              while (scan != dies);
            }
        }
    }

  if (TARGET_SH2)
    fixup_addr_diff_vecs (first);

  if (optimize)
    {
      mdep_reorg_phase = SH_SHORTEN_BRANCHES0;
      shorten_branches (first);
    }

  /* Scan the function looking for move instructions which have to be
     changed to pc-relative loads and insert the literal tables.  */
  mdep_reorg_phase = SH_FIXUP_PCLOAD;
  for (insn = first, num_mova = 0; insn; insn = NEXT_INSN (insn))
    {
      if (mova_p (insn))
        {
          /* ??? basic block reordering can move a switch table dispatch
             below the switch table.  Check if that has happened.
             We only have the addresses available when optimizing; but then,
             this check shouldn't be needed when not optimizing.  */
          if (!untangle_mova (&num_mova, &mova, insn))
            {
              insn = mova;
              num_mova = 0;
            }
        }
      else if (JUMP_TABLE_DATA_P (insn)
               && GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
               && num_mova
               /* ??? loop invariant motion can also move a mova out of a
                  loop.  Since loop does this code motion anyway, maybe we
                  should wrap UNSPEC_MOVA into a CONST, so that reload can
                  move it back.  */
               && ((num_mova > 1
                    && GET_MODE (prev_nonnote_insn (insn)) == VOIDmode)
                   || (prev_nonnote_insn (insn)
                       == XEXP (MOVA_LABELREF (mova), 0))))
        {
          rtx_insn *scan;
          int total;

          num_mova--;

          /* Some code might have been inserted between the mova and
             its ADDR_DIFF_VEC.  Check if the mova is still in range.  */
          for (scan = mova, total = 0; scan != insn; scan = NEXT_INSN (scan))
            total += get_attr_length (scan);

          /* range of mova is 1020, add 4 because pc counts from address of
             second instruction after this one, subtract 2 in case pc is 2
             byte aligned.  Possible alignment needed for the ADDR_DIFF_VEC
             cancels out with alignment effects of the mova itself.  */
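          /* I.e. 1020 + 4 - 2 = 1022, the same bound that untangle_mova
             checks against above.  */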
          if (total > 1022)
            {
              /* Change the mova into a load, and restart scanning
                 there.  broken_move will then return true for mova.  */
              fixup_mova (mova);
              insn = mova;
            }
        }
      if (broken_move (insn)
          || (NONJUMP_INSN_P (insn)
              && recog_memoized (insn) == CODE_FOR_casesi_worker_2))
        {
          rtx_insn *scan;
          /* Scan ahead looking for a barrier to stick the constant table
             behind.  */
          rtx_insn *barrier = find_barrier (num_mova, mova, insn);
          rtx_insn *last_float_move = NULL;
          rtx last_float = 0, *last_float_addr = NULL;
          int need_aligned_label = 0;

          if (num_mova && ! mova_p (mova))
            {
              /* find_barrier had to change the first mova into a
                 pcload; thus, we have to start with this new pcload.  */
              insn = mova;
              num_mova = 0;
            }
          /* Now find all the moves between the points and modify them.  */
          for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
            {
              if (LABEL_P (scan))
                last_float = 0;
              if (NONJUMP_INSN_P (scan)
                  && recog_memoized (scan) == CODE_FOR_casesi_worker_2)
                need_aligned_label = 1;
              if (broken_move (scan))
                {
                  rtx *patp = &PATTERN (scan), pat = *patp;
                  rtx src, dst;
                  rtx lab;
                  rtx newsrc;
                  machine_mode mode;

                  if (GET_CODE (pat) == PARALLEL)
                    patp = &XVECEXP (pat, 0, 0), pat = *patp;
                  src = SET_SRC (pat);
                  dst = SET_DEST (pat);
                  mode = GET_MODE (dst);

                  if (mode == SImode && satisfies_constraint_I16 (src)
                      && REGNO (dst) != FPUL_REG)
                    {
                      int offset = 0;

                      mode = HImode;
                      while (GET_CODE (dst) == SUBREG)
                        {
                          offset += subreg_regno_offset (REGNO (SUBREG_REG (dst)),
                                                         GET_MODE (SUBREG_REG (dst)),
                                                         SUBREG_BYTE (dst),
                                                         GET_MODE (dst));
                          dst = SUBREG_REG (dst);
                        }
                      dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
                    }
                  if (REG_P (dst) && FP_ANY_REGISTER_P (REGNO (dst)))
                    {
                      /* This must be an insn that clobbers r0.  */
                      rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
                                                XVECLEN (PATTERN (scan), 0)
                                                - 1);
                      rtx clobber = *clobberp;

                      gcc_assert (GET_CODE (clobber) == CLOBBER
                                  && rtx_equal_p (XEXP (clobber, 0), r0_rtx));

                      if (last_float
                          && reg_set_between_p (r0_rtx, last_float_move, scan))
                        last_float = 0;
                      lab = add_constant (src, mode, last_float);
                      if (lab)
                        emit_insn_before (gen_mova (lab), scan);
                      else
                        {
                          /* There will be a REG_UNUSED note for r0 on
                             LAST_FLOAT_MOVE; we have to change it to REG_INC,
                             lest reorg:mark_target_live_regs will not
                             consider r0 to be used, and we end up with delay
                             slot insn in front of SCAN that clobbers r0.  */
                          rtx note
                            = find_regno_note (last_float_move, REG_UNUSED, 0);

                          /* If we are not optimizing, then there may not be
                             a note.  */
                          if (note)
                            PUT_REG_NOTE_KIND (note, REG_INC);

                          *last_float_addr = r0_inc_rtx;
                        }
                      last_float_move = scan;
                      last_float = src;
                      newsrc = gen_const_mem (mode,
                                              (((TARGET_SH4 && ! TARGET_FMOVD)
                                                || REGNO (dst) == FPUL_REG)
                                               ? r0_rtx
                                               : r0_inc_rtx));
                      last_float_addr = &XEXP (newsrc, 0);

                      /* Remove the clobber of r0.  */
                      *clobberp = gen_rtx_CLOBBER (GET_MODE (clobber),
                                                   gen_rtx_SCRATCH (Pmode));
                    }
                  /* This is a mova needing a label.  Create it.  */
                  else if (GET_CODE (src) == UNSPEC
                           && XINT (src, 1) == UNSPEC_MOVA
                           && GET_CODE (XVECEXP (src, 0, 0)) == CONST)
                    {
                      lab = add_constant (XVECEXP (src, 0, 0), mode, 0);
                      newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
                      newsrc = gen_rtx_UNSPEC (SImode,
                                               gen_rtvec (1, newsrc),
                                               UNSPEC_MOVA);
                    }
                  else if (GET_CODE (src) == UNSPEC_VOLATILE
                           && XINT (src, 1) == UNSPECV_SP_SWITCH_B)
                    {
                      newsrc = XVECEXP (src, 0, 0);
                      XVECEXP (src, 0, 0) = gen_const_mem (mode, newsrc);
                      INSN_CODE (scan) = -1;
                      continue;
                    }
                  else
                    {
                      lab = add_constant (src, mode, 0);
                      newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
                      newsrc = gen_const_mem (mode, newsrc);
                    }
                  *patp = gen_rtx_SET (dst, newsrc);
                  INSN_CODE (scan) = -1;
                }
            }
          dump_table (need_aligned_label ? insn : 0, barrier);
          insn = barrier;
        }
    }
  label_ref_list_d_pool.release ();
  for (insn = first; insn; insn = NEXT_INSN (insn))
    PUT_MODE (insn, VOIDmode);

  mdep_reorg_phase = SH_SHORTEN_BRANCHES1;
  INSN_ADDRESSES_FREE ();
  split_branches (first);

  /* The INSN_REFERENCES_ARE_DELAYED in sh.h is problematic because it
     also has an effect on the register that holds the address of the sfunc.
     Insert an extra dummy insn in front of each sfunc that pretends to
     use this register.  */
  if (flag_delayed_branch)
    {
      for (insn = first; insn; insn = NEXT_INSN (insn))
        {
          rtx reg = sfunc_uses_reg (insn);

          if (! reg)
            continue;
          emit_insn_before (gen_use_sfunc_addr (reg), insn);
        }
    }
  mdep_reorg_phase = SH_AFTER_MDEP_REORG;
}
/* Return the UID of the insn that follows the specified label.  */
static int
get_dest_uid (rtx label, int max_uid)
{
  rtx_insn *dest = next_real_insn (label);

  if (! dest)
    /* This can happen for an undefined label.  */
    return 0;
  int dest_uid = INSN_UID (dest);
  /* If this is a newly created branch redirection blocking instruction,
     we cannot index the branch_uid or insn_addresses arrays with its
     uid.  But then, we won't need to, because the actual destination is
     the following branch.  */
  while (dest_uid >= max_uid)
    {
      dest = NEXT_INSN (dest);
      dest_uid = INSN_UID (dest);
    }
  if (JUMP_P (dest) && GET_CODE (PATTERN (dest)) == RETURN)
    return 0;
  return dest_uid;
}
/* Split condbranches that are out of range.  Also add clobbers for
   scratch registers that are needed in far jumps.
   We do this before delay slot scheduling, so that it can take our
   newly created instructions into account.  It also allows us to
   find branches with common targets more easily.  */
static void
split_branches (rtx_insn *first)
{
  rtx_insn *insn;
  struct far_branch **uid_branch, *far_branch_list = 0;
  int max_uid = get_max_uid ();
  int ok;

  /* Find out which branches are out of range.  */
  shorten_branches (first);

  uid_branch = (struct far_branch **) alloca (max_uid * sizeof *uid_branch);
  memset ((char *) uid_branch, 0, max_uid * sizeof *uid_branch);

  for (insn = first; insn; insn = NEXT_INSN (insn))
    if (! INSN_P (insn))
      continue;
    else if (insn->deleted ())
      {
	/* Shorten_branches would split this instruction again,
	   so transform it into a note.  */
	SET_INSN_DELETED (insn);
      }
    else if (JUMP_P (insn))
      {
	enum attr_type type = get_attr_type (insn);
	if (type == TYPE_CBRANCH)
	  {
	    rtx_insn *next, *beyond;

	    if (get_attr_length (insn) > 4)
	      {
		rtx src = SET_SRC (PATTERN (insn));
		rtx olabel = XEXP (XEXP (src, 1), 0);
		int addr = INSN_ADDRESSES (INSN_UID (insn));
		rtx_insn *label = 0;
		int dest_uid = get_dest_uid (olabel, max_uid);
		struct far_branch *bp = uid_branch[dest_uid];

		/* redirect_jump needs a valid JUMP_LABEL, and it might delete
		   the label if the LABEL_NUSES count drops to zero.  There is
		   always a jump_optimize pass that sets these values, but it
		   proceeds to delete unreferenced code, and then if not
		   optimizing, to un-delete the deleted instructions, thus
		   leaving labels with too low uses counts.  */
		if (! optimize)
		  {
		    JUMP_LABEL (insn) = olabel;
		    LABEL_NUSES (olabel)++;
		  }
		if (! bp)
		  {
		    bp = (struct far_branch *) alloca (sizeof *bp);
		    uid_branch[dest_uid] = bp;
		    bp->prev = far_branch_list;
		    far_branch_list = bp;
		    bp->far_label = as_a <rtx_insn *> (
				      XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
					    0));
		    LABEL_NUSES (bp->far_label)++;
		  }
		else
		  {
		    label = bp->near_label;
		    if (! label && bp->address - addr >= CONDJUMP_MIN)
		      {
			rtx_insn *block = bp->insert_place;

			if (GET_CODE (PATTERN (block)) == RETURN)
			  block = PREV_INSN (block);
			else
			  block = gen_block_redirect (block,
						      bp->address, 2);
			label = emit_label_after (gen_label_rtx (),
						  PREV_INSN (block));
			bp->near_label = label;
		      }
		    else if (label && ! NEXT_INSN (label))
		      {
			if (addr + 2 - bp->address <= CONDJUMP_MAX)
			  bp->insert_place = insn;
			else
			  gen_far_branch (bp);
		      }
		  }
		if (! label
		    || (NEXT_INSN (label) && bp->address - addr < CONDJUMP_MIN))
		  {
		    bp->near_label = label = gen_label_rtx ();
		    bp->insert_place = insn;
		    bp->address = addr;
		  }
		ok = redirect_jump (as_a <rtx_jump_insn *> (insn), label, 0);
		gcc_assert (ok);
	      }
	    else
	      {
		/* get_attr_length (insn) == 2 */
		/* Check if we have a pattern where reorg wants to redirect
		   the branch to a label from an unconditional branch that
		   is too far away.  */
		/* We can't use JUMP_LABEL here because it might be undefined
		   when not optimizing.  */
		/* A syntax error might cause beyond to be NULL_RTX.  */
		beyond
		  = next_active_insn (XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
					    0));

		if (beyond
		    && (JUMP_P (beyond)
			|| ((beyond = next_active_insn (beyond))
			    && JUMP_P (beyond)))
		    && GET_CODE (PATTERN (beyond)) == SET
		    && recog_memoized (beyond) == CODE_FOR_jump_compact
		    && ((INSN_ADDRESSES
			 (INSN_UID (XEXP (SET_SRC (PATTERN (beyond)), 0)))
			 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
			> 252 + 258 + 2))
		  gen_block_redirect (beyond,
				      INSN_ADDRESSES (INSN_UID (beyond)), 1);
	      }

	    next = next_active_insn (insn);

	    if (next
		&& (JUMP_P (next)
		    || ((next = next_active_insn (next))
			&& JUMP_P (next)))
		&& GET_CODE (PATTERN (next)) == SET
		&& recog_memoized (next) == CODE_FOR_jump_compact
		&& ((INSN_ADDRESSES
		     (INSN_UID (XEXP (SET_SRC (PATTERN (next)), 0)))
		     - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
		    > 252 + 258 + 2))
	      gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), 1);
	  }
	else if (type == TYPE_JUMP || type == TYPE_RETURN)
	  {
	    int addr = INSN_ADDRESSES (INSN_UID (insn));
	    rtx_insn *far_label = 0;
	    int dest_uid = 0;
	    struct far_branch *bp;

	    if (type == TYPE_JUMP)
	      {
		if (CROSSING_JUMP_P (insn))
		  {
		    emit_insn_before (gen_block_branch_redirect (const0_rtx),
				      insn);
		    continue;
		  }

		far_label = as_a <rtx_insn *> (
			      XEXP (SET_SRC (PATTERN (insn)), 0));
		dest_uid = get_dest_uid (far_label, max_uid);
		if (! dest_uid)
		  {
		    /* Parse errors can lead to labels outside
		       the insn stream.  */
		    if (! NEXT_INSN (far_label))
		      continue;

		    if (! optimize)
		      {
			JUMP_LABEL (insn) = far_label;
			LABEL_NUSES (far_label)++;
		      }
		    redirect_jump (as_a <rtx_jump_insn *> (insn), ret_rtx, 1);
		    far_label = 0;
		  }
	      }
	    bp = uid_branch[dest_uid];
	    if (! bp)
	      {
		bp = (struct far_branch *) alloca (sizeof *bp);
		uid_branch[dest_uid] = bp;
		bp->prev = far_branch_list;
		far_branch_list = bp;
		bp->near_label = 0;
		bp->far_label = far_label;
		if (far_label)
		  LABEL_NUSES (far_label)++;
	      }
	    else if (bp->near_label && ! NEXT_INSN (bp->near_label))
	      if (addr - bp->address <= CONDJUMP_MAX)
		emit_label_after (bp->near_label, PREV_INSN (insn));
	      else
		{
		  gen_far_branch (bp);
		  bp->near_label = 0;
		}
	    else
	      bp->near_label = 0;
	    bp->address = addr;
	    bp->insert_place = insn;
	    if (! far_label)
	      emit_insn_before (gen_block_branch_redirect (const0_rtx), insn);
	    else
	      gen_block_redirect (insn, addr, bp->near_label ? 2 : 0);
	  }
      }
  /* Generate all pending far branches,
     and free our references to the far labels.  */
  while (far_branch_list)
    {
      if (far_branch_list->near_label
	  && ! NEXT_INSN (far_branch_list->near_label))
	gen_far_branch (far_branch_list);
      if (optimize
	  && far_branch_list->far_label
	  && ! --LABEL_NUSES (far_branch_list->far_label))
	delete_insn (far_branch_list->far_label);
      far_branch_list = far_branch_list->prev;
    }

  /* Instruction length information is no longer valid due to the new
     instructions that have been generated.  */
  init_insn_lengths ();
}
/* Dump out instruction addresses, which is useful for debugging the
   constant pool table stuff.

   If relaxing, output the label and pseudo-ops used to link together
   calls and the instruction which set the registers.

   ??? The addresses printed by this routine for insns are nonsense for
   insns which are inside of a sequence where none of the inner insns have
   variable length.  This is because the second pass of shorten_branches
   does not bother to update them.  */
void
final_prescan_insn (rtx_insn *insn, rtx *opvec ATTRIBUTE_UNUSED,
		    int noperands ATTRIBUTE_UNUSED)
{
  if (TARGET_DUMPISIZE)
    fprintf (asm_out_file, "\n! at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));

  if (TARGET_RELAX)
    {
      rtx note;

      note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
      if (note)
	{
	  rtx pattern;

	  pattern = PATTERN (insn);
	  if (GET_CODE (pattern) == PARALLEL)
	    pattern = XVECEXP (pattern, 0, 0);
	  switch (GET_CODE (pattern))
	    {
	    case SET:
	      if (GET_CODE (SET_SRC (pattern)) != CALL
		  && get_attr_type (insn) != TYPE_SFUNC)
		{
		  targetm.asm_out.internal_label
		    (asm_out_file, "L", CODE_LABEL_NUMBER (XEXP (note, 0)));
		  break;
		}
	      /* else FALLTHROUGH */
	    case CALL:
	      asm_fprintf (asm_out_file, "\t.uses %LL%d\n",
			   CODE_LABEL_NUMBER (XEXP (note, 0)));
	      break;

	    default:
	      break;
	    }
	}
    }
}
/* Dump out any constants accumulated in the final pass.  These will
   only be labels.  */
const char *
output_jump_label_table (void)
{
  if (pool_size)
    {
      fprintf (asm_out_file, "\t.align 2\n");
      for (int i = 0; i < pool_size; i++)
	{
	  pool_node *p = &pool_vector[i];

	  (*targetm.asm_out.internal_label) (asm_out_file, "L",
					     CODE_LABEL_NUMBER (p->label));
	  output_asm_insn (".long	%O0", &p->value);
	}
      pool_size = 0;
    }

  return "";
}
/* A full frame looks like:

   [ if current_function_anonymous_args
     ... the pushed anonymous arguments ... ]
   ... the saved registers ...
   local-0	<- fp points here.

   Number of bytes pushed for anonymous args, used to pass information
   between expand_prologue and expand_epilogue.

   Adjust the stack by SIZE bytes.  REG holds the rtl of the register to be
   adjusted.  If epilogue_p is zero, this is for a prologue; otherwise, it's
   for an epilogue and a negative value means that it's for a sibcall
   epilogue.  If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET of
   all the registers that are about to be restored, and hence dead.  */
static void
output_stack_adjust (int size, rtx reg, int epilogue_p,
		     HARD_REG_SET *live_regs_mask, bool frame_p)
{
  rtx_insn *(*emit_fn) (rtx) = frame_p ? &frame_insn : &emit_insn;
  if (size)
    {
      HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;

/* This test is bogus, as output_stack_adjust is used to re-align the
   stack.  */
#if 0
      gcc_assert (!(size % align));
#endif

      if (CONST_OK_FOR_ADD (size))
	emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size)));
      /* Try to do it with two partial adjustments; however, we must make
	 sure that the stack is properly aligned at all times, in case
	 an interrupt occurs between the two partial adjustments.  */
      else if (CONST_OK_FOR_ADD (size / 2 & -align)
	       && CONST_OK_FOR_ADD (size - (size / 2 & -align)))
	{
	  emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size / 2 & -align)));
	  emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size - (size / 2 & -align))));
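	  /* Worked example: with align == 4 and size == 200 (too large for
	     one 8-bit add immediate), the two adds are 200 / 2 & -4 == 100
	     and 200 - 100 == 100; both satisfy CONST_OK_FOR_ADD and every
	     intermediate stack pointer value stays 4-byte aligned.  */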
	}
      else
	{
	  rtx const_reg;
	  rtx insn;
	  int temp = epilogue_p ? 7 : 1;
	  int i;

	  /* If TEMP is invalid, we could temporarily save a general
	     register to MACL.  However, there is currently no need
	     to handle this case, so just die when we see it.  */
	  if (epilogue_p < 0
	      || current_function_interrupt
	      || ! call_really_used_regs[temp] || fixed_regs[temp])
	    temp = -1;
	  if (temp < 0 && ! current_function_interrupt && epilogue_p >= 0)
	    {
	      HARD_REG_SET temps;
	      COPY_HARD_REG_SET (temps, call_used_reg_set);
	      AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
	      if (epilogue_p > 0)
		{
		  int nreg = 0;
		  if (crtl->return_rtx)
		    {
		      machine_mode mode;
		      mode = GET_MODE (crtl->return_rtx);
		      if (BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG)
			nreg = HARD_REGNO_NREGS (FIRST_RET_REG, mode);
		    }
		  for (i = 0; i < nreg; i++)
		    CLEAR_HARD_REG_BIT (temps, FIRST_RET_REG + i);
		  if (crtl->calls_eh_return)
		    {
		      CLEAR_HARD_REG_BIT (temps, EH_RETURN_STACKADJ_REGNO);
		      for (i = 0; i <= 3; i++)
			CLEAR_HARD_REG_BIT (temps, EH_RETURN_DATA_REGNO (i));
		    }
		}
	      if (epilogue_p <= 0)
		{
		  for (i = FIRST_PARM_REG;
		       i < FIRST_PARM_REG + NPARM_REGS (SImode); i++)
		    CLEAR_HARD_REG_BIT (temps, i);
		  if (cfun->static_chain_decl != NULL)
		    CLEAR_HARD_REG_BIT (temps, STATIC_CHAIN_REGNUM);
		}
	      temp = scavenge_reg (&temps);
	    }
	  if (temp < 0 && live_regs_mask)
	    {
	      HARD_REG_SET temps;

	      COPY_HARD_REG_SET (temps, *live_regs_mask);
	      CLEAR_HARD_REG_BIT (temps, REGNO (reg));
	      temp = scavenge_reg (&temps);
	    }
	  if (temp < 0)
	    {
	      rtx adj_reg, tmp_reg, mem;

	      /* If we reached here, the most likely case is the (sibcall)
		 epilogue.  Put a special push/pop sequence for such case as
		 the last resort.  This looks lengthy but would not be problem
		 because it seems to be very rare.  */
	      gcc_assert (epilogue_p);

	      /* ??? There is still the slight possibility that r4 or
		 r5 have been reserved as fixed registers or assigned
		 as global registers, and they change during an
		 interrupt.  There are possible ways to handle this:

		 - If we are adjusting the frame pointer (r14), we can do
		   with a single temp register and an ordinary push / pop
		   on the stack.
		 - Grab any call-used or call-saved registers (i.e. not
		   fixed or globals) for the temps we need.  We might
		   also grab r14 if we are adjusting the stack pointer.
		   If we can't find enough available registers, issue
		   a diagnostic and die - the user must have reserved
		   way too many registers.
		 But since all this is rather unlikely to happen and
		 would require extra testing, we just die if r4 / r5
		 are not available.  */
	      gcc_assert (!fixed_regs[4] && !fixed_regs[5]
			  && !global_regs[4] && !global_regs[5]);

	      adj_reg = gen_rtx_REG (GET_MODE (reg), 4);
	      tmp_reg = gen_rtx_REG (GET_MODE (reg), 5);
	      emit_move_insn (gen_tmp_stack_mem (Pmode, reg), adj_reg);
	      emit_insn (GEN_MOV (adj_reg, GEN_INT (size)));
	      emit_insn (GEN_ADD3 (adj_reg, adj_reg, reg));
	      mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
	      emit_move_insn (mem, tmp_reg);
	      emit_move_insn (tmp_reg, gen_tmp_stack_mem (Pmode, reg));
	      mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
	      emit_move_insn (mem, tmp_reg);
	      emit_move_insn (reg, adj_reg);
	      mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
	      emit_move_insn (adj_reg, mem);
	      mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
	      emit_move_insn (tmp_reg, mem);
	      /* Tell flow the insns that pop r4/r5 aren't dead.  */
	      emit_use (tmp_reg);
	      emit_use (adj_reg);
	      return;
	    }
	  const_reg = gen_rtx_REG (GET_MODE (reg), temp);

	  /* If SIZE is negative, subtract the positive value.
	     This sometimes allows a constant pool entry to be shared
	     between prologue and epilogue code.  */
	  if (size < 0)
	    {
	      emit_insn (GEN_MOV (const_reg, GEN_INT (-size)));
	      insn = emit_fn (GEN_SUB3 (reg, reg, const_reg));
	    }
	  else
	    {
	      emit_insn (GEN_MOV (const_reg, GEN_INT (size)));
	      insn = emit_fn (GEN_ADD3 (reg, reg, const_reg));
	    }
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_SET (reg, gen_rtx_PLUS (SImode, reg,
							GEN_INT (size))));
	}
    }
}
/* Emit the specified insn and mark it as frame related.
   FIXME: Rename this to emit_frame_insn.  */
static rtx_insn *
frame_insn (rtx x)
{
  rtx_insn *insn = emit_insn (x);
  RTX_FRAME_RELATED_P (insn) = 1;
  return insn;
}
/* Output RTL to push register RN onto the stack.  */
static rtx_insn *
push (int rn)
{
  rtx x;
  if (rn == FPUL_REG)
    x = gen_push_fpul ();
  else if (rn == FPSCR_REG)
    x = gen_push_fpscr ();
  else if (TARGET_FPU_DOUBLE && TARGET_FMOVD
	   && ! TARGET_FPU_SINGLE && FP_OR_XD_REGISTER_P (rn))
    {
      if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
	return NULL;
      x = gen_push_4 (gen_rtx_REG (DFmode, rn));
    }
  else if (TARGET_SH2E && FP_REGISTER_P (rn))
    x = gen_push_e (gen_rtx_REG (SFmode, rn));
  else
    x = gen_push (gen_rtx_REG (SImode, rn));

  x = frame_insn (x);
  add_reg_note (x, REG_INC, gen_rtx_REG (SImode, STACK_POINTER_REGNUM));
  return as_a <rtx_insn *> (x);
}
/* Output RTL to pop register RN from the stack.  */
static void
pop (int rn)
{
  rtx x, sp_reg, reg;
  if (rn == FPUL_REG)
    x = gen_pop_fpul ();
  else if (rn == FPSCR_REG)
    x = gen_pop_fpscr ();
  else if (TARGET_FPU_DOUBLE && TARGET_FMOVD
	   && ! TARGET_FPU_SINGLE && FP_OR_XD_REGISTER_P (rn))
    {
      if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
	return;
      x = gen_pop_4 (gen_rtx_REG (DFmode, rn));
    }
  else if (TARGET_SH2E && FP_REGISTER_P (rn))
    x = gen_pop_e (gen_rtx_REG (SFmode, rn));
  else
    x = gen_pop (gen_rtx_REG (SImode, rn));

  x = emit_insn (x);

  sp_reg = gen_rtx_REG (SImode, STACK_POINTER_REGNUM);
  reg = copy_rtx (GET_CODE (PATTERN (x)) == PARALLEL
		  ? SET_DEST (XVECEXP (PATTERN (x), 0, 0))
		  : SET_DEST (PATTERN (x)));
  add_reg_note (x, REG_CFA_RESTORE, reg);
  add_reg_note (x, REG_CFA_ADJUST_CFA,
		gen_rtx_SET (sp_reg,
			     plus_constant (SImode, sp_reg,
					    GET_MODE_SIZE (GET_MODE (reg)))));
  add_reg_note (x, REG_INC, gen_rtx_REG (SImode, STACK_POINTER_REGNUM));
  RTX_FRAME_RELATED_P (x) = 1;
}
/* Generate code to push the regs specified in the mask.  */
static void
push_regs (HARD_REG_SET *mask, int interrupt_handler)
{
  int i = interrupt_handler ? LAST_BANKED_REG + 1 : 0;
  int skip_fpscr = 0;

  /* Push PR last; this gives better latencies after the prologue, and
     candidates for the return delay slot when there are no general
     registers pushed.  */
  for (; i < FIRST_PSEUDO_REGISTER; i++)
    {
      /* If this is an interrupt handler, and the SZ bit varies,
	 and we have to push any floating point register, we need
	 to switch to the correct precision first.  */
      if (i == FIRST_FP_REG && interrupt_handler && TARGET_FMOVD
	  && hard_reg_set_intersect_p (*mask, reg_class_contents[DF_REGS]))
	{
	  HARD_REG_SET unsaved;

	  push (FPSCR_REG);
	  COMPL_HARD_REG_SET (unsaved, *mask);
	  fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
	  skip_fpscr = 1;
	}
      if (i != PR_REG
	  && (i != FPSCR_REG || ! skip_fpscr)
	  && TEST_HARD_REG_BIT (*mask, i))
	{
	  /* If the ISR has RESBANK attribute assigned, don't push any of
	     the following registers - R0-R14, MACH, MACL and GBR.  */
	  if (! (sh_cfun_resbank_handler_p ()
		 && ((i >= FIRST_GENERAL_REG && i < LAST_GENERAL_REG)
		     || i == MACH_REG
		     || i == MACL_REG
		     || i == GBR_REG)))
	    push (i);
	}
    }

  /* Push banked registers last to improve delay slot opportunities.  */
  if (interrupt_handler)
    {
      bool use_movml = false;

      if (TARGET_SH2A)
	{
	  unsigned int count = 0;

	  for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
	    if (TEST_HARD_REG_BIT (*mask, i))
	      count++;
	    else
	      break;

	  /* Use movml when all banked registers are pushed.  */
	  if (count == LAST_BANKED_REG - FIRST_BANKED_REG + 1)
	    use_movml = true;
	}

      if (sh_cfun_resbank_handler_p ())
	; /* Do nothing.  */
      else if (use_movml)
	{
	  rtx x, mem, reg, set;
	  rtx sp_reg = gen_rtx_REG (SImode, STACK_POINTER_REGNUM);

	  /* We must avoid scheduling multiple store insn with another
	     insns.  */
	  emit_insn (gen_blockage ());
	  x = gen_movml_push_banked (sp_reg);
	  x = frame_insn (x);
	  for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
	    {
	      mem = gen_rtx_MEM (SImode, plus_constant (Pmode, sp_reg, i * 4));
	      reg = gen_rtx_REG (SImode, i);
	      add_reg_note (x, REG_CFA_OFFSET, gen_rtx_SET (mem, reg));
	    }

	  set = gen_rtx_SET (sp_reg, plus_constant (Pmode, sp_reg, - 32));
	  add_reg_note (x, REG_CFA_ADJUST_CFA, set);
	  emit_insn (gen_blockage ());
	}
      else
	for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
	  if (TEST_HARD_REG_BIT (*mask, i))
	    push (i);
    }

  /* Don't push PR register for an ISR with RESBANK attribute assigned.  */
  if (TEST_HARD_REG_BIT (*mask, PR_REG) && !sh_cfun_resbank_handler_p ())
    push (PR_REG);
}
/* Work out the registers which need to be saved, both as a mask and a
   count of saved words.  Return the count.

   If doing a pragma interrupt function, then push all regs used by the
   function, and if we call another function (we can tell by looking at PR),
   make sure that all the regs it clobbers are safe too.  */
static int
calc_live_regs (HARD_REG_SET *live_regs_mask)
{
  unsigned int reg;
  int count;
  tree attrs;
  bool interrupt_or_trapa_handler, trapa_handler, interrupt_handler;
  bool nosave_low_regs;
  int pr_live, has_call;

  attrs = DECL_ATTRIBUTES (current_function_decl);
  interrupt_or_trapa_handler = sh_cfun_interrupt_handler_p ();
  trapa_handler = lookup_attribute ("trapa_handler", attrs) != NULL_TREE;
  interrupt_handler = interrupt_or_trapa_handler && ! trapa_handler;
  nosave_low_regs = lookup_attribute ("nosave_low_regs", attrs) != NULL_TREE;

  CLEAR_HARD_REG_SET (*live_regs_mask);
  if (TARGET_FPU_DOUBLE && TARGET_FMOVD && interrupt_handler
      && df_regs_ever_live_p (FPSCR_REG))
    target_flags &= ~MASK_FPU_SINGLE;
  /* If we can save a lot of saves by switching to double mode, do that.  */
  else if (TARGET_FPU_DOUBLE && TARGET_FMOVD && TARGET_FPU_SINGLE)
    for (count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
      if (df_regs_ever_live_p (reg) && df_regs_ever_live_p (reg+1)
	  && (! call_really_used_regs[reg]
	      || interrupt_handler)
	  && ++count > 2)
	{
	  target_flags &= ~MASK_FPU_SINGLE;
	  break;
	}

  {
    rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
    pr_live = (pr_initial
	       ? (!REG_P (pr_initial)
		  || REGNO (pr_initial) != (PR_REG))
	       : df_regs_ever_live_p (PR_REG));
    /* For Shcompact, if not optimizing, we end up with a memory reference
       using the return address pointer for __builtin_return_address even
       though there is no actual need to put the PR register on the stack.  */
    pr_live |= df_regs_ever_live_p (RETURN_ADDRESS_POINTER_REGNUM);
  }
  /* Force PR to be live if the prologue has to call the SHmedia
     argument decoder or register saver.  */
  has_call = pr_live;
  for (count = 0, reg = FIRST_PSEUDO_REGISTER; reg-- != 0; )
    {
      if (reg == PR_REG
	  ? pr_live
	  : interrupt_handler
	  ? (/* Need to save all the regs ever live.  */
	     (df_regs_ever_live_p (reg)
	      || (call_really_used_regs[reg]
		  && (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
		      || reg == PIC_OFFSET_TABLE_REGNUM)
		  && has_call))
	     && reg != STACK_POINTER_REGNUM && reg != ARG_POINTER_REGNUM
	     && reg != RETURN_ADDRESS_POINTER_REGNUM
	     && reg != T_REG && reg != GBR_REG
	     && reg != FPSCR_MODES_REG && reg != FPSCR_STAT_REG
	     /* Push fpscr only on targets which have FPU */
	     && (reg != FPSCR_REG || TARGET_FPU_ANY))
	  : (/* Only push those regs which are used and need to be saved.  */
	     (false)
	     || (df_regs_ever_live_p (reg)
		 && ((!call_really_used_regs[reg]
		      && !(reg != PIC_OFFSET_TABLE_REGNUM
			   && fixed_regs[reg] && call_used_regs[reg]))
		     || (trapa_handler && reg == FPSCR_REG && TARGET_FPU_ANY)))
	     || (crtl->calls_eh_return
		 && (reg == EH_RETURN_DATA_REGNO (0)
		     || reg == EH_RETURN_DATA_REGNO (1)
		     || reg == EH_RETURN_DATA_REGNO (2)
		     || reg == EH_RETURN_DATA_REGNO (3)))
	     || ((reg == MACL_REG || reg == MACH_REG)
		 && df_regs_ever_live_p (reg)
		 && sh_cfun_attr_renesas_p ())))
	{
	  SET_HARD_REG_BIT (*live_regs_mask, reg);
	  count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));

	  if (TARGET_FPU_DOUBLE && TARGET_FMOVD
	      && GET_MODE_CLASS (REGISTER_NATURAL_MODE (reg)) == MODE_FLOAT)
	    {
	      if (FP_REGISTER_P (reg))
		{
		  if (! TARGET_FPU_SINGLE && ! df_regs_ever_live_p (reg ^ 1))
		    {
		      SET_HARD_REG_BIT (*live_regs_mask, (reg ^ 1));
		      count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg ^ 1));
		    }
		}
	      else if (XD_REGISTER_P (reg))
		{
		  /* Must switch to double mode to access these registers.  */
		  target_flags &= ~MASK_FPU_SINGLE;
		}
	    }
	}
      if (nosave_low_regs && reg == R8_REG)
	break;
    }

  /* If we have a target register optimization pass after prologue / epilogue
     threading, we need to assume all target registers will be live even if
     they aren't now.  */
  if (flag_branch_target_load_optimize2 && TARGET_SAVE_ALL_TARGET_REGS)
    for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
      if ((! call_really_used_regs[reg] || interrupt_handler)
	  && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
	{
	  SET_HARD_REG_BIT (*live_regs_mask, reg);
	  count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
	}

  /* If this is an interrupt handler, we don't have any call-clobbered
     registers we can conveniently use for target register save/restore.
     Make sure we save at least one general purpose register when we need
     to save target registers.  */
  if (interrupt_handler
      && hard_reg_set_intersect_p (*live_regs_mask,
				   reg_class_contents[TARGET_REGS])
      && ! hard_reg_set_intersect_p (*live_regs_mask,
				     reg_class_contents[GENERAL_REGS]))
    {
      SET_HARD_REG_BIT (*live_regs_mask, R0_REG);
      count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (R0_REG));
    }

  return count;
}
/* Code to generate prologue and epilogue sequences */

/* PUSHED is the number of bytes that are being pushed on the
   stack for register saves.  Return the frame size, padded
   appropriately so that the stack stays properly aligned.  */
static HOST_WIDE_INT
rounded_frame_size (int pushed)
{
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;

  if (ACCUMULATE_OUTGOING_ARGS)
    size += crtl->outgoing_args_size;

  return ((size + pushed + align - 1) & -align) - pushed;
}
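/* A worked example of the rounding above (an illustrative sketch, not
   compiled into the backend; it just restates the return expression
   with plain longs):  */
#if 0
#include <assert.h>

static long
rounded_frame_size_sketch (long size, long pushed, long align)
{
  /* Pad SIZE + PUSHED up to a multiple of ALIGN, then take the register
     save area back out to get the padded local frame size.  */
  return ((size + pushed + align - 1) & -align) - pushed;
}

static void
rounded_frame_size_sketch_check (void)
{
  /* 10 bytes of locals plus 12 bytes of saved registers with an 8-byte
     boundary round the total frame to 24 bytes, so the locals part
     grows from 10 to 12 bytes.  */
  assert (rounded_frame_size_sketch (10, 12, 8) == 12);
}
#endif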
/* Choose a call-clobbered target-branch register that remains
   unchanged along the whole function.  We set it up as the return
   value in the prologue.  */
int
sh_media_register_for_return (void)
{
  int regno;
  int tr0_used;

  if (! crtl->is_leaf)
    return -1;
  if (lookup_attribute ("interrupt_handler",
			DECL_ATTRIBUTES (current_function_decl)))
    return -1;
  if (sh_cfun_interrupt_handler_p ())
    return -1;

  tr0_used = flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);

  for (regno = FIRST_TARGET_REG + tr0_used; regno <= LAST_TARGET_REG; regno++)
    if (call_really_used_regs[regno] && ! df_regs_ever_live_p (regno))
      return regno;

  return -1;
}
/* The maximum registers we need to save are:
   - 62 general purpose registers (r15 is stack pointer, r63 is zero)
   - 32 floating point registers (for each pair, we save none,
	 one single precision value, or a double precision value).
   -  8 target registers
   - add 1 entry for a delimiter.  */
#define MAX_SAVED_REGS (62+32+8)

typedef struct save_entry_s
{
  int reg;
  machine_mode mode;
  int offset;
} save_entry;

#define MAX_TEMPS 4

/* There will be a delimiter entry with VOIDmode both at the start and the
   end of a filled in schedule.  The end delimiter has the offset of the
   save with the smallest (i.e. most negative) offset.  */
typedef struct save_schedule_s
{
  save_entry entries[MAX_SAVED_REGS + 2];
  int temps[MAX_TEMPS+1];
} save_schedule;
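/* For example, if r14 and pr end up saved at offsets -4 and -8, the
   filled in schedule reads (showing entries as { reg, offset }):

     { delimiter, 0 }  { r14, -4 }  { pr, -8 }  { delimiter, -8 }

   with the VOIDmode delimiters at both ends and the final one carrying
   the most negative offset.  This layout is a sketch for illustration.  */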
/* Expand code for the function prologue.  */
void
sh_expand_prologue (void)
{
  HARD_REG_SET live_regs_mask;
  int d, i;
  int d_rounding = 0;
  int save_flags = target_flags;
  int pretend_args;
  int stack_usage;
  tree sp_switch_attr
    = lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl));

  current_function_interrupt = sh_cfun_interrupt_handler_p ();

  /* We have pretend args if we had an object sent partially in registers
     and partially on the stack, e.g. a large structure.  */
  pretend_args = crtl->args.pretend_args_size;
  if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl)
      && (NPARM_REGS(SImode)
	  > crtl->args.info.arg_count[(int) SH_ARG_INT]))
    pretend_args = 0;

  output_stack_adjust (-pretend_args
		       - crtl->args.info.stack_regs * 8,
		       stack_pointer_rtx, 0, NULL, true);
  stack_usage = pretend_args + crtl->args.info.stack_regs * 8;

  /* Emit the code for SETUP_VARARGS.  */
  if (cfun->stdarg)
    {
      if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
	{
	  /* Push arg regs as if they'd been provided by caller in stack.  */
	  for (i = 0; i < NPARM_REGS(SImode); i++)
	    {
	      int rn = NPARM_REGS(SImode) + FIRST_PARM_REG - i - 1;

	      if (i >= (NPARM_REGS(SImode)
			- crtl->args.info.arg_count[(int) SH_ARG_INT]))
		break;
	      push (rn);
	      stack_usage += GET_MODE_SIZE (SImode);
	    }
	}
    }

  /* If we're supposed to switch stacks at function entry, do so now.  */
  if (sp_switch_attr)
    {
      rtx lab, newsrc;
      /* The argument specifies a variable holding the address of the
	 stack the interrupt function should switch to/from at entry/exit.  */
      tree arg = TREE_VALUE ( TREE_VALUE (sp_switch_attr));
      const char *s
	= ggc_strdup (TREE_STRING_POINTER (arg));
      rtx sp_switch = gen_rtx_SYMBOL_REF (Pmode, s);

      lab = add_constant (sp_switch, SImode, 0);
      newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);

      emit_insn (gen_sp_switch_1 (newsrc));
    }

  d = calc_live_regs (&live_regs_mask);
  /* ??? Maybe we could save some switching if we can move a mode switch
     that already happens to be at the function start into the prologue.  */
  if (target_flags != save_flags && ! current_function_interrupt)
    emit_insn (gen_toggle_sz ());

  push_regs (&live_regs_mask, current_function_interrupt);
  stack_usage += d;

  if (flag_pic && !TARGET_FDPIC
      && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_GOTaddr2picreg (const0_rtx));

  if (target_flags != save_flags && ! current_function_interrupt)
    emit_insn (gen_toggle_sz ());

  target_flags = save_flags;

  output_stack_adjust (-rounded_frame_size (d) + d_rounding,
		       stack_pointer_rtx, 0, NULL, true);
  stack_usage += rounded_frame_size (d) - d_rounding;

  if (frame_pointer_needed)
    frame_insn (GEN_MOV (hard_frame_pointer_rtx, stack_pointer_rtx));

  /* If we are profiling, make sure no instructions are scheduled before
     the call to mcount.  Similarly if some call instructions are swapped
     before frame related insns, it'll confuse the unwinder because
     currently SH has no unwind info for function epilogues.  */
  if (crtl->profile || flag_exceptions || flag_unwind_tables)
    emit_insn (gen_blockage ());

  if (flag_stack_usage_info)
    current_function_static_stack_size = stack_usage;
}
/* Expand code for the function epilogue.  */
void
sh_expand_epilogue (bool sibcall_p)
{
  HARD_REG_SET live_regs_mask;
  int d, i;
  int d_rounding = 0;

  int save_flags = target_flags;
  int frame_size, save_size;
  int fpscr_deferred = 0;
  int e = sibcall_p ? -1 : 1;

  d = calc_live_regs (&live_regs_mask);

  save_size = d;
  frame_size = rounded_frame_size (d);

  if (frame_pointer_needed)
    {
      /* We must avoid scheduling the epilogue with previous basic blocks.
	 See PR/18032 and PR/40313.  */
      emit_insn (gen_blockage ());
      output_stack_adjust (frame_size, hard_frame_pointer_rtx, e,
			   &live_regs_mask, true);

      /* We must avoid moving the stack pointer adjustment past code
	 which reads from the local frame, else an interrupt could
	 occur after the SP adjustment and clobber data in the local
	 frame.  */
      emit_insn (gen_blockage ());
      frame_insn (GEN_MOV (stack_pointer_rtx, hard_frame_pointer_rtx));
    }
  else if (frame_size)
    {
      /* We must avoid moving the stack pointer adjustment past code
	 which reads from the local frame, else an interrupt could
	 occur after the SP adjustment and clobber data in the local
	 frame.  */
      emit_insn (gen_blockage ());
      output_stack_adjust (frame_size, stack_pointer_rtx, e,
			   &live_regs_mask, true);
    }

  /* Pop all the registers.  */

  if (target_flags != save_flags && ! current_function_interrupt)
    emit_insn (gen_toggle_sz ());

  {
    int last_reg;

    save_size = 0;
    /* For an ISR with RESBANK attribute assigned, don't pop PR
       register.  */
    if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG)
	&& !sh_cfun_resbank_handler_p ())
      {
	if (!frame_pointer_needed)
	  emit_insn (gen_blockage ());
	pop (PR_REG);
      }

    /* Banked registers are popped first to avoid being scheduled in the
       delay slot.  RTE switches banks before the ds instruction.  */
    if (current_function_interrupt)
      {
	bool use_movml = false;

	if (TARGET_SH2A)
	  {
	    unsigned int count = 0;

	    for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
	      if (TEST_HARD_REG_BIT (live_regs_mask, i))
		count++;
	      else
		break;

	    /* Use movml when all banked registers are popped.  */
	    if (count == LAST_BANKED_REG - FIRST_BANKED_REG + 1)
	      use_movml = true;
	  }

	if (sh_cfun_resbank_handler_p ())
	  ; /* Do nothing.  */
	else if (use_movml)
	  {
	    rtx sp_reg = gen_rtx_REG (SImode, STACK_POINTER_REGNUM);

	    /* We must avoid scheduling multiple load insn with another
	       insns.  */
	    emit_insn (gen_blockage ());
	    emit_insn (gen_movml_pop_banked (sp_reg));
	    emit_insn (gen_blockage ());
	  }
	else
	  for (i = LAST_BANKED_REG; i >= FIRST_BANKED_REG; i--)
	    if (TEST_HARD_REG_BIT (live_regs_mask, i))
	      pop (i);

	last_reg = FIRST_PSEUDO_REGISTER - LAST_BANKED_REG - 1;
      }
    else
      last_reg = FIRST_PSEUDO_REGISTER;

    for (i = 0; i < last_reg; i++)
      {
	int j = (FIRST_PSEUDO_REGISTER - 1) - i;

	if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
	    && hard_reg_set_intersect_p (live_regs_mask,
					 reg_class_contents[DF_REGS]))
	  fpscr_deferred = 1;
	/* For an ISR with RESBANK attribute assigned, don't pop
	   following registers, R0-R14, MACH, MACL and GBR.  */
	else if (j != PR_REG && TEST_HARD_REG_BIT (live_regs_mask, j)
		 && ! (sh_cfun_resbank_handler_p ()
		       && ((j >= FIRST_GENERAL_REG
			    && j < LAST_GENERAL_REG)
			   || j == MACH_REG
			   || j == MACL_REG
			   || j == GBR_REG)))
	  pop (j);

	if (j == FIRST_FP_REG && fpscr_deferred)
	  pop (FPSCR_REG);
      }
  }
  if (target_flags != save_flags && ! current_function_interrupt)
    emit_insn (gen_toggle_sz ());
  target_flags = save_flags;

  output_stack_adjust (crtl->args.pretend_args_size
		       + save_size + d_rounding
		       + crtl->args.info.stack_regs * 8,
		       stack_pointer_rtx, e, NULL, true);

  if (crtl->calls_eh_return)
    emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
			 EH_RETURN_STACKADJ_RTX));

  /* Switch back to the normal stack if necessary.  */
  if (lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl)))
    emit_insn (gen_sp_switch_2 ());

  /* Tell flow the insn that pops PR isn't dead.  */
  /* PR_REG will never be live in SHmedia mode, and we don't need to
     USE PR_MEDIA_REG, since it will be explicitly copied to TR0_REG
     by the return pattern.  */
  if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
    emit_use (gen_rtx_REG (SImode, PR_REG));
}
/* Emit code to change the current function's return address to RA.
   TEMP is available as a scratch register, if needed.  */
void
sh_set_return_address (rtx ra, rtx tmp)
{
  HARD_REG_SET live_regs_mask;
  int d = calc_live_regs (&live_regs_mask);

  /* If pr_reg isn't live, we can set it (or the register given in
     sh_media_register_for_return) directly.  */
  if (! TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
    {
      rtx rr = gen_rtx_REG (SImode, PR_REG);
      emit_insn (GEN_MOV (rr, ra));
      /* Tell flow the register for return isn't dead.  */
      emit_use (rr);
      return;
    }

  int pr_offset = rounded_frame_size (d);

  emit_insn (GEN_MOV (tmp, GEN_INT (pr_offset)));

  if (frame_pointer_needed)
    emit_insn (GEN_ADD3 (tmp, tmp, hard_frame_pointer_rtx));
  else
    emit_insn (GEN_ADD3 (tmp, tmp, stack_pointer_rtx));

  tmp = gen_frame_mem (Pmode, tmp);
  emit_insn (GEN_MOV (tmp, ra));
  /* Tell this store isn't dead.  */
  emit_use (tmp);
}
/* Clear variables at function end.  */
static void
sh_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
}
static rtx
sh_builtin_saveregs (void)
{
  /* First unnamed integer register.  */
  int first_intreg = crtl->args.info.arg_count[(int) SH_ARG_INT];
  /* Number of integer registers we need to save.  */
  int n_intregs = MAX (0, NPARM_REGS (SImode) - first_intreg);
  /* First unnamed SFmode float reg */
  int first_floatreg = crtl->args.info.arg_count[(int) SH_ARG_FLOAT];
  /* Number of SFmode float regs to save.  */
  int n_floatregs = MAX (0, NPARM_REGS (SFmode) - first_floatreg);
  rtx regbuf, fpregs;
  int bufsize, regno;
  alias_set_type alias_set;

  if (! TARGET_SH2E && ! TARGET_SH4)
    {
      error ("__builtin_saveregs not supported by this subtarget");
      return const0_rtx;
    }

  /* Allocate block of memory for the regs.  */
  /* ??? If n_intregs + n_floatregs == 0, should we allocate at least 1 byte?
     Or can assign_stack_local accept a 0 SIZE argument?  */
  bufsize = (n_intregs * UNITS_PER_WORD) + (n_floatregs * UNITS_PER_WORD);

  if (n_floatregs & 1)
    {
      rtx addr;

      regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
      addr = copy_to_mode_reg (Pmode, XEXP (regbuf, 0));
      emit_insn (gen_iorsi3 (addr, addr, GEN_INT (UNITS_PER_WORD)));
      regbuf = change_address (regbuf, BLKmode, addr);
    }
  else if (STACK_BOUNDARY < 64 && TARGET_FPU_DOUBLE && n_floatregs)
    {
      rtx addr, mask;

      regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
      addr = copy_to_mode_reg (Pmode, plus_constant (Pmode,
						     XEXP (regbuf, 0), 4));
      mask = copy_to_mode_reg (Pmode, GEN_INT (-8));
      emit_insn (gen_andsi3 (addr, addr, mask));
      regbuf = change_address (regbuf, BLKmode, addr);
    }
  else
    regbuf = assign_stack_local (BLKmode, bufsize, TARGET_FPU_DOUBLE ? 64 : 0);
  alias_set = get_varargs_alias_set ();
  set_mem_alias_set (regbuf, alias_set);

  /* Save int args.
     This is optimized to only save the regs that are necessary.  Explicitly
     named args need not be saved.  */
  if (n_intregs > 0)
    move_block_from_reg (BASE_ARG_REG (SImode) + first_intreg,
			 adjust_address (regbuf, BLKmode,
					 n_floatregs * UNITS_PER_WORD),
			 n_intregs);

  /* Save float args.
     This is optimized to only save the regs that are necessary.  Explicitly
     named args need not be saved.
     We explicitly build a pointer to the buffer because it halves the insn
     count when not optimizing (otherwise the pointer is built for each reg
     saved).
     We emit the moves in reverse order so that we can use predecrement.  */

  fpregs = copy_to_mode_reg (Pmode,
			     plus_constant (Pmode, XEXP (regbuf, 0),
					    n_floatregs * UNITS_PER_WORD));
  if (TARGET_FPU_DOUBLE)
    {
      rtx mem;
      for (regno = NPARM_REGS (DFmode) - 2; regno >= first_floatreg; regno -= 2)
	{
	  emit_insn (gen_addsi3 (fpregs, fpregs,
				 GEN_INT (-2 * UNITS_PER_WORD)));
	  mem = change_address (regbuf, DFmode, fpregs);
	  emit_move_insn (mem,
			  gen_rtx_REG (DFmode, BASE_ARG_REG (DFmode) + regno));
	}
      regno = first_floatreg;
      if (regno & 1)
	{
	  emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
	  mem = change_address (regbuf, SFmode, fpregs);
	  emit_move_insn (mem,
			  gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode)
				       + regno - SH_REG_MSW_OFFSET));
	}
    }
  else
    for (regno = NPARM_REGS (SFmode) - 1; regno >= first_floatreg; regno--)
      {
	rtx mem;

	emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
	mem = change_address (regbuf, SFmode, fpregs);
	emit_move_insn (mem,
			gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno));
      }

  /* Return the address of the regbuf.  */
  return XEXP (regbuf, 0);
}
/* Define the `__builtin_va_list' type for the ABI.  */
static tree
sh_build_builtin_va_list (void)
{
  tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
  tree record, type_decl;

  if ((! TARGET_SH2E && ! TARGET_SH4)
      || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
    return ptr_type_node;

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION,
			  TYPE_DECL, get_identifier ("__va_list_tag"), record);

  f_next_o = build_decl (BUILTINS_LOCATION,
			 FIELD_DECL, get_identifier ("__va_next_o"),
			 ptr_type_node);
  f_next_o_limit = build_decl (BUILTINS_LOCATION,
			       FIELD_DECL,
			       get_identifier ("__va_next_o_limit"),
			       ptr_type_node);
  f_next_fp = build_decl (BUILTINS_LOCATION,
			  FIELD_DECL, get_identifier ("__va_next_fp"),
			  ptr_type_node);
  f_next_fp_limit = build_decl (BUILTINS_LOCATION,
				FIELD_DECL,
				get_identifier ("__va_next_fp_limit"),
				ptr_type_node);
  f_next_stack = build_decl (BUILTINS_LOCATION,
			     FIELD_DECL, get_identifier ("__va_next_stack"),
			     ptr_type_node);

  DECL_FIELD_CONTEXT (f_next_o) = record;
  DECL_FIELD_CONTEXT (f_next_o_limit) = record;
  DECL_FIELD_CONTEXT (f_next_fp) = record;
  DECL_FIELD_CONTEXT (f_next_fp_limit) = record;
  DECL_FIELD_CONTEXT (f_next_stack) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_next_o;
  DECL_CHAIN (f_next_o) = f_next_o_limit;
  DECL_CHAIN (f_next_o_limit) = f_next_fp;
  DECL_CHAIN (f_next_fp) = f_next_fp_limit;
  DECL_CHAIN (f_next_fp_limit) = f_next_stack;

  layout_type (record);

  return record;
}
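/* For reference, the record built above corresponds roughly to:

     struct __va_list_tag
     {
       void *__va_next_o;	  -- next integer argument slot
       void *__va_next_o_limit;	  -- end of the integer slots
       void *__va_next_fp;	  -- next FP argument slot
       void *__va_next_fp_limit;  -- end of the FP slots
       void *__va_next_stack;	  -- overflow arguments on the stack
     };

   This is a sketch for illustration; the pointer field types are
   assumed from the ptr_type_node decls above.  */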
/* Implement `va_start' for varargs and stdarg.  */
static void
sh_va_start (tree valist, rtx nextarg)
{
  tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
  tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
  tree t, u;
  int nfp, nint;

  if ((! TARGET_SH2E && ! TARGET_SH4)
      || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_next_o = TYPE_FIELDS (va_list_type_node);
  f_next_o_limit = DECL_CHAIN (f_next_o);
  f_next_fp = DECL_CHAIN (f_next_o_limit);
  f_next_fp_limit = DECL_CHAIN (f_next_fp);
  f_next_stack = DECL_CHAIN (f_next_fp_limit);

  next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
		   NULL_TREE);
  next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
			 valist, f_next_o_limit, NULL_TREE);
  next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp,
		    NULL_TREE);
  next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
			  valist, f_next_fp_limit, NULL_TREE);
  next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
		       valist, f_next_stack, NULL_TREE);

  /* Call __builtin_saveregs.  */
  u = make_tree (sizetype, expand_builtin_saveregs ());
  u = fold_convert (ptr_type_node, u);
  t = build2 (MODIFY_EXPR, ptr_type_node, next_fp, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  nfp = crtl->args.info.arg_count[SH_ARG_FLOAT];
  if (nfp < 8)
    nfp = 8 - nfp;
  else
    nfp = 0;
  u = fold_build_pointer_plus_hwi (u, UNITS_PER_WORD * nfp);
  t = build2 (MODIFY_EXPR, ptr_type_node, next_fp_limit, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  t = build2 (MODIFY_EXPR, ptr_type_node, next_o, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  nint = crtl->args.info.arg_count[SH_ARG_INT];
  if (nint < 4)
    nint = 4 - nint;
  else
    nint = 0;
  u = fold_build_pointer_plus_hwi (u, UNITS_PER_WORD * nint);
  t = build2 (MODIFY_EXPR, ptr_type_node, next_o_limit, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  u = make_tree (ptr_type_node, nextarg);
  t = build2 (MODIFY_EXPR, ptr_type_node, next_stack, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
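/* The resulting layout: __builtin_saveregs returns a block holding the
   unnamed FP argument registers followed by the unnamed integer argument
   registers, so after va_start the fields point at

     __va_next_fp	-> start of the register save block
     __va_next_fp_limit	-> __va_next_fp + 4 * (unnamed float regs)
     __va_next_o	-> __va_next_fp_limit
     __va_next_o_limit	-> __va_next_o + 4 * (unnamed int regs)
     __va_next_stack	-> first anonymous argument passed on the stack

   (an illustrative sketch assuming UNITS_PER_WORD == 4).  */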
/* TYPE is a RECORD_TYPE.  If there is only a single nonzero-sized
   member, return it.  */
static tree
find_sole_member (tree type)
{
  tree field, member = NULL_TREE;

  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
	continue;
      if (!DECL_SIZE (field))
	return NULL_TREE;
      if (integer_zerop (DECL_SIZE (field)))
	continue;
      if (member)
	return NULL_TREE;
      member = field;
    }
  return member;
}
/* Implement `va_arg'.  */
static tree
sh_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
			 gimple_seq *post_p ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT size, rsize;
  tree tmp, pptr_type_node;
  tree addr, lab_over = NULL, result = NULL;
  bool pass_by_ref;
  tree eff_type;

  if (!VOID_TYPE_P (type))
    pass_by_ref = targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
  else
    pass_by_ref = false;

  if (pass_by_ref)
    type = build_pointer_type (type);

  size = int_size_in_bytes (type);
  rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
  pptr_type_node = build_pointer_type (ptr_type_node);

  if ((TARGET_SH2E || TARGET_SH4)
      && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
    {
      tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
      tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
      tree lab_false;
      tree member;

      f_next_o = TYPE_FIELDS (va_list_type_node);
      f_next_o_limit = DECL_CHAIN (f_next_o);
      f_next_fp = DECL_CHAIN (f_next_o_limit);
      f_next_fp_limit = DECL_CHAIN (f_next_fp);
      f_next_stack = DECL_CHAIN (f_next_fp_limit);

      next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
		       NULL_TREE);
      next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
			     valist, f_next_o_limit, NULL_TREE);
      next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp),
			valist, f_next_fp, NULL_TREE);
      next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
			      valist, f_next_fp_limit, NULL_TREE);
      next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
			   valist, f_next_stack, NULL_TREE);

      /* Structures with a single member with a distinct mode are passed
	 like their member.  This is relevant if the latter has a REAL_TYPE
	 or COMPLEX_TYPE type.  */
      eff_type = type;
      while (TREE_CODE (eff_type) == RECORD_TYPE
	     && (member = find_sole_member (eff_type))
	     && (TREE_CODE (TREE_TYPE (member)) == REAL_TYPE
		 || TREE_CODE (TREE_TYPE (member)) == COMPLEX_TYPE
		 || TREE_CODE (TREE_TYPE (member)) == RECORD_TYPE))
	{
	  tree field_type = TREE_TYPE (member);

	  if (TYPE_MODE (eff_type) == TYPE_MODE (field_type))
	    eff_type = field_type;
	  else
	    {
	      gcc_assert ((TYPE_ALIGN (eff_type)
			   < GET_MODE_ALIGNMENT (TYPE_MODE (field_type)))
			  || (TYPE_ALIGN (eff_type)
			      > GET_MODE_BITSIZE (TYPE_MODE (field_type))));
	      break;
	    }
	}

      bool pass_as_float;
      if (TARGET_FPU_DOUBLE)
	{
	  pass_as_float = ((TREE_CODE (eff_type) == REAL_TYPE && size <= 8)
			   || (TREE_CODE (eff_type) == COMPLEX_TYPE
			       && TREE_CODE (TREE_TYPE (eff_type)) == REAL_TYPE
			       && size <= 16));
	}
      else
	{
	  pass_as_float = (TREE_CODE (eff_type) == REAL_TYPE && size == 4);
	}

      addr = create_tmp_var (pptr_type_node);
      lab_false = create_artificial_label (UNKNOWN_LOCATION);
      lab_over = create_artificial_label (UNKNOWN_LOCATION);

      valist = build_simple_mem_ref (addr);

      if (pass_as_float)
	{
	  tree next_fp_tmp = create_tmp_var (TREE_TYPE (f_next_fp));
	  tree cmp;
	  bool is_double = size == 8 && TREE_CODE (eff_type) == REAL_TYPE;

	  tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_fp));
	  gimplify_assign (unshare_expr (addr), tmp, pre_p);

	  gimplify_assign (unshare_expr (next_fp_tmp), valist, pre_p);
	  tmp = next_fp_limit;
	  if (size > 4 && !is_double)
	    tmp = fold_build_pointer_plus_hwi (unshare_expr (tmp), 4 - size);
	  tmp = build2 (GE_EXPR, boolean_type_node,
			unshare_expr (next_fp_tmp), unshare_expr (tmp));
	  cmp = build3 (COND_EXPR, void_type_node, tmp,
			build1 (GOTO_EXPR, void_type_node,
				unshare_expr (lab_false)), NULL_TREE);
	  if (!is_double)
	    gimplify_and_add (cmp, pre_p);

	  if (TYPE_ALIGN (eff_type) > BITS_PER_WORD
	      || (is_double || size == 16))
	    {
	      tmp = fold_convert (sizetype, next_fp_tmp);
	      tmp = build2 (BIT_AND_EXPR, sizetype, tmp,
			    size_int (UNITS_PER_WORD));
	      tmp = fold_build_pointer_plus (unshare_expr (next_fp_tmp), tmp);
	      gimplify_assign (unshare_expr (next_fp_tmp), tmp, pre_p);
	    }
	  if (is_double)
	    gimplify_and_add (cmp, pre_p);

#ifdef FUNCTION_ARG_SCmode_WART
	  if (TYPE_MODE (eff_type) == SCmode
	      && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
	    {
	      tree subtype = TREE_TYPE (eff_type);
	      tree real, imag;

	      imag
		= std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
	      imag = get_initialized_tmp_var (imag, pre_p, NULL);

	      real
		= std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
	      real = get_initialized_tmp_var (real, pre_p, NULL);

	      result = build2 (COMPLEX_EXPR, eff_type, real, imag);
	      if (type != eff_type)
		result = build1 (VIEW_CONVERT_EXPR, type, result);
	      result = get_initialized_tmp_var (result, pre_p, NULL);
	    }
#endif /* FUNCTION_ARG_SCmode_WART */

	  tmp = build1 (GOTO_EXPR, void_type_node, unshare_expr (lab_over));
	  gimplify_and_add (tmp, pre_p);

	  tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_false));
	  gimplify_and_add (tmp, pre_p);

	  tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_stack));
	  gimplify_assign (unshare_expr (addr), tmp, pre_p);
	  gimplify_assign (unshare_expr (next_fp_tmp),
			   unshare_expr (valist), pre_p);

	  gimplify_assign (unshare_expr (valist),
			   unshare_expr (next_fp_tmp), post_p);
	  valist = next_fp_tmp;
	}
      else
	{
	  tmp = fold_build_pointer_plus_hwi (unshare_expr (next_o), rsize);
	  tmp = build2 (GT_EXPR, boolean_type_node, tmp,
			unshare_expr (next_o_limit));
	  tmp = build3 (COND_EXPR, void_type_node, tmp,
			build1 (GOTO_EXPR, void_type_node,
				unshare_expr (lab_false)),
			NULL_TREE);
	  gimplify_and_add (tmp, pre_p);

	  tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_o));
	  gimplify_assign (unshare_expr (addr), tmp, pre_p);

	  tmp = build1 (GOTO_EXPR, void_type_node, unshare_expr (lab_over));
	  gimplify_and_add (tmp, pre_p);

	  tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_false));
	  gimplify_and_add (tmp, pre_p);

	  if (size > 4 && ! (TARGET_SH4 || TARGET_SH2A))
	    gimplify_assign (unshare_expr (next_o),
			     unshare_expr (next_o_limit), pre_p);

	  tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_stack));
	  gimplify_assign (unshare_expr (addr), tmp, pre_p);
	}

      if (!result)
	{
	  tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_over));
	  gimplify_and_add (tmp, pre_p);
	}
    }

  /* ??? In va-sh.h, there had been code to make values larger than
     size 8 indirect.  This does not match the FUNCTION_ARG macros.  */

  tmp = std_gimplify_va_arg_expr (valist, type, pre_p, NULL);
  if (result)
    {
      gimplify_assign (result, tmp, pre_p);
      result = build1 (NOP_EXPR, TREE_TYPE (result), result);
      tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_over));
      gimplify_and_add (tmp, pre_p);
    }
  else
    result = tmp;

  if (pass_by_ref)
    result = build_va_arg_indirect_ref (result);

  return result;
}
/* 64 bit floating point memory transfers are paired single precision
   loads or stores.  So DWARF information needs fixing in little endian
   (unless PR=SZ=1 in FPSCR).  */
rtx
sh_dwarf_register_span (rtx reg)
{
  unsigned regno = REGNO (reg);

  if (WORDS_BIG_ENDIAN || GET_MODE (reg) != DFmode)
    return NULL_RTX;

  return
    gen_rtx_PARALLEL (VOIDmode,
		      gen_rtvec (2,
				 gen_rtx_REG (SFmode, regno + 1),
				 gen_rtx_REG (SFmode, regno)));
}
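/* For example, a DFmode value living in dr0 (fr0/fr1) is described to
   the DWARF consumer as the pair (fr1, fr0), so the two SFmode halves
   appear in the order they actually sit in memory on a little endian
   target.  The concrete register pair here is illustrative.  */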
static machine_mode
sh_promote_function_mode (const_tree type, machine_mode mode,
			  int *punsignedp, const_tree funtype,
			  int for_return)
{
  if (sh_promote_prototypes (funtype))
    return promote_mode (type, mode, punsignedp);
  else
    return default_promote_function_mode (type, mode, punsignedp, funtype,
					  for_return);
}

static bool
sh_promote_prototypes (const_tree type)
{
  if (TARGET_HITACHI)
    return false;
  if (! type)
    return true;
  return ! sh_attr_renesas_p (type);
}
static bool
sh_pass_by_reference (cumulative_args_t cum_v, machine_mode mode,
		      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  if (targetm.calls.must_pass_in_stack (mode, type))
    return true;

  /* ??? std_gimplify_va_arg_expr passes NULL for cum.  That function
     wants to know about pass-by-reference semantics for incoming
     arguments.  */
  if (! cum)
    return false;

  return false;
}

static bool
sh_callee_copies (cumulative_args_t cum, machine_mode mode,
		  const_tree type, bool named ATTRIBUTE_UNUSED)
{
  /* ??? How can it possibly be correct to return true only on the
     caller side of the equation?  Is there someplace else in the
     sh backend that's magically producing the copies?  */
  return (get_cumulative_args (cum)->outgoing
	  && ((mode == BLKmode ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode))
	      % SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
}
/* Round a register number up to a proper boundary for an arg of mode
   MODE.
   The SH doesn't care about double alignment, so we only
   round doubles to even regs when asked to explicitly.  */
static int
sh_round_reg (const CUMULATIVE_ARGS& cum, machine_mode mode)
{
  /* FIXME: This used to be a macro and has been copy pasted into this
     function as is.  Make this more readable.  */
  return
    (((TARGET_ALIGN_DOUBLE
       || (TARGET_FPU_DOUBLE
	   && (mode == DFmode || mode == DCmode)
	   && cum.arg_count[(int) SH_ARG_FLOAT] < NPARM_REGS (mode)))
      && GET_MODE_UNIT_SIZE (mode) > UNITS_PER_WORD)
     ? (cum.arg_count[(int) GET_SH_ARG_CLASS (mode)]
	+ (cum.arg_count[(int) GET_SH_ARG_CLASS (mode)] & 1))
     : cum.arg_count[(int) GET_SH_ARG_CLASS (mode)]);
}
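/* A readable restatement of the expression above (illustrative sketch
   only, not compiled in; ARG_COUNT stands for the per-class counter
   used by the real code):  */
#if 0
static int
sh_round_reg_sketch (int arg_count, int wide_and_aligned)
{
  /* Multi-word values are bumped to an even register index only when
     double alignment applies; everything else takes the next register.  */
  return wide_and_aligned ? arg_count + (arg_count & 1) : arg_count;
}
#endif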
/* Return true if arg of the specified mode should be passed in a register
   or false otherwise.  */
static bool
sh_pass_in_reg_p (const CUMULATIVE_ARGS& cum, machine_mode mode,
		  const_tree type)
{
  /* FIXME: This used to be a macro and has been copy pasted into this
     function as is.  Make this more readable.  */
  return ((type == 0
	   || (! TREE_ADDRESSABLE (type)
	       && (! (TARGET_HITACHI || cum.renesas_abi)
		   || ! (AGGREGATE_TYPE_P (type)
			 || (!TARGET_FPU_ANY
			     && (GET_MODE_CLASS (mode) == MODE_FLOAT
				 && GET_MODE_SIZE (mode) > GET_MODE_SIZE (SFmode)))))))
	  && ! cum.force_mem
	  && (TARGET_SH2E
	      ? ((mode) == BLKmode
		 ? ((cum.arg_count[(int) SH_ARG_INT] * UNITS_PER_WORD
		     + int_size_in_bytes (type))
		    <= NPARM_REGS (SImode) * UNITS_PER_WORD)
		 : ((sh_round_reg (cum, mode)
		     + HARD_REGNO_NREGS (BASE_ARG_REG (mode), mode))
		    <= NPARM_REGS (mode)))
	      : sh_round_reg (cum, mode) < NPARM_REGS (mode)));
}
static int
sh_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
		      tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int words = 0;

  if (sh_pass_in_reg_p (*cum, mode, type)
      && !TARGET_FPU_DOUBLE
      && (sh_round_reg (*cum, mode)
	  + (mode != BLKmode
	     ? CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD)
	     : CEIL (int_size_in_bytes (type), UNITS_PER_WORD))
	  > NPARM_REGS (mode)))
    words = NPARM_REGS (mode) - sh_round_reg (*cum, mode);

  return words * UNITS_PER_WORD;
}
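/* Worked example for the above: with four SImode argument registers, a
   12-byte BLKmode argument whose first word lands in the third register
   needs 3 words but only 2 remain, so sh_round_reg == 2, 2 + 3 > 4, and
   words == 4 - 2 == 2; 8 bytes travel in registers and the rest goes on
   the stack.  (The register counts here are illustrative.)  */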
/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On SH the first args are normally in registers
   and the rest are pushed.  Any arg that starts within the first
   NPARM_REGS words is at least partially passed in a register unless
   its data type forbids.  */
static rtx
sh_function_arg (cumulative_args_t ca_v, machine_mode mode,
		 const_tree type, bool named)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  if (mode == VOIDmode)
    return ca->renesas_abi ? const1_rtx : const0_rtx;

  if (sh_pass_in_reg_p (*ca, mode, type)
      && (named || ! (TARGET_HITACHI || ca->renesas_abi)))
    {
      int regno;

      if (mode == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN
	  && (! FUNCTION_ARG_SCmode_WART || (sh_round_reg (*ca, mode) & 1)))
	{
	  rtx r1 = gen_rtx_EXPR_LIST (VOIDmode,
				      gen_rtx_REG (SFmode,
						   BASE_ARG_REG (mode)
						   + (sh_round_reg (*ca, mode) ^ 1)),
				      const0_rtx);
	  rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
				      gen_rtx_REG (SFmode,
						   BASE_ARG_REG (mode)
						   + ((sh_round_reg (*ca, mode) + 1) ^ 1)),
				      GEN_INT (4));
	  return gen_rtx_PARALLEL(SCmode, gen_rtvec(2, r1, r2));
	}

      /* If the alignment of a DF value causes an SF register to be
	 skipped, we will use that skipped register for the next SF
	 value.  */
      if ((TARGET_HITACHI || ca->renesas_abi)
	  && ca->free_single_fp_reg
	  && mode == SFmode)
	return gen_rtx_REG (mode, ca->free_single_fp_reg);

      regno = (BASE_ARG_REG (mode) + sh_round_reg (*ca, mode))
	      ^ (mode == SFmode && TARGET_SH4
		 && TARGET_LITTLE_ENDIAN
		 && ! TARGET_HITACHI && ! ca->renesas_abi);
      return gen_rtx_REG (mode, regno);
    }

  return NULL_RTX;
}
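/* Example of the SCmode special case above: on little endian SH4, an
   SCmode argument rounded to register index 0 comes back as the
   PARALLEL [(reg 1, byte 0) (reg 0, byte 4)] relative to the first FP
   argument register, i.e. the two SFmode halves are swapped so each
   part lands where the callee expects it.  This reading of the ^ 1
   adjustments is a sketch for illustration.  */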
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be
   available.)  */
static void
sh_function_arg_advance (cumulative_args_t ca_v, machine_mode mode,
			 const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
    {
      /* Note that we've used the skipped register.  */
      if (mode == SFmode && ca->free_single_fp_reg)
	{
	  ca->free_single_fp_reg = 0;
	  return;
	}
      /* When we have a DF after an SF, there's an SF register that gets
	 skipped in order to align the DF value.  We note this skipped
	 register, because the next SF value will use it, and not the
	 SF that follows the DF.  */
      if (mode == DFmode
	  && sh_round_reg (*ca, DFmode) != sh_round_reg (*ca, SFmode))
	{
	  ca->free_single_fp_reg = (sh_round_reg (*ca, SFmode)
				    + BASE_ARG_REG (mode));
	}
    }

  if (! ((TARGET_SH4 || TARGET_SH2A) || ca->renesas_abi)
      || sh_pass_in_reg_p (*ca, mode, type))
    (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
     = (sh_round_reg (*ca, mode)
	+ (mode == BLKmode
	   ? CEIL (int_size_in_bytes (type), UNITS_PER_WORD)
	   : CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD))));
}
/* The Renesas calling convention doesn't quite fit into this scheme since
   the address is passed like an invisible argument, but one that is always
   passed in memory.  */
static rtx
sh_struct_value_rtx (tree fndecl, int incoming ATTRIBUTE_UNUSED)
{
  if (TARGET_HITACHI || sh_attr_renesas_p (fndecl))
    return NULL_RTX;
  return gen_rtx_REG (Pmode, 2);
}
/* Worker function for TARGET_FUNCTION_VALUE.

   For the SH, this is like LIBCALL_VALUE, except that we must change the
   mode like PROMOTE_MODE does.
   ??? PROMOTE_MODE is ignored for non-scalar types.  The set of types
   tested here has to be kept in sync with the one in
   explow.c:promote_mode.  */
static rtx
sh_function_value (const_tree valtype,
		   const_tree fn_decl_or_type,
		   bool outgoing ATTRIBUTE_UNUSED)
{
  if (fn_decl_or_type
      && !DECL_P (fn_decl_or_type))
    fn_decl_or_type = NULL;

  return gen_rtx_REG (
	   ((GET_MODE_CLASS (TYPE_MODE (valtype)) == MODE_INT
	     && GET_MODE_SIZE (TYPE_MODE (valtype)) < 4
	     && (TREE_CODE (valtype) == INTEGER_TYPE
		 || TREE_CODE (valtype) == ENUMERAL_TYPE
		 || TREE_CODE (valtype) == BOOLEAN_TYPE
		 || TREE_CODE (valtype) == REAL_TYPE
		 || TREE_CODE (valtype) == OFFSET_TYPE))
	    && sh_promote_prototypes (fn_decl_or_type)
	    ? SImode : TYPE_MODE (valtype)),
	   BASE_RETURN_VALUE_REG (TYPE_MODE (valtype)));
}
/* Worker function for TARGET_LIBCALL_VALUE.  */
static rtx
sh_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, BASE_RETURN_VALUE_REG (mode));
}
/* Return true if N is a possible register number of function value.  */
static bool
sh_function_value_regno_p (const unsigned int regno)
{
  return regno == FIRST_RET_REG
	 || (TARGET_SH2E && regno == FIRST_FP_RET_REG);
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */
static bool
sh_return_in_memory (const_tree type, const_tree fndecl)
{
  return TYPE_MODE (type) == BLKmode
	 || ((TARGET_HITACHI || sh_attr_renesas_p (fndecl))
	     && TREE_CODE (type) == RECORD_TYPE);
}
/* We actually emit the code in sh_expand_prologue.  We used to use
   a static variable to flag that we need to emit this code, but that
   doesn't work when inlining, when functions are deferred and then emitted
   later.  Fortunately, we already have two flags that are part of struct
   function that tell if a function uses varargs or stdarg.  */
static void
sh_setup_incoming_varargs (cumulative_args_t ca,
			   machine_mode mode,
			   tree type,
			   int *pretend_arg_size,
			   int second_time ATTRIBUTE_UNUSED)
{
  gcc_assert (cfun->stdarg);
  if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
    {
      int named_parm_regs, anon_parm_regs;

      named_parm_regs = (sh_round_reg (*get_cumulative_args (ca), mode)
			 + (mode == BLKmode
			    ? CEIL (int_size_in_bytes (type), UNITS_PER_WORD)
			    : CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD)));
      anon_parm_regs = NPARM_REGS (SImode) - named_parm_regs;
      if (anon_parm_regs > 0)
	*pretend_arg_size = anon_parm_regs * 4;
    }
}
static bool
sh_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return TARGET_HITACHI;
}

static bool
sh_pretend_outgoing_varargs_named (cumulative_args_t ca_v)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  return ! (TARGET_HITACHI || ca->renesas_abi);
}
/* Define the offset between two registers, one to be eliminated, and
   the other its replacement, at the start of a routine.  */
int
initial_elimination_offset (int from, int to)
{
  int regs_saved;
  int regs_saved_rounding = 0;
  int total_saved_regs_space;
  int total_auto_space;
  int save_flags = target_flags;
  HARD_REG_SET live_regs_mask;

  regs_saved = calc_live_regs (&live_regs_mask);

  total_auto_space = rounded_frame_size (regs_saved) - regs_saved_rounding;
  target_flags = save_flags;

  total_saved_regs_space = regs_saved + regs_saved_rounding;

  if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return total_saved_regs_space + total_auto_space
	   + crtl->args.info.byref_regs * 8;

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return total_saved_regs_space + total_auto_space
	   + crtl->args.info.byref_regs * 8;

  /* Initial gap between fp and sp is 0.  */
  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return 0;

  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return rounded_frame_size (0);

  if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return rounded_frame_size (0);

  gcc_assert (from == RETURN_ADDRESS_POINTER_REGNUM
	      && (to == HARD_FRAME_POINTER_REGNUM
		  || to == STACK_POINTER_REGNUM));
  return total_auto_space;
}
/* Parse the -mfixed-range= option string.  */
void
sh_fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';
      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }
}
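/* Example (hypothetical command line):

     sh-elf-gcc -mfixed-range=r10-r12 ...

   marks r10, r11 and r12 as fixed and call-used, so the compiler will
   not allocate them; several comma-separated ranges may be given.  */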
/* Insert any deferred function attributes from earlier pragmas.  */
static void
sh_insert_attributes (tree node, tree *attributes)
{
  tree attrs;

  if (TREE_CODE (node) != FUNCTION_DECL)
    return;

  /* We are only interested in fields.  */
  if (!DECL_P (node))
    return;

  /* Append the attributes to the deferred attributes.  */
  *sh_deferred_function_attributes_tail = *attributes;
  attrs = sh_deferred_function_attributes;
  if (!attrs)
    return;

  /* Some attributes imply or require the interrupt attribute.  */
  if (!lookup_attribute ("interrupt_handler", attrs)
      && !lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (node)))
    {
      /* If we have a trapa_handler, but no interrupt_handler attribute,
	 insert an interrupt_handler attribute.  */
      if (lookup_attribute ("trapa_handler", attrs) != NULL_TREE)
	/* We can't use sh_pr_interrupt here because that's not in the
	   java frontend.  */
	attrs
	  = tree_cons (get_identifier("interrupt_handler"), NULL_TREE, attrs);
      /* However, for sp_switch, trap_exit, nosave_low_regs and resbank,
	 if the interrupt attribute is missing, we ignore the attribute
	 and warn.  */
      else if (lookup_attribute ("sp_switch", attrs)
	       || lookup_attribute ("trap_exit", attrs)
	       || lookup_attribute ("nosave_low_regs", attrs)
	       || lookup_attribute ("resbank", attrs))
	{
	  tree *tail;

	  for (tail = attributes; attrs; attrs = TREE_CHAIN (attrs))
	    {
	      if (is_attribute_p ("sp_switch", TREE_PURPOSE (attrs))
		  || is_attribute_p ("trap_exit", TREE_PURPOSE (attrs))
		  || is_attribute_p ("nosave_low_regs", TREE_PURPOSE (attrs))
		  || is_attribute_p ("resbank", TREE_PURPOSE (attrs)))
		warning (OPT_Wattributes,
			 "%qE attribute only applies to interrupt functions",
			 TREE_PURPOSE (attrs));
	      else
		{
		  *tail = tree_cons (TREE_PURPOSE (attrs), NULL_TREE,
				     NULL_TREE);
		  tail = &TREE_CHAIN (*tail);
		}
	    }
	  attrs = *attributes;
	}
    }

  /* Install the processed list.  */
  *attributes = attrs;

  /* Clear deferred attributes.  */
  sh_deferred_function_attributes = NULL_TREE;
  sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
}
/*------------------------------------------------------------------------------
  Target specific attributes
  Supported attributes are:

  interrupt_handler
    Specifies this function is an interrupt handler.

  trapa_handler
    Like interrupt_handler, but don't save all registers.

  sp_switch
    Specifies an alternate stack for an interrupt handler to run on.

  trap_exit
    Use a trapa to exit an interrupt function instead of rte.

  nosave_low_regs
    Don't save r0..r7 in an interrupt handler function.
    This is useful on SH3* and SH4*, which have a separate set of low
    regs for user and privileged modes.
    This is mainly to be used for non-reentrant interrupt handlers (i.e.
    those that run with interrupts disabled and thus can't be
    interrupted themselves).

  renesas
    Use Renesas calling/layout conventions (functions and structures).

  resbank
    In case of an interrupt handler function, use a register bank to
    save registers R0-R14, MACH, MACL, GBR and PR.
    This is available only on SH2A targets.

  function_vector
    Declares a function to be called using the TBR relative addressing
    mode.  Takes an argument that specifies the slot number in the table
    where this function can be looked up by the JSR/N @@(disp8,TBR) insn.
*/
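/* Illustrative uses of these attributes (the slot number, trap number
   and stack name below are invented for the example):

     void isr (void) __attribute__ ((interrupt_handler,
				     sp_switch ("alt_stack"),
				     trap_exit (11)));
     void bank_isr (void) __attribute__ ((interrupt_handler, resbank));
     void tbr_func (void) __attribute__ ((function_vector (42)));  */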
/* Handle a 'resbank' attribute.  */
static tree
sh_handle_resbank_handler_attribute (tree * node, tree name,
				     tree args ATTRIBUTE_UNUSED,
				     int flags ATTRIBUTE_UNUSED,
				     bool * no_add_attrs)
{
  if (!TARGET_SH2A)
    {
      warning (OPT_Wattributes, "%qE attribute is supported only for SH2A",
	       name);
      *no_add_attrs = true;
    }
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Handle an "interrupt_handler" attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
sh_handle_interrupt_handler_attribute (tree *node, tree name,
				       tree args ATTRIBUTE_UNUSED,
				       int flags ATTRIBUTE_UNUSED,
				       bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Handle a 'function_vector' attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
sh2a_handle_function_vector_handler_attribute (tree * node, tree name,
					       tree args ATTRIBUTE_UNUSED,
					       int flags ATTRIBUTE_UNUSED,
					       bool * no_add_attrs)
{
  if (!TARGET_SH2A)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to SH2A",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
    {
      /* The argument must be a constant integer.  */
      warning (OPT_Wattributes,
	       "%qE attribute argument not an integer constant",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
    {
      /* The argument value must be between 0 to 255.  */
      warning (OPT_Wattributes,
	       "%qE attribute argument should be between 0 to 255",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Returns true if current function has been assigned the attribute
   'function_vector'.  */
bool
sh2a_is_function_vector_call (rtx x)
{
  if (GET_CODE (x) == SYMBOL_REF
      && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
    {
      tree tr = SYMBOL_REF_DECL (x);

      if (sh2a_function_vector_p (tr))
	return true;
    }

  return false;
}
/* Returns the function vector number, if the attribute
   'function_vector' is assigned, otherwise returns zero.  */
int
sh2a_get_function_vector_number (rtx x)
{
  int num;
  tree list, t;

  if ((GET_CODE (x) == SYMBOL_REF)
      && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
    {
      t = SYMBOL_REF_DECL (x);

      if (TREE_CODE (t) != FUNCTION_DECL)
	return 0;

      list = SH_ATTRIBUTES (t);
      while (list)
	{
	  if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
	    {
	      num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
	      return num;
	    }

	  list = TREE_CHAIN (list);
	}

      return 0;
    }
  else
    return 0;
}
/* Handle an "sp_switch" attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
sh_handle_sp_switch_attribute (tree *node, tree name, tree args,
			       int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
    {
      /* The argument must be a constant string.  */
      warning (OPT_Wattributes, "%qE attribute argument not a string constant",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Handle a "trap_exit" attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
sh_handle_trap_exit_attribute (tree *node, tree name, tree args,
			       int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }
  /* The argument specifies a trap number to be used in a trapa instruction
     at function exit (instead of an rte instruction).  */
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
    {
      /* The argument must be a constant integer.  */
      warning (OPT_Wattributes, "%qE attribute argument not an "
	       "integer constant", name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
static tree
sh_handle_renesas_attribute (tree *node ATTRIBUTE_UNUSED,
			     tree name ATTRIBUTE_UNUSED,
			     tree args ATTRIBUTE_UNUSED,
			     int flags ATTRIBUTE_UNUSED,
			     bool *no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
/* True if __attribute__((renesas)) or -mrenesas.  */
bool
sh_attr_renesas_p (const_tree td)
{
  if (TARGET_HITACHI)
    return true;
  if (td == NULL_TREE)
    return false;
  if (DECL_P (td))
    td = TREE_TYPE (td);
  if (td == error_mark_node)
    return false;
  return (lookup_attribute ("renesas", TYPE_ATTRIBUTES (td))
	  != NULL_TREE);
}

/* True if __attribute__((renesas)) or -mrenesas, for the current
   function.  */
bool
sh_cfun_attr_renesas_p (void)
{
  return sh_attr_renesas_p (current_function_decl);
}
/* Returns true if the current function has the "interrupt_handler"
   attribute set.  */
bool
sh_cfun_interrupt_handler_p (void)
{
  return (lookup_attribute ("interrupt_handler",
			    DECL_ATTRIBUTES (current_function_decl))
	  != NULL_TREE);
}
/* Returns true if FUNC has been assigned the attribute
   "function_vector".  */
bool
sh2a_function_vector_p (tree func)
{
  if (TREE_CODE (func) != FUNCTION_DECL)
    return false;

  tree list = SH_ATTRIBUTES (func);
  while (list)
    {
      if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
	return true;

      list = TREE_CHAIN (list);
    }
  return false;
}
/* Returns true if given tree has the "resbank" attribute set.  */
bool
sh_cfun_resbank_handler_p (void)
{
  return ((lookup_attribute ("resbank",
			     DECL_ATTRIBUTES (current_function_decl))
	   != NULL_TREE)
	  && (lookup_attribute ("interrupt_handler",
				DECL_ATTRIBUTES (current_function_decl))
	      != NULL_TREE) && TARGET_SH2A);
}

/* Returns true if the current function has a "trap_exit" attribute set.  */
bool
sh_cfun_trap_exit_p (void)
{
  return lookup_attribute ("trap_exit", DECL_ATTRIBUTES (current_function_decl))
	 != NULL_TREE;
}
/* Implement TARGET_CHECK_PCH_TARGET_FLAGS.  */
static const char *
sh_check_pch_target_flags (int old_flags)
{
  if ((old_flags ^ target_flags) & (MASK_SH1 | MASK_SH2 | MASK_SH3
				    | MASK_SH_E | MASK_HARD_SH4
				    | MASK_FPU_SINGLE | MASK_SH4))
    return _("created and used with different architectures / ABIs");
  if ((old_flags ^ target_flags) & MASK_HITACHI)
    return _("created and used with different ABIs");
  if ((old_flags ^ target_flags) & MASK_LITTLE_ENDIAN)
    return _("created and used with different endianness");
  return NULL;
}
/* Predicates used by the templates.  */

/* Returns true if OP is MACL, MACH or PR.  The input must be a REG rtx.
   Used only in general_movsrc_operand.  */
bool
system_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (REGNO (op))
    {
    case PR_REG:
    case MACL_REG:
    case MACH_REG:
      return true;
    }
  return false;
}
/* Returns true if OP is a floating point value with value 0.0.  */
bool
fp_zero_operand (rtx op)
{
  const REAL_VALUE_TYPE *r;

  if (GET_MODE (op) != SFmode)
    return false;

  r = CONST_DOUBLE_REAL_VALUE (op);
  return real_equal (r, &dconst0) && ! REAL_VALUE_MINUS_ZERO (*r);
}

/* Returns true if OP is a floating point value with value 1.0.  */
bool
fp_one_operand (rtx op)
{
  if (GET_MODE (op) != SFmode)
    return false;

  return real_equal (CONST_DOUBLE_REAL_VALUE (op), &dconst1);
}
/* Return the TLS type for TLS symbols.  */
enum tls_model
tls_symbolic_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) != SYMBOL_REF)
    return TLS_MODEL_NONE;
  return SYMBOL_REF_TLS_MODEL (op);
}
/* Return the destination address of a branch.  */
static int
branch_dest (rtx branch)
{
  rtx dest = SET_SRC (PATTERN (branch));
  int dest_uid;

  if (GET_CODE (dest) == IF_THEN_ELSE)
    dest = XEXP (dest, 1);
  dest = XEXP (dest, 0);
  dest_uid = INSN_UID (dest);
  return INSN_ADDRESSES (dest_uid);
}
/* Return nonzero if REG is not used after INSN.
   We assume REG is a reload reg, and therefore does
   not live past labels.  It may live past calls or jumps though.  */
bool
reg_unused_after (rtx reg, rtx_insn *insn)
{
  enum rtx_code code;
  rtx set;

  /* If the reg is set by this instruction, then it is safe for our
     case.  Disregard the case where this is a store to memory, since
     we are checking a register used in the store address.  */
  set = single_set (insn);
  if (set && !MEM_P (SET_DEST (set))
      && reg_overlap_mentioned_p (reg, SET_DEST (set)))
    return true;

  while ((insn = NEXT_INSN (insn)))
    {
      if (!INSN_P (insn))
	continue;

      code = GET_CODE (insn);

#if 0
      /* If this is a label that existed before reload, then the register
	 is dead here.  However, if this is a label added by reorg, then
	 the register may still be live here.  We can't tell the difference,
	 so we just ignore labels completely.  */
      if (code == CODE_LABEL)
	return 1;
      /* else */
#endif

      if (code == JUMP_INSN)
	return false;

      /* If this is a sequence, we must handle them all at once.
	 We could have for instance a call that sets the target register,
	 and an insn in a delay slot that uses the register.  In this case,
	 we must return 0.  */
      else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
	{
	  rtx_sequence *seq = as_a <rtx_sequence *> (PATTERN (insn));
	  int i;
	  int retval = 0;

	  for (i = 0; i < seq->len (); i++)
	    {
	      rtx_insn *this_insn = seq->insn (i);
	      rtx set = single_set (this_insn);

	      if (CALL_P (this_insn))
		code = CALL_INSN;
	      else if (JUMP_P (this_insn))
		{
		  if (INSN_ANNULLED_BRANCH_P (this_insn))
		    return false;
		  code = JUMP_INSN;
		}

	      if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
		return false;
	      if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
		{
		  if (!MEM_P (SET_DEST (set)))
		    retval = 1;
		  else
		    return false;
		}
	      if (set == NULL_RTX
		  && reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
		return false;
	    }
	  if (retval == 1)
	    return true;
	  else if (code == JUMP_INSN)
	    return false;
	}

      set = single_set (insn);
      if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
	return false;
      if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
	return !MEM_P (SET_DEST (set));
      if (set == NULL && reg_overlap_mentioned_p (reg, PATTERN (insn)))
	return false;

      if (code == CALL_INSN && call_really_used_regs[REGNO (reg)])
	return true;
    }
  return true;
}
static GTY(()) rtx t_reg_rtx;
rtx
get_t_reg_rtx (void)
{
  if (! t_reg_rtx)
    t_reg_rtx = gen_rtx_REG (SImode, T_REG);
  return t_reg_rtx;
}
static GTY(()) tree fpscr_values;

static void
emit_fpu_switch (rtx scratch, int index)
{
  rtx src;

  if (fpscr_values == NULL)
    {
      tree t;

      t = build_index_type (integer_one_node);
      t = build_array_type (integer_type_node, t);
      t = build_decl (BUILTINS_LOCATION,
		      VAR_DECL, get_identifier ("__fpscr_values"), t);
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      TREE_USED (t) = 1;

      fpscr_values = t;
    }

  src = DECL_RTL (fpscr_values);
  if (!can_create_pseudo_p ())
    {
      emit_move_insn (scratch, XEXP (src, 0));
      if (index != 0)
	emit_insn (gen_addsi3 (scratch, scratch, GEN_INT (index * 4)));
      src = adjust_automodify_address (src, SImode, scratch, index * 4);
    }
  else
    src = adjust_address (src, SImode, index * 4);

  emit_insn (gen_lds_fpscr (src));
}
static rtx get_free_reg (HARD_REG_SET);

/* This function returns a register to use to load the address to load
   the fpscr from.  Currently it always returns r1 or r7, but when we are
   able to use pseudo registers after combine, or have a better mechanism
   for choosing a register, it should be done here.  */
/* REGS_LIVE is the liveness information for the point for which we
   need this allocation.  In some bare-bones exit blocks, r1 is live at the
   start.  We can even have all of r0..r3 being live:
__complex__ long long f (double d) { if (d == 0) return 2; else return 3; }
   The INSN before which new insns are placed will clobber the register
   we return.  If a basic block consists only of setting the return value
   register to a pseudo and using that register, the return value is not
   live before or after this block, yet we'll insert our insns right in
   the middle.  */
static rtx
get_free_reg (HARD_REG_SET regs_live)
{
  if (! TEST_HARD_REG_BIT (regs_live, 1))
    return gen_rtx_REG (Pmode, 1);

  /* Hard reg 1 is live; since this is a small register classes target,
     there shouldn't be anything but a jump before the function end.  */
  gcc_assert (!TEST_HARD_REG_BIT (regs_live, 7));
  return gen_rtx_REG (Pmode, 7);
}
/* This function will set the fpscr from memory.
   MODE is the mode we are setting it to.  */
void
fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
{
  enum attr_fp_mode fp_mode = (enum attr_fp_mode) mode;
  enum attr_fp_mode norm_mode = ACTUAL_NORMAL_MODE (FP_MODE);

  rtx addr_reg;

  addr_reg = !can_create_pseudo_p () ? get_free_reg (regs_live) : NULL_RTX;
  emit_fpu_switch (addr_reg, fp_mode == norm_mode);
}
/* Is the given character a logical line separator for the assembler?  */
#ifndef IS_ASM_LOGICAL_LINE_SEPARATOR
#define IS_ASM_LOGICAL_LINE_SEPARATOR(C, STR) ((C) == ';')
#endif

static bool
sequence_insn_p (rtx_insn *insn)
{
  rtx_insn *prev, *next;

  prev = PREV_INSN (insn);
  if (prev == NULL)
    return false;

  next = NEXT_INSN (prev);
  if (next == NULL)
    return false;

  return INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE;
}
int
sh_insn_length_adjustment (rtx_insn *insn)
{
  /* Instructions with unfilled delay slots take up an extra two bytes for
     the nop in the delay slot.  */
  if (((NONJUMP_INSN_P (insn)
	&& GET_CODE (PATTERN (insn)) != USE
	&& GET_CODE (PATTERN (insn)) != CLOBBER)
       || CALL_P (insn) || JUMP_P (insn))
      && ! sequence_insn_p (insn)
      && get_attr_needs_delay_slot (insn) == NEEDS_DELAY_SLOT_YES)
    return 2;

  /* Increase the insn length of a cbranch without a delay slot insn to
     force a delay slot which will be stuffed with a nop.  */
  if (TARGET_CBRANCH_FORCE_DELAY_SLOT && TARGET_SH2
      && JUMP_P (insn) && get_attr_type (insn) == TYPE_CBRANCH
      && ! sequence_insn_p (insn))
    return 2;

  /* sh-dsp parallel processing insns take four bytes instead of two.  */
  if (NONJUMP_INSN_P (insn))
    {
      int sum = 0;
      rtx body = PATTERN (insn);
      const char *templ;
      char c;
      bool maybe_label = true;

      if (GET_CODE (body) == ASM_INPUT)
	templ = XSTR (body, 0);
      else if (asm_noperands (body) >= 0)
	templ
	  = decode_asm_operands (body, NULL, NULL, NULL, NULL, NULL);
      else
	return 0;
      do
	{
	  int ppi_adjust = 0;

	  do
	    c = *templ++;
	  while (c == ' ' || c == '\t');
	  /* all sh-dsp parallel-processing insns start with p.
	     The only non-ppi sh insn starting with p is pref.
	     The only ppi starting with pr is prnd.  */
	  if ((c == 'p' || c == 'P') && strncasecmp ("re", templ, 2))
	    ppi_adjust = 2;
	  /* The repeat pseudo-insn expands to three insns, a total of
	     six bytes in size.  */
	  else if ((c == 'r' || c == 'R')
		   && ! strncasecmp ("epeat", templ, 5))
	    ppi_adjust = 4;
	  while (c && c != '\n'
		 && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c, templ))
	    {
	      /* If this is a label, it is obviously not a ppi insn.  */
	      if (c == ':' && maybe_label)
		{
		  ppi_adjust = 0;
		  break;
		}
	      else if (c == '\'' || c == '"')
		maybe_label = false;
	      c = *templ++;
	    }
	  sum += ppi_adjust;
	  maybe_label = c != ':';
	}
      while (c);
      return sum;
    }
  return 0;
}
/* Return TRUE for a valid displacement for the REG+disp addressing
   with MODE.  */
bool
sh_legitimate_index_p (machine_mode mode, rtx op, bool consider_sh2a,
		       bool allow_zero)
{
  if (! CONST_INT_P (op))
    return false;

  const HOST_WIDE_INT offset = INTVAL (op);
  const int max_disp = sh_max_mov_insn_displacement (mode, consider_sh2a);
  const int align_mask = mov_insn_alignment_mask (mode, consider_sh2a);

  /* If the mode does not support any displacement always return false.
     Even though an index of '0' is actually always valid, it will cause
     troubles when e.g. a DFmode move is split into two SFmode moves,
     where one SFmode move will have index '0' and the other move will
     have an index of '4'.  */
  if (!allow_zero && max_disp < 1)
    return false;

  return offset >= 0 && offset <= max_disp && (offset & align_mask) == 0;
}
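/* For illustration (limits quoted from the comment on
   sh_find_mov_disp_adjust below): without SH2A's extended insns the
   largest valid offsets are 60 bytes for SImode, 30 for HImode and 15
   for QImode, and the offset must be a multiple of the access size;
   e.g. mov.l @(60,Rn),Rm is valid, while @(62,Rn) or @(2,Rn) for an
   SImode access is not.  */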
/* Recognize an RTL expression that is a valid memory address for
   an instruction.
   The MODE argument is the machine mode for the MEM expression
   that wants to use this address.
   Allow  REG
	  REG+disp
	  REG+r0
	  REG++
	  --REG
	  GBR
	  GBR+disp  */
static bool
sh_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  if (REG_P (x) && REGNO (x) == GBR_REG)
    return true;

  if (MAYBE_BASE_REGISTER_RTX_P (x, strict))
    return true;
  else if ((GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
	   && MAYBE_BASE_REGISTER_RTX_P (XEXP (x, 0), strict))
    return true;
  else if (GET_CODE (x) == PLUS)
    {
      rtx xop0 = XEXP (x, 0);
      rtx xop1 = XEXP (x, 1);

      if (REG_P (xop0) && REGNO (xop0) == GBR_REG)
	return gbr_displacement (xop1, mode);

      if (GET_MODE_SIZE (mode) <= 8
	  && MAYBE_BASE_REGISTER_RTX_P (xop0, strict)
	  && sh_legitimate_index_p (mode, xop1, TARGET_SH2A, false))
	return true;

      if (GET_MODE_SIZE (mode) <= 4
	  || (TARGET_FPU_DOUBLE && TARGET_FMOVD && mode == DFmode))
	{
	  if (MAYBE_BASE_REGISTER_RTX_P (xop1, strict)
	      && MAYBE_INDEX_REGISTER_RTX_P (xop0, strict))
	    return true;
	  if (MAYBE_INDEX_REGISTER_RTX_P (xop1, strict)
	      && MAYBE_BASE_REGISTER_RTX_P (xop0, strict))
	    return true;
	}
    }

  return false;
}
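/* Concrete instances of the accepted forms (illustrative):
     mov.l  @r4,r1         REG
     mov.l  @(8,r4),r1     REG+disp
     mov.l  @(r0,r4),r1    REG+r0
     mov.l  @r4+,r1        REG++
     mov.l  r1,@-r4        --REG
     mov.l  @(4,gbr),r0    GBR+disp  */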
/* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
   isn't protected by a PIC unspec.  */
bool
nonpic_symbol_mentioned_p (rtx x)
{
  const char *fmt;
  int i;

  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
      || GET_CODE (x) == PC)
    return true;

  /* We don't want to look into the possible MEM location of a
     CONST_DOUBLE, since we're not going to use it, in general.  */
  if (GET_CODE (x) == CONST_DOUBLE)
    return false;

  if (GET_CODE (x) == UNSPEC
      && (XINT (x, 1) == UNSPEC_PIC
	  || XINT (x, 1) == UNSPEC_GOT
	  || XINT (x, 1) == UNSPEC_GOTOFF
	  || XINT (x, 1) == UNSPEC_GOTPLT
	  || XINT (x, 1) == UNSPEC_GOTTPOFF
	  || XINT (x, 1) == UNSPEC_DTPOFF
	  || XINT (x, 1) == UNSPEC_TPOFF
	  || XINT (x, 1) == UNSPEC_PLT
	  || XINT (x, 1) == UNSPEC_PCREL
	  || XINT (x, 1) == UNSPEC_SYMOFF
	  || XINT (x, 1) == UNSPEC_PCREL_SYMOFF
	  || XINT (x, 1) == UNSPEC_GOTFUNCDESC
	  || XINT (x, 1) == UNSPEC_GOTOFFFUNCDESC))
    return false;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
	      return true;
	}
      else if (fmt[i] == 'e' && nonpic_symbol_mentioned_p (XEXP (x, i)))
	return true;
    }

  return false;
}
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  */
rtx
legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED,
			rtx reg)
{
  if (tls_symbolic_operand (orig, Pmode) != TLS_MODEL_NONE)
    return orig;

  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (orig)))
    {
      if (reg == NULL_RTX)
	reg = gen_reg_rtx (Pmode);

      if (TARGET_FDPIC
	  && GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (orig))
	{
	  /* Weak functions may be NULL which doesn't work with
	     GOTOFFFUNCDESC because the runtime offset is not known.  */
	  if (SYMBOL_REF_WEAK (orig))
	    emit_insn (gen_symGOTFUNCDESC2reg (reg, orig));
	  else
	    emit_insn (gen_symGOTOFFFUNCDESC2reg (reg, orig));
	}
      else if (TARGET_FDPIC
	       && (GET_CODE (orig) == LABEL_REF
		   || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_DECL (orig)
		       && (TREE_READONLY (SYMBOL_REF_DECL (orig))
			   || SYMBOL_REF_EXTERNAL_P (orig)
			   || DECL_SECTION_NAME(SYMBOL_REF_DECL (orig))))))
	/* In FDPIC, GOTOFF can only be used for writable data.  */
	emit_insn (gen_symGOT2reg (reg, orig));
      else
	emit_insn (gen_symGOTOFF2reg (reg, orig));
      return reg;
    }
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      if (reg == NULL_RTX)
	reg = gen_reg_rtx (Pmode);

      if (TARGET_FDPIC && SYMBOL_REF_FUNCTION_P (orig))
	emit_insn (gen_symGOTFUNCDESC2reg (reg, orig));
      else
	emit_insn (gen_symGOT2reg (reg, orig));
      return reg;
    }
  return orig;
}
/* Given a (logical) mode size and an offset in bytes, try to find the
   appropriate displacement value for a mov insn.  On SH the displacements
   are limited to max. 60 bytes for SImode, max. 30 bytes in HImode and max.
   15 bytes in QImode.  To compensate this we create a new base address by
   adding an adjustment value to it.

   If the originally requested offset is greater than 127 we prefer using
   values 124..127 over 128..131 to increase opportunities to use the
   add #imm, Rn insn.

   In some cases it is possible that a requested offset might seem unaligned
   or inappropriate for the mode size, like offset = 2 and mode size = 4.
   This is compensated by adjusting the base address so that the effective
   address of the displacement move insn will be aligned.

   This is not the best possible way of rebasing the base address, as it
   does not look at other present displacement addressings around it.
   In some cases this can create more base address adjustments than would
   actually be necessary.  */
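/* Worked example (computed with the SImode values quoted above,
   max_disp = 60 and a 4 byte mov insn size): a requested offset of 131
   gives align_modifier = 4, offset_adjust = ((131 + 4) & ~60) - 4 = 127
   and mov_disp = 4, i.e. the base is rebased with add #127 and the move
   uses @(4,Rn), matching the stated preference for 124..127 over
   128..131.  */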
struct disp_adjust
{
  rtx offset_adjust;
  rtx mov_disp;
};

static struct disp_adjust
sh_find_mov_disp_adjust (machine_mode mode, HOST_WIDE_INT offset)
{
  struct disp_adjust res = { NULL_RTX, NULL_RTX };

  /* Do not try to use SH2A's large displacements here, because this would
     effectively disable the small displacement insns.  */
  const int mode_sz = GET_MODE_SIZE (mode);
  const int mov_insn_sz = mov_insn_size (mode, false);
  const int max_disp = sh_max_mov_insn_displacement (mode, false);
  const int max_disp_next = max_disp + mov_insn_sz;
  HOST_WIDE_INT align_modifier = offset > 127 ? mov_insn_sz : 0;
  HOST_WIDE_INT offset_adjust;

  /* In some cases this actually does happen and we must check for it.  */
  if (mode_sz < 1 || mode_sz > 8 || max_disp < 1)
    return res;

  /* Keeps the previous behavior for QImode displacement addressing.
     This just decides how the offset is re-based.  Removing this special
     case will result in slightly bigger code on average, but it's not that
     bad actually.  */
  if (mov_insn_sz == 1)
    align_modifier = 0;

  offset_adjust = ((offset + align_modifier) & ~max_disp) - align_modifier;

  if (mode_sz + offset - offset_adjust <= max_disp_next)
    {
      res.offset_adjust = GEN_INT (offset_adjust);
      res.mov_disp = GEN_INT (offset - offset_adjust);
    }

  return res;
}
/* Try to modify an illegitimate address and make it legitimate.
   If we find one, return the new, valid address.
   Otherwise, return the original address.  */
static rtx
sh_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  if (flag_pic)
    x = legitimize_pic_address (oldx, mode, NULL_RTX);

  if ((TARGET_FPU_DOUBLE && mode == DFmode)
      || (TARGET_SH2E && mode == SFmode))
    return x;

  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
      && BASE_REGISTER_RTX_P (XEXP (x, 0)))
    {
      struct disp_adjust adj = sh_find_mov_disp_adjust (mode,
							INTVAL (XEXP (x, 1)));

      if (adj.offset_adjust != NULL_RTX && adj.mov_disp != NULL_RTX)
	{
	  rtx sum = expand_binop (Pmode, add_optab, XEXP (x, 0),
				  adj.offset_adjust, NULL_RTX, 0,
				  OPTAB_LIB_WIDEN);
	  return gen_rtx_PLUS (Pmode, sum, adj.mov_disp);
	}
    }
  return x;
}
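/* E.g. (sketch, following the worked numbers above):
   (plus:SI (reg:SI Rn) (const_int 131)) is rewritten as
   (plus:SI (plus:SI (reg:SI Rn) (const_int 127)) (const_int 4)),
   where the inner PLUS is emitted as a real addition and the remaining
   displacement of 4 fits the mov.l @(disp,Rn) constraint.  */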
/* Attempt to replace *p, which is an address that needs reloading, with
   a valid memory address for an operand of mode MODE.
   Like for sh_legitimize_address, for the SH we try to get a normal form
   of the address.  That will allow inheritance of the address reloads.  */
bool
sh_legitimize_reload_address (rtx *p, machine_mode mode, int opnum,
			      int itype)
{
  enum reload_type type = (enum reload_type) itype;
  const int mode_sz = GET_MODE_SIZE (mode);

  if (GET_CODE (*p) == PLUS && CONST_INT_P (XEXP (*p, 1))
      && MAYBE_BASE_REGISTER_RTX_P (XEXP (*p, 0), true))
    {
      const HOST_WIDE_INT offset = INTVAL (XEXP (*p, 1));
      struct disp_adjust adj = sh_find_mov_disp_adjust (mode, offset);

      if (TARGET_SH2A && mode == DFmode && (offset & 0x7))
	{
	  push_reload (*p, NULL_RTX, p, NULL,
		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
	  return true;
	}

      if (TARGET_SH2E && mode == SFmode)
	{
	  *p = copy_rtx (*p);
	  push_reload (*p, NULL_RTX, p, NULL,
		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
	  return true;
	}

      /* FIXME: Do not allow to legitimize QImode and HImode displacement
	 moves because then reload has a problem figuring the constraint
	 that the move insn target/source reg must be R0.
	 Or maybe some handling is wrong in sh_secondary_reload for this
	 to work properly?  */
      if ((mode_sz == 4 || mode_sz == 8)
	  && ! (TARGET_SH4 && mode == DFmode)
	  && adj.offset_adjust != NULL_RTX && adj.mov_disp != NULL_RTX)
	{
	  rtx sum = gen_rtx_PLUS (Pmode, XEXP (*p, 0), adj.offset_adjust);
	  *p = gen_rtx_PLUS (Pmode, sum, adj.mov_disp);
	  push_reload (sum, NULL_RTX, &XEXP (*p, 0), NULL,
		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
	  return true;
	}
    }

  /* We must re-recognize what we created before.  */
  if (GET_CODE (*p) == PLUS
      && (mode_sz == 4 || mode_sz == 8)
      && GET_CODE (XEXP (*p, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (*p, 0), 1))
      && MAYBE_BASE_REGISTER_RTX_P (XEXP (XEXP (*p, 0), 0), true)
      && CONST_INT_P (XEXP (*p, 1))
      && ! (TARGET_SH2E && mode == SFmode))
    {
      /* Because this address is so complex, we know it must have
	 been created by LEGITIMIZE_RELOAD_ADDRESS before; thus,
	 it is already unshared, and needs no further unsharing.  */
      push_reload (XEXP (*p, 0), NULL_RTX, &XEXP (*p, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
      return true;
    }

  return false;
}
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */
static rtx
sh_delegitimize_address (rtx orig_x)
{
  rtx x, y;

  orig_x = delegitimize_mem_from_attrs (orig_x);

  x = orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);
  if (GET_CODE (x) == CONST)
    {
      y = XEXP (x, 0);
      if (GET_CODE (y) == UNSPEC)
	{
	  if (XINT (y, 1) == UNSPEC_GOT
	      || XINT (y, 1) == UNSPEC_GOTOFF
	      || XINT (y, 1) == UNSPEC_SYMOFF)
	    return XVECEXP (y, 0, 0);
	  else if (XINT (y, 1) == UNSPEC_PCREL_SYMOFF)
	    {
	      if (GET_CODE (XVECEXP (y, 0, 0)) == CONST)
		{
		  rtx symplt = XEXP (XVECEXP (y, 0, 0), 0);

		  if (GET_CODE (symplt) == UNSPEC
		      && (XINT (symplt, 1) == UNSPEC_PLT
			  || XINT (symplt, 1) == UNSPEC_PCREL))
		    return XVECEXP (symplt, 0, 0);
		}
	    }
	}
    }

  return orig_x;
}
/* Mark the use of a constant in the literal table.  If the constant
   has multiple labels, make it unique.  */
static rtx
mark_constant_pool_use (rtx x)
{
  rtx_insn *insn, *lab;
  rtx pattern;

  if (x == NULL_RTX)
    return x;

  switch (GET_CODE (x))
    {
    case LABEL_REF:
      x = XEXP (x, 0);
    case CODE_LABEL:
      break;
    default:
      return x;
    }

  /* Get the first label in the list of labels for the same constant
     and delete other labels in the list.  */
  lab = as_a <rtx_insn *> (x);
  for (insn = PREV_INSN (lab); insn; insn = PREV_INSN (insn))
    {
      if (!LABEL_P (insn)
	  || LABEL_REFS (insn) != NEXT_INSN (insn))
	break;
      lab = insn;
    }

  for (rtx insn = LABEL_REFS (lab); insn; insn = LABEL_REFS (insn))
    as_a <rtx_insn *> (insn)->set_deleted ();

  /* Mark constants in a window.  */
  for (insn = NEXT_INSN (as_a <rtx_insn *> (x)); insn; insn = NEXT_INSN (insn))
    {
      if (!NONJUMP_INSN_P (insn))
	continue;

      pattern = PATTERN (insn);
      if (GET_CODE (pattern) != UNSPEC_VOLATILE)
	continue;

      switch (XINT (pattern, 1))
	{
	case UNSPECV_CONST2:
	case UNSPECV_CONST4:
	case UNSPECV_CONST8:
	  XVECEXP (pattern, 0, 1) = const1_rtx;
	  break;
	case UNSPECV_WINDOW_END:
	  if (XVECEXP (pattern, 0, 0) == x)
	    return lab;
	  break;
	case UNSPECV_CONST_END:
	  return lab;
	default:
	  break;
	}
    }

  return lab;
}
/* Return true if it's possible to redirect BRANCH1 to the destination
   of an unconditional jump BRANCH2.  We only want to do this if the
   resulting branch will have a short displacement.  */
static bool
sh_can_follow_jump (const rtx_insn *branch1, const rtx_insn *branch2)
{
  /* Don't follow if BRANCH2 is possible to be a jump crossing between
     hot and cold partitions.  */
  if (flag_reorder_blocks_and_partition
      && simplejump_p (branch2)
      && CROSSING_JUMP_P (branch2))
    return false;

  if (flag_expensive_optimizations && simplejump_p (branch2))
    {
      rtx dest = XEXP (SET_SRC (single_set (branch2)), 0);
      rtx_insn *insn;
      int distance;

      for (distance = 0, insn = NEXT_INSN (branch1);
	   insn && distance < 256;
	   insn = PREV_INSN (insn))
	{
	  if (insn == dest)
	    return true;
	  else
	    distance += get_attr_length (insn);
	}
      for (distance = 0, insn = NEXT_INSN (branch1);
	   insn && distance < 256;
	   insn = NEXT_INSN (insn))
	{
	  if (insn == dest)
	    return true;
	  else
	    distance += get_attr_length (insn);
	}
    }
  return false;
}
/* Return nonzero if register old_reg can be renamed to register new_reg.  */
bool
sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
			 unsigned int new_reg)
{
  /* Interrupt functions can only use registers that have already been
     saved by the prologue, even if they would normally be
     call-clobbered.  */
  if (sh_cfun_interrupt_handler_p () && !df_regs_ever_live_p (new_reg))
    return false;

  return true;
}
/* Function to update the integer COST
   based on the relationship between INSN that is dependent on
   DEP_INSN through the dependence LINK.  The default is to make no
   adjustment to COST.  This can be used for example to specify to
   the scheduler that an output- or anti-dependence does not incur
   the same cost as a data-dependence.  The return value should be
   the new value for COST.  */
static int
sh_adjust_cost (rtx_insn *insn, rtx link ATTRIBUTE_UNUSED,
		rtx_insn *dep_insn, int cost)
{
  rtx reg, use_pat;

  if (REG_NOTE_KIND (link) == 0)
    {
      enum attr_type type;
      rtx dep_set;

      if (recog_memoized (insn) < 0
	  || recog_memoized (dep_insn) < 0)
	return cost;

      dep_set = single_set (dep_insn);

      /* The latency that we specify in the scheduling description refers
	 to the actual output, not to an auto-increment register; for that,
	 the latency is one.  */
      if (dep_set && MEM_P (SET_SRC (dep_set)) && cost > 1)
	{
	  rtx set = single_set (insn);

	  if (set
	      && !reg_mentioned_p (SET_DEST (dep_set), SET_SRC (set))
	      && (!MEM_P (SET_DEST (set))
		  || !reg_mentioned_p (SET_DEST (dep_set),
				       XEXP (SET_DEST (set), 0))))
	    cost = 1;
	}
      /* The only input for a call that is timing-critical is the
	 function's address.  */
      if (CALL_P (insn))
	{
	  rtx call = get_call_rtx_from (insn);
	  if (call
	      /* sibcalli_thunk uses a symbol_ref in an unspec.  */
	      && (GET_CODE (XEXP (XEXP (call, 0), 0)) == UNSPEC
		  || ! reg_set_p (XEXP (XEXP (call, 0), 0), dep_insn)))
	    cost -= TARGET_SH4_300 ? 3 : 6;
	}
      /* Likewise, the most timing critical input for an sfuncs call
	 is the function address.  However, sfuncs typically start
	 using their arguments pretty quickly.
	 Assume a four cycle delay for SH4 before they are needed.
	 Cached ST40-300 calls are quicker, so assume only a one
	 cycle delay there.
	 ??? Maybe we should encode the delays till input registers
	 are needed by sfuncs into the sfunc call insn.  */
      /* All sfunc calls are parallels with at least four components.
	 Exploit this to avoid unnecessary calls to sfunc_uses_reg.  */
      else if (GET_CODE (PATTERN (insn)) == PARALLEL
	       && XVECLEN (PATTERN (insn), 0) >= 4
	       && (reg = sfunc_uses_reg (insn)))
	{
	  if (! reg_set_p (reg, dep_insn))
	    cost -= TARGET_SH4_300 ? 1 : 4;
	}
      if (TARGET_HARD_SH4 && !TARGET_SH4_300)
	{
	  enum attr_type dep_type = get_attr_type (dep_insn);

	  if (dep_type == TYPE_FLOAD || dep_type == TYPE_PCFLOAD)
	    cost--;
	  else if ((dep_type == TYPE_LOAD_SI || dep_type == TYPE_PCLOAD_SI)
		   && (type = get_attr_type (insn)) != TYPE_CALL
		   && type != TYPE_SFUNC)
	    cost--;
	  /* When the preceding instruction loads the shift amount of
	     the following SHAD/SHLD, the latency of the load is increased
	     by 1 cycle.  */
	  if (get_attr_type (insn) == TYPE_DYN_SHIFT
	      && get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
	      && reg_overlap_mentioned_p (SET_DEST (dep_set),
					  XEXP (SET_SRC (single_set (insn)),
						1)))
	    cost++;
	  /* When an LS group instruction with a latency of less than
	     3 cycles is followed by a double-precision floating-point
	     instruction, FIPR, or FTRV, the latency of the first
	     instruction is increased to 3 cycles.  */
	  else if (cost < 3
		   && get_attr_insn_class (dep_insn) == INSN_CLASS_LS_GROUP
		   && get_attr_dfp_comp (insn) == DFP_COMP_YES)
	    cost = 3;
	  /* The lsw register of a double-precision computation is ready one
	     cycle earlier.  */
	  else if (reload_completed
		   && get_attr_dfp_comp (dep_insn) == DFP_COMP_YES
		   && (use_pat = single_set (insn))
		   && ! regno_use_in (REGNO (SET_DEST (single_set (dep_insn))),
				      SET_SRC (use_pat)))
	    cost -= 1;

	  if (get_attr_any_fp_comp (dep_insn) == ANY_FP_COMP_YES
	      && get_attr_late_fp_use (insn) == LATE_FP_USE_YES)
	    cost -= 1;
	}
      else if (TARGET_SH4_300)
	{
	  /* Stores need their input register two cycles later.  */
	  if (dep_set && cost >= 1
	      && ((type = get_attr_type (insn)) == TYPE_STORE
		  || type == TYPE_PSTORE
		  || type == TYPE_FSTORE || type == TYPE_MAC_MEM))
	    {
	      rtx set = single_set (insn);

	      if (!reg_mentioned_p (SET_SRC (set), XEXP (SET_DEST (set), 0))
		  && rtx_equal_p (SET_SRC (set), SET_DEST (dep_set)))
		{
		  cost -= 2;
		  /* But don't reduce the cost below 1 if the address depends
		     on a side effect of dep_insn.  */
		  if (cost < 1
		      && modified_in_p (XEXP (SET_DEST (set), 0), dep_insn))
		    cost = 1;
		}
	    }
	}
    }
  /* An anti-dependence penalty of two applies if the first insn is a double
     precision fadd / fsub / fmul.  */
  else if (!TARGET_SH4_300
	   && REG_NOTE_KIND (link) == REG_DEP_ANTI
	   && recog_memoized (dep_insn) >= 0
	   && (get_attr_type (dep_insn) == TYPE_DFP_ARITH
	       || get_attr_type (dep_insn) == TYPE_DFP_MUL)
	   /* A lot of alleged anti-flow dependences are fake,
	      so check this one is real.  */
	   && flow_dependent_p (dep_insn, insn))
    cost = 2;

  return cost;
}
/* Check if INSN is flow-dependent on DEP_INSN.  Can also be used to check
   if DEP_INSN is anti-flow dependent on INSN.  */
static bool
flow_dependent_p (rtx insn, rtx dep_insn)
{
  rtx tmp = PATTERN (insn);

  note_stores (PATTERN (dep_insn), flow_dependent_p_1, &tmp);
  return tmp == NULL_RTX;
}

/* A helper function for flow_dependent_p called through note_stores.  */
static void
flow_dependent_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  rtx * pinsn = (rtx *) data;

  if (*pinsn && reg_referenced_p (x, *pinsn))
    *pinsn = NULL_RTX;
}
/* For use by sh_allocate_initial_value.  Note that sh.md contains some
   'special function' patterns (type sfunc) that clobber pr, but that
   do not look like function calls to leaf_function_p.  Hence we must
   do this extra check.  */
static int
sh_pr_n_sets (void)
{
  return DF_REG_DEF_COUNT (PR_REG);
}

/* Return where to allocate pseudo for a given hard register initial
   value.  */
static rtx
sh_allocate_initial_value (rtx hard_reg)
{
  if (REGNO (hard_reg) == PR_REG)
    {
      if (crtl->is_leaf && ! sh_pr_n_sets ())
	return hard_reg;
      else
	return gen_frame_mem (Pmode, return_address_pointer_rtx);
    }

  return NULL_RTX;
}
/* This function returns "2" to indicate dual issue for the SH4
   processor.  To be used by the DFA pipeline description.  */
static int
sh_issue_rate (void)
{
  if (TARGET_SUPERSCALAR)
    return 2;
  else
    return 1;
}
/* Functions for ready queue reordering for sched1.  */

/* Get weight for mode for a set x.  */
static short
find_set_regmode_weight (rtx x, machine_mode mode)
{
  if (GET_CODE (x) == CLOBBER && register_operand (SET_DEST (x), mode))
    return 1;
  if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
    {
      if (REG_P (SET_DEST (x)))
	{
	  if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
	    return 1;
	  else
	    return 0;
	}
      return 1;
    }
  return 0;
}
/* Get regmode weight for insn.  */
static short
find_insn_regmode_weight (rtx insn, machine_mode mode)
{
  short reg_weight = 0;
  rtx x;

  /* Increment weight for each register born here.  */
  x = PATTERN (insn);
  reg_weight += find_set_regmode_weight (x, mode);
  if (GET_CODE (x) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
	{
	  x = XVECEXP (PATTERN (insn), 0, j);
	  reg_weight += find_set_regmode_weight (x, mode);
	}
    }
  /* Decrement weight for each register that dies here.  */
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
    {
      if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
	{
	  rtx note = XEXP (x, 0);
	  if (REG_P (note) && GET_MODE (note) == mode)
	    reg_weight--;
	}
    }
  return reg_weight;
}
/* Calculate regmode weights for all insns of a basic block.  */
static void
find_regmode_weight (basic_block b, machine_mode mode)
{
  rtx_insn *insn, *next_tail, *head, *tail;

  get_ebb_head_tail (b, b, &head, &tail);
  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      /* Handle register life information.  */
      if (!INSN_P (insn))
	continue;

      if (mode == SFmode)
	INSN_REGMODE_WEIGHT (insn, mode) =
	  find_insn_regmode_weight (insn, mode)
	  + 2 * find_insn_regmode_weight (insn, DFmode);
      else if (mode == SImode)
	INSN_REGMODE_WEIGHT (insn, mode) =
	  find_insn_regmode_weight (insn, mode)
	  + 2 * find_insn_regmode_weight (insn, DImode);
    }
}
/* Comparison function for ready queue sorting.  */
static int
rank_for_reorder (const void *x, const void *y)
{
  rtx_insn *tmp = *(rtx_insn * const *) y;
  rtx_insn *tmp2 = *(rtx_insn * const *) x;

  /* The insn in a schedule group should be issued first.  */
  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
    return SCHED_GROUP_P (tmp2) ? 1 : -1;

  /* If insns are equally good, sort by INSN_LUID (original insn order);
     this minimizes instruction movement, thus minimizing sched's effect on
     register pressure.  */
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
}
/* Resort the array A in which only element at index N may be out of order.  */
static void
swap_reorder (rtx_insn **a, int n)
{
  rtx_insn *insn = a[n - 1];
  int i = n - 2;

  while (i >= 0 && rank_for_reorder (a + i, &insn) >= 0)
    {
      a[i + 1] = a[i];
      i -= 1;
    }
  a[i + 1] = insn;
}

/* Sort the ready list by ascending priority.  */
static void
ready_reorder (rtx_insn **ready, int nready)
{
  if (nready == 2)
    swap_reorder (ready, nready);
  else if (nready > 2)
    qsort (ready, nready, sizeof (rtx_insn *), rank_for_reorder);
}
/* Count life regions of r0 for a block.  */
static int
find_r0_life_regions (basic_block b)
{
  rtx_insn *end, *insn;
  rtx pset;
  rtx r0_reg;
  int live;
  int set;
  int death = 0;

  if (REGNO_REG_SET_P (df_get_live_in (b), R0_REG))
    {
      set = 2;
      live = 1;
    }
  else
    {
      set = 0;
      live = 0;
    }

  insn = BB_HEAD (b);
  end = BB_END (b);
  r0_reg = gen_rtx_REG (SImode, R0_REG);
  while (1)
    {
      if (INSN_P (insn))
	{
	  if (find_regno_note (insn, REG_DEAD, R0_REG))
	    {
	      death += live;
	      live = 0;
	    }
	  if (!live
	      && (pset = single_set (insn))
	      && reg_overlap_mentioned_p (r0_reg, SET_DEST (pset))
	      && !find_regno_note (insn, REG_UNUSED, R0_REG))
	    {
	      set++;
	      live = 1;
	    }
	}
      if (insn == end)
	break;
      insn = NEXT_INSN (insn);
    }
  return set - death;
}
/* Calculate regmode weights for all insns of all basic blocks.  */
static void
sh_md_init_global (FILE *dump ATTRIBUTE_UNUSED,
		   int verbose ATTRIBUTE_UNUSED,
		   int old_max_uid)
{
  basic_block b;

  regmode_weight[0] = (short *) xcalloc (old_max_uid, sizeof (short));
  regmode_weight[1] = (short *) xcalloc (old_max_uid, sizeof (short));
  r0_life_regions = 0;

  FOR_EACH_BB_REVERSE_FN (b, cfun)
    {
      find_regmode_weight (b, SImode);
      find_regmode_weight (b, SFmode);
      if (!reload_completed)
	r0_life_regions += find_r0_life_regions (b);
    }

  CURR_REGMODE_PRESSURE (SImode) = 0;
  CURR_REGMODE_PRESSURE (SFmode) = 0;
}
static void
sh_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
		     int verbose ATTRIBUTE_UNUSED)
{
  if (regmode_weight[0])
    {
      free (regmode_weight[0]);
      regmode_weight[0] = NULL;
    }
  if (regmode_weight[1])
    {
      free (regmode_weight[1]);
      regmode_weight[1] = NULL;
    }
}
/* Cache the can_issue_more so that we can return it from reorder2.  Also,
   keep count of register pressures on SImode and SFmode.  */
static int
sh_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
		   int sched_verbose ATTRIBUTE_UNUSED,
		   rtx_insn *insn,
		   int can_issue_more)
{
  if (GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    cached_can_issue_more = can_issue_more - 1;
  else
    cached_can_issue_more = can_issue_more;

  if (reload_completed)
    return cached_can_issue_more;

  CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
  CURR_REGMODE_PRESSURE (SFmode) += INSN_REGMODE_WEIGHT (insn, SFmode);

  return cached_can_issue_more;
}
static void
sh_md_init (FILE *dump ATTRIBUTE_UNUSED,
	    int verbose ATTRIBUTE_UNUSED,
	    int veclen ATTRIBUTE_UNUSED)
{
  CURR_REGMODE_PRESSURE (SImode) = 0;
  CURR_REGMODE_PRESSURE (SFmode) = 0;
}
/* Some magic numbers.  */
/* Pressure on register r0 can lead to spill failures.  So avoid sched1 for
   functions that already have high pressure on r0.  */
#define R0_MAX_LIFE_REGIONS 2
/* Register Pressure thresholds for SImode and SFmode registers.  */
#define SIMODE_MAX_WEIGHT 5
#define SFMODE_MAX_WEIGHT 10

/* Return true if the pressure is high for MODE.  */
static bool
high_pressure (machine_mode mode)
{
  /* Pressure on register r0 can lead to spill failures.  So avoid sched1 for
     functions that already have high pressure on r0.  */
  if (r0_life_regions >= R0_MAX_LIFE_REGIONS)
    return true;

  if (mode == SFmode)
    return (CURR_REGMODE_PRESSURE (SFmode) > SFMODE_MAX_WEIGHT);
  else
    return (CURR_REGMODE_PRESSURE (SImode) > SIMODE_MAX_WEIGHT);
}
/* Reorder ready queue if register pressure is high.  */
static int
sh_reorder (FILE *dump ATTRIBUTE_UNUSED,
	    int sched_verbose ATTRIBUTE_UNUSED,
	    rtx_insn **ready,
	    int *n_readyp,
	    int clock_var ATTRIBUTE_UNUSED)
{
  if (reload_completed)
    return sh_issue_rate ();

  if (high_pressure (SFmode) || high_pressure (SImode))
    ready_reorder (ready, *n_readyp);

  return sh_issue_rate ();
}
/* Skip cycles if the current register pressure is high.  */
static int
sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
	     int sched_verbose ATTRIBUTE_UNUSED,
	     rtx_insn **ready ATTRIBUTE_UNUSED,
	     int *n_readyp ATTRIBUTE_UNUSED,
	     int clock_var ATTRIBUTE_UNUSED)
{
  if (reload_completed)
    return cached_can_issue_more;

  if (high_pressure (SFmode) || high_pressure (SImode))
    skip_cycles = 1;

  return cached_can_issue_more;
}
/* Skip cycles without sorting the ready queue.  This will move insns from
   Q->R.  If this is the last cycle we are skipping, allow sorting of the
   ready queue by sh_reorder.  */

/* Generally, skipping this many cycles is sufficient for all insns to move
   from Q -> R.  */
#define MAX_SKIPS 8

static int
sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
		  int sched_verbose ATTRIBUTE_UNUSED,
		  rtx_insn *insn ATTRIBUTE_UNUSED,
		  int last_clock_var,
		  int clock_var,
		  int *sort_p)
{
  if (reload_completed)
    return 0;

  if (skip_cycles)
    {
      if ((clock_var - last_clock_var) < MAX_SKIPS)
	{
	  *sort_p = 0;
	  return 1;
	}
      /* If this is the last cycle we are skipping, allow reordering of R.  */
      if ((clock_var - last_clock_var) == MAX_SKIPS)
	{
	  *sort_p = 1;
	  return 1;
	}
    }

  skip_cycles = 0;

  return 0;
}
static reg_class_t
sh_target_reg_class (void)
{
  return NO_REGS;
}

static bool
sh_optimize_target_register_callee_saved (bool after_prologue_epilogue_gen
					  ATTRIBUTE_UNUSED)
{
  return false;
}

static bool
sh_ms_bitfield_layout_p (const_tree record_type ATTRIBUTE_UNUSED)
{
  return TARGET_HITACHI || sh_attr_renesas_p (record_type);
}
/*
   On the SH1..SH4, the trampoline looks like
   2 0002 D202		mov.l	l2,r2
   1 0000 D301		mov.l	l1,r3
   3 0004 422B		jmp	@r2
   4 0006 0009		nop
   5 0008 00000000	l1:	.long	area
   6 000c 00000000	l2:	.long	function

   FDPIC needs a form that includes a function descriptor and
   code to load the GOT register:
   0 0000 00000000		.long	l0
   1 0004 00000000		.long	gotval
   2 0008 D302		l0:	mov.l	l1,r3
   3 000a D203		mov.l	l2,r2
   4 000c 6122		mov.l	@r2,r1
   5 000e 5C21		mov.l	@(4,r2),r12
   6 0010 412B		jmp	@r1
   7 0012 0009		nop
   8 0014 00000000	l1:	.long	area
   9 0018 00000000	l2:	.long	function

   SH5 (compact) uses r1 instead of r3 for the static chain.  */
/* Emit insns to store a value at memory address + offset.  */
static void
sh_emit_storesi (rtx addr, HOST_WIDE_INT offset, rtx value)
{
  gcc_assert ((offset & 3) == 0);
  emit_move_insn (offset == 0
		  ? change_address (addr, SImode, NULL_RTX)
		  : adjust_address (addr, SImode, offset), value);
}
/* Emit insns to store w0 at addr + offset and w1 at addr + offset + 2.  */
static void
sh_emit_storehi (rtx addr, HOST_WIDE_INT offset, uint16_t w0, uint16_t w1)
{
  sh_emit_storesi (addr, offset, gen_int_mode (TARGET_LITTLE_ENDIAN
					       ? (w0 | (w1 << 16))
					       : (w1 | (w0 << 16)), SImode));
}
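/* For example (illustrative): sh_emit_storehi (mem, 0, 0xd202, 0xd301)
   stores the SImode word 0xd301d202 on little-endian targets and
   0xd202d301 on big-endian ones, so the opcode halfwords W0 and W1 land
   at increasing addresses either way.  */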
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */
static void
sh_trampoline_init (rtx tramp_mem, tree fndecl, rtx cxt)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx tramp = force_reg (Pmode, XEXP (tramp_mem, 0));

  if (TARGET_FDPIC)
    {
      rtx a = force_reg (Pmode, plus_constant (Pmode, XEXP (tramp_mem, 0), 8));

      sh_emit_storesi (tramp_mem, 0, a);
      sh_emit_storesi (tramp_mem, 4, sh_get_fdpic_reg_initial_val ());

      sh_emit_storehi (tramp_mem, 8, 0xd302, 0xd203);
      sh_emit_storehi (tramp_mem, 12, 0x6122, 0x5c21);
      sh_emit_storehi (tramp_mem, 16, 0x412b, 0x0009);

      sh_emit_storesi (tramp_mem, 20, cxt);
      sh_emit_storesi (tramp_mem, 24, fnaddr);
    }
  else
    {
      sh_emit_storehi (tramp_mem, 0, 0xd202, 0xd301);
      sh_emit_storehi (tramp_mem, 4, 0x422b, 0x0009);

      sh_emit_storesi (tramp_mem, 8, cxt);
      sh_emit_storesi (tramp_mem, 12, fnaddr);
    }
  if (TARGET_HARD_SH4)
    {
      if (!TARGET_INLINE_IC_INVALIDATE
	  || (!(TARGET_SH4A || TARGET_SH4_300) && TARGET_USERMODE))
	emit_library_call (function_symbol (NULL, "__ic_invalidate",
					    FUNCTION_ORDINARY).sym,
			   LCT_NORMAL, VOIDmode, 1, tramp, SImode);
      else
	emit_insn (gen_ic_invalidate_line (tramp));
    }
}
/* On SH5, trampolines are SHmedia code, so add 1 to the address.  */
static rtx
sh_trampoline_adjust_address (rtx tramp)
{
  return tramp;
}
/* FIXME: This is overly conservative.  A SHcompact function that
   receives arguments ``by reference'' will have them stored in its
   own stack frame, so it must not pass pointers or references to
   these arguments to other functions by means of sibling calls.  */
/* If PIC, we cannot make sibling calls to global functions
   because the PLT requires r12 to be live.  */
static bool
sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  return (1
	  && ! sh_cfun_interrupt_handler_p ()
	  && (! flag_pic || TARGET_FDPIC
	      || (decl && ! (TREE_PUBLIC (decl) || DECL_WEAK (decl)))
	      || (decl && DECL_VISIBILITY (decl) != VISIBILITY_DEFAULT)));
}
/* Expand to appropriate sym*_label2reg for SYM and SIBCALL_P.  */
void
sh_expand_sym_label2reg (rtx reg, rtx sym, rtx lab, bool sibcall_p)
{
  const_tree decl = SYMBOL_REF_DECL (sym);
  bool is_weak = (decl && DECL_P (decl) && DECL_WEAK (decl));

  if (!is_weak && SYMBOL_REF_LOCAL_P (sym))
    emit_insn (gen_sym_label2reg (reg, sym, lab));
  else if (sibcall_p && SYMBOL_REF_LOCAL_P (sym))
    emit_insn (gen_symPCREL_label2reg (reg, sym, lab));
  else
    emit_insn (gen_symPLT_label2reg (reg, sym, lab));
}
/* Machine specific built-in functions.  */

struct builtin_description
{
  bool (* const is_enabled) (void);
  const enum insn_code icode;
  const char *const name;
  int signature;
  tree fndecl;
};

/* This function can be used if there are any built-ins that are not for
   SHmedia.  It's commented out to avoid the defined-but-unused warning.  */
static bool
sh1_builtin_p (void)
{
  return TARGET_SH1;
}
/* describe number and signedness of arguments; arg[0] == result
   (1: unsigned, 2: signed, 4: don't care, 8: pointer, 0: no argument)  */
/* 9: 64-bit pointer, 10: 32-bit pointer */
static const char signature_args[][4] =
{
#define SH_BLTIN_V2SI2 0
  { 4, 4 },
#define SH_BLTIN_V4HI2 1
  { 4, 4 },
#define SH_BLTIN_V2SI3 2
  { 4, 4, 4 },
#define SH_BLTIN_V4HI3 3
  { 4, 4, 4 },
#define SH_BLTIN_V8QI3 4
  { 4, 4, 4 },
#define SH_BLTIN_MAC_HISI 5
  { 1, 4, 4, 1 },
#define SH_BLTIN_SH_HI 6
  { 4, 4, 1 },
#define SH_BLTIN_SH_SI 7
  { 4, 4, 1 },
#define SH_BLTIN_V4HI2V2SI 8
  { 4, 4, 4 },
#define SH_BLTIN_V4HI2V8QI 9
  { 4, 4, 4 },
#define SH_BLTIN_SISF 10
  { 4, 2 },
#define SH_BLTIN_LDUA_L 11
  { 2, 10 },
#define SH_BLTIN_LDUA_Q 12
  { 1, 10 },
#define SH_BLTIN_STUA_L 13
  { 0, 10, 2 },
#define SH_BLTIN_STUA_Q 14
  { 0, 10, 1 },
#define SH_BLTIN_LDUA_L64 15
  { 2, 9 },
#define SH_BLTIN_LDUA_Q64 16
  { 1, 9 },
#define SH_BLTIN_STUA_L64 17
  { 0, 9, 2 },
#define SH_BLTIN_STUA_Q64 18
  { 0, 9, 1 },
#define SH_BLTIN_NUM_SHARED_SIGNATURES 19
#define SH_BLTIN_2 19
#define SH_BLTIN_SU 19
  { 1, 2 },
#define SH_BLTIN_3 20
#define SH_BLTIN_SUS 20
  { 2, 2, 1 },
#define SH_BLTIN_PSSV 21
  { 0, 8, 2, 2 },
#define SH_BLTIN_XXUU 22
#define SH_BLTIN_UUUU 22
  { 1, 1, 1, 1 },
#define SH_BLTIN_PV 23
  { 0, 8 },
#define SH_BLTIN_VP 24
  { 8, 0 },
#define SH_BLTIN_UV 25
  { 1, 0 },
#define SH_BLTIN_VU 26
  { 0, 1 },
};

/* mcmv: operands considered unsigned.  */
/* mmulsum_wq, msad_ubq: result considered unsigned long long.  */
/* mperm: control value considered unsigned int.  */
/* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int.  */
/* mshards_q: returns signed short.  */
/* nsb: takes long long arg, returns unsigned char.  */
static struct builtin_description bdesc[] =
{
  { sh1_builtin_p,
    CODE_FOR_sts_fpscr, "__builtin_sh_get_fpscr", SH_BLTIN_UV, 0 },
  { sh1_builtin_p,
    CODE_FOR_set_fpscr, "__builtin_sh_set_fpscr", SH_BLTIN_VU, 0 },
};
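/* Usage sketch (illustrative; the mask value below is made up):

     unsigned int fpscr = __builtin_sh_get_fpscr ();
     __builtin_sh_set_fpscr (fpscr & ~3u);

   sh_atomic_assign_expand_fenv below builds trees around exactly this
   pair of built-ins.  */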
10413 static tree sh_builtin_get_fpscr
;
10414 static tree sh_builtin_set_fpscr
;
static void
sh_init_builtins (void)
{
  tree shared[SH_BLTIN_NUM_SHARED_SIGNATURES];
  memset (shared, 0, sizeof shared);

  for (unsigned int di = 0; di < ARRAY_SIZE (bdesc); ++di)
    {
      builtin_description* d = &bdesc[di];

      if (!d->is_enabled ())
	continue;

      tree type, arg_type = NULL_TREE;
      int signature = d->signature;

      if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES && shared[signature])
	type = shared[signature];
      else
	{
	  int has_result = signature_args[signature][0] != 0;
	  tree args[3];

	  if (! TARGET_FPU_ANY
	      && FLOAT_MODE_P (insn_data[d->icode].operand[0].mode))
	    continue;
	  for (unsigned int i = 0; i < ARRAY_SIZE (args); i++)
	    args[i] = NULL_TREE;
	  for (int i = 3; ; i--)
	    {
	      int arg = signature_args[signature][i];
	      int opno = i - 1 + has_result;

	      if (arg & 8)
		arg_type = ptr_type_node;
	      else if (arg)
		arg_type = (*lang_hooks.types.type_for_mode)
		  (insn_data[d->icode].operand[opno].mode, (arg & 1));
	      else if (i)
		continue;
	      else
		arg_type = void_type_node;
	      if (i == 0)
		break;
	      args[i - 1] = arg_type;
	    }
	  type = build_function_type_list (arg_type, args[0], args[1],
					   args[2], NULL_TREE);
	  if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
	    shared[signature] = type;
	}
      d->fndecl =
	add_builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
			      NULL, NULL_TREE);
      /* Recode {sts,set}_fpscr decls for sh_atomic_assign_expand_fenv.  */
      if (d->icode == CODE_FOR_sts_fpscr)
	sh_builtin_get_fpscr = d->fndecl;
      else if (d->icode == CODE_FOR_set_fpscr)
	sh_builtin_set_fpscr = d->fndecl;
    }
}
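/* In the SH4 FPSCR layout the exception flag field occupies bits 2..6 and
   the matching exception enable field sits five bits higher, in bits 7..11.
   The SH_FE_* constants below mirror the flag field, and shifting a flag
   mask left by SH_FE_EXCEPT_SHIFT yields the corresponding enable mask.  */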
/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV.  */
static void
sh_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  const unsigned SH_FE_INVALID = 64;
  const unsigned SH_FE_DIVBYZERO = 32;
  const unsigned SH_FE_OVERFLOW = 16;
  const unsigned SH_FE_UNDERFLOW = 8;
  const unsigned SH_FE_INEXACT = 4;
  const unsigned HOST_WIDE_INT SH_FE_ALL_EXCEPT = (SH_FE_INVALID
						   | SH_FE_DIVBYZERO
						   | SH_FE_OVERFLOW
						   | SH_FE_UNDERFLOW
						   | SH_FE_INEXACT);
  const unsigned HOST_WIDE_INT SH_FE_EXCEPT_SHIFT = 5;
  tree fenv_var, mask, ld_fenv, masked_fenv;
  tree new_fenv_var, reload_fenv, restore_fnenv;
  tree update_call, atomic_feraiseexcept, hold_fnclex;

  if (! TARGET_FPU_ANY)
    return;

  /* Generate the equivalent of :
       unsigned int fenv_var;
       fenv_var = __builtin_sh_get_fpscr ();

       unsigned int masked_fenv;
       masked_fenv = fenv_var & mask;

       __builtin_sh_set_fpscr (masked_fenv);  */

  fenv_var = create_tmp_var_raw (unsigned_type_node);
  mask = build_int_cst (unsigned_type_node,
			~((SH_FE_ALL_EXCEPT << SH_FE_EXCEPT_SHIFT)
			  | SH_FE_ALL_EXCEPT));
  ld_fenv = build2 (MODIFY_EXPR, unsigned_type_node,
		    fenv_var, build_call_expr (sh_builtin_get_fpscr, 0));
  masked_fenv = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_var, mask);
  hold_fnclex = build_call_expr (sh_builtin_set_fpscr, 1, masked_fenv);
  fenv_var = build4 (TARGET_EXPR, unsigned_type_node, fenv_var,
		     build2 (COMPOUND_EXPR, void_type_node, masked_fenv,
			     ld_fenv),
		     NULL_TREE, NULL_TREE);
  *hold = build2 (COMPOUND_EXPR, void_type_node, fenv_var, hold_fnclex);

  /* Store the value of masked_fenv to clear the exceptions:
       __builtin_sh_set_fpscr (masked_fenv);  */

  *clear = build_call_expr (sh_builtin_set_fpscr, 1, masked_fenv);

  /* Generate the equivalent of :
       unsigned int new_fenv_var;
       new_fenv_var = __builtin_sh_get_fpscr ();

       __builtin_sh_set_fpscr (fenv_var);

       __atomic_feraiseexcept (new_fenv_var);  */

  new_fenv_var = create_tmp_var_raw (unsigned_type_node);
  reload_fenv = build2 (MODIFY_EXPR, unsigned_type_node, new_fenv_var,
			build_call_expr (sh_builtin_get_fpscr, 0));
  restore_fnenv = build_call_expr (sh_builtin_set_fpscr, 1, fenv_var);
  atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  update_call = build_call_expr (atomic_feraiseexcept, 1,
				 fold_convert (integer_type_node,
					       new_fenv_var));
  *update = build2 (COMPOUND_EXPR, void_type_node,
		    build2 (COMPOUND_EXPR, void_type_node,
			    reload_fenv, restore_fnenv), update_call);
}
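/* The three trees built above are consumed by the generic expansion of
   atomic floating point compound assignments: *hold runs before the
   operation to save and mask FPSCR, *clear re-clears the exception flags
   when the compare-and-swap loop retries, and *update restores the saved
   environment and re-raises any exceptions that were collected meanwhile.  */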
/* Implements target hook vector_mode_supported_p.  */
static bool
sh_vector_mode_supported_p (machine_mode mode ATTRIBUTE_UNUSED)
{
  return false;
}

static bool
sh_frame_pointer_required (void)
{
  /* If needed override this in other tm.h files to cope with various OS
     lossage requiring a frame pointer.  */
  if (SUBTARGET_FRAME_POINTER_REQUIRED)
    return true;

  return false;
}
/* Implements target hook dwarf_calling_convention.  Return an enum
   of dwarf_calling_convention.  */
int
sh_dwarf_calling_convention (const_tree func)
{
  if (sh_attr_renesas_p (func))
    return DW_CC_GNU_renesas_sh;

  return DW_CC_normal;
}

/* Returns the sh builtin decl for CODE.  */
static tree
sh_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  if (code >= ARRAY_SIZE (bdesc))
    return error_mark_node;

  if (!bdesc[code].is_enabled ())
    return error_mark_node;

  return bdesc[code].fndecl;
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */
static rtx
sh_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		   machine_mode mode ATTRIBUTE_UNUSED, int ignore)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d = &bdesc[fcode];
  enum insn_code icode = d->icode;
  int signature = d->signature;
  int nop = 0;
  rtx op[4];

  if (signature_args[signature][0])
    {
      if (ignore)
	return NULL_RTX;

      machine_mode tmode = insn_data[icode].operand[0].mode;
      if (! target || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);
      op[nop++] = target;
    }
  else
    target = NULL_RTX;

  for (int i = 1; i <= 3; i++, nop++)
    {
      tree arg;
      machine_mode opmode, argmode;
      tree optype;

      if (! signature_args[signature][i])
	break;
      arg = CALL_EXPR_ARG (exp, i - 1);
      if (arg == error_mark_node)
	return const0_rtx;
      if (signature_args[signature][i] & 8)
	{
	  opmode = ptr_mode;
	  optype = ptr_type_node;
	}
      else
	{
	  opmode = insn_data[icode].operand[nop].mode;
	  optype = (*lang_hooks.types.type_for_mode) (opmode, 0);
	}
      argmode = TYPE_MODE (TREE_TYPE (arg));
      if (argmode != opmode)
	arg = build1 (NOP_EXPR, optype, arg);
      op[nop] = expand_expr (arg, NULL_RTX, opmode, EXPAND_NORMAL);
      if (! (*insn_data[icode].operand[nop].predicate) (op[nop], opmode))
	op[nop] = copy_to_mode_reg (opmode, op[nop]);
    }

  rtx pat = NULL_RTX;

  switch (nop)
    {
    case 1:
      pat = (*insn_data[d->icode].genfun) (op[0]);
      break;
    case 2:
      pat = (*insn_data[d->icode].genfun) (op[0], op[1]);
      break;
    case 3:
      pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2]);
      break;
    case 4:
      pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2], op[3]);
      break;
    default:
      gcc_unreachable ();
    }
  if (! pat)
    return NULL_RTX;
  emit_insn (pat);
  return target;
}
/* Return true if hard register REGNO can hold a value of machine-mode MODE.
   We can allow any mode in any general register.  The special registers
   only allow SImode.  Don't allow any mode in the PR.

   We cannot hold DCmode values in the XD registers because alter_reg
   handles subregs of them incorrectly.  We could work around this by
   spacing the XD registers like the DR registers, but this would require
   additional memory in every compilation to hold larger register vectors.
   We could hold SFmode / SCmode values in XD registers, but that
   would require a tertiary reload when reloading from / to memory,
   and a secondary reload to reload from / to general regs; that
   seems to be a losing proposition.

   We want to allow TImode FP regs so that when V4SFmode is loaded as TImode,
   it won't be ferried through GP registers first.  */
bool
sh_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (SPECIAL_REGISTER_P (regno))
    return mode == SImode;

  if (regno == FPUL_REG)
    return (mode == SImode || mode == SFmode);

  if (FP_REGISTER_P (regno) && mode == SFmode)
    return true;

  if (mode == V2SFmode)
    {
      if (((FP_REGISTER_P (regno) && (regno - FIRST_FP_REG) % 2 == 0)
	   || GENERAL_REGISTER_P (regno)))
	return true;
      else
	return false;
    }

  if (mode == V4SFmode)
    {
      if ((FP_REGISTER_P (regno) && (regno - FIRST_FP_REG) % 4 == 0)
	  || GENERAL_REGISTER_P (regno))
	return true;
      else
	return false;
    }

  if (mode == V16SFmode)
    return regno == FIRST_XD_REG;

  if (FP_REGISTER_P (regno))
    {
      if (mode == SImode
	  || ((TARGET_SH2E) && mode == SCmode)
	  || (((TARGET_FPU_DOUBLE && mode == DFmode) || mode == DCmode)
	      && ((regno - FIRST_FP_REG) & 1) == 0)
	  || (TARGET_SH4 && mode == TImode
	      && ((regno - FIRST_FP_REG) & 3) == 0))
	return true;
      else
	return false;
    }

  if (XD_REGISTER_P (regno))
    return mode == DFmode;

  if (TARGET_REGISTER_P (regno))
    return (mode == DImode || mode == SImode || mode == PDImode);

  if (regno == PR_REG)
    return mode == SImode;

  if (regno == FPSCR_REG)
    return mode == SImode;

  return true;
}
/* Specify the modes required to caller save a given hard regno.
   choose_hard_reg_mode chooses mode based on HARD_REGNO_MODE_OK
   and returns ?Imode for float regs when sh_hard_regno_mode_ok
   permits integer modes on them.  That makes LRA's split process
   unhappy.  See PR55212.  */
machine_mode
sh_hard_regno_caller_save_mode (unsigned int regno, unsigned int nregs,
				machine_mode mode)
{
  if (FP_REGISTER_P (regno)
      && (mode == SFmode
	  || mode == SCmode
	  || ((mode == DFmode || mode == DCmode)
	      && ((regno - FIRST_FP_REG) & 1) == 0)))
    return mode;

  return choose_hard_reg_mode (regno, nregs, false);
}
/* Return the class of registers for which a mode change from FROM to TO
   is invalid.  */
bool
sh_cannot_change_mode_class (machine_mode from, machine_mode to,
			     enum reg_class rclass)
{
  /* We want to enable the use of SUBREGs as a means to
     VEC_SELECT a single element of a vector.  */

  /* This effectively disallows using GENERAL_REGS for SFmode vector subregs.
     This can be problematic when SFmode vector subregs need to be accessed
     on the stack with displacement addressing, as it happens with -O0.
     Thus we disallow the mode change for -O0.  */
  if (to == SFmode && VECTOR_MODE_P (from) && GET_MODE_INNER (from) == SFmode)
    return optimize ? (reg_classes_intersect_p (GENERAL_REGS, rclass)) : false;

  if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
    {
      if (TARGET_LITTLE_ENDIAN)
	{
	  if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
	    return reg_classes_intersect_p (DF_REGS, rclass);
	}
      else
	{
	  if (GET_MODE_SIZE (from) < 8)
	    return reg_classes_intersect_p (DF_REGS, rclass);
	}
    }
  return false;
}

/* Return true if registers in machine mode MODE will likely be
   allocated to registers in small register classes.  */
bool
sh_small_register_classes_for_mode_p (machine_mode mode ATTRIBUTE_UNUSED)
{
  return true;
}

/* If ADDRESS refers to a CODE_LABEL, add NUSES to the number of times
   that label is used.  */
void
sh_mark_label (rtx address, int nuses)
{
  if (GOTOFF_P (address))
    {
      /* Extract the label or symbol.  */
      address = XEXP (address, 0);
      if (GET_CODE (address) == PLUS)
	address = XEXP (address, 0);
      address = XVECEXP (address, 0, 0);
    }
  if (GET_CODE (address) == LABEL_REF
      && LABEL_P (XEXP (address, 0)))
    LABEL_NUSES (XEXP (address, 0)) += nuses;
}
/* Compute extra cost of moving data between one register class
   and another.

   If SECONDARY*_RELOAD_CLASS says something about the src/dst pair, regclass
   uses this information.  Hence, the general register <-> floating point
   register information here is not used for SFmode.  */
static int
sh_register_move_cost (machine_mode mode,
		       reg_class_t srcclass, reg_class_t dstclass)
{
  if (dstclass == T_REGS || dstclass == PR_REGS)
    return 10;

  if (dstclass == MAC_REGS && srcclass == MAC_REGS)
    return 4;

  if (mode == SImode && TARGET_FMOVD
      && REGCLASS_HAS_FP_REG (srcclass)
      && REGCLASS_HAS_FP_REG (dstclass))
    return 4;

  if (REGCLASS_HAS_FP_REG (dstclass) && srcclass == T_REGS)
    return ((TARGET_HARD_SH4 && !optimize_size) ? 10 : 7);

  if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
      || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
    return 9;

  if ((REGCLASS_HAS_FP_REG (dstclass)
       && REGCLASS_HAS_GENERAL_REG (srcclass))
      || (REGCLASS_HAS_GENERAL_REG (dstclass)
	  && REGCLASS_HAS_FP_REG (srcclass)))
    {
      /* Discourage trying to use fp regs for a pointer.  This also
	 discourages fp regs with SImode because Pmode is an alias
	 of SImode on this target.  See PR target/48596.  */
      int addend = (mode == Pmode) ? 40 : 0;

      return ((TARGET_FMOVD ? 8 : 12) + addend)
	     * ((GET_MODE_SIZE (mode) + 7) / 8U);
    }

  if ((dstclass == FPUL_REGS
       && REGCLASS_HAS_GENERAL_REG (srcclass))
      || (srcclass == FPUL_REGS
	  && REGCLASS_HAS_GENERAL_REG (dstclass)))
    return 5;

  if ((dstclass == FPUL_REGS
       && (srcclass == PR_REGS || srcclass == MAC_REGS || srcclass == T_REGS))
      || (srcclass == FPUL_REGS
	  && (dstclass == PR_REGS || dstclass == MAC_REGS)))
    return 7;

  if ((srcclass == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
      || ((dstclass) == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
    return 20;

  if ((srcclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
      || (dstclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
    return 4;

  if (TARGET_FMOVD
      && ! REGCLASS_HAS_GENERAL_REG (srcclass)
      && ! REGCLASS_HAS_GENERAL_REG (dstclass))
    return 2 * ((GET_MODE_SIZE (mode) + 7) / 8U);

  return 2 * ((GET_MODE_SIZE (mode) + 3) / 4U);
}
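/* For example, with -mfmovd a Pmode (i.e. SImode) move between a general
   and a floating point register class costs (8 + 40) * ((4 + 7) / 8) = 48,
   which strongly discourages the allocator from keeping pointers in FP
   registers.  */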
static rtx_insn *
emit_load_ptr (rtx reg, rtx addr)
{
  rtx mem = gen_const_mem (ptr_mode, addr);

  if (Pmode != ptr_mode)
    mem = gen_rtx_SIGN_EXTEND (Pmode, mem);
  return emit_move_insn (reg, mem);
}
static void
sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
		    HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		    tree function)
{
  CUMULATIVE_ARGS cum;
  int structure_value_byref = 0;
  rtx this_rtx, this_value, sibcall, funexp;
  rtx_insn *insns;
  tree funtype = TREE_TYPE (function);
  int simple_add = CONST_OK_FOR_ADD (delta);
  int did_load = 0;
  rtx scratch0, scratch1, scratch2;

  reload_completed = 1;
  epilogue_completed = 1;
  crtl->uses_only_leaf_regs = 1;

  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  We have such a wide range of ABIs for the
     SH that it's best to do this completely machine independently.
     "this" is passed as first argument, unless a structure return pointer
     comes first, in which case "this" comes second.  */
  INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
#ifndef PCC_STATIC_STRUCT_RETURN
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    structure_value_byref = 1;
#endif /* not PCC_STATIC_STRUCT_RETURN */
  if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
    {
      tree ptype = build_pointer_type (TREE_TYPE (funtype));

      sh_function_arg_advance (pack_cumulative_args (&cum), Pmode, ptype,
			       true);
    }
  this_rtx
    = sh_function_arg (pack_cumulative_args (&cum), Pmode, ptr_type_node,
		       true);

  /* For SHcompact, we only have r0 for a scratch register: r1 is the
     static chain pointer (even if you can't have nested virtual functions
     right now, someone might implement them sometime), and the rest of the
     registers are used for argument passing, are callee-saved, or reserved.  */
  /* We need to check call_used_regs / fixed_regs in case -fcall_saved-reg /
     -ffixed-reg has been used.  */
  if (! call_used_regs[0] || fixed_regs[0])
    error ("r0 needs to be available as a call-clobbered register");
  scratch0 = scratch1 = scratch2 = gen_rtx_REG (Pmode, 0);

  if (call_used_regs[1] && ! fixed_regs[1])
    scratch1 = gen_rtx_REG (ptr_mode, 1);
  /* N.B., if not TARGET_HITACHI, register 2 is used to pass the pointer
     pointing where to return struct values.  */
  if (call_used_regs[3] && ! fixed_regs[3])
    scratch2 = gen_rtx_REG (Pmode, 3);

  this_value = plus_constant (Pmode, this_rtx, delta);
  if (vcall_offset
      && (simple_add || scratch0 != scratch1)
      && strict_memory_address_p (ptr_mode, this_value))
    {
      emit_load_ptr (scratch0, this_value);
      did_load = 1;
    }

  if (!delta)
    ; /* Do nothing.  */
  else if (simple_add)
    emit_move_insn (this_rtx, this_value);
  else
    {
      emit_move_insn (scratch1, GEN_INT (delta));
      emit_insn (gen_add2_insn (this_rtx, scratch1));
    }

  if (vcall_offset)
    {
      rtx offset_addr;

      if (!did_load)
	emit_load_ptr (scratch0, this_rtx);

      offset_addr = plus_constant (Pmode, scratch0, vcall_offset);
      if (strict_memory_address_p (ptr_mode, offset_addr))
	; /* Do nothing.  */
      else if (scratch0 != scratch1)
	{
	  /* scratch0 != scratch1, and we have indexed loads.  Get better
	     schedule by loading the offset into r1 and using an indexed
	     load - then the load of r1 can issue before the load from
	     (this_rtx + delta) finishes.  */
	  emit_move_insn (scratch1, GEN_INT (vcall_offset));
	  offset_addr = gen_rtx_PLUS (Pmode, scratch0, scratch1);
	}
      else if (CONST_OK_FOR_ADD (vcall_offset))
	{
	  emit_insn (gen_add2_insn (scratch0, GEN_INT (vcall_offset)));
	  offset_addr = scratch0;
	}
      else if (scratch0 != scratch1)
	{
	  emit_move_insn (scratch1, GEN_INT (vcall_offset));
	  emit_insn (gen_add2_insn (scratch0, scratch1));
	  offset_addr = scratch0;
	}
      else
	gcc_unreachable (); /* FIXME */
      emit_load_ptr (scratch0, offset_addr);

      if (Pmode != ptr_mode)
	scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
      emit_insn (gen_add2_insn (this_rtx, scratch0));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  /* If the function is overridden, so is the thunk, hence we don't
     need GOT addressing even if this is a public symbol.  */
  if (TARGET_SH1 && ! flag_weak)
    sibcall = gen_sibcalli_thunk (funexp, const0_rtx);
  else if (TARGET_SH2 && flag_pic)
    {
      if (TARGET_FDPIC)
	{
	  sibcall = gen_sibcall_pcrel_fdpic (funexp, const0_rtx);
	  XEXP (XVECEXP (sibcall, 0, 3), 0) = scratch2;
	}
      else
	{
	  sibcall = gen_sibcall_pcrel (funexp, const0_rtx);
	  XEXP (XVECEXP (sibcall, 0, 2), 0) = scratch2;
	}
    }
  else
    {
      emit_move_insn (scratch2, funexp);
      funexp = gen_rtx_MEM (FUNCTION_MODE, scratch2);
      sibcall = gen_sibcall (funexp, const0_rtx, NULL_RTX);
    }
  sibcall = emit_call_insn (sibcall);
  SIBLING_CALL_P (sibcall) = 1;
  use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this_rtx);

  /* Run just enough of rest_of_compilation to do scheduling and get
     the insns emitted.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */

  insns = get_insns ();

  if (optimize > 0)
    {
      if (! cfun->cfg)
	init_flow (cfun);
      split_all_insns_noflow ();
    }

  sh_reorg ();
  shorten_branches (insns);
  final_start_function (insns, file, 1);
  final (insns, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}
/* Return an RTX pair for the address and call site label of a function
   NAME of kind KIND, placing the result in TARGET if not NULL.  For
   SFUNC_STATIC, if FDPIC, the LAB member of result will be set to
   (const_int 0) if jsr should be used, or a label_ref if bsrf should
   be used.  For FDPIC, both SFUNC_GOT and SFUNC_STATIC will return the
   address of the function itself, not a function descriptor, so they
   can only be used with functions not using the FDPIC register that
   are known to be called directly without a PLT entry.  */
function_symbol_result
function_symbol (rtx target, const char *name, sh_function_kind kind)
{
  /* If this is not an ordinary function, the name usually comes from a
     string literal or an sprintf buffer.  Make sure we use the same
     string consistently, so that cse will be able to unify address loads.  */
  if (kind != FUNCTION_ORDINARY)
    name = IDENTIFIER_POINTER (get_identifier (name));
  rtx sym = gen_rtx_SYMBOL_REF (Pmode, name);
  rtx lab = const0_rtx;
  SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_FUNCTION;
  if (flag_pic)
    switch (kind)
      {
      case FUNCTION_ORDINARY:
	break;
      case SFUNC_GOT:
	{
	  rtx reg = target ? target : gen_reg_rtx (Pmode);

	  emit_insn (gen_symGOT2reg (reg, sym));
	  sym = reg;
	  break;
	}
      case SFUNC_STATIC:
	{
	  rtx reg = target ? target : gen_reg_rtx (Pmode);

	  if (TARGET_FDPIC)
	    {
	      /* We use PC-relative calls, since GOTOFF can only refer
		 to writable data.  This works along with sh_sfunc_call.  */
	      lab = PATTERN (gen_call_site ());
	      emit_insn (gen_sym_label2reg (reg, sym, lab));
	    }
	  else
	    {
	      /* ??? To allow cse to work, we use GOTOFF relocations.
		 We could add combiner patterns to transform this into
		 straight pc-relative calls with sym2PIC / bsrf when
		 label load and function call are still 1:1 and in the
		 same basic block during combine.  */
	      emit_insn (gen_symGOTOFF2reg (reg, sym));
	    }
	  sym = reg;
	  break;
	}
      }
  if (target && sym != target)
    {
      emit_move_insn (target, sym);
      return function_symbol_result (target, lab);
    }
  return function_symbol_result (sym, lab);
}
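/* An "sfunc" here is one of the SH specific library helper routines (such
   as the division helpers) that are called with non-standard register
   conventions; SFUNC_GOT and SFUNC_STATIC select how the address of such a
   routine is materialized when compiling with -fpic.  */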
/* Find the number of a general purpose register in S.  */
static int
scavenge_reg (HARD_REG_SET *s)
{
  int r;
  for (r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
    if (TEST_HARD_REG_BIT (*s, r))
      return r;
  return -1;
}

rtx
sh_get_pr_initial_val (void)
{
  /* If we haven't finished rtl generation, there might be a nonlocal label
     that we haven't seen yet.
     ??? get_hard_reg_initial_val fails if it is called after register
     allocation has started, unless it has been called before for the
     same register.  And even then, we end in trouble if we didn't use
     the register in the same basic block before.  So call
     get_hard_reg_initial_val now and wrap it in an unspec if we might
     need to replace it.  */
  /* ??? We also must do this for TARGET_SH1 in general, because otherwise
     combine can put the pseudo returned by get_hard_reg_initial_val into
     instructions that need a general purpose register, which will fail to
     be recognized when the pseudo becomes allocated to PR.  */
  rtx val = get_hard_reg_initial_val (Pmode, PR_REG);
  return gen_rtx_UNSPEC (SImode, gen_rtvec (1, val), UNSPEC_RA);
}
bool
sh_expand_t_scc (rtx operands[])
{
  enum rtx_code code = GET_CODE (operands[1]);
  rtx target = operands[0];
  rtx op0 = operands[2];
  rtx op1 = operands[3];
  rtx result = target;

  if (!REG_P (op0) || REGNO (op0) != T_REG
      || !CONST_INT_P (op1))
    return false;
  if (!REG_P (result))
    result = gen_reg_rtx (SImode);
  HOST_WIDE_INT val = INTVAL (op1);
  if ((code == EQ && val == 1) || (code == NE && val == 0))
    emit_insn (gen_movt (result, get_t_reg_rtx ()));
  else if ((code == EQ && val == 0) || (code == NE && val == 1))
    emit_insn (gen_movnegt (result, get_t_reg_rtx ()));
  else if (code == EQ || code == NE)
    emit_insn (gen_move_insn (result, GEN_INT (code == NE)));
  else
    return false;
  if (result != target)
    emit_move_insn (target, result);
  return true;
}
/* INSN is an sfunc; return the rtx that describes the address used.  */
static rtx
extract_sfunc_addr (rtx insn)
{
  rtx pattern, part = NULL_RTX;
  int len, i;

  pattern = PATTERN (insn);
  len = XVECLEN (pattern, 0);
  for (i = 0; i < len; i++)
    {
      part = XVECEXP (pattern, 0, i);
      if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == Pmode
	  && GENERAL_REGISTER_P (true_regnum (XEXP (part, 0))))
	return XEXP (part, 0);
    }
  gcc_assert (GET_CODE (XVECEXP (pattern, 0, 0)) == UNSPEC_VOLATILE);
  return XVECEXP (XVECEXP (pattern, 0, 0), 0, 1);
}

/* Verify that the register in use_sfunc_addr still agrees with the address
   used in the sfunc.  This prevents fill_slots_from_thread from changing
   use_sfunc_addr.
   INSN is the use_sfunc_addr instruction, and REG is the register it
   guards.  */
bool
check_use_sfunc_addr (rtx_insn *insn, rtx reg)
{
  /* Search for the sfunc.  It should really come right after INSN.  */
  while ((insn = NEXT_INSN (insn)))
    {
      if (LABEL_P (insn) || JUMP_P (insn))
	break;
      if (! INSN_P (insn))
	continue;

      if (rtx_sequence *seq = dyn_cast<rtx_sequence *> (PATTERN (insn)))
	insn = seq->insn (0);
      if (GET_CODE (PATTERN (insn)) != PARALLEL
	  || get_attr_type (insn) != TYPE_SFUNC)
	continue;
      return rtx_equal_p (extract_sfunc_addr (insn), reg);
    }
  gcc_unreachable ();
}
/* This function returns a constant rtx that represents 2**15 / pi in
   SFmode.  It's used to scale a fixed-point signed 16.16-bit fraction
   of a full circle back to an SFmode value, i.e. 0x10000 maps to 2*pi.  */
static GTY(()) rtx sh_fsca_sf2int_rtx;

rtx
sh_fsca_sf2int (void)
{
  if (! sh_fsca_sf2int_rtx)
    {
      REAL_VALUE_TYPE rv;

      real_from_string (&rv, "10430.378350470453");
      sh_fsca_sf2int_rtx = const_double_from_real_value (rv, SFmode);
    }

  return sh_fsca_sf2int_rtx;
}
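/* 10430.378350470453 is 2**15 / pi (32768 / 3.14159...): scaling by this
   constant maps pi radians to 0x8000 and hence a full 2*pi circle to
   0x10000, the fixed-point angle format consumed by the fsca instruction.  */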
/* This function returns a constant rtx that represents pi / 2**15 in
   SFmode.  It's used to scale SFmode angles, in radians, to a
   fixed-point signed 16.16-bit fraction of a full circle, i.e. 2*pi
   maps to 0x10000.  */
static GTY(()) rtx sh_fsca_int2sf_rtx;

rtx
sh_fsca_int2sf (void)
{
  if (! sh_fsca_int2sf_rtx)
    {
      REAL_VALUE_TYPE rv;

      real_from_string (&rv, "9.587379924285257e-5");
      sh_fsca_int2sf_rtx = const_double_from_real_value (rv, SFmode);
    }

  return sh_fsca_int2sf_rtx;
}
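/* 9.587379924285257e-5 is pi / 2**15, the reciprocal of the constant
   above; it maps the fixed-point angle 0x10000 back to 2*pi radians.  */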
/* Initialize the CUMULATIVE_ARGS structure.  */
void
sh_init_cumulative_args (CUMULATIVE_ARGS *pcum,
			 tree fntype,
			 rtx libname ATTRIBUTE_UNUSED,
			 tree fndecl,
			 signed int n_named_args,
			 machine_mode mode)
{
  pcum->arg_count[(int) SH_ARG_FLOAT] = 0;
  pcum->free_single_fp_reg = 0;
  pcum->stack_regs = 0;
  pcum->byref_regs = 0;
  pcum->byref = 0;
  pcum->outgoing = (n_named_args == -1) ? 0 : 1;

  /* XXX - Should we check TARGET_HITACHI here ???  */
  pcum->renesas_abi = sh_attr_renesas_p (fntype) ? 1 : 0;

  if (fntype)
    {
      pcum->force_mem = ((TARGET_HITACHI || pcum->renesas_abi)
			 && aggregate_value_p (TREE_TYPE (fntype), fndecl));
      pcum->prototype_p = prototype_p (fntype);
      pcum->arg_count[(int) SH_ARG_INT] = false;
    }
  else
    {
      pcum->arg_count[(int) SH_ARG_INT] = 0;
      pcum->prototype_p = FALSE;
      if (mode != VOIDmode)
	{
	  /* If the default ABI is the Renesas ABI then all library
	     calls must assume that the library will be using the
	     Renesas ABI.  So if the function would return its result
	     in memory then we must force the address of this memory
	     block onto the stack.  Ideally we would like to call
	     targetm.calls.return_in_memory() here but we do not have
	     the TYPE or the FNDECL available so we synthesize the
	     contents of that function as best we can.  */
	  pcum->force_mem =
	    ((TARGET_DEFAULT & MASK_HITACHI)
	     && (mode == BLKmode
		 || (GET_MODE_SIZE (mode) > 4
		     && !(mode == DFmode
			  && TARGET_FPU_DOUBLE))));
	}
      else
	pcum->force_mem = FALSE;
    }
}
rtx
sh_gen_truncate (machine_mode mode, rtx x, int need_sign_ext)
{
  enum rtx_code code = TRUNCATE;

  if (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
    {
      rtx inner = XEXP (x, 0);
      machine_mode inner_mode = GET_MODE (inner);

      if (inner_mode == mode)
	return inner;
      else if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
	x = inner;
      else if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode)
	       && (! need_sign_ext || GET_CODE (x) == SIGN_EXTEND))
	{
	  code = GET_CODE (x);
	  x = inner;
	}
    }
  return gen_rtx_fmt_e (code, mode, x);
}

/* Load and store depend on the highpart of the address.  However,
   set_attr_alternative does not give well-defined results before reload,
   so we must look at the rtl ourselves to see if any of the feeding
   registers is used in a memref.

   Return true iff INSN contains a MEM.  */
bool
sh_contains_memref_p (rtx insn)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
    if (MEM_P (*iter))
      return true;
  return false;
}

/* Return true iff INSN loads a banked register.  */
bool
sh_loads_bankedreg_p (rtx insn)
{
  if (GET_CODE (PATTERN (insn)) == SET)
    {
      rtx op = SET_DEST (PATTERN (insn));
      if (REG_P (op) && BANKED_REGISTER_P (REGNO (op)))
	return true;
    }

  return false;
}

/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */
static reg_class_t
sh_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass)
{
  return rclass;
}
/* Implement TARGET_SECONDARY_RELOAD.  */
static reg_class_t
sh_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
		     machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;

  if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == PLUS
      && REG_P (XEXP (XEXP (x, 0), 0))
      && REGNO (XEXP (XEXP (x, 0), 0)) == GBR_REG)
    return rclass == R0_REGS ? NO_REGS : R0_REGS;

  if (MEM_P (x) && REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) == GBR_REG)
    return rclass == R0_REGS ? NO_REGS : R0_REGS;

  if (REG_P (x) && REGNO (x) == GBR_REG)
    return NO_REGS;

  if (in_p)
    {
      if (REGCLASS_HAS_FP_REG (rclass)
	  && immediate_operand ((x), mode)
	  && ! ((fp_zero_operand (x) || fp_one_operand (x)) && mode == SFmode))
	switch (mode)
	  {
	  case SFmode:
	    sri->icode = CODE_FOR_reload_insf__frn;
	    break;
	  case DFmode:
	    sri->icode = CODE_FOR_reload_indf__frn;
	    break;
	  case SImode:
	    /* ??? If we knew that we are in the appropriate mode -
	       single precision - we could use a reload pattern directly.  */
	    return FPUL_REGS;
	  default:
	    abort ();
	  }
      if (rclass == FPUL_REGS
	  && ((REG_P (x) && (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG
			     || REGNO (x) == T_REG))
	      || GET_CODE (x) == PLUS))
	return GENERAL_REGS;
      if (rclass == FPUL_REGS && immediate_operand (x, mode))
	{
	  if (satisfies_constraint_I08 (x) || fp_zero_operand (x))
	    return GENERAL_REGS;
	  else if (mode == SFmode)
	    return FP_REGS;
	  sri->icode = CODE_FOR_reload_insi__i_fpul;
	  return NO_REGS;
	}
      if (rclass == FPSCR_REGS
	  && ((REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
	      || (MEM_P (x) && GET_CODE (XEXP (x, 0)) == PLUS)))
	return GENERAL_REGS;
    } /* end of input-only processing.  */

  if (((REGCLASS_HAS_FP_REG (rclass)
	&& (REG_P (x)
	    && (GENERAL_OR_AP_REGISTER_P (REGNO (x))
		|| (FP_REGISTER_P (REGNO (x)) && mode == SImode
		    && TARGET_FMOVD))))
       || (REGCLASS_HAS_GENERAL_REG (rclass)
	   && REG_P (x)
	   && FP_REGISTER_P (REGNO (x))))
      && (mode == SFmode || mode == SImode))
    return FPUL_REGS;
  if ((rclass == FPUL_REGS
       || (REGCLASS_HAS_FP_REG (rclass) && mode == SImode))
      && (MEM_P (x)
	  || (REG_P (x)
	      && (REGNO (x) >= FIRST_PSEUDO_REGISTER
		  || REGNO (x) == T_REG
		  || system_reg_operand (x, VOIDmode)))))
    {
      if (rclass == FPUL_REGS)
	return GENERAL_REGS;
      return NO_REGS;  // LRA wants NO_REGS here, it used to be FPUL_REGS;
    }
  if (rclass == TARGET_REGS
      && !satisfies_constraint_Csy (x)
      && (!REG_P (x) || ! GENERAL_REGISTER_P (REGNO (x))))
    return GENERAL_REGS;
  if ((rclass == MAC_REGS || rclass == PR_REGS)
      && REG_P (x) && ! GENERAL_REGISTER_P (REGNO (x))
      && rclass != REGNO_REG_CLASS (REGNO (x)))
    return GENERAL_REGS;
  if (rclass != GENERAL_REGS && REG_P (x)
      && TARGET_REGISTER_P (REGNO (x)))
    return GENERAL_REGS;

  /* If here fall back to loading FPUL register through general registers.
     This case can happen when movsi_ie insn is picked initially to
     load/store the FPUL register from/to another register, and then the
     other register is allocated on the stack.  */
  if (rclass == FPUL_REGS && true_regnum (x) == -1)
    return GENERAL_REGS;

  /* Force mov.b / mov.w displacement addressing insn to use R0 as
     the other operand.
     On SH2A could also just leave it alone here, which would result in a
     4 byte move insn being generated instead.  However, for this to work
     the insns must have the appropriate alternatives.  */
  if ((mode == QImode || mode == HImode) && rclass != R0_REGS
      && satisfies_constraint_Sdd (x)
      && sh_disp_addr_displacement (x)
	 <= sh_max_mov_insn_displacement (mode, false))
    return R0_REGS;

  /* When reload is trying to address a QImode or HImode subreg on the stack,
     force any subreg byte into R0_REGS, as this is going to become a
     displacement address.
     We could restrict this to SUBREG_BYTE (x) > 0, but if the actual reg
     is on the stack, the memref to it might already require a displacement
     and that has to be added to the final address.  At this point we don't
     know the cumulative displacement so we assume the worst case.  */
  if ((mode == QImode || mode == HImode) && rclass != R0_REGS
      && GET_CODE (x) == SUBREG && true_regnum (x) == -1)
    return R0_REGS;

  return NO_REGS;
}
11540 sh_cannot_substitute_mem_equiv_p (rtx
)
11542 /* If SUBST is mem[base+index] or QI/HImode mem[base+disp], the insn
11543 uses R0 and may cause spill failure when R0 is already used.
11544 We have to return true for that case at least.
11545 Moreover SH has strong R0 parity and also have not enough numbers of
11546 the hard registers to make the equiv substitution win in the size
11547 and the speed on average working sets. The pseudos produced to
11548 hold the equiv values can't get good hard registers for bad cases
11549 and end up memory save/restore insns which make the code worse. */
11553 /* Return true if DISP can be legitimized. */
11555 sh_legitimize_address_displacement (rtx
*disp
, rtx
*offs
,
11558 if ((TARGET_FPU_DOUBLE
&& mode
== DFmode
)
11559 || (TARGET_SH2E
&& mode
== SFmode
))
11562 struct disp_adjust adj
= sh_find_mov_disp_adjust (mode
, INTVAL (*disp
));
11563 if (adj
.offset_adjust
!= NULL_RTX
&& adj
.mov_disp
!= NULL_RTX
)
11565 *disp
= adj
.mov_disp
;
11566 *offs
= adj
.offset_adjust
;
/* Return true if movsf insn should be split with an additional
   register.  */
bool
sh_movsf_ie_ra_split_p (rtx op0, rtx op1, rtx op2)
{
  if (rtx_equal_p (op0, op1))
    return false;

  if (GET_CODE (op1) == CONST_DOUBLE
      && ! satisfies_constraint_G (op1)
      && ! satisfies_constraint_H (op1)
      && REG_P (op0)
      && REG_P (op2))
    return true;

  if (REG_P (op0) && FP_REGISTER_P (REGNO (op0))
      && REG_P (op1) && GENERAL_REGISTER_P (REGNO (op1))
      && REG_P (op2) && (REGNO (op2) == FPUL_REG))
    return true;

  if (REG_P (op1) && FP_REGISTER_P (REGNO (op1))
      && REG_P (op0) && GENERAL_REGISTER_P (REGNO (op0))
      && REG_P (op2) && (REGNO (op2) == FPUL_REG))
    return true;

  return false;
}
static void
sh_conditional_register_usage (void)
{
  int regno;
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (! VALID_REGISTER_P (regno))
      fixed_regs[regno] = call_used_regs[regno] = 1;
  /* R8 and R9 are call-clobbered on SH5, but not on earlier SH ABIs.  */
  if (flag_pic)
    {
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
    }
  if (TARGET_FDPIC)
    {
      fixed_regs[PIC_REG] = 1;
      call_used_regs[PIC_REG] = 1;
      call_really_used_regs[PIC_REG] = 1;
    }
  /* Renesas saves and restores mac registers on call.  */
  if (TARGET_HITACHI && ! TARGET_NOMACSAVE)
    {
      call_really_used_regs[MACH_REG] = 0;
      call_really_used_regs[MACL_REG] = 0;
    }

  for (regno = FIRST_GENERAL_REG; regno <= LAST_GENERAL_REG; regno++)
    if (! fixed_regs[regno] && call_really_used_regs[regno])
      SET_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], regno);

  call_really_used_regs[FPSCR_MODES_REG] = 0;
  call_really_used_regs[FPSCR_STAT_REG] = 0;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P

   can_store_by_pieces constructs VOIDmode CONST_DOUBLEs.  */
static bool
sh_legitimate_constant_p (machine_mode mode, rtx x)
{
  if (SH_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
    {
      rtx base, offset;
      split_const (x, &base, &offset);

      if (GET_CODE (base) == SYMBOL_REF
	  && !offset_within_block_p (base, INTVAL (offset)))
	return false;
    }

  if (TARGET_FDPIC
      && (SYMBOLIC_CONST_P (x)
	  || (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
	      && SYMBOLIC_CONST_P (XEXP (XEXP (x, 0), 0)))))
    return false;

  return GET_CODE (x) != CONST_DOUBLE
	 || mode == DFmode || mode == SFmode
	 || mode == DImode || GET_MODE (x) == VOIDmode;
}

enum sh_divide_strategy_e sh_div_strategy = SH_DIV_STRATEGY_DEFAULT;

static void
sh_init_sync_libfuncs (void)
{
  init_sync_libfuncs (UNITS_PER_WORD);
}

/* Return true if it is appropriate to emit `ret' instructions in the
   body of a function.  */
bool
sh_can_use_simple_return_p (void)
{
  HARD_REG_SET live_regs_mask;
  int d;

  if (! reload_completed || frame_pointer_needed)
    return false;

  /* Moving prologue around doesn't reduce the size.  */
  if (optimize_function_for_size_p (cfun))
    return false;

  /* Finally, allow for pr save.  */
  d = calc_live_regs (&live_regs_mask);

  if (rounded_frame_size (d) > 4)
    return false;

  return true;
}
/*------------------------------------------------------------------------------
  Address mode optimization support code.
*/

typedef HOST_WIDE_INT disp_t;
static const disp_t MIN_DISP = HOST_WIDE_INT_MIN;
static const disp_t MAX_DISP = HOST_WIDE_INT_MAX;
static const disp_t INVALID_DISP = MAX_DISP;

/* A memory reference which is described by a base register and a
   displacement.  */
class base_reg_disp
{
public:
  base_reg_disp (rtx br, disp_t d);

  bool is_reg (void) const;
  bool is_disp (void) const;
  rtx reg (void) const;
  disp_t disp (void) const;

private:
  rtx reg_;
  disp_t disp_;
};

inline
base_reg_disp::base_reg_disp (rtx br, disp_t d)
: reg_ (br), disp_ (d)
{
}

inline bool
base_reg_disp::is_reg (void) const
{
  return reg_ != NULL_RTX && disp_ != INVALID_DISP;
}

inline bool
base_reg_disp::is_disp (void) const
{
  return reg_ == NULL_RTX && disp_ != INVALID_DISP;
}

inline rtx
base_reg_disp::reg (void) const
{
  return reg_;
}

inline disp_t
base_reg_disp::disp (void) const
{
  return disp_;
}
/* Find the base register and calculate the displacement for a given
   address rtx 'x'.  */
static base_reg_disp
sh_find_base_reg_disp (rtx_insn* insn, rtx x, disp_t disp = 0,
		       rtx base_reg = NULL)
{
  if (REG_P (x))
    {
      if (REGNO (x) == GBR_REG)
	return base_reg_disp (x, disp);

      /* We've reached a hard-reg.  This is probably the point where
	 function args are copied to pseudos.  Do not go any further and
	 stick to the pseudo.  If the original mem addr was in a hard reg
	 from the beginning, it will become the base reg.  */
      if (REGNO (x) < FIRST_PSEUDO_REGISTER)
	return base_reg_disp (base_reg != NULL ? base_reg : x, disp);

      /* Find the def of the reg and trace it.  If there are more than one
	 defs and they are not the same, assume it's not safe to proceed.  */
      rtx_insn* last_i = NULL;
      rtx last_set = NULL;
      for (df_ref d = DF_REG_DEF_CHAIN (REGNO (x)); d != NULL;
	   d = DF_REF_NEXT_REG (d))
	{
	  rtx set = const_cast<rtx> (set_of (x, DF_REF_INSN (d)));

	  /* Accept multiple defs, as long as they are equal.  */
	  if (last_set == NULL || rtx_equal_p (last_set, set))
	    {
	      last_i = DF_REF_INSN (d);
	      last_set = set;
	    }
	  else
	    {
	      last_i = NULL;
	      last_set = NULL;
	      break;
	    }
	}

      if (last_set != NULL && last_i != NULL)
	return sh_find_base_reg_disp (last_i, XEXP (last_set, 1), disp,
				      XEXP (last_set, 0));

      /* When here, no previous insn was found that sets the reg.
	 The input reg is already the base reg.  */
      return base_reg_disp (x, disp);
    }

  else if (GET_CODE (x) == PLUS)
    {
      base_reg_disp left_val = sh_find_base_reg_disp (insn, XEXP (x, 0));
      base_reg_disp right_val = sh_find_base_reg_disp (insn, XEXP (x, 1));

      /* Either left or right val must be a reg.
	 We don't handle the case of 'reg + reg' here.  */
      if (left_val.is_reg () && right_val.is_disp ())
	return base_reg_disp (left_val.reg (), left_val.disp ()
					       + right_val.disp () + disp);
      else if (right_val.is_reg () && left_val.is_disp ())
	return base_reg_disp (right_val.reg (), right_val.disp ()
						+ left_val.disp () + disp);

      return base_reg_disp (base_reg, disp);
    }

  else if (CONST_INT_P (x))
    return base_reg_disp (NULL, disp + INTVAL (x));

  /* Didn't find anything useful.  */
  return base_reg_disp (base_reg, disp);
}
/* Given an insn and a memory operand, try to find an equivalent GBR
   based memory address and return the corresponding new memory address.
   Return NULL_RTX if not found.  */
rtx
sh_find_equiv_gbr_addr (rtx_insn* insn, rtx mem)
{
  if (!MEM_P (mem) || gbr_address_mem (mem, GET_MODE (mem)))
    return NULL_RTX;

  /* Leave post/pre inc/dec or any other side effect addresses alone.  */
  if (side_effects_p (XEXP (mem, 0)))
    return NULL_RTX;

  /* When not optimizing there might be no dataflow available.  */
  if (df == NULL)
    return NULL_RTX;

  base_reg_disp gbr_disp = sh_find_base_reg_disp (insn, XEXP (mem, 0));

  if (gbr_disp.is_reg () && REGNO (gbr_disp.reg ()) == GBR_REG)
    {
      /* If GBR is marked as call clobbered we bail out if we see a call.
	 FIXME: Actually should check if this mem refers to the gbr value
	 before or after the call.  If there is a store_gbr preceding this
	 mem, it's safe to use GBR for this mem.

	 If GBR is not marked as call clobbered, but there is some other
	 def than a call, it's probably a load_gbr upon which we also
	 bail out to be on the safe side.
	 FIXME: Should check if we have a use-after-def case, such as
	 the call case above.  */
      for (df_ref d = DF_REG_DEF_CHAIN (GBR_REG); d != NULL;
	   d = DF_REF_NEXT_REG (d))
	{
	  if (CALL_P (DF_REF_INSN (d)))
	    {
	      if (REGNO_REG_SET_P (regs_invalidated_by_call_regset, GBR_REG))
		return NULL_RTX;
	      else
		continue;
	    }
	  else
	    return NULL_RTX;
	}

      rtx disp = GEN_INT (gbr_disp.disp ());
      if (gbr_displacement (disp, GET_MODE (mem)))
	return gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, GBR_REG), disp);
    }

  return NULL_RTX;
}
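/* A successful match allows the access to be rewritten into the @(disp,GBR)
   addressing mode; the gbr_displacement predicate checks that the collected
   displacement fits that mode's displacement field, which on SH is an
   8-bit value implicitly scaled by the access size.  */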
/*------------------------------------------------------------------------------
  Manual insn combine support code.
*/

/* Return true if the specified insn contains any UNSPECs or
   UNSPEC_VOLATILEs.  */
static bool
sh_unspec_insn_p (rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (i, array, x, ALL)
    if (*i != NULL
	&& (GET_CODE (*i) == UNSPEC || GET_CODE (*i) == UNSPEC_VOLATILE))
      return true;

  return false;
}

/* Return true if the register operands of the specified insn are modified
   between the specified from and to insns (exclusive of those two).  */
bool
sh_insn_operands_modified_between_p (rtx_insn* operands_insn,
				     const rtx_insn* from,
				     const rtx_insn* to)
{
  /* FIXME: Return true for multiple sets for now.  */
  rtx s = single_set (operands_insn);
  if (s == NULL_RTX)
    return true;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (i, array, SET_SRC (s), ALL)
    if (*i != NULL
	&& ((REG_P (*i) || SUBREG_P (*i)) && reg_set_between_p (*i, from, to)))
      return true;

  return false;
}

/* Given an insn, determine whether it's a 'nott' insn, i.e. an insn that
   negates the T bit and stores the result in the T bit.  */
bool
sh_is_nott_insn (const rtx_insn* i)
{
  return i != NULL && GET_CODE (PATTERN (i)) == SET
	 && t_reg_operand (XEXP (PATTERN (i), 0), VOIDmode)
	 && negt_reg_operand (XEXP (PATTERN (i), 1), VOIDmode);
}

rtx
sh_movt_set_dest (const rtx_insn* i)
{
  if (i == NULL)
    return NULL;

  const_rtx p = PATTERN (i);
  return GET_CODE (p) == SET
	 && arith_reg_dest (XEXP (p, 0), SImode)
	 && t_reg_operand (XEXP (p, 1), VOIDmode) ? XEXP (p, 0) : NULL;
}

/* Given an insn, check whether it's a 'movrt' kind of insn, i.e. an insn
   that stores the negated T bit in a register, and return the destination
   register rtx, or null.  */
rtx
sh_movrt_set_dest (const rtx_insn* i)
{
  if (i == NULL)
    return NULL;

  const_rtx p = PATTERN (i);

  /* The negc movrt replacement is inside a parallel.  */
  if (GET_CODE (p) == PARALLEL)
    p = XVECEXP (p, 0, 0);

  return GET_CODE (p) == SET
	 && arith_reg_dest (XEXP (p, 0), SImode)
	 && negt_reg_operand (XEXP (p, 1), VOIDmode) ? XEXP (p, 0) : NULL;
}

/* Given an insn and a reg number, tell whether the reg dies or is unused
   after the insn.  */
bool
sh_reg_dead_or_unused_after_insn (const rtx_insn* i, int regno)
{
  return find_regno_note (i, REG_DEAD, regno) != NULL
	 || find_regno_note (i, REG_UNUSED, regno) != NULL;
}

/* Given an insn and a reg number, remove reg dead or reg unused notes to
   mark it as being used after the insn.  */
void
sh_remove_reg_dead_or_unused_notes (rtx_insn* i, int regno)
{
  if (rtx n = find_regno_note (i, REG_DEAD, regno))
    remove_note (i, n);
  if (rtx n = find_regno_note (i, REG_UNUSED, regno))
    remove_note (i, n);
}
/* Given an insn check if it contains any post/pre inc/dec mem operands and
   add the REG_INC notes accordingly.
   FIXME: This function is very similar to lra.c (add_auto_inc_notes).
   FIXME: This function is currently used by peephole2 patterns because
	  the peephole2 pass does not preserve REG_INC notes.  If the notes
	  are dropped the following passes will do wrong things.  */
rtx_insn*
sh_check_add_incdec_notes (rtx_insn* i)
{
  struct for_each_inc_dec_clb
  {
    static int func (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
		     rtx dest, rtx src ATTRIBUTE_UNUSED,
		     rtx srcoff ATTRIBUTE_UNUSED, void* arg)
    {
      gcc_assert (REG_P (dest));

      rtx_insn* i = (rtx_insn*)arg;
      if (find_regno_note (i, REG_INC, REGNO (dest)) == NULL)
	add_reg_note (i, REG_INC, dest);

      return 0;
    }
  };

  for_each_inc_dec (PATTERN (i), for_each_inc_dec_clb::func, i);
  return i;
}

/* Given a move insn destination and a source, make sure that the move source
   operand is not a post-inc mem load with the same address reg as the
   destination.  Returns the modified source operand with the post-inc removed
   if necessary.  */
rtx
sh_remove_overlapping_post_inc (rtx dst, rtx src)
{
  if (!MEM_P (src))
    return src;

  rtx addr = XEXP (src, 0);

  if (GET_CODE (addr) == POST_INC
      && reg_overlap_mentioned_p (XEXP (addr, 0), dst))
    return replace_equiv_address (src, XEXP (addr, 0));

  gcc_assert (GET_CODE (addr) != POST_MODIFY);
  return src;
}

/* Emit a move insn that is safe to be used in peephole patterns.  */
rtx_insn*
sh_peephole_emit_move_insn (rtx dst, rtx src)
{
  return sh_check_add_incdec_notes (
	emit_move_insn (dst, sh_remove_overlapping_post_inc (dst, src)));
}
/* Given an op rtx and an insn, try to find out whether the result of the
   specified op consists only of logical operations on T bit stores.  */
bool
sh_is_logical_t_store_expr (rtx op, rtx_insn* insn)
{
  if (!logical_operator (op, SImode))
    return false;

  rtx ops[2] = { XEXP (op, 0), XEXP (op, 1) };
  int op_is_t_count = 0;

  for (int i = 0; i < 2; ++i)
    {
      if (t_reg_operand (ops[i], VOIDmode)
	  || negt_reg_operand (ops[i], VOIDmode))
	op_is_t_count++;
      else
	{
	  set_of_reg op_set = sh_find_set_of_reg (ops[i], insn,
						  prev_nonnote_insn_bb);
	  if (op_set.set_src == NULL_RTX)
	    continue;

	  if (t_reg_operand (op_set.set_src, VOIDmode)
	      || negt_reg_operand (op_set.set_src, VOIDmode)
	      || sh_is_logical_t_store_expr (op_set.set_src, op_set.insn))
	    op_is_t_count++;
	}
    }

  return op_is_t_count == 2;
}

/* Given the operand that is extended in a sign/zero extend insn, and the
   insn, try to figure out whether the sign/zero extension can be replaced
   by a simple reg-reg copy.  If so, the replacement reg rtx is returned,
   NULL_RTX otherwise.  */
rtx
sh_try_omit_signzero_extend (rtx extended_op, rtx_insn* insn)
{
  if (REG_P (extended_op))
    extended_op = extended_op;
  else if (GET_CODE (extended_op) == SUBREG
	   && REG_P (SUBREG_REG (extended_op)))
    extended_op = SUBREG_REG (extended_op);
  else
    return NULL_RTX;

  /* Reg moves must be of the same mode.  */
  if (GET_MODE (extended_op) != SImode)
    return NULL_RTX;

  set_of_reg s = sh_find_set_of_reg (extended_op, insn, prev_nonnote_insn_bb);
  if (s.set_src == NULL_RTX)
    return NULL_RTX;

  if (t_reg_operand (s.set_src, VOIDmode)
      || negt_reg_operand (s.set_src, VOIDmode))
    return extended_op;

  /* If the zero extended reg was formed by a logical operation, check the
     operands of the logical operation.  If both originated from T bit
     stores the zero extension can be eliminated.  */
  else if (sh_is_logical_t_store_expr (s.set_src, s.insn))
    return extended_op;

  return NULL_RTX;
}
/* Given the current insn, which is assumed to be a movrt_negc insn, try to
   figure out whether it should be converted into a movt-xor sequence in
   the movrt_negc splitter.
   Returns true if insns have been modified and the splitter has succeeded.  */
bool
sh_split_movrt_negc_to_movt_xor (rtx_insn* curr_insn, rtx operands[])
{
  /* In cases such as
	tst	r4,r4
	mov	#-1,r1
	negc	r1,r1
	tst	r4,r4
     we can replace the T bit clobbering negc with a movt-xor sequence and
     eliminate the redundant comparison.
     Because the xor insn depends on register allocation results, allow this
     only before reload.  */
  if (!can_create_pseudo_p ())
    return false;

  set_of_reg t_before_negc = sh_find_set_of_reg (get_t_reg_rtx (), curr_insn,
						 prev_nonnote_insn_bb);
  set_of_reg t_after_negc = sh_find_set_of_reg (get_t_reg_rtx (), curr_insn,
						next_nonnote_insn_bb);

  if (t_before_negc.set_rtx != NULL_RTX && t_after_negc.set_rtx != NULL_RTX
      && rtx_equal_p (t_before_negc.set_rtx, t_after_negc.set_rtx)
      && !reg_used_between_p (get_t_reg_rtx (), curr_insn, t_after_negc.insn)
      && !sh_insn_operands_modified_between_p (t_before_negc.insn,
					       t_before_negc.insn,
					       t_after_negc.insn)
      && !modified_between_p (get_t_reg_rtx (), curr_insn, t_after_negc.insn)
      && !sh_unspec_insn_p (t_after_negc.insn)
      && !volatile_insn_p (PATTERN (t_after_negc.insn))
      && !side_effects_p (PATTERN (t_after_negc.insn))
      && !may_trap_or_fault_p (PATTERN (t_after_negc.insn)))
    {
      emit_insn (gen_movrt_xor (operands[0], get_t_reg_rtx ()));
      set_insn_deleted (t_after_negc.insn);
      return true;
    }
  else
    return false;
}
/* Given a reg and the current insn, see if the value of the reg originated
   from a sign or zero extension and return the discovered information.  */
sh_extending_set_of_reg
sh_find_extending_set_of_reg (rtx reg, rtx_insn* curr_insn)
{
  if (reg == NULL)
    return sh_extending_set_of_reg (curr_insn);

  if (SUBREG_P (reg))
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return sh_extending_set_of_reg (curr_insn);

  /* FIXME: Also search the predecessor basic blocks.  It seems that checking
     only the adjacent predecessor blocks would cover most of the cases.
     Also try to look through the first extension that we hit.  There are
     some cases, where a zero_extend is followed by an (implicit)
     sign_extend, and it fails to see the sign_extend.  */
  sh_extending_set_of_reg result =
	sh_find_set_of_reg (reg, curr_insn, prev_nonnote_insn_bb, true);

  if (result.set_src != NULL)
    {
      if (GET_CODE (result.set_src) == SIGN_EXTEND
	  || GET_CODE (result.set_src) == ZERO_EXTEND)
	{
	  if (dump_file)
	    fprintf (dump_file, "sh_find_extending_set_of_reg: reg %d is "
				"explicitly sign/zero extended in insn %d\n",
				REGNO (reg), INSN_UID (result.insn));
	  result.from_mode = GET_MODE (XEXP (result.set_src, 0));
	  result.ext_code = GET_CODE (result.set_src);
	}
      else if (MEM_P (result.set_src)
	       && (GET_MODE (result.set_src) == QImode
		   || GET_MODE (result.set_src) == HImode)
	       && !sh_unspec_insn_p (result.insn))
	{
	  /* On SH QIHImode memory loads always sign extend.  However, in
	     some cases where it seems that the higher bits are not
	     interesting, the loads will not be expanded as sign extending
	     insns, but as QIHImode loads into QIHImode regs.  We report that
	     the reg has been sign extended by the mem load.  When it is used
	     as such, we must convert the mem load into a sign extending
	     insn, see also sh_extending_set_of_reg::use_as_extended_reg.  */
	  if (dump_file)
	    fprintf (dump_file, "sh_find_extending_set_of_reg: reg %d is "
				"implicitly sign extended in insn %d\n",
				REGNO (reg), INSN_UID (result.insn));
	  result.from_mode = GET_MODE (result.set_src);
	  result.ext_code = SIGN_EXTEND;
	}
    }

  return result;
}

/* Given a reg that is known to be sign or zero extended at some insn,
   take the appropriate measures so that the extended value can be used as
   a reg at the specified insn and return the resulting reg rtx.  */
rtx
sh_extending_set_of_reg::use_as_extended_reg (rtx_insn* use_at_insn) const
{
  gcc_assert (insn != NULL && set_src != NULL && set_rtx != NULL);
  gcc_assert (ext_code == SIGN_EXTEND || ext_code == ZERO_EXTEND);
  gcc_assert (from_mode == QImode || from_mode == HImode);

  if (MEM_P (set_src) && ext_code == SIGN_EXTEND)
    {
      if (dump_file)
	fprintf (dump_file,
		 "use_as_extended_reg: converting non-extending mem load in "
		 "insn %d into sign-extending load\n", INSN_UID (insn));

      rtx r = gen_reg_rtx (SImode);
      rtx_insn* i0;
      if (from_mode == QImode)
	i0 = emit_insn_after (gen_extendqisi2 (r, set_src), insn);
      else if (from_mode == HImode)
	i0 = emit_insn_after (gen_extendhisi2 (r, set_src), insn);
      else
	gcc_unreachable ();

      emit_insn_after (
		gen_move_insn (XEXP (set_rtx, 0),
			       gen_lowpart (GET_MODE (set_src), r)), i0);
      set_insn_deleted (insn);
      return r;
    }
  else
    {
      rtx extension_dst = XEXP (set_rtx, 0);
      if (GET_MODE (extension_dst) != SImode)
	extension_dst = simplify_gen_subreg (SImode, extension_dst,
					     GET_MODE (extension_dst), 0);
      if (modified_between_p (extension_dst, insn, use_at_insn))
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "use_as_extended_reg: dest reg %d of extending insn %d "
		     "is modified, inserting a reg-reg copy\n",
		     REGNO (extension_dst), INSN_UID (insn));

	  rtx r = gen_reg_rtx (SImode);
	  emit_insn_after (gen_move_insn (r, extension_dst), insn);
	  return r;
	}
      else
	{
	  sh_remove_reg_dead_or_unused_notes (insn, REGNO (extension_dst));
	  return extension_dst;
	}
    }
}

bool
sh_extending_set_of_reg::can_use_as_unextended_reg (void) const
{
  if ((ext_code == SIGN_EXTEND || ext_code == ZERO_EXTEND)
      && (from_mode == QImode || from_mode == HImode)
      && set_src != NULL)
    return arith_reg_operand (XEXP (set_src, 0), from_mode);
  else
    return false;
}

rtx
sh_extending_set_of_reg::use_as_unextended_reg (rtx_insn* use_at_insn) const
{
  gcc_assert (can_use_as_unextended_reg ());

  rtx r = XEXP (set_src, 0);
  rtx r0 = simplify_gen_subreg (SImode, r, from_mode, 0);

  if (modified_between_p (r, insn, use_at_insn))
    {
      rtx r1 = gen_reg_rtx (SImode);
      emit_insn_after (gen_move_insn (r1, r0), insn);
      return r1;
    }
  else
    {
      sh_remove_reg_dead_or_unused_notes (insn, SUBREG_P (r)
					  ? REGNO (SUBREG_REG (r))
					  : REGNO (r));
      return r0;
    }
}
12300 perform the necessary checks on the operands and split it accordingly. */
12302 sh_split_tst_subregs (rtx_insn
* curr_insn
, machine_mode subreg_mode
,
12303 int subreg_offset
, rtx operands
[])
12305 gcc_assert (subreg_mode
== QImode
|| subreg_mode
== HImode
);
12307 sh_extending_set_of_reg eop0
= sh_find_extending_set_of_reg (operands
[0],
12309 sh_extending_set_of_reg eop1
= sh_find_extending_set_of_reg (operands
[1],
12312 /* If one of the operands is known to be zero extended, that's already
12313 sufficient to mask out the unwanted high bits. */
12314 if (eop0
.ext_code
== ZERO_EXTEND
&& eop0
.from_mode
== subreg_mode
)
12316 emit_insn (gen_tstsi_t (eop0
.use_as_extended_reg (curr_insn
),
12320 if (eop1
.ext_code
== ZERO_EXTEND
&& eop1
.from_mode
== subreg_mode
)
12322 emit_insn (gen_tstsi_t (operands
[0],
12323 eop1
.use_as_extended_reg (curr_insn
)));
12327 /* None of the operands seem to be zero extended.
12328 If both are sign extended it's OK, too. */
12329 if (eop0
.ext_code
== SIGN_EXTEND
&& eop1
.ext_code
== SIGN_EXTEND
12330 && eop0
.from_mode
== subreg_mode
&& eop1
.from_mode
== subreg_mode
)
12332 emit_insn (gen_tstsi_t (eop0
.use_as_extended_reg (curr_insn
),
12333 eop1
.use_as_extended_reg (curr_insn
)));
12337 /* Otherwise we have to insert a zero extension on one of the operands to
12338 mask out the unwanted high bits.
12339 Prefer the operand that has no known extension. */
12340 if (eop0
.ext_code
!= UNKNOWN
&& eop1
.ext_code
== UNKNOWN
)
12341 std::swap (operands
[0], operands
[1]);
12343 rtx tmp0
= gen_reg_rtx (SImode
);
12344 rtx tmp1
= simplify_gen_subreg (subreg_mode
, operands
[0],
12345 GET_MODE (operands
[0]), subreg_offset
);
12346 emit_insn (subreg_mode
== QImode
12347 ? gen_zero_extendqisi2 (tmp0
, tmp1
)
12348 : gen_zero_extendhisi2 (tmp0
, tmp1
));
12349 emit_insn (gen_tstsi_t (tmp0
, operands
[1]));
12352 /* A helper class to increment/decrement a counter variable each time a
12353 function is entered/left. */
12354 class scope_counter
12357 scope_counter (int& counter
) : m_counter (counter
) { ++m_counter
; }
12359 ~scope_counter (void)
12362 gcc_assert (m_counter
>= 0);
12365 int count (void) const { return m_counter
; }
/* Given an rtx x, determine whether the expression can be used to create
   an insn that calculates x and stores the result in the T bit.
   This is used by the 'treg_set_expr' predicate to construct insn sequences
   where T bit results are fed into other insns, such as addc, subc, negc
   insns.

   FIXME: The patterns that expand 'treg_set_expr' operands tend to
   distinguish between 'positive' and 'negative' forms.  For now this has to
   be done in the preparation code.  We could also introduce
   'pos_treg_set_expr' and 'neg_treg_set_expr' predicates for that and write
   two different patterns for the 'positive' and 'negative' forms.  However,
   the total amount of lines of code seems to be about the same and the
   '{pos|neg}_treg_set_expr' predicates would be more expensive, because the
   recog function would need to look inside the expression by temporarily
   splitting it.  */
static int sh_recog_treg_set_expr_reent_count = 0;
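/* For illustration (an assumed, typical case): a comparison operand such as

     (eq:SI (reg:SI 4) (const_int 0))

   is accepted here if recog can match the corresponding T bit setting insn

     (set (reg:SI T_REG) (eq:SI (reg:SI 4) (const_int 0)))

   so that its result can be fed directly into addc/subc/negc patterns.  */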
bool
sh_recog_treg_set_expr (rtx op, machine_mode mode)
{
  scope_counter recursion (sh_recog_treg_set_expr_reent_count);

  /* Limit the recursion count to avoid nested expressions which we can't
     resolve to a single treg set insn.  */
  if (recursion.count () > 1)
    return false;

  /* Early accept known possible operands before doing recog.  */
  if (op == const0_rtx || op == const1_rtx || t_reg_operand (op, mode)
      || negt_reg_operand (op, mode))
    return true;

  /* Early reject impossible operands before doing recog.
     There are some (set ((t) (subreg ...))) patterns, but we must be careful
     not to allow any invalid reg-reg or mem-reg moves, or else other passes
     such as lower-subreg will bail out.  Some insns such as SH4A movua are
     done with UNSPEC, so must reject those, too, or else it would result
     in an invalid reg -> treg move.  */
  if (CONST_INT_P (op) || register_operand (op, mode)
      || memory_operand (op, mode) || sh_unspec_insn_p (op))
    return false;

  if (!can_create_pseudo_p ())
    return false;

  /* expand_debug_locations may call this to compute rtx costs at
     very early stage.  In that case, don't make new insns here to
     avoid codegen differences with -g.  */
  if (currently_expanding_to_rtl)
    return false;

  /* We are going to invoke recog in a re-entrant way and thus
     have to capture its current state and restore it afterwards.  */
  recog_data_d prev_recog_data = recog_data;

  rtx_insn* i = make_insn_raw (gen_rtx_SET (get_t_reg_rtx (), op));
  SET_PREV_INSN (i) = NULL;
  SET_NEXT_INSN (i) = NULL;

  /* If the comparison op doesn't have a result mode, set it to SImode.  */
  machine_mode prev_op_mode = GET_MODE (op);
  if (COMPARISON_P (op) && prev_op_mode == VOIDmode)
    PUT_MODE (op, SImode);

  int result = recog (PATTERN (i), i, 0);

  /* It seems there is no insn like that.  Create a negated version and
     try again.  If we hit a negated form, we'll allow that and append a
     nott sequence when splitting out the insns.  Insns that do the split
     can then remove the trailing nott if they know how to deal with it.  */
  if (result < 0 && COMPARISON_P (op))
    {
      machine_mode cmp_mode = GET_MODE (XEXP (op, 0));
      if (cmp_mode == VOIDmode)
	cmp_mode = GET_MODE (XEXP (op, 1));

      rtx_code prev_code = GET_CODE (op);
      PUT_CODE (op, reverse_condition (GET_CODE (op)));
      result = recog (PATTERN (i), i, 0);
      PUT_CODE (op, prev_code);
    }

  PUT_MODE (op, prev_op_mode);
  recog_data = prev_recog_data;
  return result >= 0;
}
/* Returns true when recog of a 'treg_set_expr' is currently in progress.
   This can be used as a condition for insn/split patterns to allow certain
   T bit setting patterns only to be matched as sub expressions of other
   patterns.  */
bool
sh_in_recog_treg_set_expr (void)
{
  return sh_recog_treg_set_expr_reent_count > 0;
}
/* Given an rtx x, which is assumed to be some expression that has been
   matched by the 'treg_set_expr' predicate before, split and emit the
   insns that are necessary to calculate the expression and store the result
   in the T bit.
   The splitting is done recursively similar to 'try_split' in emit-rtl.c.
   Unfortunately we can't use 'try_split' here directly, as it tries to invoke
   'delete_insn' which then causes the DF parts to bail out, because we
   currently are inside another gen_split* function and would invoke
   'try_split' in a reentrant way.  */
static std::pair<rtx_insn*, rtx_insn*>
sh_try_split_insn_simple (rtx_insn* i, rtx_insn* curr_insn, int n = 0)
{
  if (dump_file)
    {
      fprintf (dump_file, "sh_try_split_insn_simple n = %d i = \n", n);
      print_rtl_single (dump_file, i);
      fprintf (dump_file, "\n");
    }

  rtx_insn* seq = split_insns (PATTERN (i), curr_insn);

  if (seq == NULL)
    return std::make_pair (i, i);

  /* Avoid infinite splitter loops if any insn of the result matches
     the original pattern.  */
  for (rtx_insn* s = seq; s != NULL; s = NEXT_INSN (s))
    if (INSN_P (s) && rtx_equal_p (PATTERN (s), PATTERN (i)))
      return std::make_pair (i, i);

  unshare_all_rtl_in_chain (seq);

  /* 'seq' is now a replacement for 'i'.  Assuming that 'i' is an insn in
     a linked list, replace the single insn with the new insns.  */
  rtx_insn* seqlast = seq;
  while (NEXT_INSN (seqlast) != NULL)
    seqlast = NEXT_INSN (seqlast);

  if (rtx_insn* iprev = PREV_INSN (i))
    SET_NEXT_INSN (iprev) = seq;
  if (rtx_insn* inext = NEXT_INSN (i))
    SET_PREV_INSN (inext) = seqlast;

  SET_PREV_INSN (seq) = PREV_INSN (i);
  SET_NEXT_INSN (seqlast) = NEXT_INSN (i);

  SET_PREV_INSN (i) = NULL;
  SET_NEXT_INSN (i) = NULL;

  /* Recursively split all insns.  */
  for (i = seq; ; i = NEXT_INSN (i))
    {
      std::pair<rtx_insn*, rtx_insn*> ii =
	  sh_try_split_insn_simple (i, curr_insn, n + 1);
      if (i == seq)
	seq = ii.first;
      if (i == seqlast)
	{
	  seqlast = ii.second;
	  break;
	}
      i = ii.second;
    }

  return std::make_pair (seq, seqlast);
}
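/* Schematically, the list surgery above replaces the single insn

     iprev -> i -> inext

   with the split sequence

     iprev -> seq -> ... -> seqlast -> inext

   unlinking 'i', before each insn of the new sequence is split again.  */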
sh_treg_insns
sh_split_treg_set_expr (rtx x, rtx_insn* curr_insn)
{
  if (t_reg_operand (x, VOIDmode))
    return sh_treg_insns ();

  scope_counter in_treg_set_expr (sh_recog_treg_set_expr_reent_count);

  rtx_insn* i = make_insn_raw (gen_rtx_SET (get_t_reg_rtx (), x));
  SET_PREV_INSN (i) = NULL;
  SET_NEXT_INSN (i) = NULL;

  if (dump_file)
    {
      fprintf (dump_file, "split_treg_set_expr insn:\n");
      print_rtl (dump_file, i);
      fprintf (dump_file, "\n");
    }

  /* If the insn is not found, we will try a negated form and append
     a nott.  */
  bool append_nott = false;

  /* We are going to invoke recog/split_insns in a re-entrant way and thus
     have to capture its current state and restore it afterwards.  */
  recog_data_d prev_recog_data = recog_data;

  if (negt_reg_operand (x, GET_MODE (x)))
    {
      /* This is a normal movt followed by a nott.  It will be converted
	 into a movrt after initial expansion.  */
      XEXP (PATTERN (i), 1) = get_t_reg_rtx ();
      append_nott = true;
    }
  else
    {
      /* If the comparison op doesn't have a mode set, set it to SImode.  */
      if (COMPARISON_P (x) && GET_MODE (x) == VOIDmode)
	PUT_MODE (x, SImode);

      int insn_code = recog (PATTERN (i), i, 0);

      if (insn_code < 0 && COMPARISON_P (x))
	{
	  machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
	  if (cmp_mode == VOIDmode)
	    cmp_mode = GET_MODE (XEXP (x, 1));

	  PUT_CODE (x, reverse_condition (GET_CODE (x)));
	  insn_code = recog (PATTERN (i), i, 0);
	  append_nott = true;
	}

      gcc_assert (insn_code >= 0);
    }

  /* Try to recursively split the insn.  Some insns might refuse to split
     any further while we are in the treg_set_expr splitting phase.  They
     will be emitted as part of the outer insn and then split again.  */
  std::pair<rtx_insn*, rtx_insn*> insnlist =
	sh_try_split_insn_simple (i, curr_insn);

  /* Restore recog state.  */
  recog_data = prev_recog_data;

  rtx_insn* nott_insn = sh_is_nott_insn (insnlist.second)
			? insnlist.second
			: NULL;
  if (dump_file)
    {
      fprintf (dump_file, "split_treg_set_expr insnlist:\n");
      print_rtl (dump_file, insnlist.first);
      fprintf (dump_file, "\n");

      if (nott_insn != NULL)
	fprintf (dump_file, "trailing nott insn %d\n", INSN_UID (nott_insn));
    }

  emit_insn (insnlist.first);

  if (nott_insn != NULL && append_nott)
    {
      if (dump_file)
	fprintf (dump_file, "removing trailing nott\n");
      remove_insn (nott_insn);
      nott_insn = NULL;
      append_nott = false;
    }

  if (append_nott)
    nott_insn = emit_insn (gen_nott (get_t_reg_rtx ()));

  rtx_insn* first_insn = get_insns ();

  if (dump_file)
    {
      fprintf (dump_file, "resulting insns:\n");
      print_rtl (dump_file, first_insn);
      fprintf (dump_file, "\n");
    }

  return sh_treg_insns (first_insn, nott_insn);
}
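/* Example of the negated form (illustrative): SH has no cmp/ne instruction,
   so an expression such as (ne:SI (reg:SI a) (reg:SI b)) is recognized via
   its reversed condition (eq:SI ...) with append_nott set, and the trailing
   nott is then either emitted here or removed again by splits that know how
   to handle an inverted T bit.  */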
/*------------------------------------------------------------------------------
  Mode switching support code.
*/

static void
sh_emit_mode_set (int entity ATTRIBUTE_UNUSED, int mode,
		  int prev_mode, HARD_REG_SET regs_live ATTRIBUTE_UNUSED)
{
  if ((TARGET_SH4A_FP || TARGET_SH4_300)
      && prev_mode != FP_MODE_NONE && prev_mode != mode)
    {
      emit_insn (gen_toggle_pr ());
      if (TARGET_FMOVD)
	emit_insn (gen_toggle_sz ());
    }
  else if (mode != FP_MODE_NONE)
    {
      rtx tmp = gen_reg_rtx (SImode);
      emit_insn (gen_sts_fpscr (tmp));
      rtx i = NULL;

      const unsigned HOST_WIDE_INT fpbits =
	  TARGET_FMOVD ? (FPSCR_PR | FPSCR_SZ) : FPSCR_PR;

      if (prev_mode != FP_MODE_NONE && prev_mode != mode)
	i = gen_xorsi3 (tmp, tmp, force_reg (SImode, GEN_INT (fpbits)));
      else if (mode == FP_MODE_SINGLE)
	i = gen_andsi3 (tmp, tmp, force_reg (SImode, GEN_INT (~fpbits)));
      else if (mode == FP_MODE_DOUBLE)
	i = gen_iorsi3 (tmp, tmp, force_reg (SImode, GEN_INT (fpbits)));
      else
	gcc_unreachable ();

      emit_insn (i);
      emit_insn (gen_lds_fpscr (tmp));
    }
}
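/* Illustrative example (assumed register numbers): switching from single
   to double precision with TARGET_FMOVD results in a sequence like

     sts	fpscr,r1
     xor	r2,r1		! r2 holds FPSCR_PR | FPSCR_SZ
     lds	r1,fpscr

   i.e. the precision (PR) and transfer size (SZ) bits are flipped in one
   go, while SH4A / SH4-300 targets can use the toggle insns instead.  */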
static int
sh_mode_needed (int entity ATTRIBUTE_UNUSED, rtx_insn *insn)
{
  return recog_memoized (insn) >= 0 ? get_attr_fp_mode (insn) : FP_MODE_NONE;
}
static int
sh_mode_after (int entity ATTRIBUTE_UNUSED, int mode, rtx_insn *insn)
{
  if (TARGET_HITACHI && recog_memoized (insn) >= 0
      && get_attr_fp_set (insn) != FP_SET_NONE)
    return (int) get_attr_fp_set (insn);
  else
    return mode;
}
static int
sh_mode_entry (int entity ATTRIBUTE_UNUSED)
{
  return NORMAL_MODE (entity);
}
static int
sh_mode_exit (int entity ATTRIBUTE_UNUSED)
{
  return sh_cfun_attr_renesas_p () ? FP_MODE_NONE : NORMAL_MODE (entity);
}
static int
sh_mode_priority (int entity ATTRIBUTE_UNUSED, int n)
{
  return ((TARGET_FPU_SINGLE != 0) ^ (n) ? FP_MODE_SINGLE : FP_MODE_DOUBLE);
}
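/* Priority 0 thus selects the FPU's default mode: FP_MODE_SINGLE when
   TARGET_FPU_SINGLE is set, FP_MODE_DOUBLE otherwise; the opposite mode
   follows at priority 1.  */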
/*------------------------------------------------------------------------------
  Misc
*/

/* Return true if we use LRA instead of reload pass.  */
bool
sh_lra_p (void)
{
  return sh_lra_flag;
}
/* Implement TARGET_USE_BY_PIECES_INFRASTRUCTURE_P.  */
static bool
sh_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
				   unsigned int align,
				   enum by_pieces_operation op,
				   bool speed_p)
{
  switch (op)
    {
      case MOVE_BY_PIECES:
	return move_by_pieces_ninsns (size, align, MOVE_MAX_PIECES + 1)
	       < (!speed_p ? 2 : (align >= 32) ? 16 : 2);
      case STORE_BY_PIECES:
      case SET_BY_PIECES:
	return move_by_pieces_ninsns (size, align, STORE_MAX_PIECES + 1)
	       < (!speed_p ? 2 : (align >= 32) ? 16 : 2);
      default:
	return default_use_by_pieces_infrastructure_p (size, align,
						       op, speed_p);
    }
}
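/* In other words: a by-pieces operation is allowed to use at most one insn,
   except when optimizing for speed on 32 bit aligned data, where up to 15
   insns are allowed.  */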
bool
sh_cannot_force_const_mem_p (machine_mode mode ATTRIBUTE_UNUSED,
			     rtx x ATTRIBUTE_UNUSED)
{
  return TARGET_FDPIC;
}
/* Emit insns to load the function address from FUNCDESC (an FDPIC
   function descriptor) into r1 and the GOT address into r12,
   returning an rtx for r1.  */
rtx
sh_load_function_descriptor (rtx funcdesc)
{
  rtx r1 = gen_rtx_REG (Pmode, R1_REG);
  rtx pic_reg = gen_rtx_REG (Pmode, PIC_REG);
  rtx fnaddr = gen_rtx_MEM (Pmode, funcdesc);
  rtx gotaddr = gen_rtx_MEM (Pmode, plus_constant (Pmode, funcdesc, 4));

  emit_move_insn (r1, fnaddr);
  /* The ABI requires the entry point address to be loaded first, so
     prevent the load from being moved after that of the GOT
     address.  */
  emit_insn (gen_blockage ());
  emit_move_insn (pic_reg, gotaddr);

  return r1;
}
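/* Illustrative call sequence (assumed register allocation, with the
   descriptor address in r4):

     mov.l	@r4,r1		! entry point at offset 0
     mov.l	@(4,r4),r12	! GOT address at offset 4
     jsr	@r1

   mirroring the funcdesc / funcdesc+4 loads above.  */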
/* Return an rtx holding the initial value of the FDPIC register (the
   FDPIC pointer passed in from the caller).  */
rtx
sh_get_fdpic_reg_initial_val (void)
{
  return get_hard_reg_initial_val (Pmode, PIC_REG);
}