/* Machine description for AArch64 architecture.
   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#ifndef GCC_AARCH64_PROTOS_H
#define GCC_AARCH64_PROTOS_H

/* SYMBOL_SMALL_ABSOLUTE: Generate symbol accesses through
   high and lo relocs that calculate the base address using a PC
   relative reloc.
   So to get the address of foo, we generate
   adrp x0, foo
   add  x0, x0, :lo12:foo

   To load or store something to foo, we could use the corresponding
   load store variants that generate an
   ldr x0, [x0, :lo12:foo]
   or
   str x1, [x0, :lo12:foo]

   This corresponds to the small code model of the compiler.

   SYMBOL_SMALL_GOT_4G: Similar to the one above but this
   gives us the GOT entry of the symbol being referred to:
   calculating the GOT entry for foo is done using the
   following sequence of instructions.  The ADRP instruction
   gets us to the page containing the GOT entry of the symbol
   and the got_lo12 gets us the actual offset within it; together,
   the base and offset can address a 4G-sized GOT table.

   adrp  x0, :got:foo
   ldr   x0, [x0, :gotoff_lo12:foo]

   This corresponds to the small PIC model of the compiler.

   SYMBOL_SMALL_GOT_28K: Similar to SYMBOL_SMALL_GOT_4G, but used for a
   symbol restricted to a 28K GOT table size.

   ldr reg, [gp, #:gotpage_lo15:sym]

   This corresponds to the -fpic model for the small memory model of the
   compiler.

   SYMBOL_SMALL_TLSGD
   SYMBOL_SMALL_TLSDESC
   SYMBOL_SMALL_TLSIE
   SYMBOL_TINY_TLSIE
   SYMBOL_TLSLE12
   SYMBOL_TLSLE24
   SYMBOL_TLSLE32
   SYMBOL_TLSLE48
   Each of these represents a thread-local symbol, and corresponds to the
   thread local storage relocation operator for the symbol being referred to.

   SYMBOL_TINY_ABSOLUTE

   Generate symbol accesses as a PC relative address using a single
   instruction.  To compute the address of symbol foo, we generate:

   ADR x0, foo

   SYMBOL_TINY_GOT

   Generate symbol accesses via the GOT using a single PC relative
   instruction.  To compute the address of symbol foo, we generate:

   ldr t0, :got:foo

   The value of foo can subsequently be read using:

   ldrb t0, [t0, #0]

   SYMBOL_FORCE_TO_MEM : Global variables are addressed using the
   constant pool.  All variable addresses are spilled into constant
   pools.  The constant pools themselves are addressed using PC
   relative accesses.  This only works for the large code model.  */

enum aarch64_symbol_type
{
  SYMBOL_SMALL_ABSOLUTE,
  SYMBOL_SMALL_GOT_28K,
  SYMBOL_SMALL_GOT_4G,
  SYMBOL_SMALL_TLSGD,
  SYMBOL_SMALL_TLSDESC,
  SYMBOL_SMALL_TLSIE,
  SYMBOL_TINY_ABSOLUTE,
  SYMBOL_TINY_GOT,
  SYMBOL_TINY_TLSIE,
  SYMBOL_TLSLE12,
  SYMBOL_TLSLE24,
  SYMBOL_TLSLE32,
  SYMBOL_TLSLE48,
  SYMBOL_FORCE_TO_MEM
};

/* Classifies the type of an address query.

   ADDR_QUERY_M
      Query what is valid for an "m" constraint and a memory_operand
      (the rules are the same for both).

   ADDR_QUERY_LDP_STP
      Query what is valid for a load/store pair.

   ADDR_QUERY_LDP_STP_N
      Query what is valid for a load/store pair, but narrow the incoming mode
      for address checking.  This is used for the store_pair_lanes patterns.

   ADDR_QUERY_ANY
      Query what is valid for at least one memory constraint, which may
      allow things that "m" doesn't.  For example, the SVE LDR and STR
      addressing modes allow a wider range of immediate offsets than "m"
      does.  */
enum aarch64_addr_query_type {
  ADDR_QUERY_M,
  ADDR_QUERY_LDP_STP,
  ADDR_QUERY_LDP_STP_N,
  ADDR_QUERY_ANY
};
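
/* Usage sketch (illustrative only): routines declared later in this file,
   such as aarch64_legitimate_address_p and aarch64_classify_address, take
   one of these values, so e.g.

     aarch64_legitimate_address_p (DImode, x, false, ADDR_QUERY_LDP_STP)

   asks whether X would be a valid address for a DImode load/store pair.  */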

/* A set of tuning parameters contains references to size and time
   cost models and vectors for address cost calculations, register
   move costs and memory move costs.  */

/* Scaled addressing modes can vary cost depending on the mode of the
   value to be loaded/stored.  QImode values cannot use scaled
   addressing modes.  */

struct scale_addr_mode_cost
{
  const int hi;
  const int si;
  const int di;
  const int ti;
};

/* Additional cost for addresses.  */
struct cpu_addrcost_table
{
  const struct scale_addr_mode_cost addr_scale_costs;
  const int pre_modify;
  const int post_modify;
  const int register_offset;
  const int register_sextend;
  const int register_zextend;
  const int imm_offset;
};
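
/* An illustrative instantiation (a sketch with hypothetical values, not a
   table for any particular core): the per-mode scale costs come first,
   followed by the flat per-address adjustments.  */
static const struct cpu_addrcost_table example_addrcost_table =
{
    {
      1, /* hi  */
      0, /* si  */
      0, /* di  */
      1  /* ti  */
    },
  0, /* pre_modify  */
  0, /* post_modify  */
  0, /* register_offset  */
  0, /* register_sextend  */
  0, /* register_zextend  */
  0  /* imm_offset  */
};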

/* Additional costs for register copies.  Cost is for one register.  */
struct cpu_regmove_cost
{
  const int GP2GP;
  const int GP2FP;
  const int FP2GP;
  const int FP2FP;
};

/* Cost for vector insn classes.  */
struct cpu_vector_cost
{
  const int scalar_int_stmt_cost;     /* Cost of any int scalar operation,
					 excluding load and store.  */
  const int scalar_fp_stmt_cost;      /* Cost of any fp scalar operation,
					 excluding load and store.  */
  const int scalar_load_cost;	      /* Cost of scalar load.  */
  const int scalar_store_cost;	      /* Cost of scalar store.  */
  const int vec_int_stmt_cost;	      /* Cost of any int vector operation,
					 excluding load, store, permute,
					 vector-to-scalar and
					 scalar-to-vector operation.  */
  const int vec_fp_stmt_cost;	      /* Cost of any fp vector operation,
					 excluding load, store, permute,
					 vector-to-scalar and
					 scalar-to-vector operation.  */
  const int vec_permute_cost;	      /* Cost of permute operation.  */
  const int vec_to_scalar_cost;	      /* Cost of vec-to-scalar operation.  */
  const int scalar_to_vec_cost;	      /* Cost of scalar-to-vector
					 operation.  */
  const int vec_align_load_cost;      /* Cost of aligned vector load.  */
  const int vec_unalign_load_cost;    /* Cost of unaligned vector load.  */
  const int vec_unalign_store_cost;   /* Cost of unaligned vector store.  */
  const int vec_store_cost;	      /* Cost of vector store.  */
  const int cond_taken_branch_cost;   /* Cost of taken branch.  */
  const int cond_not_taken_branch_cost;  /* Cost of not taken branch.  */
};

/* Branch costs.  */
struct cpu_branch_cost
{
  const int predictable;    /* Predictable branch or optimizing for size.  */
  const int unpredictable;  /* Unpredictable branch or optimizing for speed.  */
};
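
/* An illustrative table (hypothetical values): an unpredictable branch is
   assumed to cost more than a predictable one.  */
static const struct cpu_branch_cost example_branch_cost =
{
  1,  /* Predictable.  */
  3   /* Unpredictable.  */
};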

/* Control approximate alternatives to certain FP operators.  */
#define AARCH64_APPROX_MODE(MODE) \
  ((MIN_MODE_FLOAT <= (MODE) && (MODE) <= MAX_MODE_FLOAT) \
   ? (1 << ((MODE) - MIN_MODE_FLOAT)) \
   : (MIN_MODE_VECTOR_FLOAT <= (MODE) && (MODE) <= MAX_MODE_VECTOR_FLOAT) \
     ? (1 << ((MODE) - MIN_MODE_VECTOR_FLOAT \
	      + MAX_MODE_FLOAT - MIN_MODE_FLOAT + 1)) \
     : (0))
#define AARCH64_APPROX_NONE (0)
#define AARCH64_APPROX_ALL (-1)

/* Allowed modes for approximations.  */
struct cpu_approx_modes
{
  const unsigned int division;	  /* Division.  */
  const unsigned int sqrt;	  /* Square root.  */
  const unsigned int recip_sqrt;  /* Reciprocal square root.  */
};
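
/* An illustrative table (a sketch, not tied to any real core): enable the
   approximate square root and reciprocal square root sequences for all
   modes while keeping exact division.  */
static const struct cpu_approx_modes example_approx_modes =
{
  AARCH64_APPROX_NONE,	/* division  */
  AARCH64_APPROX_ALL,	/* sqrt  */
  AARCH64_APPROX_ALL	/* recip_sqrt  */
};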

/* Cache prefetch settings for prefetch-loop-arrays.  */
struct cpu_prefetch_tune
{
  const int num_slots;
  const int l1_cache_size;
  const int l1_cache_line_size;
  const int l2_cache_size;
  /* Whether software prefetch hints should be issued for non-constant
     strides.  */
  const bool prefetch_dynamic_strides;
  /* The minimum constant stride beyond which we should use prefetch
     hints.  */
  const int minimum_stride;
  const int default_opt_level;
};
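
/* An illustrative instantiation (hypothetical values; -1 is assumed here
   to mean "unknown, use the default").  */
static const struct cpu_prefetch_tune example_prefetch_tune =
{
  0,	 /* num_slots  */
  -1,	 /* l1_cache_size  */
  64,	 /* l1_cache_line_size  */
  -1,	 /* l2_cache_size  */
  true,	 /* prefetch_dynamic_strides  */
  -1,	 /* minimum_stride  */
  -1	 /* default_opt_level  */
};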

struct tune_params
{
  const struct cpu_cost_table *insn_extra_cost;
  const struct cpu_addrcost_table *addr_cost;
  const struct cpu_regmove_cost *regmove_cost;
  const struct cpu_vector_cost *vec_costs;
  const struct cpu_branch_cost *branch_costs;
  const struct cpu_approx_modes *approx_modes;
  /* Width of the SVE registers or SVE_NOT_IMPLEMENTED if not applicable.
     Only used for tuning decisions, does not disable VLA
     vectorization.  */
  enum aarch64_sve_vector_bits_enum sve_width;
  int memmov_cost;
  int issue_rate;
  unsigned int fusible_ops;
  const char *function_align;
  const char *jump_align;
  const char *loop_align;
  int int_reassoc_width;
  int fp_reassoc_width;
  int vec_reassoc_width;
  int min_div_recip_mul_sf;
  int min_div_recip_mul_df;
  /* Value for aarch64_case_values_threshold; or 0 for the default.  */
  unsigned int max_case_values;
  /* An enum specifying how to take into account CPU autoprefetch
     capabilities during instruction scheduling:
     - AUTOPREFETCHER_OFF: Do not take autoprefetch capabilities into
     account.
     - AUTOPREFETCHER_WEAK: Attempt to sort sequences of loads/stores in
     order of offsets but allow the pipeline hazard recognizer to alter
     that order to maximize multi-issue opportunities.
     - AUTOPREFETCHER_STRONG: Attempt to sort sequences of loads/stores in
     order of offsets and prefer this even if it restricts multi-issue
     opportunities.  */
  enum aarch64_autoprefetch_model
  {
    AUTOPREFETCHER_OFF,
    AUTOPREFETCHER_WEAK,
    AUTOPREFETCHER_STRONG
  } autoprefetcher_model;

  unsigned int extra_tuning_flags;

  /* Place the prefetch struct pointer at the end to enable type checking
     errors when tune_params is missing elements (e.g., from erroneous
     merges).  */
  const struct cpu_prefetch_tune *prefetch;
};

/* Classifies an address.

   ADDRESS_REG_IMM
       A simple base register plus immediate offset.

   ADDRESS_REG_WB
       A base register indexed by immediate offset with writeback.

   ADDRESS_REG_REG
       A base register indexed by (optionally scaled) register.

   ADDRESS_REG_UXTW
       A base register indexed by (optionally scaled) zero-extended register.

   ADDRESS_REG_SXTW
       A base register indexed by (optionally scaled) sign-extended register.

   ADDRESS_LO_SUM
       A LO_SUM rtx with a base register and "LO12" symbol relocation.

   ADDRESS_SYMBOLIC
       A constant symbolic address, in pc-relative literal pool.  */

enum aarch64_address_type {
  ADDRESS_REG_IMM,
  ADDRESS_REG_WB,
  ADDRESS_REG_REG,
  ADDRESS_REG_UXTW,
  ADDRESS_REG_SXTW,
  ADDRESS_LO_SUM,
  ADDRESS_SYMBOLIC
};

/* Address information.  */
struct aarch64_address_info {
  enum aarch64_address_type type;
  rtx base;
  rtx offset;
  poly_int64 const_offset;
  int shift;
  enum aarch64_symbol_type symbol_type;
};

#define AARCH64_FUSION_PAIR(x, name) \
  AARCH64_FUSE_##name##_index,
/* Supported fusion operations.  */
enum aarch64_fusion_pairs_index
{
#include "aarch64-fusion-pairs.def"
  AARCH64_FUSE_index_END
};

#define AARCH64_FUSION_PAIR(x, name) \
  AARCH64_FUSE_##name = (1u << AARCH64_FUSE_##name##_index),
/* Supported fusion operations.  */
enum aarch64_fusion_pairs
{
  AARCH64_FUSE_NOTHING = 0,
#include "aarch64-fusion-pairs.def"
  AARCH64_FUSE_ALL = (1u << AARCH64_FUSE_index_END) - 1
};
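
/* For illustration (a sketch; see aarch64-fusion-pairs.def for the real
   entries): an entry of the form

     AARCH64_FUSION_PAIR ("mov+movk", MOV_MOVK)

   expands to AARCH64_FUSE_MOV_MOVK_index in the first enum and to
   AARCH64_FUSE_MOV_MOVK = (1u << AARCH64_FUSE_MOV_MOVK_index) in the
   second, giving every fusion pair both an index and a matching bit in
   the fusible_ops bitmask.  */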

#define AARCH64_EXTRA_TUNING_OPTION(x, name) \
  AARCH64_EXTRA_TUNE_##name##_index,
/* Supported tuning flags indexes.  */
enum aarch64_extra_tuning_flags_index
{
#include "aarch64-tuning-flags.def"
  AARCH64_EXTRA_TUNE_index_END
};

#define AARCH64_EXTRA_TUNING_OPTION(x, name) \
  AARCH64_EXTRA_TUNE_##name = (1u << AARCH64_EXTRA_TUNE_##name##_index),
/* Supported tuning flags.  */
enum aarch64_extra_tuning_flags
{
  AARCH64_EXTRA_TUNE_NONE = 0,
#include "aarch64-tuning-flags.def"
  AARCH64_EXTRA_TUNE_ALL = (1u << AARCH64_EXTRA_TUNE_index_END) - 1
};

/* Enum describing the various ways that the
   aarch64_parse_{arch,tune,cpu,extension} functions can fail.
   This way their callers can choose what kind of error to give.  */

enum aarch64_parse_opt_result
{
  AARCH64_PARSE_OK,			/* Parsing was successful.  */
  AARCH64_PARSE_MISSING_ARG,		/* Missing argument.  */
  AARCH64_PARSE_INVALID_FEATURE,	/* Invalid feature modifier.  */
  AARCH64_PARSE_INVALID_ARG		/* Invalid arch, tune, cpu arg.  */
};

/* Enum to distinguish which type of check is to be done in
   aarch64_simd_valid_immediate.  This is used as a bitmask where
   AARCH64_CHECK_MOV has both bits set.  Thus AARCH64_CHECK_MOV will
   perform all checks.  Adding new types would require changes accordingly.  */
enum simd_immediate_check {
  AARCH64_CHECK_ORR  = 1 << 0,
  AARCH64_CHECK_BIC  = 1 << 1,
  AARCH64_CHECK_MOV  = AARCH64_CHECK_ORR | AARCH64_CHECK_BIC
};
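
/* Usage sketch (illustrative): a caller validating a constant for an
   ORR-style instruction only would write

     aarch64_simd_valid_immediate (x, NULL, AARCH64_CHECK_ORR)

   whereas the default argument AARCH64_CHECK_MOV (see the declarations
   below) has both bits set and so runs the ORR and BIC checks.  */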

/* The key type that -msign-return-address should use.  */
enum aarch64_key_type {
  AARCH64_KEY_A,
  AARCH64_KEY_B
};

extern enum aarch64_key_type aarch64_ra_sign_key;

extern struct tune_params aarch64_tune_params;

/* The available SVE predicate patterns, known in the ACLE as "svpattern".  */
#define AARCH64_FOR_SVPATTERN(T) \
  T (POW2, pow2, 0) \
  T (VL1, vl1, 1) \
  T (VL2, vl2, 2) \
  T (VL3, vl3, 3) \
  T (VL4, vl4, 4) \
  T (VL5, vl5, 5) \
  T (VL6, vl6, 6) \
  T (VL7, vl7, 7) \
  T (VL8, vl8, 8) \
  T (VL16, vl16, 9) \
  T (VL32, vl32, 10) \
  T (VL64, vl64, 11) \
  T (VL128, vl128, 12) \
  T (VL256, vl256, 13) \
  T (MUL4, mul4, 29) \
  T (MUL3, mul3, 30) \
  T (ALL, all, 31)

#define AARCH64_SVENUM(UPPER, LOWER, VALUE) AARCH64_SV_##UPPER = VALUE,
enum aarch64_svpattern {
  AARCH64_FOR_SVPATTERN (AARCH64_SVENUM)
  AARCH64_NUM_SVPATTERNS
};
#undef AARCH64_SVENUM

/* It's convenient to divide the built-in function codes into groups,
   rather than having everything in a single enum.  This type enumerates
   those groups.  */
enum aarch64_builtin_class
{
  AARCH64_BUILTIN_GENERAL,
  AARCH64_BUILTIN_SVE
};

/* Built-in function codes are structured so that the low
   AARCH64_BUILTIN_SHIFT bits contain the aarch64_builtin_class
   and the upper bits contain a group-specific subcode.  */
const unsigned int AARCH64_BUILTIN_SHIFT = 1;

/* Mask that selects the aarch64_builtin_class part of a function code.  */
const unsigned int AARCH64_BUILTIN_CLASS = (1 << AARCH64_BUILTIN_SHIFT) - 1;
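
/* A minimal sketch (these helpers are hypothetical, not part of GCC) of
   how a function code is packed and unpacked under the scheme above.  */
static inline unsigned int
example_pack_builtin_fcode (enum aarch64_builtin_class bclass,
			    unsigned int subcode)
{
  /* The low AARCH64_BUILTIN_SHIFT bits hold the class...  */
  return (subcode << AARCH64_BUILTIN_SHIFT) | (unsigned int) bclass;
}

static inline enum aarch64_builtin_class
example_builtin_fcode_class (unsigned int fcode)
{
  /* ...which AARCH64_BUILTIN_CLASS masks back out.  */
  return (enum aarch64_builtin_class) (fcode & AARCH64_BUILTIN_CLASS);
}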

void aarch64_post_cfi_startproc (void);
poly_int64 aarch64_initial_elimination_offset (unsigned, unsigned);
int aarch64_get_condition_code (rtx);
bool aarch64_address_valid_for_prefetch_p (rtx, bool);
bool aarch64_bitmask_imm (HOST_WIDE_INT val, machine_mode);
unsigned HOST_WIDE_INT aarch64_and_split_imm1 (HOST_WIDE_INT val_in);
unsigned HOST_WIDE_INT aarch64_and_split_imm2 (HOST_WIDE_INT val_in);
bool aarch64_and_bitmask_imm (unsigned HOST_WIDE_INT val_in, machine_mode mode);
int aarch64_branch_cost (bool, bool);
enum aarch64_symbol_type aarch64_classify_symbolic_expression (rtx);
opt_machine_mode aarch64_vq_mode (scalar_mode);
opt_machine_mode aarch64_full_sve_mode (scalar_mode);
bool aarch64_can_const_movi_rtx_p (rtx x, machine_mode mode);
bool aarch64_const_vec_all_same_int_p (rtx, HOST_WIDE_INT);
bool aarch64_const_vec_all_same_in_range_p (rtx, HOST_WIDE_INT,
					    HOST_WIDE_INT);
bool aarch64_constant_address_p (rtx);
bool aarch64_emit_approx_div (rtx, rtx, rtx);
bool aarch64_emit_approx_sqrt (rtx, rtx, bool);
void aarch64_expand_call (rtx, rtx, rtx, bool);
bool aarch64_expand_cpymem (rtx *);
bool aarch64_float_const_zero_rtx_p (rtx);
bool aarch64_float_const_rtx_p (rtx);
bool aarch64_function_arg_regno_p (unsigned);
bool aarch64_fusion_enabled_p (enum aarch64_fusion_pairs);
bool aarch64_gen_cpymemqi (rtx *);
bool aarch64_is_extend_from_extract (scalar_int_mode, rtx, rtx);
bool aarch64_is_long_call_p (rtx);
bool aarch64_is_noplt_call_p (rtx);
bool aarch64_label_mentioned_p (rtx);
void aarch64_declare_function_name (FILE *, const char*, tree);
void aarch64_asm_output_alias (FILE *, const tree, const tree);
void aarch64_asm_output_external (FILE *, tree, const char*);
bool aarch64_legitimate_pic_operand_p (rtx);
bool aarch64_mask_and_shift_for_ubfiz_p (scalar_int_mode, rtx, rtx);
bool aarch64_masks_and_shift_for_bfi_p (scalar_int_mode, unsigned HOST_WIDE_INT,
					unsigned HOST_WIDE_INT,
					unsigned HOST_WIDE_INT);
bool aarch64_zero_extend_const_eq (machine_mode, rtx, machine_mode, rtx);
bool aarch64_move_imm (HOST_WIDE_INT, machine_mode);
opt_machine_mode aarch64_sve_pred_mode (unsigned int);
bool aarch64_sve_mode_p (machine_mode);
bool aarch64_sve_cnt_immediate_p (rtx);
bool aarch64_sve_scalar_inc_dec_immediate_p (rtx);
bool aarch64_sve_addvl_addpl_immediate_p (rtx);
bool aarch64_sve_vector_inc_dec_immediate_p (rtx);
int aarch64_add_offset_temporaries (rtx);
void aarch64_split_add_offset (scalar_int_mode, rtx, rtx, rtx, rtx, rtx);
bool aarch64_mov_operand_p (rtx, machine_mode);
rtx aarch64_reverse_mask (machine_mode, unsigned int);
bool aarch64_offset_7bit_signed_scaled_p (machine_mode, poly_int64);
bool aarch64_offset_9bit_signed_unscaled_p (machine_mode, poly_int64);
char *aarch64_output_sve_cnt_immediate (const char *, const char *, rtx);
char *aarch64_output_sve_scalar_inc_dec (rtx);
char *aarch64_output_sve_addvl_addpl (rtx);
char *aarch64_output_sve_vector_inc_dec (const char *, rtx);
char *aarch64_output_scalar_simd_mov_immediate (rtx, scalar_int_mode);
char *aarch64_output_simd_mov_immediate (rtx, unsigned,
			enum simd_immediate_check w = AARCH64_CHECK_MOV);
char *aarch64_output_sve_mov_immediate (rtx);
bool aarch64_pad_reg_upward (machine_mode, const_tree, bool);
bool aarch64_regno_ok_for_base_p (int, bool);
bool aarch64_regno_ok_for_index_p (int, bool);
bool aarch64_reinterpret_float_as_int (rtx value, unsigned HOST_WIDE_INT *fail);
bool aarch64_simd_check_vect_par_cnst_half (rtx op, machine_mode mode,
					    bool high);
bool aarch64_simd_scalar_immediate_valid_for_move (rtx, scalar_int_mode);
bool aarch64_simd_shift_imm_p (rtx, machine_mode, bool);
bool aarch64_simd_valid_immediate (rtx, struct simd_immediate_info *,
			enum simd_immediate_check w = AARCH64_CHECK_MOV);
rtx aarch64_check_zero_based_sve_index_immediate (rtx);
bool aarch64_sve_index_immediate_p (rtx);
bool aarch64_sve_arith_immediate_p (rtx, bool);
bool aarch64_sve_bitmask_immediate_p (rtx);
bool aarch64_sve_dup_immediate_p (rtx);
bool aarch64_sve_cmp_immediate_p (rtx, bool);
bool aarch64_sve_float_arith_immediate_p (rtx, bool);
bool aarch64_sve_float_mul_immediate_p (rtx);
bool aarch64_split_dimode_const_store (rtx, rtx);
bool aarch64_symbolic_address_p (rtx);
bool aarch64_uimm12_shift (HOST_WIDE_INT);
bool aarch64_use_return_insn_p (void);
const char *aarch64_output_casesi (rtx *);

unsigned int aarch64_tlsdesc_abi_id ();
enum aarch64_symbol_type aarch64_classify_symbol (rtx, HOST_WIDE_INT);
enum aarch64_symbol_type aarch64_classify_tls_symbol (rtx);
enum reg_class aarch64_regno_regclass (unsigned);
int aarch64_asm_preferred_eh_data_format (int, int);
int aarch64_fpconst_pow_of_2 (rtx);
int aarch64_fpconst_pow2_recip (rtx);
machine_mode aarch64_hard_regno_caller_save_mode (unsigned, unsigned,
						  machine_mode);
int aarch64_uxt_size (int, HOST_WIDE_INT);
int aarch64_vec_fpconst_pow_of_2 (rtx);
rtx aarch64_eh_return_handler_rtx (void);
rtx aarch64_mask_from_zextract_ops (rtx, rtx);
const char *aarch64_output_move_struct (rtx *operands);
rtx aarch64_return_addr (int, rtx);
rtx aarch64_simd_gen_const_vector_dup (machine_mode, HOST_WIDE_INT);
bool aarch64_simd_mem_operand_p (rtx);
bool aarch64_sve_ld1r_operand_p (rtx);
bool aarch64_sve_ld1rq_operand_p (rtx);
bool aarch64_sve_ldr_operand_p (rtx);
bool aarch64_sve_struct_memory_operand_p (rtx);
rtx aarch64_simd_vect_par_cnst_half (machine_mode, int, bool);
rtx aarch64_gen_stepped_int_parallel (unsigned int, int, int);
bool aarch64_stepped_int_parallel_p (rtx, int);
rtx aarch64_tls_get_addr (void);
unsigned aarch64_dbx_register_number (unsigned);
unsigned aarch64_trampoline_size (void);
void aarch64_asm_output_labelref (FILE *, const char *);
void aarch64_cpu_cpp_builtins (cpp_reader *);
const char * aarch64_gen_far_branch (rtx *, int, const char *, const char *);
const char * aarch64_output_probe_stack_range (rtx, rtx);
const char * aarch64_output_probe_sve_stack_clash (rtx, rtx, rtx, rtx);
void aarch64_err_no_fpadvsimd (machine_mode);
void aarch64_expand_epilogue (bool);
rtx aarch64_ptrue_all (unsigned int);
void aarch64_expand_mov_immediate (rtx, rtx);
rtx aarch64_ptrue_reg (machine_mode);
rtx aarch64_pfalse_reg (machine_mode);
bool aarch64_sve_pred_dominates_p (rtx *, rtx);
bool aarch64_sve_same_pred_for_ptest_p (rtx *, rtx *);
void aarch64_emit_sve_pred_move (rtx, rtx, rtx);
void aarch64_expand_sve_mem_move (rtx, rtx, machine_mode);
bool aarch64_maybe_expand_sve_subreg_move (rtx, rtx);
void aarch64_split_sve_subreg_move (rtx, rtx, rtx);
void aarch64_expand_prologue (void);
void aarch64_expand_vector_init (rtx, rtx);
void aarch64_sve_expand_vector_init (rtx, rtx);
void aarch64_init_cumulative_args (CUMULATIVE_ARGS *, const_tree, rtx,
				   const_tree, unsigned);
void aarch64_init_expanders (void);
void aarch64_init_simd_builtins (void);
void aarch64_emit_call_insn (rtx);
void aarch64_register_pragmas (void);
void aarch64_relayout_simd_types (void);
void aarch64_reset_previous_fndecl (void);
bool aarch64_return_address_signing_enabled (void);
bool aarch64_bti_enabled (void);
void aarch64_save_restore_target_globals (tree);
void aarch64_addti_scratch_regs (rtx, rtx, rtx *,
				 rtx *, rtx *,
				 rtx *);
void aarch64_subvti_scratch_regs (rtx, rtx, rtx *,
				  rtx *, rtx *,
				  rtx *, rtx *, rtx *);
void aarch64_expand_subvti (rtx, rtx, rtx,
			    rtx, rtx, rtx, rtx, bool);

/* Initialize builtins for SIMD intrinsics.  */
void init_aarch64_simd_builtins (void);

void aarch64_simd_emit_reg_reg_move (rtx *, machine_mode, unsigned int);

/* Expand builtins for SIMD intrinsics.  */
rtx aarch64_simd_expand_builtin (int, tree, rtx);

void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT, const_tree);
rtx aarch64_endian_lane_rtx (machine_mode, unsigned int);

void aarch64_split_128bit_move (rtx, rtx);

bool aarch64_split_128bit_move_p (rtx, rtx);

bool aarch64_mov128_immediate (rtx);

void aarch64_split_simd_combine (rtx, rtx, rtx);

void aarch64_split_simd_move (rtx, rtx);

/* Check for a legitimate floating point constant for FMOV.  */
bool aarch64_float_const_representable_p (rtx);

extern int aarch64_epilogue_uses (int);

#if defined (RTX_CODE)

void aarch64_gen_unlikely_cbranch (enum rtx_code, machine_mode cc_mode,
				   rtx label_ref);
bool aarch64_legitimate_address_p (machine_mode, rtx, bool,
				   aarch64_addr_query_type = ADDR_QUERY_M);
machine_mode aarch64_select_cc_mode (RTX_CODE, rtx, rtx);
rtx aarch64_gen_compare_reg (RTX_CODE, rtx, rtx);
rtx aarch64_load_tp (rtx);

void aarch64_expand_compare_and_swap (rtx op[]);
void aarch64_split_compare_and_swap (rtx op[]);

void aarch64_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx);

bool aarch64_gen_adjusted_ldpstp (rtx *, bool, scalar_mode, RTX_CODE);

void aarch64_expand_sve_vec_cmp_int (rtx, rtx_code, rtx, rtx);
bool aarch64_expand_sve_vec_cmp_float (rtx, rtx_code, rtx, rtx, bool);
void aarch64_expand_sve_vcond (machine_mode, machine_mode, rtx *);

bool aarch64_prepare_sve_int_fma (rtx *, rtx_code);
bool aarch64_prepare_sve_cond_int_fma (rtx *, rtx_code);
#endif /* RTX_CODE */

bool aarch64_process_target_attr (tree);
void aarch64_override_options_internal (struct gcc_options *);

const char *aarch64_general_mangle_builtin_type (const_tree);
void aarch64_general_init_builtins (void);
tree aarch64_general_fold_builtin (unsigned int, tree, unsigned int, tree *);
gimple *aarch64_general_gimple_fold_builtin (unsigned int, gcall *);
rtx aarch64_general_expand_builtin (unsigned int, tree, rtx);
tree aarch64_general_builtin_decl (unsigned, bool);
tree aarch64_general_builtin_rsqrt (unsigned int);
tree aarch64_builtin_vectorized_function (unsigned int, tree, tree);

extern void aarch64_split_combinev16qi (rtx operands[3]);
extern void aarch64_expand_vec_perm (rtx, rtx, rtx, rtx, unsigned int);
extern void aarch64_expand_sve_vec_perm (rtx, rtx, rtx, rtx);
extern bool aarch64_madd_needs_nop (rtx_insn *);
extern void aarch64_final_prescan_insn (rtx_insn *);
void aarch64_atomic_assign_expand_fenv (tree *, tree *, tree *);
int aarch64_ccmp_mode_to_code (machine_mode mode);

bool extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset);
bool aarch64_operands_ok_for_ldpstp (rtx *, bool, machine_mode);
bool aarch64_operands_adjust_ok_for_ldpstp (rtx *, bool, scalar_mode);
void aarch64_swap_ldrstr_operands (rtx *, bool);

extern void aarch64_asm_output_pool_epilogue (FILE *, const char *,
					      tree, HOST_WIDE_INT);

extern bool aarch64_classify_address (struct aarch64_address_info *, rtx,
				      machine_mode, bool,
				      aarch64_addr_query_type = ADDR_QUERY_M);

/* Defined in common/config/aarch64-common.c.  */
bool aarch64_handle_option (struct gcc_options *, struct gcc_options *,
			    const struct cl_decoded_option *, location_t);
const char *aarch64_rewrite_selected_cpu (const char *name);
enum aarch64_parse_opt_result aarch64_parse_extension (const char *,
						       uint64_t *,
						       std::string *);
void aarch64_get_all_extension_candidates (auto_vec<const char *> *candidates);
std::string aarch64_get_extension_string_for_isa_flags (uint64_t, uint64_t);

/* Defined in aarch64-d.c  */
extern void aarch64_d_target_versions (void);

rtl_opt_pass *make_pass_fma_steering (gcc::context *);
rtl_opt_pass *make_pass_track_speculation (gcc::context *);
rtl_opt_pass *make_pass_tag_collision_avoidance (gcc::context *);
rtl_opt_pass *make_pass_insert_bti (gcc::context *ctxt);

poly_uint64 aarch64_regmode_natural_size (machine_mode);

bool aarch64_high_bits_all_ones_p (HOST_WIDE_INT);

struct atomic_ool_names
{
    const char *str[5][4];
};
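
/* Layout note (an assumption inferred from how the out-of-line atomic
   helpers are used, not something this header states): the first index
   of str selects the access size (1, 2, 4, 8 or 16 bytes) and the second
   selects the memory model (relaxed, acquire, release, acquire-release).  */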

rtx aarch64_atomic_ool_func (machine_mode mode, rtx model_rtx,
			     const atomic_ool_names *names);
extern const atomic_ool_names aarch64_ool_swp_names;
extern const atomic_ool_names aarch64_ool_ldadd_names;
extern const atomic_ool_names aarch64_ool_ldset_names;
extern const atomic_ool_names aarch64_ool_ldclr_names;
extern const atomic_ool_names aarch64_ool_ldeor_names;

#endif /* GCC_AARCH64_PROTOS_H */