sim: v850: fix cpu_option testsuite handling
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2021 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* SME horizontal or vertical slice indicator, encoded in "V".
103 Values:
104 0 - Horizontal
105 1 - vertical
106 */
107 enum sme_hv_slice
108 {
109 HV_horizontal = 0,
110 HV_vertical = 1
111 };
112
113 /* Bits for DEFINED field in vector_type_el. */
114 #define NTA_HASTYPE 1
115 #define NTA_HASINDEX 2
116 #define NTA_HASVARWIDTH 4
117
/* Shape/index information parsed from a register suffix such as
   ".4s", ".d", "/m" or "[2]".  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type (NT_*).  */
  unsigned char defined;	/* Bitmask of the NTA_HAS* flags present.  */
  unsigned width;		/* Element count; 0 when no explicit count
				   was given (e.g. SVE suffixes).  */
  int64_t index;		/* Element index, valid if NTA_HASINDEX.  */
};
125
126 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
127
/* GAS-side relocation/fixup information for the instruction currently
   being assembled.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression being relocated.  */
  int pc_rel;				/* Whether the reloc is PC-relative.  */
  enum aarch64_opnd opnd;		/* Operand the reloc applies to.  */
  uint32_t flags;			/* FIXUP_F_* flags.  */
  unsigned need_libopcodes_p : 1;	/* NOTE(review): presumably set when
					   libopcodes must re-encode the
					   instruction during fixup; confirm
					   at the use sites.  */
};
137
138 struct aarch64_instruction
139 {
140 /* libopcodes structure for instruction intermediate representation. */
141 aarch64_inst base;
142 /* Record assembly errors found during the parsing. */
143 struct
144 {
145 enum aarch64_operand_error_kind kind;
146 const char *error;
147 } parsing_error;
148 /* The condition that appears in the assembly line. */
149 int cond;
150 /* Relocation information (including the GAS internal fixup). */
151 struct reloc reloc;
152 /* Need to generate an immediate in the literal pool. */
153 unsigned gen_lit_pool : 1;
154 };
155
156 typedef struct aarch64_instruction aarch64_instruction;
157
158 static aarch64_instruction inst;
159
160 static bool parse_operands (char *, const aarch64_opcode *);
161 static bool programmer_friendly_fixup (aarch64_instruction *);
162
163 #ifdef OBJ_ELF
164 # define now_instr_sequence seg_info \
165 (now_seg)->tc_segment_info_data.insn_sequence
166 #else
167 static struct aarch64_instr_sequence now_instr_sequence;
168 #endif
169
170 /* Diagnostics inline function utilities.
171
172 These are lightweight utilities which should only be called by parse_operands
173 and other parsers. GAS processes each assembly line by parsing it against
174 instruction template(s), in the case of multiple templates (for the same
175 mnemonic name), those templates are tried one by one until one succeeds or
176 all fail. An assembly line may fail a few templates before being
177 successfully parsed; an error saved here in most cases is not a user error
178 but an error indicating the current template is not the right template.
179 Therefore it is very important that errors can be saved at a low cost during
180 the parsing; we don't want to slow down the whole parsing by recording
181 non-user errors in detail.
182
183 Remember that the objective is to help GAS pick up the most appropriate
184 error message in the case of multiple templates, e.g. FMOV which has 8
185 templates. */
186
187 static inline void
188 clear_error (void)
189 {
190 inst.parsing_error.kind = AARCH64_OPDE_NIL;
191 inst.parsing_error.error = NULL;
192 }
193
194 static inline bool
195 error_p (void)
196 {
197 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
198 }
199
200 static inline const char *
201 get_error_message (void)
202 {
203 return inst.parsing_error.error;
204 }
205
206 static inline enum aarch64_operand_error_kind
207 get_error_kind (void)
208 {
209 return inst.parsing_error.kind;
210 }
211
212 static inline void
213 set_error (enum aarch64_operand_error_kind kind, const char *error)
214 {
215 inst.parsing_error.kind = kind;
216 inst.parsing_error.error = error;
217 }
218
/* Record ERROR with kind AARCH64_OPDE_RECOVERABLE.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
224
/* Record a syntax error with no message; a NULL message tells the
   reporting code to use the DESC field of the corresponding
   aarch64_operand entry to compose the diagnostic.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
232
/* Record ERROR with kind AARCH64_OPDE_SYNTAX_ERROR, overwriting any
   previously recorded error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
238
239 static inline void
240 set_first_syntax_error (const char *error)
241 {
242 if (! error_p ())
243 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
244 }
245
/* Record ERROR with kind AARCH64_OPDE_FATAL_SYNTAX_ERROR.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
251 \f
252 /* Return value for certain parsers when the parsing fails; those parsers
253 return the information of the parsed result, e.g. register number, on
254 success. */
255 #define PARSE_FAIL -1
256
257 /* This is an invalid condition code that means no conditional field is
258 present. */
259 #define COND_ALWAYS 0x10
260
261 typedef struct
262 {
263 const char *template;
264 uint32_t value;
265 } asm_nzcv;
266
267 struct reloc_entry
268 {
269 char *name;
270 bfd_reloc_code_real_type reloc;
271 };
272
273 /* Macros to define the register types and masks for the purpose
274 of parsing. */
275
276 #undef AARCH64_REG_TYPES
277 #define AARCH64_REG_TYPES \
278 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
279 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
280 BASIC_REG_TYPE(SP_32) /* wsp */ \
281 BASIC_REG_TYPE(SP_64) /* sp */ \
282 BASIC_REG_TYPE(Z_32) /* wzr */ \
283 BASIC_REG_TYPE(Z_64) /* xzr */ \
284 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
285 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
286 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
287 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
288 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
289 BASIC_REG_TYPE(VN) /* v[0-31] */ \
290 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
291 BASIC_REG_TYPE(PN) /* p[0-15] */ \
292 BASIC_REG_TYPE(ZA) /* za[0-15] */ \
293 BASIC_REG_TYPE(ZAH) /* za[0-15]h */ \
294 BASIC_REG_TYPE(ZAV) /* za[0-15]v */ \
295 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
296 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
297 /* Typecheck: same, plus SVE registers. */ \
298 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
299 | REG_TYPE(ZN)) \
300 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
301 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
303 /* Typecheck: same, plus SVE registers. */ \
304 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
306 | REG_TYPE(ZN)) \
307 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
308 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
309 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
310 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
311 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
314 /* Typecheck: any [BHSDQ]P FP. */ \
315 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
316 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
317 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
318 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
320 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
321 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
322 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
323 be used for SVE instructions, since Zn and Pn are valid symbols \
324 in other contexts. */ \
325 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
326 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
327 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
328 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
329 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
330 | REG_TYPE(ZN) | REG_TYPE(PN)) \
331 /* Any integer register; used for error messages only. */ \
332 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
333 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
334 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
335 /* Pseudo type to mark the end of the enumerator sequence. */ \
336 BASIC_REG_TYPE(MAX)
337
338 #undef BASIC_REG_TYPE
339 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
340 #undef MULTI_REG_TYPE
341 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
342
343 /* Register type enumerators. */
344 typedef enum aarch64_reg_type_
345 {
346 /* A list of REG_TYPE_*. */
347 AARCH64_REG_TYPES
348 } aarch64_reg_type;
349
350 #undef BASIC_REG_TYPE
351 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
352 #undef REG_TYPE
353 #define REG_TYPE(T) (1 << REG_TYPE_##T)
354 #undef MULTI_REG_TYPE
355 #define MULTI_REG_TYPE(T,V) V,
356
357 /* Structure for a hash table entry for a register. */
358 typedef struct
359 {
360 const char *name;
361 unsigned char number;
362 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
363 unsigned char builtin;
364 } reg_entry;
365
366 /* Values indexed by aarch64_reg_type to assist the type checking. */
367 static const unsigned reg_type_masks[] =
368 {
369 AARCH64_REG_TYPES
370 };
371
372 #undef BASIC_REG_TYPE
373 #undef REG_TYPE
374 #undef MULTI_REG_TYPE
375 #undef AARCH64_REG_TYPES
376
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be synchronized with the aarch64_reg_type
   definitions above.  */
380 static const char *
381 get_reg_expected_msg (aarch64_reg_type reg_type)
382 {
383 const char *msg;
384
385 switch (reg_type)
386 {
387 case REG_TYPE_R_32:
388 msg = N_("integer 32-bit register expected");
389 break;
390 case REG_TYPE_R_64:
391 msg = N_("integer 64-bit register expected");
392 break;
393 case REG_TYPE_R_N:
394 msg = N_("integer register expected");
395 break;
396 case REG_TYPE_R64_SP:
397 msg = N_("64-bit integer or SP register expected");
398 break;
399 case REG_TYPE_SVE_BASE:
400 msg = N_("base register expected");
401 break;
402 case REG_TYPE_R_Z:
403 msg = N_("integer or zero register expected");
404 break;
405 case REG_TYPE_SVE_OFFSET:
406 msg = N_("offset register expected");
407 break;
408 case REG_TYPE_R_SP:
409 msg = N_("integer or SP register expected");
410 break;
411 case REG_TYPE_R_Z_SP:
412 msg = N_("integer, zero or SP register expected");
413 break;
414 case REG_TYPE_FP_B:
415 msg = N_("8-bit SIMD scalar register expected");
416 break;
417 case REG_TYPE_FP_H:
418 msg = N_("16-bit SIMD scalar or floating-point half precision "
419 "register expected");
420 break;
421 case REG_TYPE_FP_S:
422 msg = N_("32-bit SIMD scalar or floating-point single precision "
423 "register expected");
424 break;
425 case REG_TYPE_FP_D:
426 msg = N_("64-bit SIMD scalar or floating-point double precision "
427 "register expected");
428 break;
429 case REG_TYPE_FP_Q:
430 msg = N_("128-bit SIMD scalar or floating-point quad precision "
431 "register expected");
432 break;
433 case REG_TYPE_R_Z_BHSDQ_V:
434 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
435 msg = N_("register expected");
436 break;
437 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
438 msg = N_("SIMD scalar or floating-point register expected");
439 break;
440 case REG_TYPE_VN: /* any V reg */
441 msg = N_("vector register expected");
442 break;
443 case REG_TYPE_ZN:
444 msg = N_("SVE vector register expected");
445 break;
446 case REG_TYPE_PN:
447 msg = N_("SVE predicate register expected");
448 break;
449 default:
450 as_fatal (_("invalid register type %d"), reg_type);
451 }
452 return msg;
453 }
454
455 /* Some well known registers that we refer to directly elsewhere. */
456 #define REG_SP 31
457 #define REG_ZR 31
458
459 /* Instructions take 4 bytes in the object file. */
460 #define INSN_SIZE 4
461
462 static htab_t aarch64_ops_hsh;
463 static htab_t aarch64_cond_hsh;
464 static htab_t aarch64_shift_hsh;
465 static htab_t aarch64_sys_regs_hsh;
466 static htab_t aarch64_pstatefield_hsh;
467 static htab_t aarch64_sys_regs_ic_hsh;
468 static htab_t aarch64_sys_regs_dc_hsh;
469 static htab_t aarch64_sys_regs_at_hsh;
470 static htab_t aarch64_sys_regs_tlbi_hsh;
471 static htab_t aarch64_sys_regs_sr_hsh;
472 static htab_t aarch64_reg_hsh;
473 static htab_t aarch64_barrier_opt_hsh;
474 static htab_t aarch64_nzcv_hsh;
475 static htab_t aarch64_pldop_hsh;
476 static htab_t aarch64_hint_opt_hsh;
477
478 /* Stuff needed to resolve the label ambiguity
479 As:
480 ...
481 label: <insn>
482 may differ from:
483 ...
484 label:
485 <insn> */
486
487 static symbolS *last_label_seen;
488
489 /* Literal pool structure. Held on a per-section
490 and per-sub-section basis. */
491
492 #define MAX_LITERAL_POOL_SIZE 1024
493 typedef struct literal_expression
494 {
495 expressionS exp;
496 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
497 LITTLENUM_TYPE * bignum;
498 } literal_expression;
499
500 typedef struct literal_pool
501 {
502 literal_expression literals[MAX_LITERAL_POOL_SIZE];
503 unsigned int next_free_entry;
504 unsigned int id;
505 symbolS *symbol;
506 segT section;
507 subsegT sub_section;
508 int size;
509 struct literal_pool *next;
510 } literal_pool;
511
512 /* Pointer to a linked list of literal pools. */
513 static literal_pool *list_of_pools = NULL;
514 \f
515 /* Pure syntax. */
516
517 /* This array holds the chars that always start a comment. If the
518 pre-processor is disabled, these aren't very useful. */
519 const char comment_chars[] = "";
520
521 /* This array holds the chars that only start a comment at the beginning of
522 a line. If the line seems to have the form '# 123 filename'
523 .line and .file directives will appear in the pre-processed output. */
524 /* Note that input_file.c hand checks for '#' at the beginning of the
525 first line of the input file. This is because the compiler outputs
526 #NO_APP at the beginning of its output. */
527 /* Also note that comments like this one will always work. */
528 const char line_comment_chars[] = "#";
529
530 const char line_separator_chars[] = ";";
531
532 /* Chars that can be used to separate mant
533 from exp in floating point numbers. */
534 const char EXP_CHARS[] = "eE";
535
536 /* Chars that mean this number is a floating point constant. */
537 /* As in 0f12.456 */
538 /* or 0d1.2345e12 */
539
540 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
541
542 /* Prefix character that indicates the start of an immediate value. */
543 #define is_immediate_prefix(C) ((C) == '#')
544
545 /* Separator character handling. */
546
547 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
548
static inline bool
skip_past_char (char **str, char c)
{
  /* If *STR begins with character C, consume it and return TRUE;
     otherwise leave *STR untouched and return FALSE.  */
  if (**str != c)
    return false;
  ++*str;
  return true;
}
560
561 #define skip_past_comma(str) skip_past_char (str, ',')
562
563 /* Arithmetic expressions (possibly involving symbols). */
564
565 static bool in_aarch64_get_expression = false;
566
567 /* Third argument to aarch64_get_expression. */
568 #define GE_NO_PREFIX false
569 #define GE_OPT_PREFIX true
570
571 /* Fourth argument to aarch64_get_expression. */
572 #define ALLOW_ABSENT false
573 #define REJECT_ABSENT true
574
575 /* Fifth argument to aarch64_get_expression. */
576 #define NORMAL_RESOLUTION false
577
/* Return TRUE if the string pointed to by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.
   If DEFER_RESOLUTION is true, then do not resolve expressions against
   constant symbols.  Necessary if the expression is part of a fixup
   that uses a reloc that must be emitted.  */
587
static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent,
			bool defer_resolution)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  /* Optionally consume a leading '#' immediate prefix.  */
  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression parser works on input_line_pointer; save it
     and redirect it to *STR for the duration of the call.  The
     in_aarch64_get_expression flag lets md_operand know a failure
     should be flagged via O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_aarch64_get_expression = true;
  if (defer_resolution)
    seg = deferred_expression (ep);
  else
    seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  A '#' prefix makes
	 the error fatal (no point retrying other templates); otherwise
	 only the first error is kept.  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  /* Success: advance *STR past the expression and restore the global
     input pointer.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
651
652 /* Turn a string in input_line_pointer into a floating point constant
653 of type TYPE, and store the appropriate bytes in *LITP. The number
654 of LITTLENUMS emitted is stored in *SIZEP. An error message is
655 returned, or NULL on OK. */
656
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Delegate to the generic IEEE encoder, honouring target endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
662
663 /* We handle all bad expressions here, so that we can report the faulty
664 instruction in the error message. */
665 void
666 md_operand (expressionS * exp)
667 {
668 if (in_aarch64_get_expression)
669 exp->X_op = O_illegal;
670 }
671
672 /* Immediate values. */
673
674 /* Errors may be set multiple times during parsing or bit encoding
675 (particularly in the Neon bits), but usually the earliest error which is set
676 will be the most meaningful. Avoid overwriting it with later (cascading)
677 errors by calling this function. */
678
static void
first_error (const char *error)
{
  /* Keep only the earliest error; later ones are usually cascades.  */
  if (error_p ())
    return;
  set_syntax_error (error);
}
685
686 /* Similar to first_error, but this function accepts formatted error
687 message. */
688 static void
689 first_error_fmt (const char *format, ...)
690 {
691 va_list args;
692 enum
693 { size = 100 };
694 /* N.B. this single buffer will not cause error messages for different
695 instructions to pollute each other; this is because at the end of
696 processing of each assembly line, error message if any will be
697 collected by as_bad. */
698 static char buffer[size];
699
700 if (! error_p ())
701 {
702 int ret ATTRIBUTE_UNUSED;
703 va_start (args, format);
704 ret = vsnprintf (buffer, size, format, args);
705 know (ret <= size - 1 && ret >= 0);
706 va_end (args);
707 set_syntax_error (buffer);
708 }
709 }
710
711 /* Register parsing. */
712
713 /* Generic register parser which is called by other specialized
714 register parsers.
715 CCP points to what should be the beginning of a register name.
716 If it is indeed a valid register name, advance CCP over it and
717 return the reg_entry structure; otherwise return NULL.
718 It does not issue diagnostics. */
719
720 static reg_entry *
721 parse_reg (char **ccp)
722 {
723 char *start = *ccp;
724 char *p;
725 reg_entry *reg;
726
727 #ifdef REGISTER_PREFIX
728 if (*start != REGISTER_PREFIX)
729 return NULL;
730 start++;
731 #endif
732
733 p = start;
734 if (!ISALPHA (*p) || !is_name_beginner (*p))
735 return NULL;
736
737 do
738 p++;
739 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
740
741 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
742
743 if (!reg)
744 return NULL;
745
746 *ccp = p;
747 return reg;
748 }
749
750 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
751 return FALSE. */
752 static bool
753 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
754 {
755 return (reg_type_masks[type] & (1 << reg->type)) != 0;
756 }
757
758 /* Try to parse a base or offset register. Allow SVE base and offset
759 registers if REG_TYPE includes SVE registers. Return the register
760 entry on success, setting *QUALIFIER to the register qualifier.
761 Return null otherwise.
762
763 Note that this function does not issue any diagnostics. */
764
765 static const reg_entry *
766 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
767 aarch64_opnd_qualifier_t *qualifier)
768 {
769 char *str = *ccp;
770 const reg_entry *reg = parse_reg (&str);
771
772 if (reg == NULL)
773 return NULL;
774
775 switch (reg->type)
776 {
777 case REG_TYPE_R_32:
778 case REG_TYPE_SP_32:
779 case REG_TYPE_Z_32:
780 *qualifier = AARCH64_OPND_QLF_W;
781 break;
782
783 case REG_TYPE_R_64:
784 case REG_TYPE_SP_64:
785 case REG_TYPE_Z_64:
786 *qualifier = AARCH64_OPND_QLF_X;
787 break;
788
789 case REG_TYPE_ZN:
790 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
791 || str[0] != '.')
792 return NULL;
793 switch (TOLOWER (str[1]))
794 {
795 case 's':
796 *qualifier = AARCH64_OPND_QLF_S_S;
797 break;
798 case 'd':
799 *qualifier = AARCH64_OPND_QLF_S_D;
800 break;
801 default:
802 return NULL;
803 }
804 str += 2;
805 break;
806
807 default:
808 return NULL;
809 }
810
811 *ccp = str;
812
813 return reg;
814 }
815
816 /* Try to parse a base or offset register. Return the register entry
817 on success, setting *QUALIFIER to the register qualifier. Return null
818 otherwise.
819
820 Note that this function does not issue any diagnostics. */
821
static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any 32/64-bit integer, SP or zero register (no SVE).  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
827
828 /* Parse the qualifier of a vector register or vector element of type
829 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
830 succeeds; otherwise return FALSE.
831
832 Accept only one occurrence of:
833 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
834 b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE registers (Zn/Pn) never take an explicit element count, and a
     bare "b/h/s/d/q" suffix has an implicit count of 0.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  /* Map the element-size letter to its NT_* type and size in bits.  */
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only accepted for SVE registers or with an
	 explicit count of 1.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* With an explicit count, accept only the 64- and 128-bit total
     arrangements plus the short 2h and 4b forms.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
913
914 /* *STR contains an SVE zero/merge predication suffix. Parse it into
915 *PARSED_TYPE and point *STR at the end of the suffix. */
916
917 static bool
918 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
919 {
920 char *ptr = *str;
921
922 /* Skip '/'. */
923 gas_assert (*ptr == '/');
924 ptr++;
925 switch (TOLOWER (*ptr))
926 {
927 case 'z':
928 parsed_type->type = NT_zero;
929 break;
930 case 'm':
931 parsed_type->type = NT_merge;
932 break;
933 default:
934 if (*ptr != '\0' && *ptr != ',')
935 first_error_fmt (_("unexpected character `%c' in predication type"),
936 *ptr);
937 else
938 first_error (_("missing predication type"));
939 return false;
940 }
941 parsed_type->width = 0;
942 *str = ptr + 1;
943 return true;
944 }
945
946 /* Parse a register of the type TYPE.
947
948 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
949 name or the parsed register is not of TYPE.
950
951 Otherwise return the register number, and optionally fill in the actual
952 type of the register in *RTYPE when multiple alternatives were given, and
953 return the register shape and element index information in *TYPEINFO.
954
955 IN_REG_LIST should be set with TRUE if the caller is parsing a register
956 list. */
957
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bool in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start from an "untyped, unindexed" description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the register's concrete type.  */
  type = reg->type;

  /* Parse an optional ".<shape>" suffix (V/Z/P registers) or a "/z",
     "/m" predication suffix (P registers only).  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* Parse an optional "[<index>]" element index.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
			      NORMAL_RESOLUTION);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  Note: only the error
     is recorded here; parsing still returns the register number.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1082
1083 /* Parse register.
1084
1085 Return the register number on success; return PARSE_FAIL otherwise.
1086
1087 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1088 the register (e.g. NEON double or quad reg when either has been requested).
1089
1090 If this is a NEON vector register with additional type information, fill
1091 in the struct pointed to by VECTYPE (if non-NULL).
1092
1093 This parser does not handle register list. */
1094
1095 static int
1096 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1097 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1098 {
1099 struct vector_type_el atype;
1100 char *str = *ccp;
1101 int reg = parse_typed_reg (&str, type, rtype, &atype,
1102 /*in_reg_list= */ false);
1103
1104 if (reg == PARSE_FAIL)
1105 return PARSE_FAIL;
1106
1107 if (vectype)
1108 *vectype = atype;
1109
1110 *ccp = str;
1111
1112 return reg;
1113 }
1114
1115 static inline bool
1116 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1117 {
1118 return
1119 e1.type == e2.type
1120 && e1.defined == e2.defined
1121 && e1.width == e2.width && e1.index == e2.index;
1122 }
1123
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  /* Once an error is seen, keep parsing (to consume the whole list and
     report the earliest diagnostic) but return PARSE_FAIL at the end.  */
  bool error = false;
  bool expect_index = false;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  /* VAL is the register just parsed; VAL_RANGE is the start of the
     current "Vm-Vn" range (or equal to VAL outside a range).  */
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ true);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      /* An index on any element means an index is required after the
	 closing brace as well.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  /* Bump past the already-recorded range start so the loop
	     below adds only the remaining registers of the range.  */
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Pack each register number into the next 5-bit field.  For a
	 single register VAL_RANGE == VAL and this adds exactly one.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue on ','; on '-' set IN_RANGE so the next iteration treats
     the following register as the end of a range.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* Parse the trailing "[<index>]" shared by all elements.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
				  NORMAL_RESOLUTION);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  /* Advance the caller's pointer even on error so diagnostics point
     past the list.  */
  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1285
1286 /* Directives: register aliases. */
1287
1288 static reg_entry *
1289 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1290 {
1291 reg_entry *new;
1292 const char *name;
1293
1294 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1295 {
1296 if (new->builtin)
1297 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1298 str);
1299
1300 /* Only warn about a redefinition if it's not defined as the
1301 same register. */
1302 else if (new->number != number || new->type != type)
1303 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1304
1305 return NULL;
1306 }
1307
1308 name = xstrdup (str);
1309 new = XNEW (reg_entry);
1310
1311 new->name = name;
1312 new->number = number;
1313 new->type = type;
1314 new->builtin = false;
1315
1316 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1317
1318 return new;
1319 }
1320
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;			/* Step over " .req " (6 chars).  */
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      /* NBUF is reused in place: first forced to upper case, then to
	 lower case, skipping whichever form matches NEWNAME itself.  */
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	     foo .req r0
	     Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1400
/* Handler for a bare ".req" pseudo-op.  Should never be called, as
   .req goes between the alias and the register name, not at the
   beginning of the line; reaching here means the directive was
   malformed.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1408
1409 /* The .unreq directive deletes an alias which was previously defined
1410 by .req. For example:
1411
1412 my_alias .req r11
1413 .unreq my_alias */
1414
1415 static void
1416 s_unreq (int a ATTRIBUTE_UNUSED)
1417 {
1418 char *name;
1419 char saved_char;
1420
1421 name = input_line_pointer;
1422
1423 while (*input_line_pointer != 0
1424 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1425 ++input_line_pointer;
1426
1427 saved_char = *input_line_pointer;
1428 *input_line_pointer = 0;
1429
1430 if (!*name)
1431 as_bad (_("invalid syntax for .unreq directive"));
1432 else
1433 {
1434 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1435
1436 if (!reg)
1437 as_bad (_("unknown register alias '%s'"), name);
1438 else if (reg->builtin)
1439 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1440 name);
1441 else
1442 {
1443 char *p;
1444 char *nbuf;
1445
1446 str_hash_delete (aarch64_reg_hsh, name);
1447 free ((char *) reg->name);
1448 free (reg);
1449
1450 /* Also locate the all upper case and all lower case versions.
1451 Do not complain if we cannot find one or the other as it
1452 was probably deleted above. */
1453
1454 nbuf = strdup (name);
1455 for (p = nbuf; *p; p++)
1456 *p = TOUPPER (*p);
1457 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1458 if (reg)
1459 {
1460 str_hash_delete (aarch64_reg_hsh, nbuf);
1461 free ((char *) reg->name);
1462 free (reg);
1463 }
1464
1465 for (p = nbuf; *p; p++)
1466 *p = TOLOWER (*p);
1467 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1468 if (reg)
1469 {
1470 str_hash_delete (aarch64_reg_hsh, nbuf);
1471 free ((char *) reg->name);
1472 free (reg);
1473 }
1474
1475 free (nbuf);
1476 }
1477 }
1478
1479 *input_line_pointer = saved_char;
1480 demand_empty_rest_of_line ();
1481 }
1482
1483 /* Directives: Instruction set selection. */
1484
1485 #ifdef OBJ_ELF
1486 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1487 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1488 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1489 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1490
/* Create a new mapping symbol for the transition to STATE, at offset
   VALUE within FRAG.  STATE must be MAP_DATA ("$d") or MAP_INSN ("$x");
   any other state aborts.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  /* An earlier symbol at offset 0 is superseded: remove it.  */
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag are created in address order;
	 an equal address means the previous one must be replaced.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1546
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Emit a "$d" symbol at VALUE within FRAG covering BYTES bytes of
   padding, followed by a STATE mapping symbol at the aligned address
   VALUE + BYTES.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* At offset 0 the removed symbol must also have been the
	     frag's first mapping symbol.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1574
1575 static void mapping_state_2 (enum mstate state, int max_chars);
1576
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Emitting code after data in a text section: the bytes before
	 this point must be marked as data.  Only add the symbol if the
	 offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1618
1619 /* Same as mapping_state, but MAX_CHARS bytes have already been
1620 allocated. Put the mapping symbol that far back. */
1621
1622 static void
1623 mapping_state_2 (enum mstate state, int max_chars)
1624 {
1625 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1626
1627 if (!SEG_NORMAL (now_seg))
1628 return;
1629
1630 if (mapstate == state)
1631 /* The mapping symbol has already been emitted.
1632 There is nothing else to do. */
1633 return;
1634
1635 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1636 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1637 }
1638 #else
1639 #define mapping_state(x) /* nothing */
1640 #define mapping_state_2(x, y) /* nothing */
1641 #endif
1642
1643 /* Directives: sectioning and alignment. */
1644
/* Implement the .bss directive: switch to the BSS section and mark
   what follows as data for mapping-symbol purposes.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1654
/* Implement the .even directive: pad to a 2-byte boundary and record
   that alignment for the current section.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);	/* 2**1 == 2-byte alignment.  */

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1666
1667 /* Directives: Literal pools. */
1668
1669 static literal_pool *
1670 find_literal_pool (int size)
1671 {
1672 literal_pool *pool;
1673
1674 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1675 {
1676 if (pool->section == now_seg
1677 && pool->sub_section == now_subseg && pool->size == size)
1678 break;
1679 }
1680
1681 return pool;
1682 }
1683
1684 static literal_pool *
1685 find_or_make_literal_pool (int size)
1686 {
1687 /* Next literal pool ID number. */
1688 static unsigned int latest_pool_num = 1;
1689 literal_pool *pool;
1690
1691 pool = find_literal_pool (size);
1692
1693 if (pool == NULL)
1694 {
1695 /* Create a new pool. */
1696 pool = XNEW (literal_pool);
1697 if (!pool)
1698 return NULL;
1699
1700 /* Currently we always put the literal pool in the current text
1701 section. If we were generating "small" model code where we
1702 knew that all code and initialised data was within 1MB then
1703 we could output literals to mergeable, read-only data
1704 sections. */
1705
1706 pool->next_free_entry = 0;
1707 pool->section = now_seg;
1708 pool->sub_section = now_subseg;
1709 pool->size = size;
1710 pool->next = list_of_pools;
1711 pool->symbol = NULL;
1712
1713 /* Add it to the list. */
1714 list_of_pools = pool;
1715 }
1716
1717 /* New pools, and emptied pools, will have a NULL symbol. */
1718 if (pool->symbol == NULL)
1719 {
1720 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1721 &zero_address_frag, 0);
1722 pool->id = latest_pool_num++;
1723 }
1724
1725 /* Done. */
1726 return pool;
1727 }
1728
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   On return *EXP is rewritten to an O_symbol reference to the pool
   symbol plus the entry's byte offset, ready for use as a load address.
   Return TRUE on success, otherwise return FALSE.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool, so identical
     constants and symbol references share one entry.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Redirect the expression at the pool entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1788
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Give the previously created SYMBOLP its NAME, SEGMENT, value VALU and
   owning FRAG, then append it to the global symbol chain.  NAME is
   copied into the notes obstack, so the caller may reuse its buffer.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1839
1840
/* Implement the .ltorg/.pool directive: dump every non-empty literal
   pool (4-byte, 8-byte and 16-byte element sizes) of the current
   (sub)section at the current location, then mark each pool empty.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Element sizes 4, 8 and 16 bytes (align is the log2).  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* \002 in the name keeps it out of the way of user symbols.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  A later reference will re-create the
	 symbol via find_or_make_literal_pool.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1899
1900 #ifdef OBJ_ELF
1901 /* Forward declarations for functions below, in the MD interface
1902 section. */
1903 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1904 static struct reloc_table_entry * find_reloc_table_entry (char **);
1905
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Implement .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8):
   emit a comma-separated list of NBYTES-sized data values.  A
   ":reloc:"-style relocation suffix is recognised but rejected with a
   diagnostic (see N.B. above).  */
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:suffix:" after the symbol.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1961
/* Implement the .variant_pcs directive: mark the named symbol as
   following a variant procedure-call convention by setting
   STO_AARCH64_VARIANT_PCS in its ELF st_other field.  */

static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  /* The symbol may be referenced before it is defined; create it now.  */
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
1984 #endif /* OBJ_ELF */
1985
/* Implement the .inst directive: output each comma-separated constant
   as a 32-bit word, but mark it as an instruction (instruction
   alignment, MAP_INSN mapping symbol, and byte-swapping on big-endian
   targets, since AArch64 instructions are always little-endian).  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      if (target_big_endian)
	{
	  /* Instructions are stored little-endian regardless of the
	     data endianness, so pre-swap before emit_expr.  */
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2038
/* Implement .cfi_b_key_frame: record in the current CFI FDE that the
   return address of this frame is signed with the B pointer-auth key
   (the default is the A key).  */
static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2046
2047 #ifdef OBJ_ELF
/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.
   The .tlsdescadd directive annotates the following instruction for
   TLS descriptor linker relaxation.  */

static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the following instruction.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2062
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2082
/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.
   Companion to .tlsdescadd/.tlsdesccall above; same frag_grow trick.  */

static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the following instruction.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2097 #endif /* OBJ_ELF */
2098
2099 static void s_aarch64_arch (int);
2100 static void s_aarch64_cpu (int);
2101 static void s_aarch64_arch_extension (int);
2102
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is an alias for ".ltorg".  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Architecture/extension selection.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS descriptor relaxation annotations.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* Data directives; the integer arg is the element size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* Half-precision float formats; the char arg selects the format.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2136 \f
2137
2138 /* Check whether STR points to a register name followed by a comma or the
2139 end of line; REG_TYPE indicates which register types are checked
2140 against. Return TRUE if STR is such a register name; otherwise return
2141 FALSE. The function does not intend to produce any diagnostics, but since
2142 the register parser aarch64_reg_parse, which is called by this function,
2143 does produce diagnostics, we call clear_error to clear any diagnostics
2144 that may be generated by aarch64_reg_parse.
2145 Also, the function returns FALSE directly if there is any user error
2146 present at the function entry. This prevents the existing diagnostics
2147 state from being spoiled.
2148 The function currently serves parse_constant_immediate and
2149 parse_big_immediate only. */
2150 static bool
2151 reg_name_p (char *str, aarch64_reg_type reg_type)
2152 {
2153 int reg;
2154
2155 /* Prevent the diagnostics state from being spoiled. */
2156 if (error_p ())
2157 return false;
2158
2159 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2160
2161 /* Clear the parsing error that may be set by the reg parser. */
2162 clear_error ();
2163
2164 if (reg == PARSE_FAIL)
2165 return false;
2166
2167 skip_whitespace (str);
2168 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2169 return true;
2170
2171 return false;
2172 }
2173
2174 /* Parser functions used exclusively in instruction operands. */
2175
2176 /* Parse an immediate expression which may not be constant.
2177
2178 To prevent the expression parser from pushing a register name
2179 into the symbol table as an undefined symbol, firstly a check is
2180 done to find out whether STR is a register of type REG_TYPE followed
2181 by a comma or the end of line. Return FALSE if STR is such a string. */
2182
2183 static bool
2184 parse_immediate_expression (char **str, expressionS *exp,
2185 aarch64_reg_type reg_type)
2186 {
2187 if (reg_name_p (*str, reg_type))
2188 {
2189 set_recoverable_error (_("immediate operand required"));
2190 return false;
2191 }
2192
2193 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT,
2194 NORMAL_RESOLUTION);
2195
2196 if (exp->X_op == O_absent)
2197 {
2198 set_fatal_syntax_error (_("missing immediate expression"));
2199 return false;
2200 }
2201
2202 return true;
2203 }
2204
2205 /* Constant immediate-value read function for use in insn parsing.
2206 STR points to the beginning of the immediate (with the optional
2207 leading #); *VAL receives the value. REG_TYPE says which register
2208 names should be treated as registers rather than as symbolic immediates.
2209
2210 Return TRUE on success; otherwise return FALSE. */
2211
2212 static bool
2213 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2214 {
2215 expressionS exp;
2216
2217 if (! parse_immediate_expression (str, &exp, reg_type))
2218 return false;
2219
2220 if (exp.X_op != O_constant)
2221 {
2222 set_syntax_error (_("constant expression required"));
2223 return false;
2224 }
2225
2226 *val = exp.X_add_number;
2227 return true;
2228 }
2229
/* Extract the 8-bit AArch64 floating-point immediate encoding from the
   IEEE single-precision word IMM: the sign bit becomes b[7] and the
   bits b[25:19] (low exponent bits plus top mantissa bits) become
   b[6:0].  IMM is assumed to satisfy aarch64_imm_float_p.  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t sign = (imm >> (31 - 7)) & 0x80;	/* b[31] -> b[7]  */
  uint32_t rest = (imm >> 19) & 0x7f;		/* b[25:19] -> b[6:0]  */
  return sign | rest;
}
2236
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* If a single-precision floating-point value has the following bit
     pattern, it can be expressed in the AArch64 8-bit floating-point
     format:

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~ e.  */

  uint32_t expected_high;

  /* The lower 19 bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 25-29 must be the complement of bit 30 ('Eeeeee').  */
  expected_high = (imm & 0x40000000) == 0 ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected_high;
}
2269
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.

   A convertible double has the bit pattern

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
				    if Eeee_eeee != 1111_1111

   where n, e, s and S are either 0 or 1 independently and where ~ is the
   inverse of E.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  uint32_t top = (uint32_t) (imm >> 32);
  uint32_t bottom = (uint32_t) imm;
  uint32_t expected;

  /* The 29 low-order fraction bits must all be zero, or precision
     would be lost in the narrowing.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The three '~' bits must be the complement of E (bit 62 of the
     double, bit 30 of TOP).  */
  expected = ((top >> 30) & 0x1) != 0 ? 0x40000000 : 0x38000000;
  if ((top & 0x78000000) != expected)
    return false;

  /* The narrowed exponent Eeee_eeee must not be all-ones (that would
     be an infinity/NaN exponent in single precision).  */
  if ((top & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (top & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	    | ((top << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	    | (bottom >> 29);		/* 3 S bits.  */
  return true;
}
2317
2318 /* Return true if we should treat OPERAND as a double-precision
2319 floating-point operand rather than a single-precision one. */
2320 static bool
2321 double_precision_operand_p (const aarch64_opnd_info *operand)
2322 {
2323 /* Check for unsuffixed SVE registers, which are allowed
2324 for LDR and STR but not in instructions that require an
2325 immediate. We get better error messages if we arbitrarily
2326 pick one size, parse the immediate normally, and then
2327 report the match failure in the normal way. */
2328 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2329 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2330 }
2331
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* The immediate may carry an optional '#' prefix.  */
  skip_past_char (&str, '#');

  /* Peek past any whitespace to classify the literal, without
     advancing STR itself.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit encoding to 32 bits; reject it if any
	     precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision encoding must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name cannot be an immediate; flag this as a
	 recoverable error so that other parses may be attempted.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Parse a decimal floating-point literal into single-precision
	 littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP);
	 assemble the littlenums most-significant first.  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2407
2408 /* Less-generic immediate-value read function with the possibility of loading
2409 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2410 instructions.
2411
2412 To prevent the expression parser from pushing a register name into the
2413 symbol table as an undefined symbol, a check is firstly done to find
2414 out whether STR is a register of type REG_TYPE followed by a comma or
2415 the end of line. Return FALSE if STR is such a register. */
2416
2417 static bool
2418 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2419 {
2420 char *ptr = *str;
2421
2422 if (reg_name_p (ptr, reg_type))
2423 {
2424 set_syntax_error (_("immediate operand required"));
2425 return false;
2426 }
2427
2428 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT,
2429 NORMAL_RESOLUTION);
2430
2431 if (inst.reloc.exp.X_op == O_constant)
2432 *imm = inst.reloc.exp.X_add_number;
2433
2434 *str = ptr;
2435
2436 return true;
2437 }
2438
2439 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2440 if NEED_LIBOPCODES is non-zero, the fixup will need
2441 assistance from the libopcodes. */
2442
2443 static inline void
2444 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2445 const aarch64_opnd_info *operand,
2446 int need_libopcodes_p)
2447 {
2448 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2449 reloc->opnd = operand->type;
2450 if (need_libopcodes_p)
2451 reloc->need_libopcodes_p = 1;
2452 };
2453
/* Return TRUE if the instruction needs to be fixed up later internally by
   the GAS; otherwise return FALSE.  Inspects the reloc recorded for the
   instruction currently being assembled (the global INST).  */

static inline bool
aarch64_gas_internal_fixup_p (void)
{
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2462
2463 /* Assign the immediate value to the relevant field in *OPERAND if
2464 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2465 needs an internal fixup in a later stage.
2466 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2467 IMM.VALUE that may get assigned with the constant. */
2468 static inline void
2469 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2470 aarch64_opnd_info *operand,
2471 int addr_off_p,
2472 int need_libopcodes_p,
2473 int skip_p)
2474 {
2475 if (reloc->exp.X_op == O_constant)
2476 {
2477 if (addr_off_p)
2478 operand->addr.offset.imm = reloc->exp.X_add_number;
2479 else
2480 operand->imm.value = reloc->exp.X_add_number;
2481 reloc->type = BFD_RELOC_UNUSED;
2482 }
2483 else
2484 {
2485 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2486 /* Tell libopcodes to ignore this operand or not. This is helpful
2487 when one of the operands needs to be fixed up later but we need
2488 libopcodes to check the other operands. */
2489 operand->skip = skip_p;
2490 }
2491 }
2492
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  /* Modifier name as written in assembly, without the colons.  */
  const char *name;
  /* Non-zero if the resulting relocation is PC-relative.  */
  int pc_rel;
  /* Relocation code to emit for each instruction context in which the
     modifier may appear; 0 means the modifier is not valid there.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2512
2513 static struct reloc_table_entry reloc_table[] =
2514 {
2515 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2516 {"lo12", 0,
2517 0, /* adr_type */
2518 0,
2519 0,
2520 BFD_RELOC_AARCH64_ADD_LO12,
2521 BFD_RELOC_AARCH64_LDST_LO12,
2522 0},
2523
2524 /* Higher 21 bits of pc-relative page offset: ADRP */
2525 {"pg_hi21", 1,
2526 0, /* adr_type */
2527 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2528 0,
2529 0,
2530 0,
2531 0},
2532
2533 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2534 {"pg_hi21_nc", 1,
2535 0, /* adr_type */
2536 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2537 0,
2538 0,
2539 0,
2540 0},
2541
2542 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2543 {"abs_g0", 0,
2544 0, /* adr_type */
2545 0,
2546 BFD_RELOC_AARCH64_MOVW_G0,
2547 0,
2548 0,
2549 0},
2550
2551 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2552 {"abs_g0_s", 0,
2553 0, /* adr_type */
2554 0,
2555 BFD_RELOC_AARCH64_MOVW_G0_S,
2556 0,
2557 0,
2558 0},
2559
2560 /* Less significant bits 0-15 of address/value: MOVK, no check */
2561 {"abs_g0_nc", 0,
2562 0, /* adr_type */
2563 0,
2564 BFD_RELOC_AARCH64_MOVW_G0_NC,
2565 0,
2566 0,
2567 0},
2568
2569 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2570 {"abs_g1", 0,
2571 0, /* adr_type */
2572 0,
2573 BFD_RELOC_AARCH64_MOVW_G1,
2574 0,
2575 0,
2576 0},
2577
2578 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2579 {"abs_g1_s", 0,
2580 0, /* adr_type */
2581 0,
2582 BFD_RELOC_AARCH64_MOVW_G1_S,
2583 0,
2584 0,
2585 0},
2586
2587 /* Less significant bits 16-31 of address/value: MOVK, no check */
2588 {"abs_g1_nc", 0,
2589 0, /* adr_type */
2590 0,
2591 BFD_RELOC_AARCH64_MOVW_G1_NC,
2592 0,
2593 0,
2594 0},
2595
2596 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2597 {"abs_g2", 0,
2598 0, /* adr_type */
2599 0,
2600 BFD_RELOC_AARCH64_MOVW_G2,
2601 0,
2602 0,
2603 0},
2604
2605 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2606 {"abs_g2_s", 0,
2607 0, /* adr_type */
2608 0,
2609 BFD_RELOC_AARCH64_MOVW_G2_S,
2610 0,
2611 0,
2612 0},
2613
2614 /* Less significant bits 32-47 of address/value: MOVK, no check */
2615 {"abs_g2_nc", 0,
2616 0, /* adr_type */
2617 0,
2618 BFD_RELOC_AARCH64_MOVW_G2_NC,
2619 0,
2620 0,
2621 0},
2622
2623 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2624 {"abs_g3", 0,
2625 0, /* adr_type */
2626 0,
2627 BFD_RELOC_AARCH64_MOVW_G3,
2628 0,
2629 0,
2630 0},
2631
2632 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2633 {"prel_g0", 1,
2634 0, /* adr_type */
2635 0,
2636 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2637 0,
2638 0,
2639 0},
2640
2641 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2642 {"prel_g0_nc", 1,
2643 0, /* adr_type */
2644 0,
2645 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2646 0,
2647 0,
2648 0},
2649
2650 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2651 {"prel_g1", 1,
2652 0, /* adr_type */
2653 0,
2654 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2655 0,
2656 0,
2657 0},
2658
2659 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2660 {"prel_g1_nc", 1,
2661 0, /* adr_type */
2662 0,
2663 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2664 0,
2665 0,
2666 0},
2667
2668 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2669 {"prel_g2", 1,
2670 0, /* adr_type */
2671 0,
2672 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2673 0,
2674 0,
2675 0},
2676
2677 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2678 {"prel_g2_nc", 1,
2679 0, /* adr_type */
2680 0,
2681 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2682 0,
2683 0,
2684 0},
2685
2686 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2687 {"prel_g3", 1,
2688 0, /* adr_type */
2689 0,
2690 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2691 0,
2692 0,
2693 0},
2694
2695 /* Get to the page containing GOT entry for a symbol. */
2696 {"got", 1,
2697 0, /* adr_type */
2698 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2699 0,
2700 0,
2701 0,
2702 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2703
2704 /* 12 bit offset into the page containing GOT entry for that symbol. */
2705 {"got_lo12", 0,
2706 0, /* adr_type */
2707 0,
2708 0,
2709 0,
2710 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2711 0},
2712
2713 /* 0-15 bits of address/value: MOVk, no check. */
2714 {"gotoff_g0_nc", 0,
2715 0, /* adr_type */
2716 0,
2717 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2718 0,
2719 0,
2720 0},
2721
2722 /* Most significant bits 16-31 of address/value: MOVZ. */
2723 {"gotoff_g1", 0,
2724 0, /* adr_type */
2725 0,
2726 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2727 0,
2728 0,
2729 0},
2730
2731 /* 15 bit offset into the page containing GOT entry for that symbol. */
2732 {"gotoff_lo15", 0,
2733 0, /* adr_type */
2734 0,
2735 0,
2736 0,
2737 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2738 0},
2739
2740 /* Get to the page containing GOT TLS entry for a symbol */
2741 {"gottprel_g0_nc", 0,
2742 0, /* adr_type */
2743 0,
2744 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2745 0,
2746 0,
2747 0},
2748
2749 /* Get to the page containing GOT TLS entry for a symbol */
2750 {"gottprel_g1", 0,
2751 0, /* adr_type */
2752 0,
2753 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2754 0,
2755 0,
2756 0},
2757
2758 /* Get to the page containing GOT TLS entry for a symbol */
2759 {"tlsgd", 0,
2760 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2761 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2762 0,
2763 0,
2764 0,
2765 0},
2766
2767 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2768 {"tlsgd_lo12", 0,
2769 0, /* adr_type */
2770 0,
2771 0,
2772 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2773 0,
2774 0},
2775
2776 /* Lower 16 bits address/value: MOVk. */
2777 {"tlsgd_g0_nc", 0,
2778 0, /* adr_type */
2779 0,
2780 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2781 0,
2782 0,
2783 0},
2784
2785 /* Most significant bits 16-31 of address/value: MOVZ. */
2786 {"tlsgd_g1", 0,
2787 0, /* adr_type */
2788 0,
2789 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2790 0,
2791 0,
2792 0},
2793
2794 /* Get to the page containing GOT TLS entry for a symbol */
2795 {"tlsdesc", 0,
2796 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2797 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2798 0,
2799 0,
2800 0,
2801 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2802
2803 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2804 {"tlsdesc_lo12", 0,
2805 0, /* adr_type */
2806 0,
2807 0,
2808 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2809 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2810 0},
2811
2812 /* Get to the page containing GOT TLS entry for a symbol.
2813 The same as GD, we allocate two consecutive GOT slots
2814 for module index and module offset, the only difference
2815 with GD is the module offset should be initialized to
2816 zero without any outstanding runtime relocation. */
2817 {"tlsldm", 0,
2818 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2819 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2820 0,
2821 0,
2822 0,
2823 0},
2824
2825 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2826 {"tlsldm_lo12_nc", 0,
2827 0, /* adr_type */
2828 0,
2829 0,
2830 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2831 0,
2832 0},
2833
2834 /* 12 bit offset into the module TLS base address. */
2835 {"dtprel_lo12", 0,
2836 0, /* adr_type */
2837 0,
2838 0,
2839 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2840 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2841 0},
2842
2843 /* Same as dtprel_lo12, no overflow check. */
2844 {"dtprel_lo12_nc", 0,
2845 0, /* adr_type */
2846 0,
2847 0,
2848 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2849 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2850 0},
2851
2852 /* bits[23:12] of offset to the module TLS base address. */
2853 {"dtprel_hi12", 0,
2854 0, /* adr_type */
2855 0,
2856 0,
2857 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2858 0,
2859 0},
2860
2861 /* bits[15:0] of offset to the module TLS base address. */
2862 {"dtprel_g0", 0,
2863 0, /* adr_type */
2864 0,
2865 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2866 0,
2867 0,
2868 0},
2869
2870 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2871 {"dtprel_g0_nc", 0,
2872 0, /* adr_type */
2873 0,
2874 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2875 0,
2876 0,
2877 0},
2878
2879 /* bits[31:16] of offset to the module TLS base address. */
2880 {"dtprel_g1", 0,
2881 0, /* adr_type */
2882 0,
2883 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2884 0,
2885 0,
2886 0},
2887
2888 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2889 {"dtprel_g1_nc", 0,
2890 0, /* adr_type */
2891 0,
2892 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2893 0,
2894 0,
2895 0},
2896
2897 /* bits[47:32] of offset to the module TLS base address. */
2898 {"dtprel_g2", 0,
2899 0, /* adr_type */
2900 0,
2901 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2902 0,
2903 0,
2904 0},
2905
2906 /* Lower 16 bit offset into GOT entry for a symbol */
2907 {"tlsdesc_off_g0_nc", 0,
2908 0, /* adr_type */
2909 0,
2910 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2911 0,
2912 0,
2913 0},
2914
2915 /* Higher 16 bit offset into GOT entry for a symbol */
2916 {"tlsdesc_off_g1", 0,
2917 0, /* adr_type */
2918 0,
2919 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2920 0,
2921 0,
2922 0},
2923
2924 /* Get to the page containing GOT TLS entry for a symbol */
2925 {"gottprel", 0,
2926 0, /* adr_type */
2927 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2928 0,
2929 0,
2930 0,
2931 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2932
2933 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2934 {"gottprel_lo12", 0,
2935 0, /* adr_type */
2936 0,
2937 0,
2938 0,
2939 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2940 0},
2941
2942 /* Get tp offset for a symbol. */
2943 {"tprel", 0,
2944 0, /* adr_type */
2945 0,
2946 0,
2947 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2948 0,
2949 0},
2950
2951 /* Get tp offset for a symbol. */
2952 {"tprel_lo12", 0,
2953 0, /* adr_type */
2954 0,
2955 0,
2956 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2957 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2958 0},
2959
2960 /* Get tp offset for a symbol. */
2961 {"tprel_hi12", 0,
2962 0, /* adr_type */
2963 0,
2964 0,
2965 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2966 0,
2967 0},
2968
2969 /* Get tp offset for a symbol. */
2970 {"tprel_lo12_nc", 0,
2971 0, /* adr_type */
2972 0,
2973 0,
2974 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2975 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2976 0},
2977
2978 /* Most significant bits 32-47 of address/value: MOVZ. */
2979 {"tprel_g2", 0,
2980 0, /* adr_type */
2981 0,
2982 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2983 0,
2984 0,
2985 0},
2986
2987 /* Most significant bits 16-31 of address/value: MOVZ. */
2988 {"tprel_g1", 0,
2989 0, /* adr_type */
2990 0,
2991 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2992 0,
2993 0,
2994 0},
2995
2996 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2997 {"tprel_g1_nc", 0,
2998 0, /* adr_type */
2999 0,
3000 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3001 0,
3002 0,
3003 0},
3004
3005 /* Most significant bits 0-15 of address/value: MOVZ. */
3006 {"tprel_g0", 0,
3007 0, /* adr_type */
3008 0,
3009 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3010 0,
3011 0,
3012 0},
3013
3014 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3015 {"tprel_g0_nc", 0,
3016 0, /* adr_type */
3017 0,
3018 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3019 0,
3020 0,
3021 0},
3022
3023 /* 15bit offset from got entry to base address of GOT table. */
3024 {"gotpage_lo15", 0,
3025 0,
3026 0,
3027 0,
3028 0,
3029 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3030 0},
3031
3032 /* 14bit offset from got entry to base address of GOT table. */
3033 {"gotpage_lo14", 0,
3034 0,
3035 0,
3036 0,
3037 0,
3038 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3039 0},
3040 };
3041
3042 /* Given the address of a pointer pointing to the textual name of a
3043 relocation as may appear in assembler source, attempt to find its
3044 details in reloc_table. The pointer will be updated to the character
3045 after the trailing colon. On failure, NULL will be returned;
3046 otherwise return the reloc_table_entry. */
3047
3048 static struct reloc_table_entry *
3049 find_reloc_table_entry (char **str)
3050 {
3051 unsigned int i;
3052 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3053 {
3054 int length = strlen (reloc_table[i].name);
3055
3056 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3057 && (*str)[length] == ':')
3058 {
3059 *str += (length + 1);
3060 return &reloc_table[i];
3061 }
3062 }
3063
3064 return NULL;
3065 }
3066
/* Classify relocation TYPE for force-relocation purposes.
   Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK.  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    /* GOT-, TLS- and page-relative relocations whose final value is
       only known at link time.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      return -1;
    }
}
3164
3165 int
3166 aarch64_force_relocation (struct fix *fixp)
3167 {
3168 int res = aarch64_force_reloc (fixp->fx_r_type);
3169
3170 if (res == -1)
3171 return generic_force_reloc (fixp);
3172 return res;
3173 }
3174
/* Mode argument to parse_shift and parser_shifter_operand.  Each value
   selects which shift/extend operators, and which amount forms, are
   acceptable in the current parsing context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3189
/* Parse a <shift> operator on an AArch64 data processing instruction.
   On success, fill in OPERAND's shifter kind/amount fields and advance
   *STR past the shift.  MODE restricts which operators are accepted.
   Return TRUE on success; otherwise return FALSE.  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan the leading alphabetic token: the shift operator name.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the name up in the table of known shift operators.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only valid in the LSL|MSL mode.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  /* MUL is only valid in the MUL and MUL VL modes.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Validate the operator against what MODE permits.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  A closing ']' in register-offset mode or a
     completed MUL VL means no numeric amount follows.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      (void) aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT,
				     NORMAL_RESOLUTION);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* Only extend operators may omit the amount, and only when no
	 '#' prefix was written.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3363
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   The parsed expression is left in inst.reloc.exp for a later fixup;
   validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the two immediate-accepting modes can succeed here.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT, NORMAL_RESOLUTION))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Not accept any shifter for logical immediate values.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }
  /* NOTE(review): in the logical-immediate branch, if the comma was
     consumed but parse_shift then failed, we still fall through and
     return TRUE with a syntax error already recorded -- presumably the
     caller surfaces that recorded error; confirm before changing.  */

  *str = p;
  return true;
}
3405
3406 /* Parse a <shifter_operand> for a data processing instruction:
3407
3408 <Rm>
3409 <Rm>, <shift>
3410 #<immediate>
3411 #<immediate>, LSL #imm
3412
3413 where <shift> is handled by parse_shift above, and the last two
3414 cases are handled by the function above.
3415
3416 Validation of immediate operands is deferred to md_apply_fix.
3417
3418 Return TRUE on success; otherwise return FALSE. */
3419
static bool
parse_shifter_operand (char **str, aarch64_opnd_info *operand,
		       enum parse_shift_mode mode)
{
  const reg_entry *reg;
  aarch64_opnd_qualifier_t qualifier;
  enum aarch64_operand_class opd_class
    = aarch64_get_operand_class (operand->type);

  reg = aarch64_reg_parse_32_64 (str, &qualifier);
  if (reg)
    {
      /* A register was parsed; it is only valid if this operand is not
	 restricted to immediates.  */
      if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
	{
	  set_syntax_error (_("unexpected register in the immediate operand"));
	  return false;
	}

      if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
	{
	  set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
	  return false;
	}

      operand->reg.regno = reg->number;
      operand->qualifier = qualifier;

      /* Accept optional shift operation on register.  */
      if (! skip_past_comma (str))
	return true;

      if (! parse_shift (str, operand, mode))
	return false;

      return true;
    }
  else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
    {
      /* A register was required (modified-register operand class) but
	 none was found.  */
      set_syntax_error
	(_("integer register expected in the extended/shifted operand "
	   "register"));
      return false;
    }

  /* We have a shifted immediate variable.  */
  return parse_shifter_operand_imm (str, operand, mode);
}
3467
3468 /* Return TRUE on success; return FALSE otherwise. */
3469
static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip over the '#:' or ':' introducing the modifier.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* Only modifiers that have an ADD-instruction variant make sense
	 on a shifter operand.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT,
				    aarch64_force_reloc (entry->add_type) == 1))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3530
3531 /* Parse all forms of an address expression. Information is written
3532 to *OPERAND and/or inst.reloc.
3533
3534 The A64 instruction set has the following addressing modes:
3535
3536 Offset
3537 [base] // in SIMD ld/st structure
3538 [base{,#0}] // in ld/st exclusive
3539 [base{,#imm}]
3540 [base,Xm{,LSL #imm}]
3541 [base,Xm,SXTX {#imm}]
3542 [base,Wm,(S|U)XTW {#imm}]
3543 Pre-indexed
3544 [base]! // in ldraa/ldrab exclusive
3545 [base,#imm]!
3546 Post-indexed
3547 [base],#imm
3548 [base],Xm // in SIMD ld/st structure
3549 PC-relative (literal)
3550 label
3551 SVE:
3552 [base,#imm,MUL VL]
3553 [base,Zm.D{,LSL #imm}]
3554 [base,Zm.S,(S|U)XTW {#imm}]
3555 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3556 [Zn.S,#imm]
3557 [Zn.D,#imm]
3558 [Zn.S{, Xm}]
3559 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3560 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3561 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3562
3563 (As a convenience, the notation "=immediate" is permitted in conjunction
3564 with the pc-relative literal load instructions to automatically place an
3565 immediate value or symbolic address in a nearby literal pool and generate
3566 a hidden label which references it.)
3567
3568 Upon a successful parsing, the address structure in *OPERAND will be
3569 filled in the following way:
3570
3571 .base_regno = <base>
3572 .offset.is_reg // 1 if the offset is a register
3573 .offset.imm = <imm>
3574 .offset.regno = <Rm>
3575
3576 For different addressing modes defined in the A64 ISA:
3577
3578 Offset
3579 .pcrel=0; .preind=1; .postind=0; .writeback=0
3580 Pre-indexed
3581 .pcrel=0; .preind=1; .postind=0; .writeback=1
3582 Post-indexed
3583 .pcrel=0; .preind=0; .postind=1; .writeback=1
3584 PC-relative (literal)
3585 .pcrel=1; .preind=1; .postind=0; .writeback=0
3586
3587 The shift/extension information, if any, will be stored in .shifter.
3588 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3589 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3590 corresponding register.
3591
3592 BASE_TYPE says which types of base register should be accepted and
3593 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3594 is the type of shifter that is allowed for immediate offsets,
3595 or SHIFTED_NONE if none.
3596
3597 In all other respects, it is the caller's responsibility to check
3598 for addressing modes not supported by the instruction, and to set
3599 inst.reloc.type. */
3600
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  /* Start with no qualifiers; they are filled in as base/offset
     registers are recognized.  */
  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* Pick the reloc variant appropriate for the instruction:
	     ADR uses the adr variant, everything else the load-literal
	     variant.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					aarch64_force_reloc (entry->add_type) == 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				       NORMAL_RESOLUTION))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* The offset must be the same size as the base, except for
		 the SVE2 "vector plus scalar" form ([Zn.S, Xm]).  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					    aarch64_force_reloc (entry->add_type) == 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					    NORMAL_RESOLUTION))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					 NORMAL_RESOLUTION))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!  */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!  */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* Treat [Rn] as [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
3904
3905 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3906 on success. */
3907 static bool
3908 parse_address (char **str, aarch64_opnd_info *operand)
3909 {
3910 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3911 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3912 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3913 }
3914
3915 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3916 The arguments have the same meaning as for parse_address_main.
3917 Return TRUE on success. */
3918 static bool
3919 parse_sve_address (char **str, aarch64_opnd_info *operand,
3920 aarch64_opnd_qualifier_t *base_qualifier,
3921 aarch64_opnd_qualifier_t *offset_qualifier)
3922 {
3923 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3924 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3925 SHIFTED_MUL_VL);
3926 }
3927
3928 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3929 Return TRUE on success; otherwise return FALSE. */
static bool
parse_half (char **str, int *internal_fixup_p)
{
  char *p = *str;

  /* The '#' immediate prefix is optional.  */
  skip_past_char (&p, '#');

  gas_assert (internal_fixup_p);
  *internal_fixup_p = 0;

  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;

      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* The modifier must have a MOVW-class variant.  */
      if (entry->movw_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      inst.reloc.type = entry->movw_type;
    }
  else
    /* No relocation modifier: flag the operand for an internal fixup.  */
    *internal_fixup_p = 1;

  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				aarch64_force_reloc (inst.reloc.type) == 1))
    return false;

  *str = p;
  return true;
}
3972
3973 /* Parse an operand for an ADRP instruction:
3974 ADRP <Xd>, <label>
3975 Return TRUE on success; otherwise return FALSE. */
3976
static bool
parse_adrp (char **str)
{
  char *p;

  p = *str;
  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* The modifier must have an ADRP variant.  */
      if (entry->adrp_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      inst.reloc.type = entry->adrp_type;
    }
  else
    /* Plain ADRP to a label: page-relative hi21 relocation.  */
    inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;

  /* ADRP is always PC-relative.  */
  inst.reloc.pc_rel = 1;
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				aarch64_force_reloc (inst.reloc.type) == 1))
    return false;
  *str = p;
  return true;
}
4014
4015 /* Miscellaneous. */
4016
4017 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4018 of SIZE tokens in which index I gives the token for field value I,
4019 or is null if field value I is invalid. REG_TYPE says which register
4020 names should be treated as registers rather than as symbolic immediates.
4021
4022 Return true on success, moving *STR past the operand and storing the
4023 field value in *VAL. */
4024
4025 static int
4026 parse_enum_string (char **str, int64_t *val, const char *const *array,
4027 size_t size, aarch64_reg_type reg_type)
4028 {
4029 expressionS exp;
4030 char *p, *q;
4031 size_t i;
4032
4033 /* Match C-like tokens. */
4034 p = q = *str;
4035 while (ISALNUM (*q))
4036 q++;
4037
4038 for (i = 0; i < size; ++i)
4039 if (array[i]
4040 && strncasecmp (array[i], p, q - p) == 0
4041 && array[i][q - p] == 0)
4042 {
4043 *val = i;
4044 *str = q;
4045 return true;
4046 }
4047
4048 if (!parse_immediate_expression (&p, &exp, reg_type))
4049 return false;
4050
4051 if (exp.X_op == O_constant
4052 && (uint64_t) exp.X_add_number < size)
4053 {
4054 *val = exp.X_add_number;
4055 *str = p;
4056 return true;
4057 }
4058
4059 /* Use the default error for this operand. */
4060 return false;
4061 }
4062
4063 /* Parse an option for a preload instruction. Returns the encoding for the
4064 option, or PARSE_FAIL. */
4065
4066 static int
4067 parse_pldop (char **str)
4068 {
4069 char *p, *q;
4070 const struct aarch64_name_value_pair *o;
4071
4072 p = q = *str;
4073 while (ISALNUM (*q))
4074 q++;
4075
4076 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4077 if (!o)
4078 return PARSE_FAIL;
4079
4080 *str = q;
4081 return o->value;
4082 }
4083
4084 /* Parse an option for a barrier instruction. Returns the encoding for the
4085 option, or PARSE_FAIL. */
4086
4087 static int
4088 parse_barrier (char **str)
4089 {
4090 char *p, *q;
4091 const struct aarch64_name_value_pair *o;
4092
4093 p = q = *str;
4094 while (ISALPHA (*q))
4095 q++;
4096
4097 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4098 if (!o)
4099 return PARSE_FAIL;
4100
4101 *str = q;
4102 return o->value;
4103 }
4104
4105 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4106 return 0 if successful. Otherwise return PARSE_FAIL. */
4107
static int
parse_barrier_psb (char **str,
		   const struct aarch64_name_value_pair ** hint_opt)
{
  char *p, *q;
  const struct aarch64_name_value_pair *o;

  /* The option name is a run of alphabetic characters.  */
  p = q = *str;
  while (ISALPHA (*q))
    q++;

  o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
  if (!o)
    {
      set_fatal_syntax_error
	( _("unknown or missing option to PSB/TSB"));
      return PARSE_FAIL;
    }

  /* 0x11 is the hint encoding of CSYNC.  */
  if (o->value != 0x11)
    {
      /* PSB only accepts option name 'CSYNC'.  */
      set_syntax_error
	(_("the specified option is not accepted for PSB/TSB"));
      return PARSE_FAIL;
    }

  *str = q;
  *hint_opt = o;
  return 0;
}
4139
4140 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4141 return 0 if successful. Otherwise return PARSE_FAIL. */
4142
4143 static int
4144 parse_bti_operand (char **str,
4145 const struct aarch64_name_value_pair ** hint_opt)
4146 {
4147 char *p, *q;
4148 const struct aarch64_name_value_pair *o;
4149
4150 p = q = *str;
4151 while (ISALPHA (*q))
4152 q++;
4153
4154 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4155 if (!o)
4156 {
4157 set_fatal_syntax_error
4158 ( _("unknown option to BTI"));
4159 return PARSE_FAIL;
4160 }
4161
4162 switch (o->value)
4163 {
4164 /* Valid BTI operands. */
4165 case HINT_OPD_C:
4166 case HINT_OPD_J:
4167 case HINT_OPD_JC:
4168 break;
4169
4170 default:
4171 set_syntax_error
4172 (_("unknown option to BTI"));
4173 return PARSE_FAIL;
4174 }
4175
4176 *str = q;
4177 *hint_opt = o;
4178 return 0;
4179 }
4180
4181 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4182 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4183 on failure. Format:
4184
4185 REG_TYPE.QUALIFIER
4186
4187 Side effect: Update STR with current parse position of success.
4188 */
4189
4190 static const reg_entry *
4191 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4192 aarch64_opnd_qualifier_t *qualifier)
4193 {
4194 char *q;
4195
4196 reg_entry *reg = parse_reg (str);
4197 if (reg != NULL && reg->type == reg_type)
4198 {
4199 if (!skip_past_char (str, '.'))
4200 {
4201 set_syntax_error (_("missing ZA tile element size separator"));
4202 return NULL;
4203 }
4204
4205 q = *str;
4206 switch (TOLOWER (*q))
4207 {
4208 case 'b':
4209 *qualifier = AARCH64_OPND_QLF_S_B;
4210 break;
4211 case 'h':
4212 *qualifier = AARCH64_OPND_QLF_S_H;
4213 break;
4214 case 's':
4215 *qualifier = AARCH64_OPND_QLF_S_S;
4216 break;
4217 case 'd':
4218 *qualifier = AARCH64_OPND_QLF_S_D;
4219 break;
4220 case 'q':
4221 *qualifier = AARCH64_OPND_QLF_S_Q;
4222 break;
4223 default:
4224 return NULL;
4225 }
4226 q++;
4227
4228 *str = q;
4229 return reg;
4230 }
4231
4232 return NULL;
4233 }
4234
4235 /* Parse SME ZA tile encoded in <ZAda> assembler symbol.
4236 Function return tile QUALIFIER on success.
4237
4238 Tiles are in example format: za[0-9]\.[bhsd]
4239
4240 Function returns <ZAda> register number or PARSE_FAIL.
4241 */
4242 static int
4243 parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
4244 {
4245 int regno;
4246 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZA, qualifier);
4247
4248 if (reg == NULL)
4249 return PARSE_FAIL;
4250 regno = reg->number;
4251
4252 switch (*qualifier)
4253 {
4254 case AARCH64_OPND_QLF_S_B:
4255 if (regno != 0x00)
4256 {
4257 set_syntax_error (_("invalid ZA tile register number, expected za0"));
4258 return PARSE_FAIL;
4259 }
4260 break;
4261 case AARCH64_OPND_QLF_S_H:
4262 if (regno > 0x01)
4263 {
4264 set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
4265 return PARSE_FAIL;
4266 }
4267 break;
4268 case AARCH64_OPND_QLF_S_S:
4269 if (regno > 0x03)
4270 {
4271 /* For the 32-bit variant: is the name of the ZA tile ZA0-ZA3. */
4272 set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
4273 return PARSE_FAIL;
4274 }
4275 break;
4276 case AARCH64_OPND_QLF_S_D:
4277 if (regno > 0x07)
4278 {
4279 /* For the 64-bit variant: is the name of the ZA tile ZA0-ZA7 */
4280 set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
4281 return PARSE_FAIL;
4282 }
4283 break;
4284 default:
4285 set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
4286 return PARSE_FAIL;
4287 }
4288
4289 return regno;
4290 }
4291
4292 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4293
4294 #<imm>
4295 <imm>
4296
4297 Function return TRUE if immediate was found, or FALSE.
4298 */
4299 static bool
4300 parse_sme_immediate (char **str, int64_t *imm)
4301 {
4302 int64_t val;
4303 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4304 return false;
4305
4306 *imm = val;
4307 return true;
4308 }
4309
4310 /* Parse index with vector select register and immediate:
4311
4312 [<Wv>, <imm>]
4313 [<Wv>, #<imm>]
4314 where <Wv> is in W12-W15 range and # is optional for immediate.
4315
   The '#' prefix on the immediate is optional, but the comma and the
   immediate itself are mandatory.
4318
4319 On success function returns TRUE and populated VECTOR_SELECT_REGISTER and
4320 IMM output.
4321 */
static bool
parse_sme_za_hv_tiles_operand_index (char **str,
				     int *vector_select_register,
				     int64_t *imm)
{
  const reg_entry *reg;

  if (!skip_past_char (str, '['))
    {
      set_syntax_error (_("expected '['"));
      return false;
    }

  /* Vector select register W12-W15 encoded in the 2-bit Rv field.  */
  reg = parse_reg (str);
  if (reg == NULL || reg->type != REG_TYPE_R_32
      || reg->number < 12 || reg->number > 15)
    {
      set_syntax_error (_("expected vector select register W12-W15"));
      return false;
    }
  *vector_select_register = reg->number;

  /* The comma and the immediate that follows are both mandatory (only
     the '#' prefix of the immediate is optional).  */
  if (!skip_past_char (str, ','))
    {
      set_syntax_error (_("expected ','"));
      return false;
    }

  if (!parse_sme_immediate (str, imm))
    {
      set_syntax_error (_("index offset immediate expected"));
      return false;
    }

  if (!skip_past_char (str, ']'))
    {
      set_syntax_error (_("expected ']'"));
      return false;
    }

  return true;
}
4365
4366 /* Parse SME ZA horizontal or vertical vector access to tiles.
4367 Function extracts from STR to SLICE_INDICATOR <HV> horizontal (0) or
4368 vertical (1) ZA tile vector orientation. VECTOR_SELECT_REGISTER
4369 contains <Wv> select register and corresponding optional IMMEDIATE.
4370 In addition QUALIFIER is extracted.
4371
4372 Field format examples:
4373
4374 ZA0<HV>.B[<Wv>, #<imm>]
4375 <ZAn><HV>.H[<Wv>, #<imm>]
4376 <ZAn><HV>.S[<Wv>, #<imm>]
4377 <ZAn><HV>.D[<Wv>, #<imm>]
4378 <ZAn><HV>.Q[<Wv>, #<imm>]
4379
4380 Function returns <ZAda> register number or PARSE_FAIL.
4381 */
static int
parse_sme_za_hv_tiles_operand (char **str,
			       enum sme_hv_slice *slice_indicator,
			       int *vector_select_register,
			       int *imm,
			       aarch64_opnd_qualifier_t *qualifier)
{
  char *qh, *qv;
  int regno;
  int regno_limit;
  int64_t imm_limit;
  int64_t imm_value;
  const reg_entry *reg;

  /* Try the same text first as a horizontal (ZAnH) and then as a
     vertical (ZAnV) tile name; whichever parse succeeds determines the
     slice orientation and how far *str advances.  */
  qh = qv = *str;
  if ((reg = parse_reg_with_qual (&qh, REG_TYPE_ZAH, qualifier)) != NULL)
    {
      *slice_indicator = HV_horizontal;
      *str = qh;
    }
  else if ((reg = parse_reg_with_qual (&qv, REG_TYPE_ZAV, qualifier)) != NULL)
    {
      *slice_indicator = HV_vertical;
      *str = qv;
    }
  else
    return PARSE_FAIL;
  regno = reg->number;

  /* The element size fixes both the highest valid tile number
     (regno_limit) and the highest valid index offset (imm_limit).  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      regno_limit = 0;
      imm_limit = 15;
      break;
    case AARCH64_OPND_QLF_S_H:
      regno_limit = 1;
      imm_limit = 7;
      break;
    case AARCH64_OPND_QLF_S_S:
      regno_limit = 3;
      imm_limit = 3;
      break;
    case AARCH64_OPND_QLF_S_D:
      regno_limit = 7;
      imm_limit = 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      regno_limit = 15;
      imm_limit = 0;
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
      return PARSE_FAIL;
    }

  /* Check if destination register ZA tile vector is in range for given
     instruction variant.  */
  if (regno < 0 || regno > regno_limit)
    {
      set_syntax_error (_("ZA tile vector out of range"));
      return PARSE_FAIL;
    }

  /* Parse the [<Wv>, <imm>] index suffix.  */
  if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
					    &imm_value))
    return PARSE_FAIL;

  /* Check if optional index offset is in the range for instruction
     variant.  */
  if (imm_value < 0 || imm_value > imm_limit)
    {
      set_syntax_error (_("index offset out of range"));
      return PARSE_FAIL;
    }

  *imm = imm_value;

  return regno;
}
4462
4463
4464 static int
4465 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4466 enum sme_hv_slice *slice_indicator,
4467 int *vector_select_register,
4468 int *imm,
4469 aarch64_opnd_qualifier_t *qualifier)
4470 {
4471 int regno;
4472
4473 if (!skip_past_char (str, '{'))
4474 {
4475 set_syntax_error (_("expected '{'"));
4476 return PARSE_FAIL;
4477 }
4478
4479 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4480 vector_select_register, imm,
4481 qualifier);
4482
4483 if (regno == PARSE_FAIL)
4484 return PARSE_FAIL;
4485
4486 if (!skip_past_char (str, '}'))
4487 {
4488 set_syntax_error (_("expected '}'"));
4489 return PARSE_FAIL;
4490 }
4491
4492 return regno;
4493 }
4494
4495 /* Parse list of up to eight 64-bit element tile names separated by commas in
4496 SME's ZERO instruction:
4497
4498 ZERO { <mask> }
4499
4500 Function returns <mask>:
4501
4502 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4503 */
static int
parse_sme_zero_mask(char **str)
{
  char *q;
  int mask;
  aarch64_opnd_qualifier_t qualifier;

  /* Each mask bit selects one of the eight 64-bit tiles ZA0.D-ZA7.D;
     wider-element tile names set the corresponding group of bits.  */
  mask = 0x00;
  q = *str;
  do
    {
      const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA, &qualifier);
      if (reg)
	{
	  int regno = reg->number;
	  if (qualifier == AARCH64_OPND_QLF_S_B && regno == 0)
	    {
	      /* { ZA0.B } is assembled as all-ones immediate.  */
	      mask = 0xff;
	    }
	  else if (qualifier == AARCH64_OPND_QLF_S_H && regno < 2)
	    /* A .H tile covers every second .D tile (0x55 pattern).  */
	    mask |= 0x55 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_S && regno < 4)
	    /* A .S tile covers every fourth .D tile (0x11 pattern).  */
	    mask |= 0x11 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_D && regno < 8)
	    /* A .D tile maps to a single mask bit.  */
	    mask |= 0x01 << regno;
	  else
	    {
	      set_syntax_error (_("wrong ZA tile element format"));
	      return PARSE_FAIL;
	    }
	  continue;
	}
      else if (strncasecmp (q, "za", 2) == 0
	       && !ISALNUM (q[2]))
	{
	  /* { ZA } is assembled as all-ones immediate.  */
	  mask = 0xff;
	  q += 2;
	  continue;
	}
      else
	{
	  set_syntax_error (_("wrong ZA tile element format"));
	  return PARSE_FAIL;
	}
    }
  while (skip_past_char (&q, ','));

  *str = q;
  return mask;
}
4556
4557 /* Wraps in curly braces <mask> operand ZERO instruction:
4558
4559 ZERO { <mask> }
4560
4561 Function returns value of <mask> bit-field.
4562 */
4563 static int
4564 parse_sme_list_of_64bit_tiles (char **str)
4565 {
4566 int regno;
4567
4568 if (!skip_past_char (str, '{'))
4569 {
4570 set_syntax_error (_("expected '{'"));
4571 return PARSE_FAIL;
4572 }
4573
4574 /* Empty <mask> list is an all-zeros immediate. */
4575 if (!skip_past_char (str, '}'))
4576 {
4577 regno = parse_sme_zero_mask (str);
4578 if (regno == PARSE_FAIL)
4579 return PARSE_FAIL;
4580
4581 if (!skip_past_char (str, '}'))
4582 {
4583 set_syntax_error (_("expected '}'"));
4584 return PARSE_FAIL;
4585 }
4586 }
4587 else
4588 regno = 0x00;
4589
4590 return regno;
4591 }
4592
4593 /* Parse ZA array operand used in e.g. STR and LDR instruction.
4594 Operand format:
4595
4596 ZA[<Wv>, <imm>]
4597 ZA[<Wv>, #<imm>]
4598
4599 Function returns <Wv> or PARSE_FAIL.
4600 */
static int
parse_sme_za_array (char **str, int *imm)
{
  char *p, *q;
  int regno;
  int64_t imm_value;

  /* The operand must start with the two-letter (case-insensitive)
     array name "ZA".  */
  p = q = *str;
  while (ISALPHA (*q))
    q++;

  if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
    {
      set_syntax_error (_("expected ZA array"));
      return PARSE_FAIL;
    }

  /* Parse the [<Wv>, <imm>] index that follows the array name.  */
  if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
    return PARSE_FAIL;

  if (imm_value < 0 || imm_value > 15)
    {
      set_syntax_error (_("offset out of range"));
      return PARSE_FAIL;
    }

  *imm = imm_value;
  *str = q;
  return regno;
}
4631
4632 /* Parse streaming mode operand for SMSTART and SMSTOP.
4633
4634 {SM | ZA}
4635
   Function returns 's' if SM or 'z' if ZA is parsed. Otherwise PARSE_FAIL.
4637 */
4638 static int
4639 parse_sme_sm_za (char **str)
4640 {
4641 char *p, *q;
4642
4643 p = q = *str;
4644 while (ISALPHA (*q))
4645 q++;
4646
4647 if ((q - p != 2)
4648 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4649 {
4650 set_syntax_error (_("expected SM or ZA operand"));
4651 return PARSE_FAIL;
4652 }
4653
4654 *str = q;
4655 return TOLOWER (p[0]);
4656 }
4657
4658 /* Parse the name of the source scalable predicate register, the index base
4659 register W12-W15 and the element index. Function performs element index
4660 limit checks as well as qualifier type checks.
4661
4662 <Pn>.<T>[<Wv>, <imm>]
4663 <Pn>.<T>[<Wv>, #<imm>]
4664
4665 On success function sets <Wv> to INDEX_BASE_REG, <T> to QUALIFIER and
4666 <imm> to IMM.
4667 Function returns <Pn>, or PARSE_FAIL.
4668 */
4669 static int
4670 parse_sme_pred_reg_with_index(char **str,
4671 int *index_base_reg,
4672 int *imm,
4673 aarch64_opnd_qualifier_t *qualifier)
4674 {
4675 int regno;
4676 int64_t imm_limit;
4677 int64_t imm_value;
4678 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_PN, qualifier);
4679
4680 if (reg == NULL)
4681 return PARSE_FAIL;
4682 regno = reg->number;
4683
4684 switch (*qualifier)
4685 {
4686 case AARCH64_OPND_QLF_S_B:
4687 imm_limit = 15;
4688 break;
4689 case AARCH64_OPND_QLF_S_H:
4690 imm_limit = 7;
4691 break;
4692 case AARCH64_OPND_QLF_S_S:
4693 imm_limit = 3;
4694 break;
4695 case AARCH64_OPND_QLF_S_D:
4696 imm_limit = 1;
4697 break;
4698 default:
4699 set_syntax_error (_("wrong predicate register element size, allowed b, h, s and d"));
4700 return PARSE_FAIL;
4701 }
4702
4703 if (! parse_sme_za_hv_tiles_operand_index (str, index_base_reg, &imm_value))
4704 return PARSE_FAIL;
4705
4706 if (imm_value < 0 || imm_value > imm_limit)
4707 {
4708 set_syntax_error (_("element index out of range for given variant"));
4709 return PARSE_FAIL;
4710 }
4711
4712 *imm = imm_value;
4713
4714 return regno;
4715 }
4716
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-NULL, *FLAGS receives the register's flags (0 for an
   implementation defined register).  On success *STR is advanced past
   the parsed name.  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, stopping at the
     first character that cannot be part of a register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the five fields into the encoding:
	     op0 -> [16:14], op1 -> [13:11], Cn -> [10:7],
	     Cm -> [6:3], op2 -> [2:0].  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known register: diagnose lack of support on the selected
	 processor and deprecation, but still return the encoding.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4790
4791 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4792 for the option, or NULL. */
4793
4794 static const aarch64_sys_ins_reg *
4795 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4796 {
4797 char *p, *q;
4798 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4799 const aarch64_sys_ins_reg *o;
4800
4801 p = buf;
4802 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4803 if (p < buf + (sizeof (buf) - 1))
4804 *p++ = TOLOWER (*q);
4805 *p = '\0';
4806
4807 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4808 valid system register. This is enforced by construction of the hash
4809 table. */
4810 if (p - buf != q - *str)
4811 return NULL;
4812
4813 o = str_hash_find (sys_ins_regs, buf);
4814 if (!o)
4815 return NULL;
4816
4817 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4818 o->name, o->value, o->flags, 0))
4819 as_bad (_("selected processor does not support system register "
4820 "name '%s'"), buf);
4821 if (aarch64_sys_reg_deprecated_p (o->flags))
4822 as_warn (_("system register name '%s' is deprecated and may be "
4823 "removed in a future release"), buf);
4824
4825 *str = q;
4826 return o;
4827 }
4828 \f
/* Helper macros for operand parsing.  They operate on local variables of
   the calling function (`str', `val', `rtype', `reg', `qualifier',
   `info' and `imm_reg_type') and jump to the caller's local `failure'
   label when the expected entity cannot be parsed.  */

/* Consume the single character CHR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of type REGTYPE into `val' (with its type in
   `rtype'), or fail.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE and record its
   number and qualifier in `info', or fail.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into `val' with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into `val' and fail unless it lies in
   [MIN, MAX] inclusive.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse one of the strings in ARRAY into its index `val', or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR and fail when it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4880 \f
/* Encode the 12-bit imm field of an Add/sub immediate instruction: the
   value is placed at bit position 10.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  uint32_t encoded = imm << 10;
  return encoded;
}
4887
/* Encode the shift amount field of an Add/sub immediate instruction:
   the count is placed at bit position 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  uint32_t encoded = cnt << 22;
  return encoded;
}
4894
4895
/* Encode the imm field of an Adr instruction: the low two bits go to
   immlo and the remaining bits to immhi.  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;		/* [1:0] -> [30:29] */
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;	/* [20:2] -> [23:5] */
  return immlo | immhi;
}
4903
/* Encode the immediate field of a Move wide immediate instruction: the
   value is placed at bit position 5.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  uint32_t encoded = imm << 5;
  return encoded;
}
4910
/* Encode the 26-bit offset of an unconditional branch: keep only the
   low 26 bits, placed at bit position 0.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x03ffffff;
}
4917
/* Encode the 19-bit offset of a conditional branch and compare & branch:
   keep only the low 19 bits, placed at bit position 5.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4924
/* Encode the 19-bit offset of an ld literal: keep only the low 19 bits,
   placed at bit position 5.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4931
/* Encode the 14-bit offset of test & branch: keep only the low 14 bits,
   placed at bit position 5.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fff) << 5;
}
4938
/* Encode the 16-bit imm field of svc/hvc/smc: the value is placed at
   bit position 5.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  uint32_t encoded = imm << 5;
  return encoded;
}
4945
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling bit 30
   (the op bit distinguishing the two).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
4952
/* Reencode a MOVN/MOVZ-class opcode as MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode | movz_bit;
}
4958
/* Reencode a MOVN/MOVZ-class opcode as MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode & ~movz_bit;
}
4964
4965 /* Overall per-instruction processing. */
4966
4967 /* We need to be able to fix up arbitrary expressions in some statements.
4968 This is so that we can handle symbols that are an arbitrary distance from
4969 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4970 which returns part of an address in a form which will be valid for
4971 a data instruction. We do this by pushing the expression into a symbol
4972 in the expr_section, and creating a fix for that. */
4973
4974 static fixS *
4975 fix_new_aarch64 (fragS * frag,
4976 int where,
4977 short int size,
4978 expressionS * exp,
4979 int pc_rel,
4980 int reloc)
4981 {
4982 fixS *new_fix;
4983
4984 switch (exp->X_op)
4985 {
4986 case O_constant:
4987 case O_symbol:
4988 case O_add:
4989 case O_subtract:
4990 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4991 break;
4992
4993 default:
4994 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4995 pc_rel, reloc);
4996 break;
4997 }
4998 return new_fix;
4999 }
5000 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.
   Read by output_operand_error_record when deciding whether to suggest
   valid instruction variants.  */
static int verbose_error_p = 1;
5006
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The array is indexed
   by enum aarch64_operand_error_kind (see e.g. the DEBUG_TRACE calls in
   output_operand_error_report), so it must list one string per enum
   value, in enum order.  The AARCH64_OPDE_UNTIED_* entries were missing,
   which made every kind from there on print the wrong name (and the last
   ones read past the end of the array).  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
5022
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.

   Severity is simply the numeric order of the enum values; the asserts
   below document (and, in debug builds, verify) the expected ordering.
   NOTE(review): kinds not asserted here (e.g. the AARCH64_OPDE_UNTIED_*
   ones handled in output_operand_error_record) are assumed to order by
   their enum position -- confirm against enum aarch64_operand_error_kind.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
5043
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.

   The result lives in a static buffer, so the routine is not reentrant
   and a second call overwrites the previous result.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Copy at most the first 31 bytes and assume that the full name is
     included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5072
/* Reset *INSTRUCTION to a clean state, ready for a new assembly line to
   be parsed into it: zero every field and mark the relocation unused.  */
static void
reset_aarch64_instruction (aarch64_instruction *instruction)
{
  memset (instruction, '\0', sizeof (aarch64_instruction));
  instruction->reloc.type = BFD_RELOC_UNUSED;
}
5079
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One error record: the opcode template being matched, the detail of the
   mismatch found for it, and a link to the next record in the list.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;
  aarch64_operand_error detail;
  struct operand_error_record *next;
};

typedef struct operand_error_record operand_error_record;

/* Head and tail of a singly-linked list of operand_error_record.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled via init_operand_error_report and
   remove_operand_error_record and reused by add_operand_error_record.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5111
5112 /* Initialize the data structure that stores the operand mismatch
5113 information on assembling one line of the assembly code. */
5114 static void
5115 init_operand_error_report (void)
5116 {
5117 if (operand_error_report.head != NULL)
5118 {
5119 gas_assert (operand_error_report.tail != NULL);
5120 operand_error_report.tail->next = free_opnd_error_record_nodes;
5121 free_opnd_error_record_nodes = operand_error_report.head;
5122 operand_error_report.head = NULL;
5123 operand_error_report.tail = NULL;
5124 return;
5125 }
5126 gas_assert (operand_error_report.tail == NULL);
5127 }
5128
5129 /* Return TRUE if some operand error has been recorded during the
5130 parsing of the current assembly line using the opcode *OPCODE;
5131 otherwise return FALSE. */
5132 static inline bool
5133 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5134 {
5135 operand_error_record *record = operand_error_report.head;
5136 return record && record->opcode == opcode;
5137 }
5138
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Reuse a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Overwrite the existing (or freshly prepared) record's detail.  */
  record->detail = new_record->detail;
}
5190
5191 static inline void
5192 record_operand_error_info (const aarch64_opcode *opcode,
5193 aarch64_operand_error *error_info)
5194 {
5195 operand_error_record record;
5196 record.opcode = opcode;
5197 record.detail = *error_info;
5198 add_operand_error_record (&record);
5199 }
5200
5201 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5202 error message *ERROR, for operand IDX (count from 0). */
5203
5204 static void
5205 record_operand_error (const aarch64_opcode *opcode, int idx,
5206 enum aarch64_operand_error_kind kind,
5207 const char* error)
5208 {
5209 aarch64_operand_error info;
5210 memset(&info, 0, sizeof (info));
5211 info.index = idx;
5212 info.kind = kind;
5213 info.error = error;
5214 info.non_fatal = false;
5215 record_operand_error_info (opcode, &info);
5216 }
5217
5218 static void
5219 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5220 enum aarch64_operand_error_kind kind,
5221 const char* error, const int *extra_data)
5222 {
5223 aarch64_operand_error info;
5224 info.index = idx;
5225 info.kind = kind;
5226 info.error = error;
5227 info.data[0] = extra_data[0];
5228 info.data[1] = extra_data[1];
5229 info.data[2] = extra_data[2];
5230 info.non_fatal = false;
5231 record_operand_error_info (opcode, &info);
5232 }
5233
5234 static void
5235 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5236 const char* error, int lower_bound,
5237 int upper_bound)
5238 {
5239 int data[3] = {lower_bound, upper_bound, 0};
5240 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5241 error, data);
5242 }
5243
/* Remove the operand error record for *OPCODE.  Only the head of the
   report list can belong to OPCODE (see add_operand_error_record), so
   removal pops the head and returns it to the free list.  */
static void ATTRIBUTE_UNUSED
remove_operand_error_record (const aarch64_opcode *opcode)
{
  if (opcode_has_operand_error_p (opcode))
    {
      operand_error_record* record = operand_error_report.head;
      gas_assert (record != NULL && operand_error_report.tail != NULL);
      operand_error_report.head = record->next;
      /* Recycle the node.  */
      record->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = record;
      if (operand_error_report.head == NULL)
	{
	  gas_assert (operand_error_report.tail == record);
	  operand_error_report.tail = NULL;
	}
    }
}
5262
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
   "Best" is the sequence agreeing with the largest number of the
   instruction's operand qualifiers.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.
	 An empty sequence marks the end of the list.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers agree with this sequence.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5312
5313 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5314 corresponding operands in *INSTR. */
5315
5316 static inline void
5317 assign_qualifier_sequence (aarch64_inst *instr,
5318 const aarch64_opnd_qualifier_t *qualifiers)
5319 {
5320 int i = 0;
5321 int num_opnds = aarch64_num_of_operands (instr->opcode);
5322 gas_assert (num_opnds);
5323 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5324 instr->operands[i].qualifier = *qualifiers;
5325 }
5326
/* Print operands for the diagnosis purpose.  The textual form of each
   operand of OPCODE/OPNDS is appended to BUF, comma separated.
   NOTE(review): BUF is grown with strcat without a size check, so the
   caller must supply a buffer large enough for all operands (callers
   here use char buf[2048]) -- confirm when adding new callers.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cpu_variant);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);
    }
}
5360
/* Send to stderr a string as information, printf-style, prefixed with
   the current file name and line number (when known) and "Info: ".  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
5384
/* Output one operand error record.  RECORD describes the error found for
   one opcode template; STR is the assembly line being diagnosed.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* Operand code of the erroneous operand, or NIL when the index is
     unknown (negative).  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Non-fatal errors are issued as warnings, everything else as errors.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g.  given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the lower/upper bounds; equal bounds mean
	 exactly one value is accepted.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] holds the required alignment.  */
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5567
/* Process and output the error message about the operand mismatching.
   STR is the assembly line being diagnosed.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors (NON_FATAL_ONLY).  This distinction has to be
   made because the error buffer may already be filled with fatal errors
   we don't want to print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5665 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  unsigned int i;

  /* Emit the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (i * 8)) & 0xff;
}
5676
/* Read an AARCH64 instruction from BUF - always little-endian.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Assemble the four bytes, most-significant first, so that the
     lowest-addressed byte ends up in the least-significant position.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
5686
5687 static void
5688 output_inst (struct aarch64_inst *new_inst)
5689 {
5690 char *to = NULL;
5691
5692 to = frag_more (INSN_SIZE);
5693
5694 frag_now->tc_frag_data.recorded = 1;
5695
5696 put_aarch64_insn (to, inst.base.value);
5697
5698 if (inst.reloc.type != BFD_RELOC_UNUSED)
5699 {
5700 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
5701 INSN_SIZE, &inst.reloc.exp,
5702 inst.reloc.pc_rel,
5703 inst.reloc.type);
5704 DEBUG_TRACE ("Prepared relocation fix up");
5705 /* Don't check the addend value against the instruction size,
5706 that's the job of our code in md_apply_fix(). */
5707 fixp->fx_no_overflow = 1;
5708 if (new_inst != NULL)
5709 fixp->tc_fix_data.inst = new_inst;
5710 if (aarch64_gas_internal_fixup_p ())
5711 {
5712 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
5713 fixp->tc_fix_data.opnd = inst.reloc.opnd;
5714 fixp->fx_addnumber = inst.reloc.flags;
5715 }
5716 }
5717
5718 dwarf2_emit_insn (INSN_SIZE);
5719 }
5720
/* Link together opcodes of the same name.  */

/* One node in a singly-linked list of opcode table entries; all
   entries that share a mnemonic are chained through NEXT.  */
struct templates
{
  const aarch64_opcode *opcode;
  struct templates *next;
};

typedef struct templates templates;
5730
5731 static templates *
5732 lookup_mnemonic (const char *start, int len)
5733 {
5734 templates *templ = NULL;
5735
5736 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5737 return templ;
5738 }
5739
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   On success *STR is advanced past the mnemonic (and any condition
   suffix) and the matching template chain is returned; inst.cond is
   set as a side effect.  On failure 0/NULL is returned and *STR is
   left where parsing stopped.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  Remember the first '.', which may introduce
     a condition suffix.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Reject an empty mnemonic or one that starts with '.'.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      /* Try the text after the '.' as a condition code.  */
      cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown suffix: leave *STR at the '.' so the caller can
	     point its diagnostic at the bad suffix.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* LEN <= 13 guarantees the copied mnemonic plus the 2-byte ".c"
	 suffix fits in condname[16]; lookup_mnemonic takes an explicit
	 length, so no NUL terminator is required.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5803
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.

   Returns AARCH64_OPND_QLF_NIL (after recording an error via
   first_error) when the vector arrangement is invalid.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the final qualifier is this
     base plus a width-derived offset (see below).  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predicate qualifiers (/z and /m) are handled directly.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  Only 4-, 8- and 16-byte total register sizes
	 are representable.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type. The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5878
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.

   The default value comes from the opcode table; which field of *OPERAND
   it lands in depends on the operand type.  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element (register lane) operands.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* Scaled pattern: an omitted multiplier defaults to MUL #1.  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate needs no relocation.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Option-table operands: the default indexes a table entry.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5977
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   On success, the implicit LSL amount for the 16-bit immediate
   (0, 16, 32 or 48, derived from the relocation's "G" group) is
   stored in operand 1's shifter.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not change bits outside its own 16-bit group, so the
     signed/overflow-checking relocation variants are rejected.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Map the relocation's group number to the LSL amount.  */
  switch (inst.reloc.type)
    {
    /* G0 relocations: bits [15:0], no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1 relocations: bits [31:16].  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2 relocations: bits [47:32]; only meaningful for 64-bit
       destinations.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* G3 relocations: bits [63:48]; 64-bit destinations only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6079
/* A primitive log calculator.  Return log2 of SIZE, which must be one
   of the supported element sizes 1, 2, 4, 8 or 16; any other value is
   a programming error.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* ls[SIZE - 1] holds log2 (SIZE); (unsigned char) -1 marks sizes
     that are not powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject 0 as well as anything above 16: SIZE == 0 would otherwise
     make the ls[size - 1] access below read ls[UINT_MAX], which is
     undefined behaviour.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6095
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The real type is selected by the pseudo reloc's family (row) and the
   transfer size implied by operand 1's qualifier (column).  Returns
   BFD_RELOC_AARCH64_NONE (after recording a fatal syntax error) when
   the qualifier's size is not representable in that family.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: LDST_LO12, TLSLD DTPREL, TLSLD DTPREL_NC, TLSLE TPREL,
     TLSLE TPREL_NC.  Columns: 8-, 16-, 32-, 64- and 128-bit accesses
     (indexed by log2 of the transfer size).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand carries no qualifier, deduce it from
     operand 0's qualifier via the opcode's qualifier list.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS families have no 128-bit variant (see the NONE entries in
     the table above).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6183
/* Check whether a register list REGINFO is valid.  The registers must be
   numbered in increasing order (modulo 32), in increments of one or two.

   If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
   increments of two.

   REGINFO packs the list as: bits [1:0] hold the register count minus
   one, and each successive 5-bit field above that holds one register
   number, first register in the lowest field.

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t nb_regs = (reginfo & 0x3) + 1;
  uint32_t stride = accept_alternate ? 2 : 1;
  uint32_t expected, i;

  reginfo >>= 2;
  expected = reginfo & 0x1f;

  /* Walk the remaining registers, each of which must follow its
     predecessor by STRIDE (modulo 32).  */
  for (i = 1; i < nb_regs; i++)
    {
      reginfo >>= 5;
      expected = (expected + stride) & 0x1f;
      if ((reginfo & 0x1f) != expected)
	return false;
    }

  return true;
}
6214
6215 /* Generic instruction operand parser. This does no encoding and no
6216 semantic validation; it merely squirrels values away in the inst
6217 structure. Returns TRUE or FALSE depending on whether the
6218 specified grammar matched. */
6219
6220 static bool
6221 parse_operands (char *str, const aarch64_opcode *opcode)
6222 {
6223 int i;
6224 char *backtrack_pos = 0;
6225 const enum aarch64_opnd *operands = opcode->operands;
6226 aarch64_reg_type imm_reg_type;
6227
6228 clear_error ();
6229 skip_whitespace (str);
6230
6231 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
6232 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6233 else
6234 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6235
6236 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6237 {
6238 int64_t val;
6239 const reg_entry *reg;
6240 int comma_skipped_p = 0;
6241 aarch64_reg_type rtype;
6242 struct vector_type_el vectype;
6243 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6244 aarch64_opnd_info *info = &inst.base.operands[i];
6245 aarch64_reg_type reg_type;
6246
6247 DEBUG_TRACE ("parse operand %d", i);
6248
6249 /* Assign the operand code. */
6250 info->type = operands[i];
6251
6252 if (optional_operand_p (opcode, i))
6253 {
6254 /* Remember where we are in case we need to backtrack. */
6255 gas_assert (!backtrack_pos);
6256 backtrack_pos = str;
6257 }
6258
6259 /* Expect comma between operands; the backtrack mechanism will take
6260 care of cases of omitted optional operand. */
6261 if (i > 0 && ! skip_past_char (&str, ','))
6262 {
6263 set_syntax_error (_("comma expected between operands"));
6264 goto failure;
6265 }
6266 else
6267 comma_skipped_p = 1;
6268
6269 switch (operands[i])
6270 {
6271 case AARCH64_OPND_Rd:
6272 case AARCH64_OPND_Rn:
6273 case AARCH64_OPND_Rm:
6274 case AARCH64_OPND_Rt:
6275 case AARCH64_OPND_Rt2:
6276 case AARCH64_OPND_Rs:
6277 case AARCH64_OPND_Ra:
6278 case AARCH64_OPND_Rt_LS64:
6279 case AARCH64_OPND_Rt_SYS:
6280 case AARCH64_OPND_PAIRREG:
6281 case AARCH64_OPND_SVE_Rm:
6282 po_int_reg_or_fail (REG_TYPE_R_Z);
6283
6284 /* In LS64 load/store instructions Rt register number must be even
6285 and <=22. */
6286 if (operands[i] == AARCH64_OPND_Rt_LS64)
6287 {
6288 /* We've already checked if this is valid register.
6289 This will check if register number (Rt) is not undefined for LS64
6290 instructions:
6291 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6292 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6293 {
6294 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6295 goto failure;
6296 }
6297 }
6298 break;
6299
6300 case AARCH64_OPND_Rd_SP:
6301 case AARCH64_OPND_Rn_SP:
6302 case AARCH64_OPND_Rt_SP:
6303 case AARCH64_OPND_SVE_Rn_SP:
6304 case AARCH64_OPND_Rm_SP:
6305 po_int_reg_or_fail (REG_TYPE_R_SP);
6306 break;
6307
6308 case AARCH64_OPND_Rm_EXT:
6309 case AARCH64_OPND_Rm_SFT:
6310 po_misc_or_fail (parse_shifter_operand
6311 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6312 ? SHIFTED_ARITH_IMM
6313 : SHIFTED_LOGIC_IMM)));
6314 if (!info->shifter.operator_present)
6315 {
6316 /* Default to LSL if not present. Libopcodes prefers shifter
6317 kind to be explicit. */
6318 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6319 info->shifter.kind = AARCH64_MOD_LSL;
6320 /* For Rm_EXT, libopcodes will carry out further check on whether
6321 or not stack pointer is used in the instruction (Recall that
6322 "the extend operator is not optional unless at least one of
6323 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6324 }
6325 break;
6326
6327 case AARCH64_OPND_Fd:
6328 case AARCH64_OPND_Fn:
6329 case AARCH64_OPND_Fm:
6330 case AARCH64_OPND_Fa:
6331 case AARCH64_OPND_Ft:
6332 case AARCH64_OPND_Ft2:
6333 case AARCH64_OPND_Sd:
6334 case AARCH64_OPND_Sn:
6335 case AARCH64_OPND_Sm:
6336 case AARCH64_OPND_SVE_VZn:
6337 case AARCH64_OPND_SVE_Vd:
6338 case AARCH64_OPND_SVE_Vm:
6339 case AARCH64_OPND_SVE_Vn:
6340 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
6341 if (val == PARSE_FAIL)
6342 {
6343 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6344 goto failure;
6345 }
6346 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
6347
6348 info->reg.regno = val;
6349 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
6350 break;
6351
6352 case AARCH64_OPND_SVE_Pd:
6353 case AARCH64_OPND_SVE_Pg3:
6354 case AARCH64_OPND_SVE_Pg4_5:
6355 case AARCH64_OPND_SVE_Pg4_10:
6356 case AARCH64_OPND_SVE_Pg4_16:
6357 case AARCH64_OPND_SVE_Pm:
6358 case AARCH64_OPND_SVE_Pn:
6359 case AARCH64_OPND_SVE_Pt:
6360 case AARCH64_OPND_SME_Pm:
6361 reg_type = REG_TYPE_PN;
6362 goto vector_reg;
6363
6364 case AARCH64_OPND_SVE_Za_5:
6365 case AARCH64_OPND_SVE_Za_16:
6366 case AARCH64_OPND_SVE_Zd:
6367 case AARCH64_OPND_SVE_Zm_5:
6368 case AARCH64_OPND_SVE_Zm_16:
6369 case AARCH64_OPND_SVE_Zn:
6370 case AARCH64_OPND_SVE_Zt:
6371 reg_type = REG_TYPE_ZN;
6372 goto vector_reg;
6373
6374 case AARCH64_OPND_Va:
6375 case AARCH64_OPND_Vd:
6376 case AARCH64_OPND_Vn:
6377 case AARCH64_OPND_Vm:
6378 reg_type = REG_TYPE_VN;
6379 vector_reg:
6380 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6381 if (val == PARSE_FAIL)
6382 {
6383 first_error (_(get_reg_expected_msg (reg_type)));
6384 goto failure;
6385 }
6386 if (vectype.defined & NTA_HASINDEX)
6387 goto failure;
6388
6389 info->reg.regno = val;
6390 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6391 && vectype.type == NT_invtype)
6392 /* Unqualified Pn and Zn registers are allowed in certain
6393 contexts. Rely on F_STRICT qualifier checking to catch
6394 invalid uses. */
6395 info->qualifier = AARCH64_OPND_QLF_NIL;
6396 else
6397 {
6398 info->qualifier = vectype_to_qualifier (&vectype);
6399 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6400 goto failure;
6401 }
6402 break;
6403
6404 case AARCH64_OPND_VdD1:
6405 case AARCH64_OPND_VnD1:
6406 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
6407 if (val == PARSE_FAIL)
6408 {
6409 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6410 goto failure;
6411 }
6412 if (vectype.type != NT_d || vectype.index != 1)
6413 {
6414 set_fatal_syntax_error
6415 (_("the top half of a 128-bit FP/SIMD register is expected"));
6416 goto failure;
6417 }
6418 info->reg.regno = val;
6419 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6420 here; it is correct for the purpose of encoding/decoding since
6421 only the register number is explicitly encoded in the related
6422 instructions, although this appears a bit hacky. */
6423 info->qualifier = AARCH64_OPND_QLF_S_D;
6424 break;
6425
6426 case AARCH64_OPND_SVE_Zm3_INDEX:
6427 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6428 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6429 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6430 case AARCH64_OPND_SVE_Zm4_INDEX:
6431 case AARCH64_OPND_SVE_Zn_INDEX:
6432 reg_type = REG_TYPE_ZN;
6433 goto vector_reg_index;
6434
6435 case AARCH64_OPND_Ed:
6436 case AARCH64_OPND_En:
6437 case AARCH64_OPND_Em:
6438 case AARCH64_OPND_Em16:
6439 case AARCH64_OPND_SM3_IMM2:
6440 reg_type = REG_TYPE_VN;
6441 vector_reg_index:
6442 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6443 if (val == PARSE_FAIL)
6444 {
6445 first_error (_(get_reg_expected_msg (reg_type)));
6446 goto failure;
6447 }
6448 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6449 goto failure;
6450
6451 info->reglane.regno = val;
6452 info->reglane.index = vectype.index;
6453 info->qualifier = vectype_to_qualifier (&vectype);
6454 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6455 goto failure;
6456 break;
6457
6458 case AARCH64_OPND_SVE_ZnxN:
6459 case AARCH64_OPND_SVE_ZtxN:
6460 reg_type = REG_TYPE_ZN;
6461 goto vector_reg_list;
6462
6463 case AARCH64_OPND_LVn:
6464 case AARCH64_OPND_LVt:
6465 case AARCH64_OPND_LVt_AL:
6466 case AARCH64_OPND_LEt:
6467 reg_type = REG_TYPE_VN;
6468 vector_reg_list:
6469 if (reg_type == REG_TYPE_ZN
6470 && get_opcode_dependent_value (opcode) == 1
6471 && *str != '{')
6472 {
6473 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6474 if (val == PARSE_FAIL)
6475 {
6476 first_error (_(get_reg_expected_msg (reg_type)));
6477 goto failure;
6478 }
6479 info->reglist.first_regno = val;
6480 info->reglist.num_regs = 1;
6481 }
6482 else
6483 {
6484 val = parse_vector_reg_list (&str, reg_type, &vectype);
6485 if (val == PARSE_FAIL)
6486 goto failure;
6487
6488 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6489 {
6490 set_fatal_syntax_error (_("invalid register list"));
6491 goto failure;
6492 }
6493
6494 if (vectype.width != 0 && *str != ',')
6495 {
6496 set_fatal_syntax_error
6497 (_("expected element type rather than vector type"));
6498 goto failure;
6499 }
6500
6501 info->reglist.first_regno = (val >> 2) & 0x1f;
6502 info->reglist.num_regs = (val & 0x3) + 1;
6503 }
6504 if (operands[i] == AARCH64_OPND_LEt)
6505 {
6506 if (!(vectype.defined & NTA_HASINDEX))
6507 goto failure;
6508 info->reglist.has_index = 1;
6509 info->reglist.index = vectype.index;
6510 }
6511 else
6512 {
6513 if (vectype.defined & NTA_HASINDEX)
6514 goto failure;
6515 if (!(vectype.defined & NTA_HASTYPE))
6516 {
6517 if (reg_type == REG_TYPE_ZN)
6518 set_fatal_syntax_error (_("missing type suffix"));
6519 goto failure;
6520 }
6521 }
6522 info->qualifier = vectype_to_qualifier (&vectype);
6523 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6524 goto failure;
6525 break;
6526
6527 case AARCH64_OPND_CRn:
6528 case AARCH64_OPND_CRm:
6529 {
6530 char prefix = *(str++);
6531 if (prefix != 'c' && prefix != 'C')
6532 goto failure;
6533
6534 po_imm_nc_or_fail ();
6535 if (val > 15)
6536 {
6537 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6538 goto failure;
6539 }
6540 info->qualifier = AARCH64_OPND_QLF_CR;
6541 info->imm.value = val;
6542 break;
6543 }
6544
6545 case AARCH64_OPND_SHLL_IMM:
6546 case AARCH64_OPND_IMM_VLSR:
6547 po_imm_or_fail (1, 64);
6548 info->imm.value = val;
6549 break;
6550
6551 case AARCH64_OPND_CCMP_IMM:
6552 case AARCH64_OPND_SIMM5:
6553 case AARCH64_OPND_FBITS:
6554 case AARCH64_OPND_TME_UIMM16:
6555 case AARCH64_OPND_UIMM4:
6556 case AARCH64_OPND_UIMM4_ADDG:
6557 case AARCH64_OPND_UIMM10:
6558 case AARCH64_OPND_UIMM3_OP1:
6559 case AARCH64_OPND_UIMM3_OP2:
6560 case AARCH64_OPND_IMM_VLSL:
6561 case AARCH64_OPND_IMM:
6562 case AARCH64_OPND_IMM_2:
6563 case AARCH64_OPND_WIDTH:
6564 case AARCH64_OPND_SVE_INV_LIMM:
6565 case AARCH64_OPND_SVE_LIMM:
6566 case AARCH64_OPND_SVE_LIMM_MOV:
6567 case AARCH64_OPND_SVE_SHLIMM_PRED:
6568 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6569 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6570 case AARCH64_OPND_SVE_SHRIMM_PRED:
6571 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6572 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6573 case AARCH64_OPND_SVE_SIMM5:
6574 case AARCH64_OPND_SVE_SIMM5B:
6575 case AARCH64_OPND_SVE_SIMM6:
6576 case AARCH64_OPND_SVE_SIMM8:
6577 case AARCH64_OPND_SVE_UIMM3:
6578 case AARCH64_OPND_SVE_UIMM7:
6579 case AARCH64_OPND_SVE_UIMM8:
6580 case AARCH64_OPND_SVE_UIMM8_53:
6581 case AARCH64_OPND_IMM_ROT1:
6582 case AARCH64_OPND_IMM_ROT2:
6583 case AARCH64_OPND_IMM_ROT3:
6584 case AARCH64_OPND_SVE_IMM_ROT1:
6585 case AARCH64_OPND_SVE_IMM_ROT2:
6586 case AARCH64_OPND_SVE_IMM_ROT3:
6587 po_imm_nc_or_fail ();
6588 info->imm.value = val;
6589 break;
6590
6591 case AARCH64_OPND_SVE_AIMM:
6592 case AARCH64_OPND_SVE_ASIMM:
6593 po_imm_nc_or_fail ();
6594 info->imm.value = val;
6595 skip_whitespace (str);
6596 if (skip_past_comma (&str))
6597 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6598 else
6599 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6600 break;
6601
6602 case AARCH64_OPND_SVE_PATTERN:
6603 po_enum_or_fail (aarch64_sve_pattern_array);
6604 info->imm.value = val;
6605 break;
6606
6607 case AARCH64_OPND_SVE_PATTERN_SCALED:
6608 po_enum_or_fail (aarch64_sve_pattern_array);
6609 info->imm.value = val;
6610 if (skip_past_comma (&str)
6611 && !parse_shift (&str, info, SHIFTED_MUL))
6612 goto failure;
6613 if (!info->shifter.operator_present)
6614 {
6615 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6616 info->shifter.kind = AARCH64_MOD_MUL;
6617 info->shifter.amount = 1;
6618 }
6619 break;
6620
6621 case AARCH64_OPND_SVE_PRFOP:
6622 po_enum_or_fail (aarch64_sve_prfop_array);
6623 info->imm.value = val;
6624 break;
6625
6626 case AARCH64_OPND_UIMM7:
6627 po_imm_or_fail (0, 127);
6628 info->imm.value = val;
6629 break;
6630
6631 case AARCH64_OPND_IDX:
6632 case AARCH64_OPND_MASK:
6633 case AARCH64_OPND_BIT_NUM:
6634 case AARCH64_OPND_IMMR:
6635 case AARCH64_OPND_IMMS:
6636 po_imm_or_fail (0, 63);
6637 info->imm.value = val;
6638 break;
6639
6640 case AARCH64_OPND_IMM0:
6641 po_imm_nc_or_fail ();
6642 if (val != 0)
6643 {
6644 set_fatal_syntax_error (_("immediate zero expected"));
6645 goto failure;
6646 }
6647 info->imm.value = 0;
6648 break;
6649
6650 case AARCH64_OPND_FPIMM0:
6651 {
6652 int qfloat;
6653 bool res1 = false, res2 = false;
6654 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6655 it is probably not worth the effort to support it. */
6656 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6657 imm_reg_type))
6658 && (error_p ()
6659 || !(res2 = parse_constant_immediate (&str, &val,
6660 imm_reg_type))))
6661 goto failure;
6662 if ((res1 && qfloat == 0) || (res2 && val == 0))
6663 {
6664 info->imm.value = 0;
6665 info->imm.is_fp = 1;
6666 break;
6667 }
6668 set_fatal_syntax_error (_("immediate zero expected"));
6669 goto failure;
6670 }
6671
6672 case AARCH64_OPND_IMM_MOV:
6673 {
6674 char *saved = str;
6675 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6676 reg_name_p (str, REG_TYPE_VN))
6677 goto failure;
6678 str = saved;
6679 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6680 GE_OPT_PREFIX, REJECT_ABSENT,
6681 NORMAL_RESOLUTION));
6682 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6683 later. fix_mov_imm_insn will try to determine a machine
6684 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6685 message if the immediate cannot be moved by a single
6686 instruction. */
6687 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6688 inst.base.operands[i].skip = 1;
6689 }
6690 break;
6691
6692 case AARCH64_OPND_SIMD_IMM:
6693 case AARCH64_OPND_SIMD_IMM_SFT:
6694 if (! parse_big_immediate (&str, &val, imm_reg_type))
6695 goto failure;
6696 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6697 /* addr_off_p */ 0,
6698 /* need_libopcodes_p */ 1,
6699 /* skip_p */ 1);
6700 /* Parse shift.
6701 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6702 shift, we don't check it here; we leave the checking to
6703 the libopcodes (operand_general_constraint_met_p). By
6704 doing this, we achieve better diagnostics. */
6705 if (skip_past_comma (&str)
6706 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6707 goto failure;
6708 if (!info->shifter.operator_present
6709 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6710 {
6711 /* Default to LSL if not present. Libopcodes prefers shifter
6712 kind to be explicit. */
6713 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6714 info->shifter.kind = AARCH64_MOD_LSL;
6715 }
6716 break;
6717
6718 case AARCH64_OPND_FPIMM:
6719 case AARCH64_OPND_SIMD_FPIMM:
6720 case AARCH64_OPND_SVE_FPIMM8:
6721 {
6722 int qfloat;
6723 bool dp_p;
6724
6725 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6726 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6727 || !aarch64_imm_float_p (qfloat))
6728 {
6729 if (!error_p ())
6730 set_fatal_syntax_error (_("invalid floating-point"
6731 " constant"));
6732 goto failure;
6733 }
6734 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6735 inst.base.operands[i].imm.is_fp = 1;
6736 }
6737 break;
6738
6739 case AARCH64_OPND_SVE_I1_HALF_ONE:
6740 case AARCH64_OPND_SVE_I1_HALF_TWO:
6741 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6742 {
6743 int qfloat;
6744 bool dp_p;
6745
6746 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6747 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6748 {
6749 if (!error_p ())
6750 set_fatal_syntax_error (_("invalid floating-point"
6751 " constant"));
6752 goto failure;
6753 }
6754 inst.base.operands[i].imm.value = qfloat;
6755 inst.base.operands[i].imm.is_fp = 1;
6756 }
6757 break;
6758
6759 case AARCH64_OPND_LIMM:
6760 po_misc_or_fail (parse_shifter_operand (&str, info,
6761 SHIFTED_LOGIC_IMM));
6762 if (info->shifter.operator_present)
6763 {
6764 set_fatal_syntax_error
6765 (_("shift not allowed for bitmask immediate"));
6766 goto failure;
6767 }
6768 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6769 /* addr_off_p */ 0,
6770 /* need_libopcodes_p */ 1,
6771 /* skip_p */ 1);
6772 break;
6773
6774 case AARCH64_OPND_AIMM:
6775 if (opcode->op == OP_ADD)
6776 /* ADD may have relocation types. */
6777 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6778 SHIFTED_ARITH_IMM));
6779 else
6780 po_misc_or_fail (parse_shifter_operand (&str, info,
6781 SHIFTED_ARITH_IMM));
6782 switch (inst.reloc.type)
6783 {
6784 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6785 info->shifter.amount = 12;
6786 break;
6787 case BFD_RELOC_UNUSED:
6788 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6789 if (info->shifter.kind != AARCH64_MOD_NONE)
6790 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6791 inst.reloc.pc_rel = 0;
6792 break;
6793 default:
6794 break;
6795 }
6796 info->imm.value = 0;
6797 if (!info->shifter.operator_present)
6798 {
6799 /* Default to LSL if not present. Libopcodes prefers shifter
6800 kind to be explicit. */
6801 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6802 info->shifter.kind = AARCH64_MOD_LSL;
6803 }
6804 break;
6805
6806 case AARCH64_OPND_HALF:
6807 {
6808 /* #<imm16> or relocation. */
6809 int internal_fixup_p;
6810 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6811 if (internal_fixup_p)
6812 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6813 skip_whitespace (str);
6814 if (skip_past_comma (&str))
6815 {
6816 /* {, LSL #<shift>} */
6817 if (! aarch64_gas_internal_fixup_p ())
6818 {
6819 set_fatal_syntax_error (_("can't mix relocation modifier "
6820 "with explicit shift"));
6821 goto failure;
6822 }
6823 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6824 }
6825 else
6826 inst.base.operands[i].shifter.amount = 0;
6827 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6828 inst.base.operands[i].imm.value = 0;
6829 if (! process_movw_reloc_info ())
6830 goto failure;
6831 }
6832 break;
6833
6834 case AARCH64_OPND_EXCEPTION:
6835 case AARCH64_OPND_UNDEFINED:
6836 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6837 imm_reg_type));
6838 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6839 /* addr_off_p */ 0,
6840 /* need_libopcodes_p */ 0,
6841 /* skip_p */ 1);
6842 break;
6843
6844 case AARCH64_OPND_NZCV:
6845 {
6846 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6847 if (nzcv != NULL)
6848 {
6849 str += 4;
6850 info->imm.value = nzcv->value;
6851 break;
6852 }
6853 po_imm_or_fail (0, 15);
6854 info->imm.value = val;
6855 }
6856 break;
6857
6858 case AARCH64_OPND_COND:
6859 case AARCH64_OPND_COND1:
6860 {
6861 char *start = str;
6862 do
6863 str++;
6864 while (ISALPHA (*str));
6865 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6866 if (info->cond == NULL)
6867 {
6868 set_syntax_error (_("invalid condition"));
6869 goto failure;
6870 }
6871 else if (operands[i] == AARCH64_OPND_COND1
6872 && (info->cond->value & 0xe) == 0xe)
6873 {
6874 /* Do not allow AL or NV. */
6875 set_default_error ();
6876 goto failure;
6877 }
6878 }
6879 break;
6880
6881 case AARCH64_OPND_ADDR_ADRP:
6882 po_misc_or_fail (parse_adrp (&str));
6883 /* Clear the value as operand needs to be relocated. */
6884 info->imm.value = 0;
6885 break;
6886
6887 case AARCH64_OPND_ADDR_PCREL14:
6888 case AARCH64_OPND_ADDR_PCREL19:
6889 case AARCH64_OPND_ADDR_PCREL21:
6890 case AARCH64_OPND_ADDR_PCREL26:
6891 po_misc_or_fail (parse_address (&str, info));
6892 if (!info->addr.pcrel)
6893 {
6894 set_syntax_error (_("invalid pc-relative address"));
6895 goto failure;
6896 }
6897 if (inst.gen_lit_pool
6898 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6899 {
6900 /* Only permit "=value" in the literal load instructions.
6901 The literal will be generated by programmer_friendly_fixup. */
6902 set_syntax_error (_("invalid use of \"=immediate\""));
6903 goto failure;
6904 }
6905 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6906 {
6907 set_syntax_error (_("unrecognized relocation suffix"));
6908 goto failure;
6909 }
6910 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6911 {
6912 info->imm.value = inst.reloc.exp.X_add_number;
6913 inst.reloc.type = BFD_RELOC_UNUSED;
6914 }
6915 else
6916 {
6917 info->imm.value = 0;
6918 if (inst.reloc.type == BFD_RELOC_UNUSED)
6919 switch (opcode->iclass)
6920 {
6921 case compbranch:
6922 case condbranch:
6923 /* e.g. CBZ or B.COND */
6924 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6925 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6926 break;
6927 case testbranch:
6928 /* e.g. TBZ */
6929 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6930 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6931 break;
6932 case branch_imm:
6933 /* e.g. B or BL */
6934 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6935 inst.reloc.type =
6936 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6937 : BFD_RELOC_AARCH64_JUMP26;
6938 break;
6939 case loadlit:
6940 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6941 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6942 break;
6943 case pcreladdr:
6944 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6945 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6946 break;
6947 default:
6948 gas_assert (0);
6949 abort ();
6950 }
6951 inst.reloc.pc_rel = 1;
6952 }
6953 break;
6954
6955 case AARCH64_OPND_ADDR_SIMPLE:
6956 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6957 {
6958 /* [<Xn|SP>{, #<simm>}] */
6959 char *start = str;
6960 /* First use the normal address-parsing routines, to get
6961 the usual syntax errors. */
6962 po_misc_or_fail (parse_address (&str, info));
6963 if (info->addr.pcrel || info->addr.offset.is_reg
6964 || !info->addr.preind || info->addr.postind
6965 || info->addr.writeback)
6966 {
6967 set_syntax_error (_("invalid addressing mode"));
6968 goto failure;
6969 }
6970
6971 /* Then retry, matching the specific syntax of these addresses. */
6972 str = start;
6973 po_char_or_fail ('[');
6974 po_reg_or_fail (REG_TYPE_R64_SP);
6975 /* Accept optional ", #0". */
6976 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6977 && skip_past_char (&str, ','))
6978 {
6979 skip_past_char (&str, '#');
6980 if (! skip_past_char (&str, '0'))
6981 {
6982 set_fatal_syntax_error
6983 (_("the optional immediate offset can only be 0"));
6984 goto failure;
6985 }
6986 }
6987 po_char_or_fail (']');
6988 break;
6989 }
6990
6991 case AARCH64_OPND_ADDR_REGOFF:
6992 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6993 po_misc_or_fail (parse_address (&str, info));
6994 regoff_addr:
6995 if (info->addr.pcrel || !info->addr.offset.is_reg
6996 || !info->addr.preind || info->addr.postind
6997 || info->addr.writeback)
6998 {
6999 set_syntax_error (_("invalid addressing mode"));
7000 goto failure;
7001 }
7002 if (!info->shifter.operator_present)
7003 {
7004 /* Default to LSL if not present. Libopcodes prefers shifter
7005 kind to be explicit. */
7006 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7007 info->shifter.kind = AARCH64_MOD_LSL;
7008 }
7009 /* Qualifier to be deduced by libopcodes. */
7010 break;
7011
7012 case AARCH64_OPND_ADDR_SIMM7:
7013 po_misc_or_fail (parse_address (&str, info));
7014 if (info->addr.pcrel || info->addr.offset.is_reg
7015 || (!info->addr.preind && !info->addr.postind))
7016 {
7017 set_syntax_error (_("invalid addressing mode"));
7018 goto failure;
7019 }
7020 if (inst.reloc.type != BFD_RELOC_UNUSED)
7021 {
7022 set_syntax_error (_("relocation not allowed"));
7023 goto failure;
7024 }
7025 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7026 /* addr_off_p */ 1,
7027 /* need_libopcodes_p */ 1,
7028 /* skip_p */ 0);
7029 break;
7030
7031 case AARCH64_OPND_ADDR_SIMM9:
7032 case AARCH64_OPND_ADDR_SIMM9_2:
7033 case AARCH64_OPND_ADDR_SIMM11:
7034 case AARCH64_OPND_ADDR_SIMM13:
7035 po_misc_or_fail (parse_address (&str, info));
7036 if (info->addr.pcrel || info->addr.offset.is_reg
7037 || (!info->addr.preind && !info->addr.postind)
7038 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7039 && info->addr.writeback))
7040 {
7041 set_syntax_error (_("invalid addressing mode"));
7042 goto failure;
7043 }
7044 if (inst.reloc.type != BFD_RELOC_UNUSED)
7045 {
7046 set_syntax_error (_("relocation not allowed"));
7047 goto failure;
7048 }
7049 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7050 /* addr_off_p */ 1,
7051 /* need_libopcodes_p */ 1,
7052 /* skip_p */ 0);
7053 break;
7054
7055 case AARCH64_OPND_ADDR_SIMM10:
7056 case AARCH64_OPND_ADDR_OFFSET:
7057 po_misc_or_fail (parse_address (&str, info));
7058 if (info->addr.pcrel || info->addr.offset.is_reg
7059 || !info->addr.preind || info->addr.postind)
7060 {
7061 set_syntax_error (_("invalid addressing mode"));
7062 goto failure;
7063 }
7064 if (inst.reloc.type != BFD_RELOC_UNUSED)
7065 {
7066 set_syntax_error (_("relocation not allowed"));
7067 goto failure;
7068 }
7069 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7070 /* addr_off_p */ 1,
7071 /* need_libopcodes_p */ 1,
7072 /* skip_p */ 0);
7073 break;
7074
7075 case AARCH64_OPND_ADDR_UIMM12:
7076 po_misc_or_fail (parse_address (&str, info));
7077 if (info->addr.pcrel || info->addr.offset.is_reg
7078 || !info->addr.preind || info->addr.writeback)
7079 {
7080 set_syntax_error (_("invalid addressing mode"));
7081 goto failure;
7082 }
7083 if (inst.reloc.type == BFD_RELOC_UNUSED)
7084 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7085 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7086 || (inst.reloc.type
7087 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7088 || (inst.reloc.type
7089 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7090 || (inst.reloc.type
7091 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7092 || (inst.reloc.type
7093 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7094 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7095 /* Leave qualifier to be determined by libopcodes. */
7096 break;
7097
7098 case AARCH64_OPND_SIMD_ADDR_POST:
7099 /* [<Xn|SP>], <Xm|#<amount>> */
7100 po_misc_or_fail (parse_address (&str, info));
7101 if (!info->addr.postind || !info->addr.writeback)
7102 {
7103 set_syntax_error (_("invalid addressing mode"));
7104 goto failure;
7105 }
7106 if (!info->addr.offset.is_reg)
7107 {
7108 if (inst.reloc.exp.X_op == O_constant)
7109 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7110 else
7111 {
7112 set_fatal_syntax_error
7113 (_("writeback value must be an immediate constant"));
7114 goto failure;
7115 }
7116 }
7117 /* No qualifier. */
7118 break;
7119
7120 case AARCH64_OPND_SME_SM_ZA:
7121 /* { SM | ZA } */
7122 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7123 {
7124 set_syntax_error (_("unknown or missing PSTATE field name"));
7125 goto failure;
7126 }
7127 info->reg.regno = val;
7128 break;
7129
7130 case AARCH64_OPND_SME_PnT_Wm_imm:
7131 /* <Pn>.<T>[<Wm>, #<imm>] */
7132 {
7133 int index_base_reg;
7134 int imm;
7135 val = parse_sme_pred_reg_with_index (&str,
7136 &index_base_reg,
7137 &imm,
7138 &qualifier);
7139 if (val == PARSE_FAIL)
7140 goto failure;
7141
7142 info->za_tile_vector.regno = val;
7143 info->za_tile_vector.index.regno = index_base_reg;
7144 info->za_tile_vector.index.imm = imm;
7145 info->qualifier = qualifier;
7146 break;
7147 }
7148
7149 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7150 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7151 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7152 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7153 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7154 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7155 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7156 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7157 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7158 case AARCH64_OPND_SVE_ADDR_RI_U6:
7159 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7160 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7161 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7162 /* [X<n>{, #imm, MUL VL}]
7163 [X<n>{, #imm}]
7164 but recognizing SVE registers. */
7165 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7166 &offset_qualifier));
7167 if (base_qualifier != AARCH64_OPND_QLF_X)
7168 {
7169 set_syntax_error (_("invalid addressing mode"));
7170 goto failure;
7171 }
7172 sve_regimm:
7173 if (info->addr.pcrel || info->addr.offset.is_reg
7174 || !info->addr.preind || info->addr.writeback)
7175 {
7176 set_syntax_error (_("invalid addressing mode"));
7177 goto failure;
7178 }
7179 if (inst.reloc.type != BFD_RELOC_UNUSED
7180 || inst.reloc.exp.X_op != O_constant)
7181 {
7182 /* Make sure this has priority over
7183 "invalid addressing mode". */
7184 set_fatal_syntax_error (_("constant offset required"));
7185 goto failure;
7186 }
7187 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7188 break;
7189
7190 case AARCH64_OPND_SVE_ADDR_R:
7191 /* [<Xn|SP>{, <R><m>}]
7192 but recognizing SVE registers. */
7193 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7194 &offset_qualifier));
7195 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7196 {
7197 offset_qualifier = AARCH64_OPND_QLF_X;
7198 info->addr.offset.is_reg = 1;
7199 info->addr.offset.regno = 31;
7200 }
7201 else if (base_qualifier != AARCH64_OPND_QLF_X
7202 || offset_qualifier != AARCH64_OPND_QLF_X)
7203 {
7204 set_syntax_error (_("invalid addressing mode"));
7205 goto failure;
7206 }
7207 goto regoff_addr;
7208
7209 case AARCH64_OPND_SVE_ADDR_RR:
7210 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7211 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7212 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7213 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7214 case AARCH64_OPND_SVE_ADDR_RX:
7215 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7216 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7217 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7218 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7219 but recognizing SVE registers. */
7220 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7221 &offset_qualifier));
7222 if (base_qualifier != AARCH64_OPND_QLF_X
7223 || offset_qualifier != AARCH64_OPND_QLF_X)
7224 {
7225 set_syntax_error (_("invalid addressing mode"));
7226 goto failure;
7227 }
7228 goto regoff_addr;
7229
7230 case AARCH64_OPND_SVE_ADDR_RZ:
7231 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7232 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7233 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7234 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7235 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7236 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7237 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7238 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7239 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7240 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7241 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7242 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7243 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7244 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7245 &offset_qualifier));
7246 if (base_qualifier != AARCH64_OPND_QLF_X
7247 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7248 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7249 {
7250 set_syntax_error (_("invalid addressing mode"));
7251 goto failure;
7252 }
7253 info->qualifier = offset_qualifier;
7254 goto regoff_addr;
7255
7256 case AARCH64_OPND_SVE_ADDR_ZX:
7257 /* [Zn.<T>{, <Xm>}]. */
7258 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7259 &offset_qualifier));
7260 /* Things to check:
7261 base_qualifier either S_S or S_D
7262 offset_qualifier must be X
7263 */
7264 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7265 && base_qualifier != AARCH64_OPND_QLF_S_D)
7266 || offset_qualifier != AARCH64_OPND_QLF_X)
7267 {
7268 set_syntax_error (_("invalid addressing mode"));
7269 goto failure;
7270 }
7271 info->qualifier = base_qualifier;
7272 if (!info->addr.offset.is_reg || info->addr.pcrel
7273 || !info->addr.preind || info->addr.writeback
7274 || info->shifter.operator_present != 0)
7275 {
7276 set_syntax_error (_("invalid addressing mode"));
7277 goto failure;
7278 }
7279 info->shifter.kind = AARCH64_MOD_LSL;
7280 break;
7281
7282
7283 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7284 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7285 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7286 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7287 /* [Z<n>.<T>{, #imm}] */
7288 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7289 &offset_qualifier));
7290 if (base_qualifier != AARCH64_OPND_QLF_S_S
7291 && base_qualifier != AARCH64_OPND_QLF_S_D)
7292 {
7293 set_syntax_error (_("invalid addressing mode"));
7294 goto failure;
7295 }
7296 info->qualifier = base_qualifier;
7297 goto sve_regimm;
7298
7299 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7300 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7301 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7302 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7303 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7304
7305 We don't reject:
7306
7307 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7308
7309 here since we get better error messages by leaving it to
7310 the qualifier checking routines. */
7311 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7312 &offset_qualifier));
7313 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7314 && base_qualifier != AARCH64_OPND_QLF_S_D)
7315 || offset_qualifier != base_qualifier)
7316 {
7317 set_syntax_error (_("invalid addressing mode"));
7318 goto failure;
7319 }
7320 info->qualifier = base_qualifier;
7321 goto regoff_addr;
7322
7323 case AARCH64_OPND_SYSREG:
7324 {
7325 uint32_t sysreg_flags;
7326 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7327 &sysreg_flags)) == PARSE_FAIL)
7328 {
7329 set_syntax_error (_("unknown or missing system register name"));
7330 goto failure;
7331 }
7332 inst.base.operands[i].sysreg.value = val;
7333 inst.base.operands[i].sysreg.flags = sysreg_flags;
7334 break;
7335 }
7336
7337 case AARCH64_OPND_PSTATEFIELD:
7338 {
7339 uint32_t sysreg_flags;
7340 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7341 &sysreg_flags)) == PARSE_FAIL)
7342 {
7343 set_syntax_error (_("unknown or missing PSTATE field name"));
7344 goto failure;
7345 }
7346 inst.base.operands[i].pstatefield = val;
7347 inst.base.operands[i].sysreg.flags = sysreg_flags;
7348 break;
7349 }
7350
7351 case AARCH64_OPND_SYSREG_IC:
7352 inst.base.operands[i].sysins_op =
7353 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7354 goto sys_reg_ins;
7355
7356 case AARCH64_OPND_SYSREG_DC:
7357 inst.base.operands[i].sysins_op =
7358 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7359 goto sys_reg_ins;
7360
7361 case AARCH64_OPND_SYSREG_AT:
7362 inst.base.operands[i].sysins_op =
7363 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7364 goto sys_reg_ins;
7365
7366 case AARCH64_OPND_SYSREG_SR:
7367 inst.base.operands[i].sysins_op =
7368 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7369 goto sys_reg_ins;
7370
7371 case AARCH64_OPND_SYSREG_TLBI:
7372 inst.base.operands[i].sysins_op =
7373 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7374 sys_reg_ins:
7375 if (inst.base.operands[i].sysins_op == NULL)
7376 {
7377 set_fatal_syntax_error ( _("unknown or missing operation name"));
7378 goto failure;
7379 }
7380 break;
7381
7382 case AARCH64_OPND_BARRIER:
7383 case AARCH64_OPND_BARRIER_ISB:
7384 val = parse_barrier (&str);
7385 if (val != PARSE_FAIL
7386 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7387 {
7388 /* ISB only accepts options name 'sy'. */
7389 set_syntax_error
7390 (_("the specified option is not accepted in ISB"));
7391 /* Turn off backtrack as this optional operand is present. */
7392 backtrack_pos = 0;
7393 goto failure;
7394 }
7395 if (val != PARSE_FAIL
7396 && operands[i] == AARCH64_OPND_BARRIER)
7397 {
7398 /* Regular barriers accept options CRm (C0-C15).
7399 DSB nXS barrier variant accepts values > 15. */
7400 if (val < 0 || val > 15)
7401 {
7402 set_syntax_error (_("the specified option is not accepted in DSB"));
7403 goto failure;
7404 }
7405 }
7406 /* This is an extension to accept a 0..15 immediate. */
7407 if (val == PARSE_FAIL)
7408 po_imm_or_fail (0, 15);
7409 info->barrier = aarch64_barrier_options + val;
7410 break;
7411
7412 case AARCH64_OPND_BARRIER_DSB_NXS:
7413 val = parse_barrier (&str);
7414 if (val != PARSE_FAIL)
7415 {
7416 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7417 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7418 {
7419 set_syntax_error (_("the specified option is not accepted in DSB"));
7420 /* Turn off backtrack as this optional operand is present. */
7421 backtrack_pos = 0;
7422 goto failure;
7423 }
7424 }
7425 else
7426 {
7427 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7428 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7429 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7430 goto failure;
7431 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7432 {
7433 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7434 goto failure;
7435 }
7436 }
7437 /* Option index is encoded as 2-bit value in val<3:2>. */
7438 val = (val >> 2) - 4;
7439 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7440 break;
7441
7442 case AARCH64_OPND_PRFOP:
7443 val = parse_pldop (&str);
7444 /* This is an extension to accept a 0..31 immediate. */
7445 if (val == PARSE_FAIL)
7446 po_imm_or_fail (0, 31);
7447 inst.base.operands[i].prfop = aarch64_prfops + val;
7448 break;
7449
7450 case AARCH64_OPND_BARRIER_PSB:
7451 val = parse_barrier_psb (&str, &(info->hint_option));
7452 if (val == PARSE_FAIL)
7453 goto failure;
7454 break;
7455
7456 case AARCH64_OPND_BTI_TARGET:
7457 val = parse_bti_operand (&str, &(info->hint_option));
7458 if (val == PARSE_FAIL)
7459 goto failure;
7460 break;
7461
7462 case AARCH64_OPND_SME_ZAda_2b:
7463 case AARCH64_OPND_SME_ZAda_3b:
7464 val = parse_sme_zada_operand (&str, &qualifier);
7465 if (val == PARSE_FAIL)
7466 goto failure;
7467 info->reg.regno = val;
7468 info->qualifier = qualifier;
7469 break;
7470
7471 case AARCH64_OPND_SME_ZA_HV_idx_src:
7472 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7473 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7474 {
7475 enum sme_hv_slice slice_indicator;
7476 int vector_select_register;
7477 int imm;
7478
7479 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7480 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7481 &slice_indicator,
7482 &vector_select_register,
7483 &imm,
7484 &qualifier);
7485 else
7486 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7487 &vector_select_register,
7488 &imm,
7489 &qualifier);
7490 if (val == PARSE_FAIL)
7491 goto failure;
7492 info->za_tile_vector.regno = val;
7493 info->za_tile_vector.index.regno = vector_select_register;
7494 info->za_tile_vector.index.imm = imm;
7495 info->za_tile_vector.v = slice_indicator;
7496 info->qualifier = qualifier;
7497 break;
7498 }
7499
7500 case AARCH64_OPND_SME_list_of_64bit_tiles:
7501 val = parse_sme_list_of_64bit_tiles (&str);
7502 if (val == PARSE_FAIL)
7503 goto failure;
7504 info->imm.value = val;
7505 break;
7506
7507 case AARCH64_OPND_SME_ZA_array:
7508 {
7509 int imm;
7510 val = parse_sme_za_array (&str, &imm);
7511 if (val == PARSE_FAIL)
7512 goto failure;
7513 info->za_tile_vector.index.regno = val;
7514 info->za_tile_vector.index.imm = imm;
7515 break;
7516 }
7517
7518 default:
7519 as_fatal (_("unhandled operand code %d"), operands[i]);
7520 }
7521
7522 /* If we get here, this operand was successfully parsed. */
7523 inst.base.operands[i].present = 1;
7524 continue;
7525
7526 failure:
7527 /* The parse routine should already have set the error, but in case
7528 not, set a default one here. */
7529 if (! error_p ())
7530 set_default_error ();
7531
7532 if (! backtrack_pos)
7533 goto parse_operands_return;
7534
7535 {
7536 /* We reach here because this operand is marked as optional, and
7537 either no operand was supplied or the operand was supplied but it
7538 was syntactically incorrect. In the latter case we report an
7539 error. In the former case we perform a few more checks before
7540 dropping through to the code to insert the default operand. */
7541
7542 char *tmp = backtrack_pos;
7543 char endchar = END_OF_INSN;
7544
7545 if (i != (aarch64_num_of_operands (opcode) - 1))
7546 endchar = ',';
7547 skip_past_char (&tmp, ',');
7548
7549 if (*tmp != endchar)
7550 /* The user has supplied an operand in the wrong format. */
7551 goto parse_operands_return;
7552
7553 /* Make sure there is not a comma before the optional operand.
7554 For example the fifth operand of 'sys' is optional:
7555
7556 sys #0,c0,c0,#0, <--- wrong
7557 sys #0,c0,c0,#0 <--- correct. */
7558 if (comma_skipped_p && i && endchar == END_OF_INSN)
7559 {
7560 set_fatal_syntax_error
7561 (_("unexpected comma before the omitted optional operand"));
7562 goto parse_operands_return;
7563 }
7564 }
7565
7566 /* Reaching here means we are dealing with an optional operand that is
7567 omitted from the assembly line. */
7568 gas_assert (optional_operand_p (opcode, i));
7569 info->present = 0;
7570 process_omitted_operand (operands[i], opcode, i, info);
7571
7572 /* Try again, skipping the optional operand at backtrack_pos. */
7573 str = backtrack_pos;
7574 backtrack_pos = 0;
7575
7576 /* Clear any error record after the omitted optional operand has been
7577 successfully handled. */
7578 clear_error ();
7579 }
7580
7581 /* Check if we have parsed all the operands. */
7582 if (*str != '\0' && ! error_p ())
7583 {
7584 /* Set I to the index of the last present operand; this is
7585 for the purpose of diagnostics. */
7586 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7587 ;
7588 set_fatal_syntax_error
7589 (_("unexpected characters following instruction"));
7590 }
7591
7592 parse_operands_return:
7593
7594 if (error_p ())
7595 {
7596 DEBUG_TRACE ("parsing FAIL: %s - %s",
7597 operand_mismatch_kind_names[get_error_kind ()],
7598 get_error_message ());
7599 /* Record the operand error properly; this is useful when there
7600 are multiple instruction templates for a mnemonic name, so that
7601 later on, we can select the error that most closely describes
7602 the problem. */
7603 record_operand_error (opcode, i, get_error_kind (),
7604 get_error_message ());
7605 return false;
7606 }
7607 else
7608 {
7609 DEBUG_TRACE ("parsing SUCCESS");
7610 return true;
7611 }
7612 }
7613
7614 /* It does some fix-up to provide some programmer friendly feature while
7615 keeping the libopcodes happy, i.e. libopcodes only accepts
7616 the preferred architectural syntax.
7617 Return FALSE if there is any failure; otherwise return TRUE. */
7618
7619 static bool
7620 programmer_friendly_fixup (aarch64_instruction *instr)
7621 {
7622 aarch64_inst *base = &instr->base;
7623 const aarch64_opcode *opcode = base->opcode;
7624 enum aarch64_op op = opcode->op;
7625 aarch64_opnd_info *operands = base->operands;
7626
7627 DEBUG_TRACE ("enter");
7628
7629 switch (opcode->iclass)
7630 {
7631 case testbranch:
7632 /* TBNZ Xn|Wn, #uimm6, label
7633 Test and Branch Not Zero: conditionally jumps to label if bit number
7634 uimm6 in register Xn is not zero. The bit number implies the width of
7635 the register, which may be written and should be disassembled as Wn if
7636 uimm is less than 32. */
7637 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7638 {
7639 if (operands[1].imm.value >= 32)
7640 {
7641 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7642 0, 31);
7643 return false;
7644 }
7645 operands[0].qualifier = AARCH64_OPND_QLF_X;
7646 }
7647 break;
7648 case loadlit:
7649 /* LDR Wt, label | =value
7650 As a convenience assemblers will typically permit the notation
7651 "=value" in conjunction with the pc-relative literal load instructions
7652 to automatically place an immediate value or symbolic address in a
7653 nearby literal pool and generate a hidden label which references it.
7654 ISREG has been set to 0 in the case of =value. */
7655 if (instr->gen_lit_pool
7656 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
7657 {
7658 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7659 if (op == OP_LDRSW_LIT)
7660 size = 4;
7661 if (instr->reloc.exp.X_op != O_constant
7662 && instr->reloc.exp.X_op != O_big
7663 && instr->reloc.exp.X_op != O_symbol)
7664 {
7665 record_operand_error (opcode, 1,
7666 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7667 _("constant expression expected"));
7668 return false;
7669 }
7670 if (! add_to_lit_pool (&instr->reloc.exp, size))
7671 {
7672 record_operand_error (opcode, 1,
7673 AARCH64_OPDE_OTHER_ERROR,
7674 _("literal pool insertion failed"));
7675 return false;
7676 }
7677 }
7678 break;
7679 case log_shift:
7680 case bitfield:
7681 /* UXT[BHW] Wd, Wn
7682 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
7683 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
7684 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7685 A programmer-friendly assembler should accept a destination Xd in
7686 place of Wd, however that is not the preferred form for disassembly.
7687 */
7688 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7689 && operands[1].qualifier == AARCH64_OPND_QLF_W
7690 && operands[0].qualifier == AARCH64_OPND_QLF_X)
7691 operands[0].qualifier = AARCH64_OPND_QLF_W;
7692 break;
7693
7694 case addsub_ext:
7695 {
7696 /* In the 64-bit form, the final register operand is written as Wm
7697 for all but the (possibly omitted) UXTX/LSL and SXTX
7698 operators.
7699 As a programmer-friendly assembler, we accept e.g.
7700 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7701 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
7702 int idx = aarch64_operand_index (opcode->operands,
7703 AARCH64_OPND_Rm_EXT);
7704 gas_assert (idx == 1 || idx == 2);
7705 if (operands[0].qualifier == AARCH64_OPND_QLF_X
7706 && operands[idx].qualifier == AARCH64_OPND_QLF_X
7707 && operands[idx].shifter.kind != AARCH64_MOD_LSL
7708 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7709 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7710 operands[idx].qualifier = AARCH64_OPND_QLF_W;
7711 }
7712 break;
7713
7714 default:
7715 break;
7716 }
7717
7718 DEBUG_TRACE ("exit with SUCCESS");
7719 return true;
7720 }
7721
/* Check for loads and stores that will cause unpredictable behavior.

   INSTR is the fully-parsed instruction and STR the original source
   text, used only in the diagnostic messages.  Emits as_warn
   diagnostics; never rejects the instruction.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.
	 Single-register forms: operand 0 is the transfer register and
	 operand 1 the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 Pair forms: operands 0 and 1 are the transfer registers and
	 operand 2 the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers; bit 22 of the
	 encoding selects the load form.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.
		 Bit 21 selects the pair form.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7817
7818 static void
7819 force_automatic_sequence_close (void)
7820 {
7821 if (now_instr_sequence.instr)
7822 {
7823 as_warn (_("previous `%s' sequence has not been closed"),
7824 now_instr_sequence.instr->opcode->name);
7825 init_insn_sequence (NULL, &now_instr_sequence);
7826 }
7827 }
7828
7829 /* A wrapper function to interface with libopcodes on encoding and
7830 record the error message if there is any.
7831
7832 Return TRUE on success; otherwise return FALSE. */
7833
7834 static bool
7835 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7836 aarch64_insn *code)
7837 {
7838 aarch64_operand_error error_info;
7839 memset (&error_info, '\0', sizeof (error_info));
7840 error_info.kind = AARCH64_OPDE_NIL;
7841 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7842 && !error_info.non_fatal)
7843 return true;
7844
7845 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7846 record_operand_error_info (opcode, &error_info);
7847 return error_info.non_fatal;
7848 }
7849
7850 #ifdef DEBUG_AARCH64
7851 static inline void
7852 dump_opcode_operands (const aarch64_opcode *opcode)
7853 {
7854 int i = 0;
7855 while (opcode->operands[i] != AARCH64_OPND_NIL)
7856 {
7857 aarch64_verbose ("\t\t opnd%d: %s", i,
7858 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7859 ? aarch64_get_operand_name (opcode->operands[i])
7860 : aarch64_get_operand_desc (opcode->operands[i]));
7861 ++i;
7862 }
7863 }
7864 #endif /* DEBUG_AARCH64 */
7865
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.

   The mnemonic is looked up; every opcode template sharing that
   mnemonic is tried in turn until one parses and encodes successfully,
   at which point the instruction (or a deferred copy, when a fixup is
   needed) is emitted.  If no template matches, the accumulated operand
   errors are reported.  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  const aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* P is advanced past the mnemonic by opcode_lookup.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed from the mnemonic suffix across the
     per-instruction reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; failure of any step moves on to the
	 next candidate template.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8005
8006 /* Various frobbings of labels and their addresses. */
8007
/* Hook run at the start of each source line: forget any label recorded
   on a previous line so md_assemble only aligns labels on the current
   line.  */

void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
8013
/* Hook run when a label SYM is defined: remember it so md_assemble can
   re-anchor it to the instruction frag, and emit DWARF line info for
   it.  */

void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8021
/* Hook run when changing sections.  */

void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8028
8029 int
8030 aarch64_data_in_code (void)
8031 {
8032 if (startswith (input_line_pointer + 1, "data:"))
8033 {
8034 *input_line_pointer = '/';
8035 input_line_pointer += 5;
8036 *input_line_pointer = 0;
8037 return 1;
8038 }
8039
8040 return 0;
8041 }
8042
/* Canonicalize a symbol NAME by stripping a trailing "/data" marker,
   if present.  The string is modified in place and NAME itself is
   returned.  Names of five characters or fewer (i.e. nothing besides
   the marker) are left untouched.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  /* Use size_t for the strlen result rather than int, avoiding a
     signed/unsigned mismatch on very long names.  */
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8053 \f
8054 /* Table of all register names defined by default. The user can
8055 define additional names with .req. Note that all register names
8056 should appear in both upper and lowercase variants. Some registers
8057 also have mixed-case names. */
8058
8059 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8060 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8061 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8062 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8063 #define REGSET16(p,t) \
8064 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8065 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8066 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8067 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8068 #define REGSET16S(p,s,t) \
8069 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8070 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8071 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8072 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8073 #define REGSET31(p,t) \
8074 REGSET16(p, t), \
8075 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8076 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8077 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8078 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8079 #define REGSET(p,t) \
8080 REGSET31(p,t), REGNUM(p,31,t)
8081
/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  REGSET31 defines x0-x30/w0-w30 only; register
     number 31 is deliberately omitted here and spelled as sp/wsp or
     xzr/wzr below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  /* Conventional aliases: ip0/ip1 for x16/x17, fp for x29, lr for x30.  */
  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  /* Zero-register spellings of register number 31.  */
  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers (p0-p15 only).  */
  REGSET16 (p, PN), REGSET16 (P, PN),

  /* SME ZA tile registers.  */
  REGSET16 (za, ZA), REGSET16 (ZA, ZA),

  /* SME ZA tile registers (horizontal slice).  */
  REGSET16S (za, h, ZAH), REGSET16S (ZA, H, ZAH),

  /* SME ZA tile registers (vertical slice).  */
  REGSET16S (za, v, ZAV), REGSET16S (ZA, V, ZAV)
};
8131
8132 #undef REGDEF
8133 #undef REGDEF_ALIAS
8134 #undef REGNUM
8135 #undef REGSET16
8136 #undef REGSET31
8137 #undef REGSET
8138
8139 #define N 1
8140 #define n 0
8141 #define Z 1
8142 #define z 0
8143 #define C 1
8144 #define c 0
8145 #define V 1
8146 #define v 0
8147 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All sixteen combinations of the NZCV condition-flag names; an
   uppercase letter selects the flag's bit.  The B macro above packs
   the flags as N:Z:C:V from bit 3 down to bit 0.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};
8166
8167 #undef N
8168 #undef n
8169 #undef Z
8170 #undef z
8171 #undef C
8172 #undef c
8173 #undef V
8174 #undef v
8175 #undef B
8176 \f
8177 /* MD interface: bits in the object file. */
8178
8179 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8180 for use in the a.out file, and stores them in the array pointed to by buf.
8181 This knows about the endian-ness of the target machine and does
8182 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8183 2 (short) and 4 (long) Floating numbers are put out as a series of
8184 LITTLENUMS (shorts, here at least). */
8185
8186 void
8187 md_number_to_chars (char *buf, valueT val, int n)
8188 {
8189 if (target_big_endian)
8190 number_to_chars_bigendian (buf, val, n);
8191 else
8192 number_to_chars_littleendian (buf, val, n);
8193 }
8194
8195 /* MD interface: Sections. */
8196
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Every AArch64 instruction occupies exactly 4 bytes, so no growth
     can occur during relaxation.  */
  fragp->fr_var = 4;
  return 4;
}
8206
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No target-specific rounding is needed; return SIZE unchanged.  */
  return size;
}
8214
8215 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8216 of an rs_align_code fragment.
8217
8218 Here we fill the frag with the appropriate info for padding the
8219 output stream. The resulting frag will consist of a fixed (fr_fix)
8220 and of a repeating (fr_var) part.
8221
8222 The fixed content is always emitted before the repeating content and
8223 these two parts are used as follows in constructing the output:
8224 - the fixed part will be used to align to a valid instruction word
8225 boundary, in case that we start at a misaligned address; as no
8226 executable instruction can live at the misaligned location, we
8227 simply fill with zeros;
8228 - the variable part will be used to cover the remaining padding and
8229 we fill using the AArch64 NOP instruction.
8230
8231 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
8232 enough storage space for up to 3 bytes for padding the back to a valid
8233 instruction alignment and exactly 4 bytes to store the NOP pattern. */
8234
/* Fill in the contents of the rs_align_code fragment FRAGP: a fixed
   zero-filled prefix to reach 4-byte alignment, then a repeating
   4-byte NOP pattern.  See the block comment above for the frag
   layout.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed, and the first byte to fill.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach instruction (4-byte) alignment; these cannot
     hold an instruction, so zero-fill them and mark them as data.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The variable part repeats the NOP pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8272
8273 /* Perform target specific initialisation of a frag.
8274 Note - despite the name this initialisation is not done when the frag
8275 is created, but only when its type is assigned. A frag can be created
8276 and used a long time before its type is set, so beware of assuming that
8277 this initialisation is performed first. */
8278
8279 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets.  */
}
8285
8286 #else /* OBJ_ELF is defined. */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Pick the mapping state (instruction vs data) implied by the frag
     type, covering at most MAX_CHARS bytes.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
8318 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* On entry the CFA is the stack pointer with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8326 #endif /* OBJ_ELF */
8327
8328 /* Convert REGNAME to a DWARF-2 register number. */
8329
8330 int
8331 tc_aarch64_regname_to_dw2regnum (char *regname)
8332 {
8333 const reg_entry *reg = parse_reg (&regname);
8334 if (reg == NULL)
8335 return -1;
8336
8337 switch (reg->type)
8338 {
8339 case REG_TYPE_SP_32:
8340 case REG_TYPE_SP_64:
8341 case REG_TYPE_R_32:
8342 case REG_TYPE_R_64:
8343 return reg->number;
8344
8345 case REG_TYPE_FP_B:
8346 case REG_TYPE_FP_H:
8347 case REG_TYPE_FP_S:
8348 case REG_TYPE_FP_D:
8349 case REG_TYPE_FP_Q:
8350 return reg->number + 64;
8351
8352 default:
8353 break;
8354 }
8355 return -1;
8356 }
8357
/* Implement DWARF2_ADDR_SIZE.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 4-byte addresses.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
8369
8370 /* MD interface: Symbol and relocation handling. */
8371
8372 /* Return the address within the segment that a PC-relative fixup is
8373 relative to. For AArch64 PC-relative fixups applied to instructions
8374 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8375
8376 long
8377 md_pcrel_from_section (fixS * fixP, segT seg)
8378 {
8379 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8380
8381 /* If this is pc-relative and we are going to emit a relocation
8382 then we just want to put out any pipeline compensation that the linker
8383 will need. Otherwise we want to use the calculated base. */
8384 if (fixP->fx_pcrel
8385 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8386 || aarch64_force_relocation (fixP)))
8387 base = 0;
8388
8389 /* AArch64 should be consistent for all pc-relative relocations. */
8390 return base + AARCH64_PCREL_OFFSET;
8391 }
8392
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap prefix check before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  /* Create the GOT symbol lazily, on first reference.  */
	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
8418
8419 /* Return non-zero if the indicated VALUE has overflowed the maximum
8420 range expressible by a unsigned number with the indicated number of
8421 BITS. */
8422
8423 static bool
8424 unsigned_overflow (valueT value, unsigned bits)
8425 {
8426 valueT lim;
8427 if (bits >= sizeof (valueT) * 8)
8428 return false;
8429 lim = (valueT) 1 << bits;
8430 return (value >= lim);
8431 }
8432
8433
8434 /* Return non-zero if the indicated VALUE has overflowed the maximum
8435 range expressible by an signed number with the indicated number of
8436 BITS. */
8437
8438 static bool
8439 signed_overflow (offsetT value, unsigned bits)
8440 {
8441 offsetT lim;
8442 if (bits >= sizeof (offsetT) * 8)
8443 return false;
8444 lim = (offsetT) 1 << (bits - 1);
8445 return (value < -lim || value >= lim);
8446 }
8447
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; OP_NIL
     means there is no such counterpart.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS: new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8510
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN); per the comment above,
	 MOVZ is preferred over MOVN, so this is tried second.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8571
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Location of the instruction bytes being patched.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate patched directly into the instruction word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6 Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6 Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6 Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6 Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction via
	 libopcodes rather than patching bits directly.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* Fall back to the unscaled LDUR/STUR form when the scaled
	     offset does not encode.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8750
8751 /* Apply a fixup (fixP) to segment data, once it has been determined
8752 by our caller that we have all the info we need to fix it up.
8753
8754 Parameter valP is the pointer to the value of the bits. */
8755
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.

   SEG is the segment the fixup lives in; its use_rela_p flag decides
   whether resolved values are patched into the section contents here
   or left for the RELA addend.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Address of the bytes being fixed up inside the frag's literal pool.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* For GAS-internal fixups, fx_addnumber carries the insn flags that
     fix_insn needs (see fix_insn's callers).  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    /* Plain data relocations: if fully resolved (or a REL target),
       simply patch the constant into the section.  */
    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
         in fact we generate an error because we do not have a reloc
         for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* 19-bit, word-scaled literal-load offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* 21-bit byte offset for ADR.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* 19-bit, word-scaled conditional-branch offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* 14-bit, word-scaled TBZ/TBNZ offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* 26-bit, word-scaled B/BL offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVW family: SCALE selects which 16-bit group (G0/G1/G2/G3) of
       the value is inserted; all variants share the movw_common tail.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Narrow the generic reloc to the ABI-specific (ILP32/LP64) one.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Narrow the generic reloc to the ABI-specific (ILP32/LP64) one.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* All remaining TLS relocations: mark the symbol thread-local and
       leave the reloc for the linker.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      /* Marker relocations for TLS descriptor sequences; nothing to
	 patch here.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated the struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal .  */
  free (fixP->tc_fix_data.inst);

  return;
}
9137
9138 /* Translate internal representation of relocation info to BFD target
9139 format. */
9140
9141 arelent *
9142 tc_gen_reloc (asection * section, fixS * fixp)
9143 {
9144 arelent *reloc;
9145 bfd_reloc_code_real_type code;
9146
9147 reloc = XNEW (arelent);
9148
9149 reloc->sym_ptr_ptr = XNEW (asymbol *);
9150 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9151 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9152
9153 if (fixp->fx_pcrel)
9154 {
9155 if (section->use_rela_p)
9156 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9157 else
9158 fixp->fx_offset = reloc->address;
9159 }
9160 reloc->addend = fixp->fx_offset;
9161
9162 code = fixp->fx_r_type;
9163 switch (code)
9164 {
9165 case BFD_RELOC_16:
9166 if (fixp->fx_pcrel)
9167 code = BFD_RELOC_16_PCREL;
9168 break;
9169
9170 case BFD_RELOC_32:
9171 if (fixp->fx_pcrel)
9172 code = BFD_RELOC_32_PCREL;
9173 break;
9174
9175 case BFD_RELOC_64:
9176 if (fixp->fx_pcrel)
9177 code = BFD_RELOC_64_PCREL;
9178 break;
9179
9180 default:
9181 break;
9182 }
9183
9184 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9185 if (reloc->howto == NULL)
9186 {
9187 as_bad_where (fixp->fx_file, fixp->fx_line,
9188 _
9189 ("cannot represent %s relocation in this object file format"),
9190 bfd_get_reloc_code_name (code));
9191 return NULL;
9192 }
9193
9194 return reloc;
9195 }
9196
9197 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9198
9199 void
9200 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9201 {
9202 bfd_reloc_code_real_type type;
9203 int pcrel = 0;
9204
9205 /* Pick a reloc.
9206 FIXME: @@ Should look at CPU word size. */
9207 switch (size)
9208 {
9209 case 1:
9210 type = BFD_RELOC_8;
9211 break;
9212 case 2:
9213 type = BFD_RELOC_16;
9214 break;
9215 case 4:
9216 type = BFD_RELOC_32;
9217 break;
9218 case 8:
9219 type = BFD_RELOC_64;
9220 break;
9221 default:
9222 as_bad (_("cannot do %u-byte relocation"), size);
9223 type = BFD_RELOC_UNUSED;
9224 break;
9225 }
9226
9227 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9228 }
9229
9230 #ifdef OBJ_ELF
9231
9232 /* Implement md_after_parse_args. This is the earliest time we need to decide
9233 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9234
9235 void
9236 aarch64_after_parse_args (void)
9237 {
9238 if (aarch64_abi != AARCH64_ABI_NONE)
9239 return;
9240
9241 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9242 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9243 aarch64_abi = AARCH64_ABI_ILP32;
9244 else
9245 aarch64_abi = AARCH64_ABI_LP64;
9246 }
9247
9248 const char *
9249 elf64_aarch64_target_format (void)
9250 {
9251 #ifdef TE_CLOUDABI
9252 /* FIXME: What to do for ilp32_p ? */
9253 if (target_big_endian)
9254 return "elf64-bigaarch64-cloudabi";
9255 else
9256 return "elf64-littleaarch64-cloudabi";
9257 #else
9258 if (target_big_endian)
9259 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9260 else
9261 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9262 #endif
9263 }
9264
/* Implement TC_FROB_SYMBOL: delegate straight to the generic ELF
   symbol frobbing.  PUNTP is set by the callee if the symbol should
   be skipped.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9270 #endif
9271
9272 /* MD interface: Finalization. */
9273
9274 /* A good place to do this, although this was probably not intended
9275 for this kind of use. We need to dump the literal pool before
9276 references are made to a null symbol pointer. */
9277
9278 void
9279 aarch64_cleanup (void)
9280 {
9281 literal_pool *pool;
9282
9283 for (pool = list_of_pools; pool; pool = pool->next)
9284 {
9285 /* Put it at the end of the relevant section. */
9286 subseg_set (pool->section, pool->sub_section);
9287 s_ltorg (0);
9288 }
9289 }
9290
9291 #ifdef OBJ_ELF
9292 /* Remove any excess mapping symbols generated for alignment frags in
9293 SEC. We may have created a mapping symbol before a zero byte
9294 alignment; remove it if there's a mapping symbol after the
9295 alignment. */
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections without frags.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Last mapping symbol placed in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward through empty frags to decide whether SYM is
	 redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9355 #endif
9356
9357 /* Adjust the symbol table. */
9358
/* Adjust the symbol table: drop redundant mapping symbols first, then
   perform the generic ELF symbol-table adjustments.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9369
/* Insert KEY -> VALUE into TABLE.  The final 0 argument asks
   str_hash_insert not to replace an existing entry, so the first
   insertion of a duplicate key wins.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9375
/* Like checked_hash_insert, but additionally assert that the system
   register name fits in the fixed-size name buffers used by the
   sysreg parsers.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9382
9383 static void
9384 fill_instruction_hash_table (void)
9385 {
9386 const aarch64_opcode *opcode = aarch64_opcode_table;
9387
9388 while (opcode->name != NULL)
9389 {
9390 templates *templ, *new_templ;
9391 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9392
9393 new_templ = XNEW (templates);
9394 new_templ->opcode = opcode;
9395 new_templ->next = NULL;
9396
9397 if (!templ)
9398 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9399 else
9400 {
9401 new_templ->next = templ->next;
9402 templ->next = new_templ;
9403 }
9404 ++opcode;
9405 }
9406 }
9407
9408 static inline void
9409 convert_to_upper (char *dst, const char *src, size_t num)
9410 {
9411 unsigned int i;
9412 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9413 *dst = TOUPPER (*src);
9414 *dst = '\0';
9415 }
9416
9417 /* Assume STR point to a lower-case string, allocate, convert and return
9418 the corresponding upper-case string. */
9419 static inline const char*
9420 get_upper_str (const char *str)
9421 {
9422 char *ret;
9423 size_t len = strlen (str);
9424 ret = XNEWVEC (char, len + 1);
9425 convert_to_upper (ret, str, len);
9426 return ret;
9427 }
9428
9429 /* MD interface: Initialization. */
9430
/* MD interface: Initialization.  Create and populate every lookup
   table the parser uses (opcodes, registers, system registers,
   condition codes, barrier/prefetch/hint options), then settle the
   CPU feature set from the command line and record the BFD mach.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create all the hash tables up front.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and pstate fields; these tables are
     NULL-name-terminated.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  /* IC/DC/AT/TLBI/SR system instruction operands.  */
  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* General-purpose/SIMD register names and NZCV flag sets.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers, hashed in both lower and upper case.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options:
     -mcpu wins over -march; fall back to the built-in default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
9589
/* Command line processing.  */

/* Short options handled by the target: -m<arg>.  */
const char *md_shortopts = "m:";

/* Only define the -EB/-EL long options that the configured target
   actually supports.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9616
/* Table entry for a simple on/off target option: matching OPTION
   stores VALUE into *VAR.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* The boolean -m options recognized by this target.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9639
/* Entry in the -mcpu= table: maps a CPU name to its feature set.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
9648
9649 /* This list should, at a minimum, contain all the cpu names
9650 recognized by GCC. */
9651 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
9652 {"all", AARCH64_ANY, NULL},
9653 {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
9654 AARCH64_FEATURE_CRC), "Cortex-A34"},
9655 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
9656 AARCH64_FEATURE_CRC), "Cortex-A35"},
9657 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
9658 AARCH64_FEATURE_CRC), "Cortex-A53"},
9659 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
9660 AARCH64_FEATURE_CRC), "Cortex-A57"},
9661 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
9662 AARCH64_FEATURE_CRC), "Cortex-A72"},
9663 {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
9664 AARCH64_FEATURE_CRC), "Cortex-A73"},
9665 {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9666 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9667 "Cortex-A55"},
9668 {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9669 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9670 "Cortex-A75"},
9671 {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9672 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9673 "Cortex-A76"},
9674 {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9675 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9676 | AARCH64_FEATURE_DOTPROD
9677 | AARCH64_FEATURE_SSBS),
9678 "Cortex-A76AE"},
9679 {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9680 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9681 | AARCH64_FEATURE_DOTPROD
9682 | AARCH64_FEATURE_SSBS),
9683 "Cortex-A77"},
9684 {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9685 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9686 | AARCH64_FEATURE_DOTPROD
9687 | AARCH64_FEATURE_SSBS),
9688 "Cortex-A65"},
9689 {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9690 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9691 | AARCH64_FEATURE_DOTPROD
9692 | AARCH64_FEATURE_SSBS),
9693 "Cortex-A65AE"},
9694 {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9695 AARCH64_FEATURE_F16
9696 | AARCH64_FEATURE_RCPC
9697 | AARCH64_FEATURE_DOTPROD
9698 | AARCH64_FEATURE_SSBS
9699 | AARCH64_FEATURE_PROFILE),
9700 "Cortex-A78"},
9701 {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9702 AARCH64_FEATURE_F16
9703 | AARCH64_FEATURE_RCPC
9704 | AARCH64_FEATURE_DOTPROD
9705 | AARCH64_FEATURE_SSBS
9706 | AARCH64_FEATURE_PROFILE),
9707 "Cortex-A78AE"},
9708 {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9709 AARCH64_FEATURE_DOTPROD
9710 | AARCH64_FEATURE_F16
9711 | AARCH64_FEATURE_FLAGM
9712 | AARCH64_FEATURE_PAC
9713 | AARCH64_FEATURE_PROFILE
9714 | AARCH64_FEATURE_RCPC
9715 | AARCH64_FEATURE_SSBS),
9716 "Cortex-A78C"},
9717 {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
9718 AARCH64_FEATURE_BFLOAT16
9719 | AARCH64_FEATURE_I8MM
9720 | AARCH64_FEATURE_MEMTAG
9721 | AARCH64_FEATURE_SVE2_BITPERM),
9722 "Cortex-A510"},
9723 {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
9724 AARCH64_FEATURE_BFLOAT16
9725 | AARCH64_FEATURE_I8MM
9726 | AARCH64_FEATURE_MEMTAG
9727 | AARCH64_FEATURE_SVE2_BITPERM),
9728 "Cortex-A710"},
9729 {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9730 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9731 | AARCH64_FEATURE_DOTPROD
9732 | AARCH64_FEATURE_PROFILE),
9733 "Ares"},
9734 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
9735 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
9736 "Samsung Exynos M1"},
9737 {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
9738 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
9739 | AARCH64_FEATURE_RDMA),
9740 "Qualcomm Falkor"},
9741 {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9742 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9743 | AARCH64_FEATURE_DOTPROD
9744 | AARCH64_FEATURE_SSBS),
9745 "Neoverse E1"},
9746 {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9747 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9748 | AARCH64_FEATURE_DOTPROD
9749 | AARCH64_FEATURE_PROFILE),
9750 "Neoverse N1"},
9751 {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
9752 AARCH64_FEATURE_BFLOAT16
9753 | AARCH64_FEATURE_I8MM
9754 | AARCH64_FEATURE_F16
9755 | AARCH64_FEATURE_SVE
9756 | AARCH64_FEATURE_SVE2
9757 | AARCH64_FEATURE_SVE2_BITPERM
9758 | AARCH64_FEATURE_MEMTAG
9759 | AARCH64_FEATURE_RNG),
9760 "Neoverse N2"},
9761 {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
9762 AARCH64_FEATURE_PROFILE
9763 | AARCH64_FEATURE_CVADP
9764 | AARCH64_FEATURE_SVE
9765 | AARCH64_FEATURE_SSBS
9766 | AARCH64_FEATURE_RNG
9767 | AARCH64_FEATURE_F16
9768 | AARCH64_FEATURE_BFLOAT16
9769 | AARCH64_FEATURE_I8MM), "Neoverse V1"},
9770 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
9771 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
9772 | AARCH64_FEATURE_RDMA),
9773 "Qualcomm QDF24XX"},
9774 {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
9775 AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
9776 "Qualcomm Saphira"},
9777 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
9778 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
9779 "Cavium ThunderX"},
9780 {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
9781 AARCH64_FEATURE_CRYPTO),
9782 "Broadcom Vulcan"},
  /* "xgene-1" is an older spelling of "xgene1"; it appeared in earlier
     releases and has since been superseded by "xgene1" in all tools.
     It is kept here for backward compatibility.  */
9786 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9787 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9788 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
9789 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
9790 {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
9791 {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9792 AARCH64_FEATURE_F16
9793 | AARCH64_FEATURE_RCPC
9794 | AARCH64_FEATURE_DOTPROD
9795 | AARCH64_FEATURE_SSBS
9796 | AARCH64_FEATURE_PROFILE),
9797 "Cortex-X1"},
9798 {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
9799 AARCH64_FEATURE_BFLOAT16
9800 | AARCH64_FEATURE_I8MM
9801 | AARCH64_FEATURE_MEMTAG
9802 | AARCH64_FEATURE_SVE2_BITPERM),
9803 "Cortex-X2"},
9804 {"generic", AARCH64_ARCH_V8, NULL},
9805
9806 {NULL, AARCH64_ARCH_NONE, NULL}
9807 };
9808
/* An architecture name and the set of features it implies, used by
   -march= (aarch64_parse_arch) and the .arch directive.  */
struct aarch64_arch_option_table
{
  const char *name;			/* Architecture name, e.g. "armv8.2-a".  */
  const aarch64_feature_set value;	/* Feature bits enabled by NAME.  */
};
9814
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  The first entry ("all") is deliberately skipped
   by the .arch directive handler; the table is terminated by a NULL
   name.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {NULL, AARCH64_ARCH_NONE}
};
9831
/* ISA extensions.  One entry per "+ext" modifier accepted after a CPU
   or architecture name.  */
struct aarch64_option_cpu_value_table
{
  const char *name;			/* Extension name as written after '+'.  */
  const aarch64_feature_set value;	/* Feature bits the extension enables.  */
  const aarch64_feature_set require;	/* Feature dependencies.  */
};
9839
/* Table of ISA extensions.  NAME is matched against the text following
   a '+' in aarch64_parse_features; VALUE is the feature set the
   extension enables; the third field lists the features it depends on
   (AARCH64_ARCH_NONE when it stands alone).  The dependency column also
   drives the transitive enable/disable closures computed below.
   Terminated by a NULL name.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
   AARCH64_ARCH_NONE},
  {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
   AARCH64_ARCH_NONE},
  {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
   AARCH64_ARCH_NONE},
  {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
   AARCH64_ARCH_NONE},
  {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
   AARCH64_ARCH_NONE},
  {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
   AARCH64_ARCH_NONE},
  {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP
		    | AARCH64_FEATURE_F16, 0)},
  {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
   AARCH64_ARCH_NONE},
  {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16
		    | AARCH64_FEATURE_SIMD
		    | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
   AARCH64_ARCH_NONE},
  {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16
		    | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
   AARCH64_ARCH_NONE},
  {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
   AARCH64_ARCH_NONE},
  {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
   AARCH64_ARCH_NONE},
  {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
   AARCH64_ARCH_NONE},
  {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
   AARCH64_ARCH_NONE},
  {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
   AARCH64_ARCH_NONE},
  {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
   AARCH64_ARCH_NONE},
  {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
   AARCH64_ARCH_NONE},
  {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
   AARCH64_ARCH_NONE},
  {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
   AARCH64_ARCH_NONE},
  /* SVE2 extensions all require base SVE2 (and hence, transitively,
     SVE and its own dependencies).  */
  {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  /* SME and its optional double-precision extensions.  */
  {"sme", AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME
		    | AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME
		    | AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
   AARCH64_ARCH_NONE},
  {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
   AARCH64_ARCH_NONE},
  {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64", AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
   AARCH64_ARCH_NONE},
  {"flagm", AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
   AARCH64_ARCH_NONE},
  {"pauth", AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
   AARCH64_ARCH_NONE},
  {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
9937
/* A command-line option that takes an argument, matched by prefix in
   md_parse_option (e.g. "mcpu=").  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
9945
9946 /* Transitive closure of features depending on set. */
9947 static aarch64_feature_set
9948 aarch64_feature_disable_set (aarch64_feature_set set)
9949 {
9950 const struct aarch64_option_cpu_value_table *opt;
9951 aarch64_feature_set prev = 0;
9952
9953 while (prev != set) {
9954 prev = set;
9955 for (opt = aarch64_features; opt->name != NULL; opt++)
9956 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9957 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9958 }
9959 return set;
9960 }
9961
9962 /* Transitive closure of dependencies of set. */
9963 static aarch64_feature_set
9964 aarch64_feature_enable_set (aarch64_feature_set set)
9965 {
9966 const struct aarch64_option_cpu_value_table *opt;
9967 aarch64_feature_set prev = 0;
9968
9969 while (prev != set) {
9970 prev = set;
9971 for (opt = aarch64_features; opt->name != NULL; opt++)
9972 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9973 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9974 }
9975 return set;
9976 }
9977
9978 static int
9979 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9980 bool ext_only)
9981 {
9982 /* We insist on extensions being added before being removed. We achieve
9983 this by using the ADDING_VALUE variable to indicate whether we are
9984 adding an extension (1) or removing it (0) and only allowing it to
9985 change in the order -1 -> 1 -> 0. */
9986 int adding_value = -1;
9987 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9988
9989 /* Copy the feature set, so that we can modify it. */
9990 *ext_set = **opt_p;
9991 *opt_p = ext_set;
9992
9993 while (str != NULL && *str != 0)
9994 {
9995 const struct aarch64_option_cpu_value_table *opt;
9996 const char *ext = NULL;
9997 int optlen;
9998
9999 if (!ext_only)
10000 {
10001 if (*str != '+')
10002 {
10003 as_bad (_("invalid architectural extension"));
10004 return 0;
10005 }
10006
10007 ext = strchr (++str, '+');
10008 }
10009
10010 if (ext != NULL)
10011 optlen = ext - str;
10012 else
10013 optlen = strlen (str);
10014
10015 if (optlen >= 2 && startswith (str, "no"))
10016 {
10017 if (adding_value != 0)
10018 adding_value = 0;
10019 optlen -= 2;
10020 str += 2;
10021 }
10022 else if (optlen > 0)
10023 {
10024 if (adding_value == -1)
10025 adding_value = 1;
10026 else if (adding_value != 1)
10027 {
10028 as_bad (_("must specify extensions to add before specifying "
10029 "those to remove"));
10030 return false;
10031 }
10032 }
10033
10034 if (optlen == 0)
10035 {
10036 as_bad (_("missing architectural extension"));
10037 return 0;
10038 }
10039
10040 gas_assert (adding_value != -1);
10041
10042 for (opt = aarch64_features; opt->name != NULL; opt++)
10043 if (strncmp (opt->name, str, optlen) == 0)
10044 {
10045 aarch64_feature_set set;
10046
10047 /* Add or remove the extension. */
10048 if (adding_value)
10049 {
10050 set = aarch64_feature_enable_set (opt->value);
10051 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10052 }
10053 else
10054 {
10055 set = aarch64_feature_disable_set (opt->value);
10056 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10057 }
10058 break;
10059 }
10060
10061 if (opt->name == NULL)
10062 {
10063 as_bad (_("unknown architectural extension `%s'"), str);
10064 return 0;
10065 }
10066
10067 str = ext;
10068 };
10069
10070 return 1;
10071 }
10072
10073 static int
10074 aarch64_parse_cpu (const char *str)
10075 {
10076 const struct aarch64_cpu_option_table *opt;
10077 const char *ext = strchr (str, '+');
10078 size_t optlen;
10079
10080 if (ext != NULL)
10081 optlen = ext - str;
10082 else
10083 optlen = strlen (str);
10084
10085 if (optlen == 0)
10086 {
10087 as_bad (_("missing cpu name `%s'"), str);
10088 return 0;
10089 }
10090
10091 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10092 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10093 {
10094 mcpu_cpu_opt = &opt->value;
10095 if (ext != NULL)
10096 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10097
10098 return 1;
10099 }
10100
10101 as_bad (_("unknown cpu `%s'"), str);
10102 return 0;
10103 }
10104
10105 static int
10106 aarch64_parse_arch (const char *str)
10107 {
10108 const struct aarch64_arch_option_table *opt;
10109 const char *ext = strchr (str, '+');
10110 size_t optlen;
10111
10112 if (ext != NULL)
10113 optlen = ext - str;
10114 else
10115 optlen = strlen (str);
10116
10117 if (optlen == 0)
10118 {
10119 as_bad (_("missing architecture name `%s'"), str);
10120 return 0;
10121 }
10122
10123 for (opt = aarch64_archs; opt->name != NULL; opt++)
10124 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10125 {
10126 march_cpu_opt = &opt->value;
10127 if (ext != NULL)
10128 return aarch64_parse_features (ext, &march_cpu_opt, false);
10129
10130 return 1;
10131 }
10132
10133 as_bad (_("unknown architecture `%s'\n"), str);
10134 return 0;
10135 }
10136
/* ABIs.  Maps an -mabi= argument to its internal code.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name, e.g. "lp64".  */
  enum aarch64_abi_type value;	/* Corresponding internal ABI code.  */
};
10143
/* ABI names accepted by -mabi=.  Unlike the other option tables there
   is no NULL sentinel: aarch64_parse_abi iterates with ARRAY_SIZE.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
10148
10149 static int
10150 aarch64_parse_abi (const char *str)
10151 {
10152 unsigned int i;
10153
10154 if (str[0] == '\0')
10155 {
10156 as_bad (_("missing abi name `%s'"), str);
10157 return 0;
10158 }
10159
10160 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10161 if (strcmp (str, aarch64_abis[i].name) == 0)
10162 {
10163 aarch64_abi = aarch64_abis[i].value;
10164 return 1;
10165 }
10166
10167 as_bad (_("unknown abi `%s'\n"), str);
10168 return 0;
10169 }
10170
/* Argument-taking options, matched by prefix in md_parse_option; the
   remainder of the argument is handed to the entry's parse function.
   Terminated by a NULL option.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10182
10183 int
10184 md_parse_option (int c, const char *arg)
10185 {
10186 struct aarch64_option_table *opt;
10187 struct aarch64_long_option_table *lopt;
10188
10189 switch (c)
10190 {
10191 #ifdef OPTION_EB
10192 case OPTION_EB:
10193 target_big_endian = 1;
10194 break;
10195 #endif
10196
10197 #ifdef OPTION_EL
10198 case OPTION_EL:
10199 target_big_endian = 0;
10200 break;
10201 #endif
10202
10203 case 'a':
10204 /* Listing option. Just ignore these, we don't support additional
10205 ones. */
10206 return 0;
10207
10208 default:
10209 for (opt = aarch64_opts; opt->option != NULL; opt++)
10210 {
10211 if (c == opt->option[0]
10212 && ((arg == NULL && opt->option[1] == 0)
10213 || streq (arg, opt->option + 1)))
10214 {
10215 /* If the option is deprecated, tell the user. */
10216 if (opt->deprecated != NULL)
10217 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
10218 arg ? arg : "", _(opt->deprecated));
10219
10220 if (opt->var != NULL)
10221 *opt->var = opt->value;
10222
10223 return 1;
10224 }
10225 }
10226
10227 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10228 {
10229 /* These options are expected to have an argument. */
10230 if (c == lopt->option[0]
10231 && arg != NULL
10232 && startswith (arg, lopt->option + 1))
10233 {
10234 /* If the option is deprecated, tell the user. */
10235 if (lopt->deprecated != NULL)
10236 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
10237 _(lopt->deprecated));
10238
10239 /* Call the sup-option parser. */
10240 return lopt->func (arg + strlen (lopt->option) - 1);
10241 }
10242 }
10243
10244 return 0;
10245 }
10246
10247 return 1;
10248 }
10249
10250 void
10251 md_show_usage (FILE * fp)
10252 {
10253 struct aarch64_option_table *opt;
10254 struct aarch64_long_option_table *lopt;
10255
10256 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10257
10258 for (opt = aarch64_opts; opt->option != NULL; opt++)
10259 if (opt->help != NULL)
10260 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10261
10262 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10263 if (lopt->help != NULL)
10264 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10265
10266 #ifdef OPTION_EB
10267 fprintf (fp, _("\
10268 -EB assemble code for a big-endian cpu\n"));
10269 #endif
10270
10271 #ifdef OPTION_EL
10272 fprintf (fp, _("\
10273 -EL assemble code for a little-endian cpu\n"));
10274 #endif
10275 }
10276
10277 /* Parse a .cpu directive. */
10278
10279 static void
10280 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10281 {
10282 const struct aarch64_cpu_option_table *opt;
10283 char saved_char;
10284 char *name;
10285 char *ext;
10286 size_t optlen;
10287
10288 name = input_line_pointer;
10289 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10290 input_line_pointer++;
10291 saved_char = *input_line_pointer;
10292 *input_line_pointer = 0;
10293
10294 ext = strchr (name, '+');
10295
10296 if (ext != NULL)
10297 optlen = ext - name;
10298 else
10299 optlen = strlen (name);
10300
10301 /* Skip the first "all" entry. */
10302 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10303 if (strlen (opt->name) == optlen
10304 && strncmp (name, opt->name, optlen) == 0)
10305 {
10306 mcpu_cpu_opt = &opt->value;
10307 if (ext != NULL)
10308 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10309 return;
10310
10311 cpu_variant = *mcpu_cpu_opt;
10312
10313 *input_line_pointer = saved_char;
10314 demand_empty_rest_of_line ();
10315 return;
10316 }
10317 as_bad (_("unknown cpu `%s'"), name);
10318 *input_line_pointer = saved_char;
10319 ignore_rest_of_line ();
10320 }
10321
10322
10323 /* Parse a .arch directive. */
10324
10325 static void
10326 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10327 {
10328 const struct aarch64_arch_option_table *opt;
10329 char saved_char;
10330 char *name;
10331 char *ext;
10332 size_t optlen;
10333
10334 name = input_line_pointer;
10335 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10336 input_line_pointer++;
10337 saved_char = *input_line_pointer;
10338 *input_line_pointer = 0;
10339
10340 ext = strchr (name, '+');
10341
10342 if (ext != NULL)
10343 optlen = ext - name;
10344 else
10345 optlen = strlen (name);
10346
10347 /* Skip the first "all" entry. */
10348 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10349 if (strlen (opt->name) == optlen
10350 && strncmp (name, opt->name, optlen) == 0)
10351 {
10352 mcpu_cpu_opt = &opt->value;
10353 if (ext != NULL)
10354 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10355 return;
10356
10357 cpu_variant = *mcpu_cpu_opt;
10358
10359 *input_line_pointer = saved_char;
10360 demand_empty_rest_of_line ();
10361 return;
10362 }
10363
10364 as_bad (_("unknown architecture `%s'\n"), name);
10365 *input_line_pointer = saved_char;
10366 ignore_rest_of_line ();
10367 }
10368
10369 /* Parse a .arch_extension directive. */
10370
10371 static void
10372 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10373 {
10374 char saved_char;
10375 char *ext = input_line_pointer;;
10376
10377 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10378 input_line_pointer++;
10379 saved_char = *input_line_pointer;
10380 *input_line_pointer = 0;
10381
10382 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10383 return;
10384
10385 cpu_variant = *mcpu_cpu_opt;
10386
10387 *input_line_pointer = saved_char;
10388 demand_empty_rest_of_line ();
10389 }
10390
/* Copy symbol information.  Called via the TC_COPY_SYMBOL_ATTRIBUTES
   hook; propagates the AArch64-specific per-symbol flag word from SRC
   to DEST.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10398
10399 #ifdef OBJ_ELF
10400 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10401 This is needed so AArch64 specific st_other values can be independently
10402 specified for an IFUNC resolver (that is called by the dynamic linker)
10403 and the symbol it resolves (aliased to the resolver). In particular,
10404 if a function symbol has special st_other value set via directives,
10405 then attaching an IFUNC resolver to that symbol should not override
10406 the st_other setting. Requiring the directive on the IFUNC resolver
10407 symbol would be unexpected and problematic in C code, where the two
10408 symbols appear as two independent function declarations. */
10409
10410 void
10411 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10412 {
10413 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10414 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10415 if (srcelf->size)
10416 {
10417 if (destelf->size == NULL)
10418 destelf->size = XNEW (expressionS);
10419 *destelf->size = *srcelf->size;
10420 }
10421 else
10422 {
10423 free (destelf->size);
10424 destelf->size = NULL;
10425 }
10426 S_SET_SIZE (dest, S_GET_SIZE (src));
10427 }
10428 #endif