Arm64: re-work PR gas/27217 fix
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2022 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* SME horizontal or vertical slice indicator, encoded in "V".
103 Values:
104 0 - Horizontal
105 1 - vertical
106 */
107 enum sme_hv_slice
108 {
109 HV_horizontal = 0,
110 HV_vertical = 1
111 };
112
113 /* Bits for DEFINED field in vector_type_el. */
114 #define NTA_HASTYPE 1
115 #define NTA_HASINDEX 2
116 #define NTA_HASVARWIDTH 4
117
118 struct vector_type_el
119 {
120 enum vector_el_type type;
121 unsigned char defined;
122 unsigned width;
123 int64_t index;
124 };
125
126 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
127
128 struct reloc
129 {
130 bfd_reloc_code_real_type type;
131 expressionS exp;
132 int pc_rel;
133 enum aarch64_opnd opnd;
134 uint32_t flags;
135 unsigned need_libopcodes_p : 1;
136 };
137
138 struct aarch64_instruction
139 {
140 /* libopcodes structure for instruction intermediate representation. */
141 aarch64_inst base;
142 /* Record assembly errors found during the parsing. */
143 struct
144 {
145 enum aarch64_operand_error_kind kind;
146 const char *error;
147 } parsing_error;
148 /* The condition that appears in the assembly line. */
149 int cond;
150 /* Relocation information (including the GAS internal fixup). */
151 struct reloc reloc;
152 /* Need to generate an immediate in the literal pool. */
153 unsigned gen_lit_pool : 1;
154 };
155
156 typedef struct aarch64_instruction aarch64_instruction;
157
158 static aarch64_instruction inst;
159
160 static bool parse_operands (char *, const aarch64_opcode *);
161 static bool programmer_friendly_fixup (aarch64_instruction *);
162
163 /* Diagnostics inline function utilities.
164
165 These are lightweight utilities which should only be called by parse_operands
166 and other parsers. GAS processes each assembly line by parsing it against
167 instruction template(s), in the case of multiple templates (for the same
168 mnemonic name), those templates are tried one by one until one succeeds or
169 all fail. An assembly line may fail a few templates before being
170 successfully parsed; an error saved here in most cases is not a user error
171 but an error indicating the current template is not the right template.
172 Therefore it is very important that errors can be saved at a low cost during
173 the parsing; we don't want to slow down the whole parsing by recording
174 non-user errors in detail.
175
176 Remember that the objective is to help GAS pick up the most appropriate
177 error message in the case of multiple templates, e.g. FMOV which has 8
178 templates. */
179
180 static inline void
181 clear_error (void)
182 {
183 inst.parsing_error.kind = AARCH64_OPDE_NIL;
184 inst.parsing_error.error = NULL;
185 }
186
187 static inline bool
188 error_p (void)
189 {
190 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
191 }
192
193 static inline const char *
194 get_error_message (void)
195 {
196 return inst.parsing_error.error;
197 }
198
199 static inline enum aarch64_operand_error_kind
200 get_error_kind (void)
201 {
202 return inst.parsing_error.kind;
203 }
204
205 static inline void
206 set_error (enum aarch64_operand_error_kind kind, const char *error)
207 {
208 inst.parsing_error.kind = kind;
209 inst.parsing_error.error = error;
210 }
211
212 static inline void
213 set_recoverable_error (const char *error)
214 {
215 set_error (AARCH64_OPDE_RECOVERABLE, error);
216 }
217
218 /* Use the DESC field of the corresponding aarch64_operand entry to compose
219 the error message. */
220 static inline void
221 set_default_error (void)
222 {
223 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
224 }
225
226 static inline void
227 set_syntax_error (const char *error)
228 {
229 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
230 }
231
232 static inline void
233 set_first_syntax_error (const char *error)
234 {
235 if (! error_p ())
236 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
237 }
238
239 static inline void
240 set_fatal_syntax_error (const char *error)
241 {
242 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
243 }
244 \f
245 /* Return value for certain parsers when the parsing fails; those parsers
246 return the information of the parsed result, e.g. register number, on
247 success. */
248 #define PARSE_FAIL -1
249
250 /* This is an invalid condition code that means no conditional field is
251 present. */
252 #define COND_ALWAYS 0x10
253
254 typedef struct
255 {
256 const char *template;
257 uint32_t value;
258 } asm_nzcv;
259
260 struct reloc_entry
261 {
262 char *name;
263 bfd_reloc_code_real_type reloc;
264 };
265
266 /* Macros to define the register types and masks for the purpose
267 of parsing. */
268
269 #undef AARCH64_REG_TYPES
270 #define AARCH64_REG_TYPES \
271 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
272 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
273 BASIC_REG_TYPE(SP_32) /* wsp */ \
274 BASIC_REG_TYPE(SP_64) /* sp */ \
275 BASIC_REG_TYPE(Z_32) /* wzr */ \
276 BASIC_REG_TYPE(Z_64) /* xzr */ \
277 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
278 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
279 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
280 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
281 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
282 BASIC_REG_TYPE(VN) /* v[0-31] */ \
283 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
284 BASIC_REG_TYPE(PN) /* p[0-15] */ \
285 BASIC_REG_TYPE(ZA) /* za[0-15] */ \
286 BASIC_REG_TYPE(ZAH) /* za[0-15]h */ \
287 BASIC_REG_TYPE(ZAV) /* za[0-15]v */ \
288 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
289 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
290 /* Typecheck: same, plus SVE registers. */ \
291 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
292 | REG_TYPE(ZN)) \
293 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
294 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
295 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
296 /* Typecheck: same, plus SVE registers. */ \
297 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
298 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
299 | REG_TYPE(ZN)) \
300 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
301 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
303 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
304 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
306 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
307 /* Typecheck: any [BHSDQ]P FP. */ \
308 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
309 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
310 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
311 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
313 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
314 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
315 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
316 be used for SVE instructions, since Zn and Pn are valid symbols \
317 in other contexts. */ \
318 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
320 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
321 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
322 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
323 | REG_TYPE(ZN) | REG_TYPE(PN)) \
324 /* Any integer register; used for error messages only. */ \
325 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
326 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
327 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
328 /* Pseudo type to mark the end of the enumerator sequence. */ \
329 BASIC_REG_TYPE(MAX)
330
331 #undef BASIC_REG_TYPE
332 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
333 #undef MULTI_REG_TYPE
334 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
335
336 /* Register type enumerators. */
337 typedef enum aarch64_reg_type_
338 {
339 /* A list of REG_TYPE_*. */
340 AARCH64_REG_TYPES
341 } aarch64_reg_type;
342
343 #undef BASIC_REG_TYPE
344 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
345 #undef REG_TYPE
346 #define REG_TYPE(T) (1 << REG_TYPE_##T)
347 #undef MULTI_REG_TYPE
348 #define MULTI_REG_TYPE(T,V) V,
349
350 /* Structure for a hash table entry for a register. */
351 typedef struct
352 {
353 const char *name;
354 unsigned char number;
355 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
356 unsigned char builtin;
357 } reg_entry;
358
359 /* Values indexed by aarch64_reg_type to assist the type checking. */
360 static const unsigned reg_type_masks[] =
361 {
362 AARCH64_REG_TYPES
363 };
364
365 #undef BASIC_REG_TYPE
366 #undef REG_TYPE
367 #undef MULTI_REG_TYPE
368 #undef AARCH64_REG_TYPES
369
370 /* Diagnostics used when we don't get a register of the expected type.
371 Note: this has to synchronized with aarch64_reg_type definitions
372 above. */
373 static const char *
374 get_reg_expected_msg (aarch64_reg_type reg_type)
375 {
376 const char *msg;
377
378 switch (reg_type)
379 {
380 case REG_TYPE_R_32:
381 msg = N_("integer 32-bit register expected");
382 break;
383 case REG_TYPE_R_64:
384 msg = N_("integer 64-bit register expected");
385 break;
386 case REG_TYPE_R_N:
387 msg = N_("integer register expected");
388 break;
389 case REG_TYPE_R64_SP:
390 msg = N_("64-bit integer or SP register expected");
391 break;
392 case REG_TYPE_SVE_BASE:
393 msg = N_("base register expected");
394 break;
395 case REG_TYPE_R_Z:
396 msg = N_("integer or zero register expected");
397 break;
398 case REG_TYPE_SVE_OFFSET:
399 msg = N_("offset register expected");
400 break;
401 case REG_TYPE_R_SP:
402 msg = N_("integer or SP register expected");
403 break;
404 case REG_TYPE_R_Z_SP:
405 msg = N_("integer, zero or SP register expected");
406 break;
407 case REG_TYPE_FP_B:
408 msg = N_("8-bit SIMD scalar register expected");
409 break;
410 case REG_TYPE_FP_H:
411 msg = N_("16-bit SIMD scalar or floating-point half precision "
412 "register expected");
413 break;
414 case REG_TYPE_FP_S:
415 msg = N_("32-bit SIMD scalar or floating-point single precision "
416 "register expected");
417 break;
418 case REG_TYPE_FP_D:
419 msg = N_("64-bit SIMD scalar or floating-point double precision "
420 "register expected");
421 break;
422 case REG_TYPE_FP_Q:
423 msg = N_("128-bit SIMD scalar or floating-point quad precision "
424 "register expected");
425 break;
426 case REG_TYPE_R_Z_BHSDQ_V:
427 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
428 msg = N_("register expected");
429 break;
430 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
431 msg = N_("SIMD scalar or floating-point register expected");
432 break;
433 case REG_TYPE_VN: /* any V reg */
434 msg = N_("vector register expected");
435 break;
436 case REG_TYPE_ZN:
437 msg = N_("SVE vector register expected");
438 break;
439 case REG_TYPE_PN:
440 msg = N_("SVE predicate register expected");
441 break;
442 default:
443 as_fatal (_("invalid register type %d"), reg_type);
444 }
445 return msg;
446 }
447
448 /* Some well known registers that we refer to directly elsewhere. */
449 #define REG_SP 31
450 #define REG_ZR 31
451
452 /* Instructions take 4 bytes in the object file. */
453 #define INSN_SIZE 4
454
455 static htab_t aarch64_ops_hsh;
456 static htab_t aarch64_cond_hsh;
457 static htab_t aarch64_shift_hsh;
458 static htab_t aarch64_sys_regs_hsh;
459 static htab_t aarch64_pstatefield_hsh;
460 static htab_t aarch64_sys_regs_ic_hsh;
461 static htab_t aarch64_sys_regs_dc_hsh;
462 static htab_t aarch64_sys_regs_at_hsh;
463 static htab_t aarch64_sys_regs_tlbi_hsh;
464 static htab_t aarch64_sys_regs_sr_hsh;
465 static htab_t aarch64_reg_hsh;
466 static htab_t aarch64_barrier_opt_hsh;
467 static htab_t aarch64_nzcv_hsh;
468 static htab_t aarch64_pldop_hsh;
469 static htab_t aarch64_hint_opt_hsh;
470
471 /* Stuff needed to resolve the label ambiguity
472 As:
473 ...
474 label: <insn>
475 may differ from:
476 ...
477 label:
478 <insn> */
479
480 static symbolS *last_label_seen;
481
482 /* Literal pool structure. Held on a per-section
483 and per-sub-section basis. */
484
485 #define MAX_LITERAL_POOL_SIZE 1024
486 typedef struct literal_expression
487 {
488 expressionS exp;
489 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
490 LITTLENUM_TYPE * bignum;
491 } literal_expression;
492
493 typedef struct literal_pool
494 {
495 literal_expression literals[MAX_LITERAL_POOL_SIZE];
496 unsigned int next_free_entry;
497 unsigned int id;
498 symbolS *symbol;
499 segT section;
500 subsegT sub_section;
501 int size;
502 struct literal_pool *next;
503 } literal_pool;
504
505 /* Pointer to a linked list of literal pools. */
506 static literal_pool *list_of_pools = NULL;
507 \f
508 /* Pure syntax. */
509
510 /* This array holds the chars that always start a comment. If the
511 pre-processor is disabled, these aren't very useful. */
512 const char comment_chars[] = "";
513
514 /* This array holds the chars that only start a comment at the beginning of
515 a line. If the line seems to have the form '# 123 filename'
516 .line and .file directives will appear in the pre-processed output. */
517 /* Note that input_file.c hand checks for '#' at the beginning of the
518 first line of the input file. This is because the compiler outputs
519 #NO_APP at the beginning of its output. */
520 /* Also note that comments like this one will always work. */
521 const char line_comment_chars[] = "#";
522
523 const char line_separator_chars[] = ";";
524
525 /* Chars that can be used to separate mant
526 from exp in floating point numbers. */
527 const char EXP_CHARS[] = "eE";
528
529 /* Chars that mean this number is a floating point constant. */
530 /* As in 0f12.456 */
531 /* or 0d1.2345e12 */
532
533 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
534
535 /* Prefix character that indicates the start of an immediate value. */
536 #define is_immediate_prefix(C) ((C) == '#')
537
538 /* Separator character handling. */
539
540 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
541
/* If **STR is the character C, step *STR past it and return TRUE;
   otherwise leave *STR untouched and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
553
554 #define skip_past_comma(str) skip_past_char (str, ',')
555
556 /* Arithmetic expressions (possibly involving symbols). */
557
558 static bool in_aarch64_get_expression = false;
559
560 /* Third argument to aarch64_get_expression. */
561 #define GE_NO_PREFIX false
562 #define GE_OPT_PREFIX true
563
564 /* Fourth argument to aarch64_get_expression. */
565 #define ALLOW_ABSENT false
566 #define REJECT_ABSENT true
567
/* Return TRUE if the string pointed to by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.

   On success *STR is advanced past the consumed text; on failure an
   error is recorded via set_{fatal,first}_syntax_error.  */

static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () reads from input_line_pointer, so temporarily point
     it at our string and restore it afterwards on every path.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Let md_operand know it is being called from here, so that it can
     flag unparsable operands as O_illegal (checked below).  */
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#' prefix promises an immediate, so a bad expression after
	 it is fatal; otherwise keep any earlier, more specific error.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
634
635 /* Turn a string in input_line_pointer into a floating point constant
636 of type TYPE, and store the appropriate bytes in *LITP. The number
637 of LITTLENUMS emitted is stored in *SIZEP. An error message is
638 returned, or NULL on OK. */
639
640 const char *
641 md_atof (int type, char *litP, int *sizeP)
642 {
643 return ieee_md_atof (type, litP, sizeP, target_big_endian);
644 }
645
646 /* We handle all bad expressions here, so that we can report the faulty
647 instruction in the error message. */
648 void
649 md_operand (expressionS * exp)
650 {
651 if (in_aarch64_get_expression)
652 exp->X_op = O_illegal;
653 }
654
655 /* Immediate values. */
656
657 /* Errors may be set multiple times during parsing or bit encoding
658 (particularly in the Neon bits), but usually the earliest error which is set
659 will be the most meaningful. Avoid overwriting it with later (cascading)
660 errors by calling this function. */
661
/* Record ERROR as a syntax error unless an earlier (usually more
   meaningful) error has already been recorded.  */
static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
668
/* Similar to first_error, but this function accepts a printf-style
   formatted error message.  The formatted text is kept in a static
   buffer, so it stays valid until the diagnostic is emitted at the
   end of the assembly line.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  /* Only format when no earlier error is pending, mirroring
     first_error: the earliest error is the most meaningful one.  */
  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* Messages are expected to fit; a truncated or failed format
	 indicates a programming error.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
693
694 /* Register parsing. */
695
696 /* Generic register parser which is called by other specialized
697 register parsers.
698 CCP points to what should be the beginning of a register name.
699 If it is indeed a valid register name, advance CCP over it and
700 return the reg_entry structure; otherwise return NULL.
701 It does not issue diagnostics. */
702
703 static reg_entry *
704 parse_reg (char **ccp)
705 {
706 char *start = *ccp;
707 char *p;
708 reg_entry *reg;
709
710 #ifdef REGISTER_PREFIX
711 if (*start != REGISTER_PREFIX)
712 return NULL;
713 start++;
714 #endif
715
716 p = start;
717 if (!ISALPHA (*p) || !is_name_beginner (*p))
718 return NULL;
719
720 do
721 p++;
722 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
723
724 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
725
726 if (!reg)
727 return NULL;
728
729 *ccp = p;
730 return reg;
731 }
732
733 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
734 return FALSE. */
735 static bool
736 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
737 {
738 return (reg_type_masks[type] & (1 << reg->type)) != 0;
739 }
740
741 /* Try to parse a base or offset register. Allow SVE base and offset
742 registers if REG_TYPE includes SVE registers. Return the register
743 entry on success, setting *QUALIFIER to the register qualifier.
744 Return null otherwise.
745
746 Note that this function does not issue any diagnostics. */
747
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  /* Derive the operand qualifier from the register's width; only
     integer and (when permitted by REG_TYPE) SVE Z registers are
     acceptable as address base/offset registers.  */
  switch (reg->type)
    {
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE register is only valid if REG_TYPE allows it, and it
	 must carry an explicit .s or .d element-size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the two-character suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  /* Success: commit the consumed text to the caller.  */
  *ccp = str;

  return reg;
}
798
799 /* Try to parse a base or offset register. Return the register entry
800 on success, setting *QUALIFIER to the register qualifier. Return null
801 otherwise.
802
803 Note that this function does not issue any diagnostics. */
804
805 static const reg_entry *
806 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
807 {
808 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
809 }
810
811 /* Parse the qualifier of a vector register or vector element of type
812 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
813 succeeds; otherwise return FALSE.
814
815 Accept only one occurrence of:
816 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
817 b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector/predicate registers take no numeric lane count; a plain
     element letter (e.g. ".s") means width 0 (variable/scalar).  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only valid for SVE registers or as a single
	 128-bit lane (e.g. "1q").  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* A lane count, when given, must describe a 64- or 128-bit vector,
     or one of the special short forms 2h and 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
896
897 /* *STR contains an SVE zero/merge predication suffix. Parse it into
898 *PARSED_TYPE and point *STR at the end of the suffix. */
899
900 static bool
901 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
902 {
903 char *ptr = *str;
904
905 /* Skip '/'. */
906 gas_assert (*ptr == '/');
907 ptr++;
908 switch (TOLOWER (*ptr))
909 {
910 case 'z':
911 parsed_type->type = NT_zero;
912 break;
913 case 'm':
914 parsed_type->type = NT_merge;
915 break;
916 default:
917 if (*ptr != '\0' && *ptr != ',')
918 first_error_fmt (_("unexpected character `%c' in predication type"),
919 *ptr);
920 else
921 first_error (_("missing predication type"));
922 return false;
923 }
924 parsed_type->width = 0;
925 *str = ptr + 1;
926 return true;
927 }
928
929 /* Parse a register of the type TYPE.
930
931 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
932 name or the parsed register is not of TYPE.
933
934 Otherwise return the register number, and optionally fill in the actual
935 type of the register in *RTYPE when multiple alternatives were given, and
936 return the register shape and element index information in *TYPEINFO.
937
938 IN_REG_LIST should be set with TRUE if the caller is parsing a register
939 list. */
940
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bool in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start with an empty shape/index description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE from a possible multi-type mask to the actual type of
     the register that was found.  */
  type = reg->type;

  /* Parse an optional ".<T>" shape suffix (V/Z/P registers) or a "/z"
     or "/m" predication suffix (P registers only).  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* Parse an optional "[<index>]" element index.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1064
1065 /* Parse register.
1066
1067 Return the register number on success; return PARSE_FAIL otherwise.
1068
1069 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1070 the register (e.g. NEON double or quad reg when either has been requested).
1071
1072 If this is a NEON vector register with additional type information, fill
1073 in the struct pointed to by VECTYPE (if non-NULL).
1074
1075 This parser does not handle register list. */
1076
1077 static int
1078 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1079 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1080 {
1081 struct vector_type_el atype;
1082 char *str = *ccp;
1083 int reg = parse_typed_reg (&str, type, rtype, &atype,
1084 /*in_reg_list= */ false);
1085
1086 if (reg == PARSE_FAIL)
1087 return PARSE_FAIL;
1088
1089 if (vectype)
1090 *vectype = atype;
1091
1092 *ccp = str;
1093
1094 return reg;
1095 }
1096
1097 static inline bool
1098 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1099 {
1100 return
1101 e1.type == e2.type
1102 && e1.defined == e2.defined
1103 && e1.width == e2.width && e1.index == e2.index;
1104 }
1105
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;			/* Number of the last register parsed.  */
  val_range = -1;		/* First register of a "Va-Vb" range.  */
  in_range = 0;			/* Non-zero while inside a '-' range.  */
  /* Note the loop condition below: a comma continues the list; a bare
     '-' continues a register range, and the comma operator records
     that fact in IN_RANGE as a side effect.  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ true);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  /* The range start has already been accumulated; only the
	     remaining registers need adding below.  */
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* The first register fixes the shape every later element
	     must match.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    /* Pack each register number into its 5-bit slot.  */
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* If any element carried an index, a shared [index] must follow the
     closing brace.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1266
1267 /* Directives: register aliases. */
1268
1269 static reg_entry *
1270 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1271 {
1272 reg_entry *new;
1273 const char *name;
1274
1275 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1276 {
1277 if (new->builtin)
1278 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1279 str);
1280
1281 /* Only warn about a redefinition if it's not defined as the
1282 same register. */
1283 else if (new->number != number || new->type != type)
1284 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1285
1286 return NULL;
1287 }
1288
1289 name = xstrdup (str);
1290 new = XNEW (reg_entry);
1291
1292 new->name = name;
1293 new->number = number;
1294 new->type = type;
1295 new->builtin = false;
1296
1297 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1298
1299 return new;
1300 }
1301
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.

   NEWNAME points at the candidate alias name; P points just past it,
   where the text " .req " is expected.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      /* Still TRUE: this was a .req, just a broken one.  */
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Skip the upper-case variant when it is spelled the same as
	 the name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1381
/* Handler for a bare ".req" pseudo-op.  Should never be called, as
   .req goes between the alias and the register name, not at the
   beginning of the line; reaching here is therefore always a user
   syntax error.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1389
/* The .unreq directive deletes an alias which was previously defined
   by .req.  For example:

       my_alias .req r11
       .unreq my_alias

   The all-upper-case and all-lower-case variants created alongside the
   alias are deleted as well.  */

static void
s_unreq (int a ATTRIBUTE_UNUSED)
{
  char *name;
  char saved_char;

  name = input_line_pointer;

  /* Find the end of the alias name and NUL-terminate it in place,
     remembering the byte we overwrite so it can be restored below.  */
  while (*input_line_pointer != 0
	 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
    ++input_line_pointer;

  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  if (!*name)
    as_bad (_("invalid syntax for .unreq directive"));
  else
    {
      reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);

      if (!reg)
	as_bad (_("unknown register alias '%s'"), name);
      else if (reg->builtin)
	as_warn (_("ignoring attempt to undefine built-in register '%s'"),
		 name);
      else
	{
	  char *p;
	  char *nbuf;

	  str_hash_delete (aarch64_reg_hsh, name);
	  free ((char *) reg->name);
	  free (reg);

	  /* Also locate the all upper case and all lower case versions.
	     Do not complain if we cannot find one or the other as it
	     was probably deleted above.  */

	  nbuf = strdup (name);
	  for (p = nbuf; *p; p++)
	    *p = TOUPPER (*p);
	  reg = str_hash_find (aarch64_reg_hsh, nbuf);
	  if (reg)
	    {
	      str_hash_delete (aarch64_reg_hsh, nbuf);
	      free ((char *) reg->name);
	      free (reg);
	    }

	  for (p = nbuf; *p; p++)
	    *p = TOLOWER (*p);
	  reg = str_hash_find (aarch64_reg_hsh, nbuf);
	  if (reg)
	    {
	      str_hash_delete (aarch64_reg_hsh, nbuf);
	      free ((char *) reg->name);
	      free (reg);
	    }

	  free (nbuf);
	}
    }

  /* Put the clobbered byte back and finish the line.  */
  *input_line_pointer = saved_char;
  demand_empty_rest_of_line ();
}
1463
1464 /* Directives: Instruction set selection. */
1465
1466 #ifdef OBJ_ELF
1467 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1468 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1469 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1470 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1471
/* Create a new mapping symbol for the transition to STATE, at offset
   VALUE within FRAG.  Mapping symbols ($x for code, $d for data) are
   the untyped local symbols described in the AAELF64 spec quoted
   above.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* The new symbol supersedes an old one at the same offset.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1527
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   VALUE is the offset of the padding within FRAG, BYTES its length;
   STATE is the mapping state that resumes after the padding.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then restore STATE just past it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1555
static void mapping_state_2 (enum mstate state, int max_chars);

/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	/* Mark everything emitted before this point as data.  */
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1599
1600 /* Same as mapping_state, but MAX_CHARS bytes have already been
1601 allocated. Put the mapping symbol that far back. */
1602
1603 static void
1604 mapping_state_2 (enum mstate state, int max_chars)
1605 {
1606 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1607
1608 if (!SEG_NORMAL (now_seg))
1609 return;
1610
1611 if (mapstate == state)
1612 /* The mapping symbol has already been emitted.
1613 There is nothing else to do. */
1614 return;
1615
1616 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1617 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1618 }
1619 #else
1620 #define mapping_state(x) /* nothing */
1621 #define mapping_state_2(x, y) /* nothing */
1622 #endif
1623
/* Directives: sectioning and alignment.  */

/* Handle the .bss directive by switching to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1635
/* Handle the .even directive: align output to a 2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* Note the alignment so the section itself gets padded suitably.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1647
1648 /* Directives: Literal pools. */
1649
1650 static literal_pool *
1651 find_literal_pool (int size)
1652 {
1653 literal_pool *pool;
1654
1655 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1656 {
1657 if (pool->section == now_seg
1658 && pool->sub_section == now_subseg && pool->size == size)
1659 break;
1660 }
1661
1662 return pool;
1663 }
1664
1665 static literal_pool *
1666 find_or_make_literal_pool (int size)
1667 {
1668 /* Next literal pool ID number. */
1669 static unsigned int latest_pool_num = 1;
1670 literal_pool *pool;
1671
1672 pool = find_literal_pool (size);
1673
1674 if (pool == NULL)
1675 {
1676 /* Create a new pool. */
1677 pool = XNEW (literal_pool);
1678 if (!pool)
1679 return NULL;
1680
1681 /* Currently we always put the literal pool in the current text
1682 section. If we were generating "small" model code where we
1683 knew that all code and initialised data was within 1MB then
1684 we could output literals to mergeable, read-only data
1685 sections. */
1686
1687 pool->next_free_entry = 0;
1688 pool->section = now_seg;
1689 pool->sub_section = now_subseg;
1690 pool->size = size;
1691 pool->next = list_of_pools;
1692 pool->symbol = NULL;
1693
1694 /* Add it to the list. */
1695 list_of_pools = pool;
1696 }
1697
1698 /* New pools, and emptied pools, will have a NULL symbol. */
1699 if (pool->symbol == NULL)
1700 {
1701 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1702 &zero_address_frag, 0);
1703 pool->id = latest_pool_num++;
1704 }
1705
1706 /* Done. */
1707 return pool;
1708 }
1709
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  On success *EXP is
   rewritten to reference the pool entry: an offset from the pool's
   label symbol.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Identical constants share one entry.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* As do identical symbolic expressions.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP as a reference into the pool.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1769
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Fill in SYMBOLP with NAME (copied), SEGMENT, value VALU and owning
   fragment FRAG, then append it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME onto the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1820
1821
/* Handle the .ltorg (and .pool) directive: dump every pending literal
   pool (one per entry size) at the current location, then mark the
   pools empty so they can be refilled.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Walk the possible entry sizes: 4, 8 and 16 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 byte cannot appear in user-written symbol names, so
	 this label cannot clash with one.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Give the pool's placeholder symbol its real address.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1880
1881 #ifdef OBJ_ELF
1882 /* Forward declarations for functions below, in the MD interface
1883 section. */
1884 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1885 static struct reloc_table_entry * find_reloc_table_entry (char **);
1886
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Handle .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8):
   emit a comma-separated list of NBYTES-wide data values.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional #:reloc_suffix: after the symbol.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1942
/* Mark symbol that it follows a variant PCS convention.  */

static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();
  /* Record the convention in the symbol's ELF st_other flags.  */
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
1965 #endif /* OBJ_ELF */
1966
/* Output a 32-bit word, but mark as an instruction.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;		/* Count of words emitted so far.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* emit_expr writes in target byte order; pre-swap on big-endian
	 targets so the instruction encoding comes out little-endian.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Generate DWARF line information for the emitted instructions.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2023
2024 static void
2025 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2026 {
2027 demand_empty_rest_of_line ();
2028 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2029 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2030 }
2031
2032 #ifdef OBJ_ELF
/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.  */

static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Ensure this frag can hold the 4-byte instruction the fix-up
     attaches to.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2047
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2067
/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.  */

static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Ensure this frag can hold the 4-byte instruction the fix-up
     attaches to.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2082 #endif /* OBJ_ELF */
2083
static void s_aarch64_arch (int);
static void s_aarch64_cpu (int);
static void s_aarch64_arch_extension (int);

/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* .pool is a synonym for .ltorg.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* For data directives the argument is the element size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* For float_cons the argument is the format letter.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2121 \f
2122
2123 /* Check whether STR points to a register name followed by a comma or the
2124 end of line; REG_TYPE indicates which register types are checked
2125 against. Return TRUE if STR is such a register name; otherwise return
2126 FALSE. The function does not intend to produce any diagnostics, but since
2127 the register parser aarch64_reg_parse, which is called by this function,
2128 does produce diagnostics, we call clear_error to clear any diagnostics
2129 that may be generated by aarch64_reg_parse.
2130 Also, the function returns FALSE directly if there is any user error
2131 present at the function entry. This prevents the existing diagnostics
2132 state from being spoiled.
2133 The function currently serves parse_constant_immediate and
2134 parse_big_immediate only. */
2135 static bool
2136 reg_name_p (char *str, aarch64_reg_type reg_type)
2137 {
2138 int reg;
2139
2140 /* Prevent the diagnostics state from being spoiled. */
2141 if (error_p ())
2142 return false;
2143
2144 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2145
2146 /* Clear the parsing error that may be set by the reg parser. */
2147 clear_error ();
2148
2149 if (reg == PARSE_FAIL)
2150 return false;
2151
2152 skip_whitespace (str);
2153 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2154 return true;
2155
2156 return false;
2157 }
2158
2159 /* Parser functions used exclusively in instruction operands. */
2160
2161 /* Parse an immediate expression which may not be constant.
2162
2163 To prevent the expression parser from pushing a register name
2164 into the symbol table as an undefined symbol, firstly a check is
2165 done to find out whether STR is a register of type REG_TYPE followed
2166 by a comma or the end of line. Return FALSE if STR is such a string. */
2167
2168 static bool
2169 parse_immediate_expression (char **str, expressionS *exp,
2170 aarch64_reg_type reg_type)
2171 {
2172 if (reg_name_p (*str, reg_type))
2173 {
2174 set_recoverable_error (_("immediate operand required"));
2175 return false;
2176 }
2177
2178 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2179
2180 if (exp->X_op == O_absent)
2181 {
2182 set_fatal_syntax_error (_("missing immediate expression"));
2183 return false;
2184 }
2185
2186 return true;
2187 }
2188
2189 /* Constant immediate-value read function for use in insn parsing.
2190 STR points to the beginning of the immediate (with the optional
2191 leading #); *VAL receives the value. REG_TYPE says which register
2192 names should be treated as registers rather than as symbolic immediates.
2193
2194 Return TRUE on success; otherwise return FALSE. */
2195
2196 static bool
2197 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2198 {
2199 expressionS exp;
2200
2201 if (! parse_immediate_expression (str, &exp, reg_type))
2202 return false;
2203
2204 if (exp.X_op != O_constant)
2205 {
2206 set_syntax_error (_("constant expression required"));
2207 return false;
2208 }
2209
2210 *val = exp.X_add_number;
2211 return true;
2212 }
2213
/* Compress the single-precision image IMM into the 8-bit immediate
   encoding: bits 25:19 move down to bits 6:0 and the sign bit 31
   becomes bit 7.  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0] */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31]    -> b[7]   */
  return low7 | sign;
}
2220
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.

   A qualifying value has the bit pattern

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

   where n, e and each x are either 0 or 1 independently, with
   E == ~e.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* Expected value of bits 25-30: bit 30 (E) followed by five copies
     of its complement (eeeee).  */
  uint32_t expected = (((imm >> 30) & 0x1) == 0) ? 0x3e000000 : 0x40000000;

  /* The low 19 significand bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  return (imm & 0x7e000000) == expected;
}
2253
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so; leave *FPWORD untouched otherwise.

   A convertible double has the bit layout

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	if Eeee_eeee != 1111_1111

   where n, e, s and S are either 0 or 1 independently and where ~ is
   the inverse of E.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  uint32_t hi = imm >> 32;
  uint32_t lo = (uint32_t) imm;
  uint32_t expected;

  /* The bottom 29 significand bits would be dropped; they must be 0.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* Bits 58-61 must be three copies of the complement of bit 62 (E).  */
  expected = ((hi >> 30) & 0x1) == 0 ? 0x38000000 : 0x40000000;
  if ((hi & 0x78000000) != expected)
    return false;

  /* Reject Eeee_eeee == 1111_1111: the exponent would map onto the
     float Inf/NaN encoding.  */
  if ((hi & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (hi & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	    | ((hi << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	    | (lo >> 29);		/* 3 S bits.  */
  return true;
}
2301
2302 /* Return true if we should treat OPERAND as a double-precision
2303 floating-point operand rather than a single-precision one. */
2304 static bool
2305 double_precision_operand_p (const aarch64_opnd_info *operand)
2306 {
2307 /* Check for unsuffixed SVE registers, which are allowed
2308 for LDR and STR but not in instructions that require an
2309 immediate. We get better error messages if we arbitrarily
2310 pick one size, parse the immediate normally, and then
2311 report the match failure in the normal way. */
2312 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2313 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2314 }
2315
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* Skip an optional leading '#'.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the double encoding to a float encoding; reject it if
	     any precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision encoding must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name here is a recoverable error: the caller may retry
	 the operand as a register.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Parse a decimal floating-point literal; atof_ieee fills WORDS
	 with 16-bit littlenums, most significant first.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2391
2392 /* Less-generic immediate-value read function with the possibility of loading
2393 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2394 instructions.
2395
2396 To prevent the expression parser from pushing a register name into the
2397 symbol table as an undefined symbol, a check is firstly done to find
2398 out whether STR is a register of type REG_TYPE followed by a comma or
2399 the end of line. Return FALSE if STR is such a register. */
2400
2401 static bool
2402 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2403 {
2404 char *ptr = *str;
2405
2406 if (reg_name_p (ptr, reg_type))
2407 {
2408 set_syntax_error (_("immediate operand required"));
2409 return false;
2410 }
2411
2412 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2413
2414 if (inst.reloc.exp.X_op == O_constant)
2415 *imm = inst.reloc.exp.X_add_number;
2416
2417 *str = ptr;
2418
2419 return true;
2420 }
2421
2422 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2423 if NEED_LIBOPCODES is non-zero, the fixup will need
2424 assistance from the libopcodes. */
2425
2426 static inline void
2427 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2428 const aarch64_opnd_info *operand,
2429 int need_libopcodes_p)
2430 {
2431 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2432 reloc->opnd = operand->type;
2433 if (need_libopcodes_p)
2434 reloc->need_libopcodes_p = 1;
2435 };
2436
2437 /* Return TRUE if the instruction needs to be fixed up later internally by
2438 the GAS; otherwise return FALSE. */
2439
2440 static inline bool
2441 aarch64_gas_internal_fixup_p (void)
2442 {
2443 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2444 }
2445
2446 /* Assign the immediate value to the relevant field in *OPERAND if
2447 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2448 needs an internal fixup in a later stage.
2449 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2450 IMM.VALUE that may get assigned with the constant. */
2451 static inline void
2452 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2453 aarch64_opnd_info *operand,
2454 int addr_off_p,
2455 int need_libopcodes_p,
2456 int skip_p)
2457 {
2458 if (reloc->exp.X_op == O_constant)
2459 {
2460 if (addr_off_p)
2461 operand->addr.offset.imm = reloc->exp.X_add_number;
2462 else
2463 operand->imm.value = reloc->exp.X_add_number;
2464 reloc->type = BFD_RELOC_UNUSED;
2465 }
2466 else
2467 {
2468 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2469 /* Tell libopcodes to ignore this operand or not. This is helpful
2470 when one of the operands needs to be fixed up later but we need
2471 libopcodes to check the other operands. */
2472 operand->skip = skip_p;
2473 }
2474 }
2475
2476 /* Relocation modifiers. Each entry in the table contains the textual
2477 name for the relocation which may be placed before a symbol used as
2478 a load/store offset, or add immediate. It must be surrounded by a
2479 leading and trailing colon, for example:
2480
2481 ldr x0, [x1, #:rello:varsym]
2482 add x0, x1, #:rello:varsym */
2483
struct reloc_table_entry
{
  /* Modifier name as written in assembler source, without the colons.  */
  const char *name;
  /* Non-zero if the relocation is PC-relative.  */
  int pc_rel;
  /* Relocation code to use for each class of consuming instruction;
     0 means the modifier is not allowed on that class.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2495
2496 static struct reloc_table_entry reloc_table[] =
2497 {
2498 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2499 {"lo12", 0,
2500 0, /* adr_type */
2501 0,
2502 0,
2503 BFD_RELOC_AARCH64_ADD_LO12,
2504 BFD_RELOC_AARCH64_LDST_LO12,
2505 0},
2506
2507 /* Higher 21 bits of pc-relative page offset: ADRP */
2508 {"pg_hi21", 1,
2509 0, /* adr_type */
2510 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2511 0,
2512 0,
2513 0,
2514 0},
2515
2516 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2517 {"pg_hi21_nc", 1,
2518 0, /* adr_type */
2519 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2520 0,
2521 0,
2522 0,
2523 0},
2524
2525 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2526 {"abs_g0", 0,
2527 0, /* adr_type */
2528 0,
2529 BFD_RELOC_AARCH64_MOVW_G0,
2530 0,
2531 0,
2532 0},
2533
2534 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2535 {"abs_g0_s", 0,
2536 0, /* adr_type */
2537 0,
2538 BFD_RELOC_AARCH64_MOVW_G0_S,
2539 0,
2540 0,
2541 0},
2542
2543 /* Less significant bits 0-15 of address/value: MOVK, no check */
2544 {"abs_g0_nc", 0,
2545 0, /* adr_type */
2546 0,
2547 BFD_RELOC_AARCH64_MOVW_G0_NC,
2548 0,
2549 0,
2550 0},
2551
2552 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2553 {"abs_g1", 0,
2554 0, /* adr_type */
2555 0,
2556 BFD_RELOC_AARCH64_MOVW_G1,
2557 0,
2558 0,
2559 0},
2560
2561 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2562 {"abs_g1_s", 0,
2563 0, /* adr_type */
2564 0,
2565 BFD_RELOC_AARCH64_MOVW_G1_S,
2566 0,
2567 0,
2568 0},
2569
2570 /* Less significant bits 16-31 of address/value: MOVK, no check */
2571 {"abs_g1_nc", 0,
2572 0, /* adr_type */
2573 0,
2574 BFD_RELOC_AARCH64_MOVW_G1_NC,
2575 0,
2576 0,
2577 0},
2578
2579 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2580 {"abs_g2", 0,
2581 0, /* adr_type */
2582 0,
2583 BFD_RELOC_AARCH64_MOVW_G2,
2584 0,
2585 0,
2586 0},
2587
2588 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2589 {"abs_g2_s", 0,
2590 0, /* adr_type */
2591 0,
2592 BFD_RELOC_AARCH64_MOVW_G2_S,
2593 0,
2594 0,
2595 0},
2596
2597 /* Less significant bits 32-47 of address/value: MOVK, no check */
2598 {"abs_g2_nc", 0,
2599 0, /* adr_type */
2600 0,
2601 BFD_RELOC_AARCH64_MOVW_G2_NC,
2602 0,
2603 0,
2604 0},
2605
2606 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2607 {"abs_g3", 0,
2608 0, /* adr_type */
2609 0,
2610 BFD_RELOC_AARCH64_MOVW_G3,
2611 0,
2612 0,
2613 0},
2614
2615 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2616 {"prel_g0", 1,
2617 0, /* adr_type */
2618 0,
2619 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2620 0,
2621 0,
2622 0},
2623
2624 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2625 {"prel_g0_nc", 1,
2626 0, /* adr_type */
2627 0,
2628 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2629 0,
2630 0,
2631 0},
2632
2633 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2634 {"prel_g1", 1,
2635 0, /* adr_type */
2636 0,
2637 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2638 0,
2639 0,
2640 0},
2641
2642 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2643 {"prel_g1_nc", 1,
2644 0, /* adr_type */
2645 0,
2646 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2647 0,
2648 0,
2649 0},
2650
2651 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2652 {"prel_g2", 1,
2653 0, /* adr_type */
2654 0,
2655 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2656 0,
2657 0,
2658 0},
2659
2660 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2661 {"prel_g2_nc", 1,
2662 0, /* adr_type */
2663 0,
2664 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2665 0,
2666 0,
2667 0},
2668
2669 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2670 {"prel_g3", 1,
2671 0, /* adr_type */
2672 0,
2673 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2674 0,
2675 0,
2676 0},
2677
2678 /* Get to the page containing GOT entry for a symbol. */
2679 {"got", 1,
2680 0, /* adr_type */
2681 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2682 0,
2683 0,
2684 0,
2685 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2686
2687 /* 12 bit offset into the page containing GOT entry for that symbol. */
2688 {"got_lo12", 0,
2689 0, /* adr_type */
2690 0,
2691 0,
2692 0,
2693 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2694 0},
2695
2696 /* 0-15 bits of address/value: MOVk, no check. */
2697 {"gotoff_g0_nc", 0,
2698 0, /* adr_type */
2699 0,
2700 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2701 0,
2702 0,
2703 0},
2704
2705 /* Most significant bits 16-31 of address/value: MOVZ. */
2706 {"gotoff_g1", 0,
2707 0, /* adr_type */
2708 0,
2709 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2710 0,
2711 0,
2712 0},
2713
2714 /* 15 bit offset into the page containing GOT entry for that symbol. */
2715 {"gotoff_lo15", 0,
2716 0, /* adr_type */
2717 0,
2718 0,
2719 0,
2720 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2721 0},
2722
2723 /* Get to the page containing GOT TLS entry for a symbol */
2724 {"gottprel_g0_nc", 0,
2725 0, /* adr_type */
2726 0,
2727 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2728 0,
2729 0,
2730 0},
2731
2732 /* Get to the page containing GOT TLS entry for a symbol */
2733 {"gottprel_g1", 0,
2734 0, /* adr_type */
2735 0,
2736 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2737 0,
2738 0,
2739 0},
2740
2741 /* Get to the page containing GOT TLS entry for a symbol */
2742 {"tlsgd", 0,
2743 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2744 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2745 0,
2746 0,
2747 0,
2748 0},
2749
2750 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2751 {"tlsgd_lo12", 0,
2752 0, /* adr_type */
2753 0,
2754 0,
2755 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2756 0,
2757 0},
2758
2759 /* Lower 16 bits address/value: MOVk. */
2760 {"tlsgd_g0_nc", 0,
2761 0, /* adr_type */
2762 0,
2763 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2764 0,
2765 0,
2766 0},
2767
2768 /* Most significant bits 16-31 of address/value: MOVZ. */
2769 {"tlsgd_g1", 0,
2770 0, /* adr_type */
2771 0,
2772 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2773 0,
2774 0,
2775 0},
2776
2777 /* Get to the page containing GOT TLS entry for a symbol */
2778 {"tlsdesc", 0,
2779 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2780 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2781 0,
2782 0,
2783 0,
2784 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2785
2786 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2787 {"tlsdesc_lo12", 0,
2788 0, /* adr_type */
2789 0,
2790 0,
2791 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2792 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2793 0},
2794
2795 /* Get to the page containing GOT TLS entry for a symbol.
2796 The same as GD, we allocate two consecutive GOT slots
2797 for module index and module offset, the only difference
2798 with GD is the module offset should be initialized to
2799 zero without any outstanding runtime relocation. */
2800 {"tlsldm", 0,
2801 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2802 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2803 0,
2804 0,
2805 0,
2806 0},
2807
2808 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2809 {"tlsldm_lo12_nc", 0,
2810 0, /* adr_type */
2811 0,
2812 0,
2813 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2814 0,
2815 0},
2816
2817 /* 12 bit offset into the module TLS base address. */
2818 {"dtprel_lo12", 0,
2819 0, /* adr_type */
2820 0,
2821 0,
2822 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2823 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2824 0},
2825
2826 /* Same as dtprel_lo12, no overflow check. */
2827 {"dtprel_lo12_nc", 0,
2828 0, /* adr_type */
2829 0,
2830 0,
2831 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2832 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2833 0},
2834
2835 /* bits[23:12] of offset to the module TLS base address. */
2836 {"dtprel_hi12", 0,
2837 0, /* adr_type */
2838 0,
2839 0,
2840 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2841 0,
2842 0},
2843
2844 /* bits[15:0] of offset to the module TLS base address. */
2845 {"dtprel_g0", 0,
2846 0, /* adr_type */
2847 0,
2848 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2849 0,
2850 0,
2851 0},
2852
2853 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2854 {"dtprel_g0_nc", 0,
2855 0, /* adr_type */
2856 0,
2857 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2858 0,
2859 0,
2860 0},
2861
2862 /* bits[31:16] of offset to the module TLS base address. */
2863 {"dtprel_g1", 0,
2864 0, /* adr_type */
2865 0,
2866 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2867 0,
2868 0,
2869 0},
2870
2871 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2872 {"dtprel_g1_nc", 0,
2873 0, /* adr_type */
2874 0,
2875 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2876 0,
2877 0,
2878 0},
2879
2880 /* bits[47:32] of offset to the module TLS base address. */
2881 {"dtprel_g2", 0,
2882 0, /* adr_type */
2883 0,
2884 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2885 0,
2886 0,
2887 0},
2888
2889 /* Lower 16 bit offset into GOT entry for a symbol */
2890 {"tlsdesc_off_g0_nc", 0,
2891 0, /* adr_type */
2892 0,
2893 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2894 0,
2895 0,
2896 0},
2897
2898 /* Higher 16 bit offset into GOT entry for a symbol */
2899 {"tlsdesc_off_g1", 0,
2900 0, /* adr_type */
2901 0,
2902 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2903 0,
2904 0,
2905 0},
2906
2907 /* Get to the page containing GOT TLS entry for a symbol */
2908 {"gottprel", 0,
2909 0, /* adr_type */
2910 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2911 0,
2912 0,
2913 0,
2914 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2915
2916 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2917 {"gottprel_lo12", 0,
2918 0, /* adr_type */
2919 0,
2920 0,
2921 0,
2922 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2923 0},
2924
2925 /* Get tp offset for a symbol. */
2926 {"tprel", 0,
2927 0, /* adr_type */
2928 0,
2929 0,
2930 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2931 0,
2932 0},
2933
2934 /* Get tp offset for a symbol. */
2935 {"tprel_lo12", 0,
2936 0, /* adr_type */
2937 0,
2938 0,
2939 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2940 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2941 0},
2942
2943 /* Get tp offset for a symbol. */
2944 {"tprel_hi12", 0,
2945 0, /* adr_type */
2946 0,
2947 0,
2948 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2949 0,
2950 0},
2951
2952 /* Get tp offset for a symbol. */
2953 {"tprel_lo12_nc", 0,
2954 0, /* adr_type */
2955 0,
2956 0,
2957 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2958 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2959 0},
2960
2961 /* Most significant bits 32-47 of address/value: MOVZ. */
2962 {"tprel_g2", 0,
2963 0, /* adr_type */
2964 0,
2965 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2966 0,
2967 0,
2968 0},
2969
2970 /* Most significant bits 16-31 of address/value: MOVZ. */
2971 {"tprel_g1", 0,
2972 0, /* adr_type */
2973 0,
2974 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2975 0,
2976 0,
2977 0},
2978
2979 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2980 {"tprel_g1_nc", 0,
2981 0, /* adr_type */
2982 0,
2983 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2984 0,
2985 0,
2986 0},
2987
2988 /* Most significant bits 0-15 of address/value: MOVZ. */
2989 {"tprel_g0", 0,
2990 0, /* adr_type */
2991 0,
2992 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2993 0,
2994 0,
2995 0},
2996
2997 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2998 {"tprel_g0_nc", 0,
2999 0, /* adr_type */
3000 0,
3001 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3002 0,
3003 0,
3004 0},
3005
3006 /* 15bit offset from got entry to base address of GOT table. */
3007 {"gotpage_lo15", 0,
3008 0,
3009 0,
3010 0,
3011 0,
3012 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3013 0},
3014
3015 /* 14bit offset from got entry to base address of GOT table. */
3016 {"gotpage_lo14", 0,
3017 0,
3018 0,
3019 0,
3020 0,
3021 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3022 0},
3023 };
3024
3025 /* Given the address of a pointer pointing to the textual name of a
3026 relocation as may appear in assembler source, attempt to find its
3027 details in reloc_table. The pointer will be updated to the character
3028 after the trailing colon. On failure, NULL will be returned;
3029 otherwise return the reloc_table_entry. */
3030
3031 static struct reloc_table_entry *
3032 find_reloc_table_entry (char **str)
3033 {
3034 unsigned int i;
3035 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3036 {
3037 int length = strlen (reloc_table[i].name);
3038
3039 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3040 && (*str)[length] == ':')
3041 {
3042 *str += (length + 1);
3043 return &reloc_table[i];
3044 }
3045 }
3046
3047 return NULL;
3048 }
3049
3050 /* Returns 0 if the relocation should never be forced,
3051 1 if the relocation must be forced, and -1 if either
3052 result is OK. */
3053
static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* No inherent preference either way.  */
      return -1;
    }
}
3152
3153 int
3154 aarch64_force_relocation (struct fix *fixp)
3155 {
3156 int res = aarch64_force_reloc (fixp->fx_r_type);
3157
3158 if (res == -1)
3159 return generic_force_reloc (fixp);
3160 return res;
3161 }
3162
/* Mode argument to parse_shift and parse_shifter_operand.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3177
/* Parse a <shift> operator on an AArch64 data processing instruction.
   MODE controls which shift/extend operators and amounts are acceptable;
   on success the parsed kind and amount are recorded in OPERAND->shifter
   and *STR is advanced past the consumed text.
   Return TRUE on success; otherwise return FALSE.  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan over the alphabetic operator name (e.g. "lsl", "uxtw", "mul").  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the name up in the table of known shift/extend modifiers.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only valid in the vector-immediate form.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  /* MUL is only valid in the SVE forms.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Narrow KIND down to what this particular MODE accepts.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* Only an extend operator may omit its amount, and only when the
	 amount was not introduced by an immediate prefix.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3350
3351 /* Parse a <shifter_operand> for a data processing instruction:
3352
3353 #<immediate>
3354 #<immediate>, LSL #imm
3355
3356 Validation of immediate operands is deferred to md_apply_fix.
3357
3358 Return TRUE on success; otherwise return FALSE. */
3359
3360 static bool
3361 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3362 enum parse_shift_mode mode)
3363 {
3364 char *p;
3365
3366 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3367 return false;
3368
3369 p = *str;
3370
3371 /* Accept an immediate expression. */
3372 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3373 REJECT_ABSENT))
3374 return false;
3375
3376 /* Accept optional LSL for arithmetic immediate values. */
3377 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3378 if (! parse_shift (&p, operand, SHIFTED_LSL))
3379 return false;
3380
3381 /* Not accept any shifter for logical immediate values. */
3382 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3383 && parse_shift (&p, operand, mode))
3384 {
3385 set_syntax_error (_("unexpected shift operator"));
3386 return false;
3387 }
3388
3389 *str = p;
3390 return true;
3391 }
3392
3393 /* Parse a <shifter_operand> for a data processing instruction:
3394
3395 <Rm>
3396 <Rm>, <shift>
3397 #<immediate>
3398 #<immediate>, LSL #imm
3399
3400 where <shift> is handled by parse_shift above, and the last two
3401 cases are handled by the function above.
3402
3403 Validation of immediate operands is deferred to md_apply_fix.
3404
3405 Return TRUE on success; otherwise return FALSE. */
3406
3407 static bool
3408 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3409 enum parse_shift_mode mode)
3410 {
3411 const reg_entry *reg;
3412 aarch64_opnd_qualifier_t qualifier;
3413 enum aarch64_operand_class opd_class
3414 = aarch64_get_operand_class (operand->type);
3415
3416 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3417 if (reg)
3418 {
3419 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3420 {
3421 set_syntax_error (_("unexpected register in the immediate operand"));
3422 return false;
3423 }
3424
3425 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3426 {
3427 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3428 return false;
3429 }
3430
3431 operand->reg.regno = reg->number;
3432 operand->qualifier = qualifier;
3433
3434 /* Accept optional shift operation on register. */
3435 if (! skip_past_comma (str))
3436 return true;
3437
3438 if (! parse_shift (str, operand, mode))
3439 return false;
3440
3441 return true;
3442 }
3443 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3444 {
3445 set_syntax_error
3446 (_("integer register expected in the extended/shifted operand "
3447 "register"));
3448 return false;
3449 }
3450
3451 /* We have a shifted immediate variable. */
3452 return parse_shifter_operand_imm (str, operand, mode);
3453 }
3454
3455 /* Return TRUE on success; return FALSE otherwise. */
3456
3457 static bool
3458 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3459 enum parse_shift_mode mode)
3460 {
3461 char *p = *str;
3462
3463 /* Determine if we have the sequence of characters #: or just :
3464 coming next. If we do, then we check for a :rello: relocation
3465 modifier. If we don't, punt the whole lot to
3466 parse_shifter_operand. */
3467
3468 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3469 {
3470 struct reloc_table_entry *entry;
3471
3472 if (p[0] == '#')
3473 p += 2;
3474 else
3475 p++;
3476 *str = p;
3477
3478 /* Try to parse a relocation. Anything else is an error. */
3479 if (!(entry = find_reloc_table_entry (str)))
3480 {
3481 set_syntax_error (_("unknown relocation modifier"));
3482 return false;
3483 }
3484
3485 if (entry->add_type == 0)
3486 {
3487 set_syntax_error
3488 (_("this relocation modifier is not allowed on this instruction"));
3489 return false;
3490 }
3491
3492 /* Save str before we decompose it. */
3493 p = *str;
3494
3495 /* Next, we parse the expression. */
3496 if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
3497 REJECT_ABSENT))
3498 return false;
3499
3500 /* Record the relocation type (use the ADD variant here). */
3501 inst.reloc.type = entry->add_type;
3502 inst.reloc.pc_rel = entry->pc_rel;
3503
3504 /* If str is empty, we've reached the end, stop here. */
3505 if (**str == '\0')
3506 return true;
3507
3508 /* Otherwise, we have a shifted reloc modifier, so rewind to
3509 recover the variable name and continue parsing for the shifter. */
3510 *str = p;
3511 return parse_shifter_operand_imm (str, operand, mode);
3512 }
3513
3514 return parse_shifter_operand (str, operand, mode);
3515 }
3516
3517 /* Parse all forms of an address expression. Information is written
3518 to *OPERAND and/or inst.reloc.
3519
3520 The A64 instruction set has the following addressing modes:
3521
3522 Offset
3523 [base] // in SIMD ld/st structure
3524 [base{,#0}] // in ld/st exclusive
3525 [base{,#imm}]
3526 [base,Xm{,LSL #imm}]
3527 [base,Xm,SXTX {#imm}]
3528 [base,Wm,(S|U)XTW {#imm}]
3529 Pre-indexed
3530 [base]! // in ldraa/ldrab exclusive
3531 [base,#imm]!
3532 Post-indexed
3533 [base],#imm
3534 [base],Xm // in SIMD ld/st structure
3535 PC-relative (literal)
3536 label
3537 SVE:
3538 [base,#imm,MUL VL]
3539 [base,Zm.D{,LSL #imm}]
3540 [base,Zm.S,(S|U)XTW {#imm}]
3541 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3542 [Zn.S,#imm]
3543 [Zn.D,#imm]
3544 [Zn.S{, Xm}]
3545 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3546 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3547 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3548
3549 (As a convenience, the notation "=immediate" is permitted in conjunction
3550 with the pc-relative literal load instructions to automatically place an
3551 immediate value or symbolic address in a nearby literal pool and generate
3552 a hidden label which references it.)
3553
3554 Upon a successful parsing, the address structure in *OPERAND will be
3555 filled in the following way:
3556
3557 .base_regno = <base>
3558 .offset.is_reg // 1 if the offset is a register
3559 .offset.imm = <imm>
3560 .offset.regno = <Rm>
3561
3562 For different addressing modes defined in the A64 ISA:
3563
3564 Offset
3565 .pcrel=0; .preind=1; .postind=0; .writeback=0
3566 Pre-indexed
3567 .pcrel=0; .preind=1; .postind=0; .writeback=1
3568 Post-indexed
3569 .pcrel=0; .preind=0; .postind=1; .writeback=1
3570 PC-relative (literal)
3571 .pcrel=1; .preind=1; .postind=0; .writeback=0
3572
3573 The shift/extension information, if any, will be stored in .shifter.
3574 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3575 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3576 corresponding register.
3577
3578 BASE_TYPE says which types of base register should be accepted and
3579 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3580 is the type of shifter that is allowed for immediate offsets,
3581 or SHIFTED_NONE if none.
3582
3583 In all other respects, it is the caller's responsibility to check
3584 for addressing modes not supported by the instruction, and to set
3585 inst.reloc.type. */
3586
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  /* Anything that does not start with '[' is one of the PC-relative
     forms: a label, "=immediate", or "#:<reloc_op>:<symbol>".  */
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* ADR uses the modifier's ADR variant; everything else here is
	     a load-literal.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Parse the base register and its qualifier.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* LSL/SXTX (or no shifter) require a 64-bit offset
		 register.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* Base and offset must have the same element size, except
		 for the SVE2 vector-plus-scalar [Zn.S, Xm] form.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW extensions take a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  /* All '['-forms must be closed by ']'.  */
  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  /* Optional writeback: "!" for pre-index, ",<offset>" for post-index.  */
  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is shorthand for [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
3885
3886 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3887 on success. */
3888 static bool
3889 parse_address (char **str, aarch64_opnd_info *operand)
3890 {
3891 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3892 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3893 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3894 }
3895
3896 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3897 The arguments have the same meaning as for parse_address_main.
3898 Return TRUE on success. */
3899 static bool
3900 parse_sve_address (char **str, aarch64_opnd_info *operand,
3901 aarch64_opnd_qualifier_t *base_qualifier,
3902 aarch64_opnd_qualifier_t *offset_qualifier)
3903 {
3904 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3905 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3906 SHIFTED_MUL_VL);
3907 }
3908
3909 /* Parse a register X0-X30. The register must be 64-bit and register 31
3910 is unallocated. */
3911 static bool
3912 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
3913 {
3914 const reg_entry *reg = parse_reg (str);
3915 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
3916 {
3917 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3918 return false;
3919 }
3920 operand->reg.regno = reg->number;
3921 operand->qualifier = AARCH64_OPND_QLF_X;
3922 return true;
3923 }
3924
3925 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3926 Return TRUE on success; otherwise return FALSE. */
3927 static bool
3928 parse_half (char **str, int *internal_fixup_p)
3929 {
3930 char *p = *str;
3931
3932 skip_past_char (&p, '#');
3933
3934 gas_assert (internal_fixup_p);
3935 *internal_fixup_p = 0;
3936
3937 if (*p == ':')
3938 {
3939 struct reloc_table_entry *entry;
3940
3941 /* Try to parse a relocation. Anything else is an error. */
3942 ++p;
3943
3944 if (!(entry = find_reloc_table_entry (&p)))
3945 {
3946 set_syntax_error (_("unknown relocation modifier"));
3947 return false;
3948 }
3949
3950 if (entry->movw_type == 0)
3951 {
3952 set_syntax_error
3953 (_("this relocation modifier is not allowed on this instruction"));
3954 return false;
3955 }
3956
3957 inst.reloc.type = entry->movw_type;
3958 }
3959 else
3960 *internal_fixup_p = 1;
3961
3962 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
3963 return false;
3964
3965 *str = p;
3966 return true;
3967 }
3968
3969 /* Parse an operand for an ADRP instruction:
3970 ADRP <Xd>, <label>
3971 Return TRUE on success; otherwise return FALSE. */
3972
3973 static bool
3974 parse_adrp (char **str)
3975 {
3976 char *p;
3977
3978 p = *str;
3979 if (*p == ':')
3980 {
3981 struct reloc_table_entry *entry;
3982
3983 /* Try to parse a relocation. Anything else is an error. */
3984 ++p;
3985 if (!(entry = find_reloc_table_entry (&p)))
3986 {
3987 set_syntax_error (_("unknown relocation modifier"));
3988 return false;
3989 }
3990
3991 if (entry->adrp_type == 0)
3992 {
3993 set_syntax_error
3994 (_("this relocation modifier is not allowed on this instruction"));
3995 return false;
3996 }
3997
3998 inst.reloc.type = entry->adrp_type;
3999 }
4000 else
4001 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4002
4003 inst.reloc.pc_rel = 1;
4004 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4005 return false;
4006 *str = p;
4007 return true;
4008 }
4009
4010 /* Miscellaneous. */
4011
4012 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4013 of SIZE tokens in which index I gives the token for field value I,
4014 or is null if field value I is invalid. REG_TYPE says which register
4015 names should be treated as registers rather than as symbolic immediates.
4016
4017 Return true on success, moving *STR past the operand and storing the
4018 field value in *VAL. */
4019
4020 static int
4021 parse_enum_string (char **str, int64_t *val, const char *const *array,
4022 size_t size, aarch64_reg_type reg_type)
4023 {
4024 expressionS exp;
4025 char *p, *q;
4026 size_t i;
4027
4028 /* Match C-like tokens. */
4029 p = q = *str;
4030 while (ISALNUM (*q))
4031 q++;
4032
4033 for (i = 0; i < size; ++i)
4034 if (array[i]
4035 && strncasecmp (array[i], p, q - p) == 0
4036 && array[i][q - p] == 0)
4037 {
4038 *val = i;
4039 *str = q;
4040 return true;
4041 }
4042
4043 if (!parse_immediate_expression (&p, &exp, reg_type))
4044 return false;
4045
4046 if (exp.X_op == O_constant
4047 && (uint64_t) exp.X_add_number < size)
4048 {
4049 *val = exp.X_add_number;
4050 *str = p;
4051 return true;
4052 }
4053
4054 /* Use the default error for this operand. */
4055 return false;
4056 }
4057
4058 /* Parse an option for a preload instruction. Returns the encoding for the
4059 option, or PARSE_FAIL. */
4060
4061 static int
4062 parse_pldop (char **str)
4063 {
4064 char *p, *q;
4065 const struct aarch64_name_value_pair *o;
4066
4067 p = q = *str;
4068 while (ISALNUM (*q))
4069 q++;
4070
4071 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4072 if (!o)
4073 return PARSE_FAIL;
4074
4075 *str = q;
4076 return o->value;
4077 }
4078
4079 /* Parse an option for a barrier instruction. Returns the encoding for the
4080 option, or PARSE_FAIL. */
4081
4082 static int
4083 parse_barrier (char **str)
4084 {
4085 char *p, *q;
4086 const struct aarch64_name_value_pair *o;
4087
4088 p = q = *str;
4089 while (ISALPHA (*q))
4090 q++;
4091
4092 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4093 if (!o)
4094 return PARSE_FAIL;
4095
4096 *str = q;
4097 return o->value;
4098 }
4099
4100 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4101 return 0 if successful. Otherwise return PARSE_FAIL. */
4102
4103 static int
4104 parse_barrier_psb (char **str,
4105 const struct aarch64_name_value_pair ** hint_opt)
4106 {
4107 char *p, *q;
4108 const struct aarch64_name_value_pair *o;
4109
4110 p = q = *str;
4111 while (ISALPHA (*q))
4112 q++;
4113
4114 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4115 if (!o)
4116 {
4117 set_fatal_syntax_error
4118 ( _("unknown or missing option to PSB/TSB"));
4119 return PARSE_FAIL;
4120 }
4121
4122 if (o->value != 0x11)
4123 {
4124 /* PSB only accepts option name 'CSYNC'. */
4125 set_syntax_error
4126 (_("the specified option is not accepted for PSB/TSB"));
4127 return PARSE_FAIL;
4128 }
4129
4130 *str = q;
4131 *hint_opt = o;
4132 return 0;
4133 }
4134
4135 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4136 return 0 if successful. Otherwise return PARSE_FAIL. */
4137
4138 static int
4139 parse_bti_operand (char **str,
4140 const struct aarch64_name_value_pair ** hint_opt)
4141 {
4142 char *p, *q;
4143 const struct aarch64_name_value_pair *o;
4144
4145 p = q = *str;
4146 while (ISALPHA (*q))
4147 q++;
4148
4149 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4150 if (!o)
4151 {
4152 set_fatal_syntax_error
4153 ( _("unknown option to BTI"));
4154 return PARSE_FAIL;
4155 }
4156
4157 switch (o->value)
4158 {
4159 /* Valid BTI operands. */
4160 case HINT_OPD_C:
4161 case HINT_OPD_J:
4162 case HINT_OPD_JC:
4163 break;
4164
4165 default:
4166 set_syntax_error
4167 (_("unknown option to BTI"));
4168 return PARSE_FAIL;
4169 }
4170
4171 *str = q;
4172 *hint_opt = o;
4173 return 0;
4174 }
4175
4176 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4177 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4178 on failure. Format:
4179
4180 REG_TYPE.QUALIFIER
4181
4182 Side effect: Update STR with current parse position of success.
4183 */
4184
4185 static const reg_entry *
4186 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4187 aarch64_opnd_qualifier_t *qualifier)
4188 {
4189 char *q;
4190
4191 reg_entry *reg = parse_reg (str);
4192 if (reg != NULL && reg->type == reg_type)
4193 {
4194 if (!skip_past_char (str, '.'))
4195 {
4196 set_syntax_error (_("missing ZA tile element size separator"));
4197 return NULL;
4198 }
4199
4200 q = *str;
4201 switch (TOLOWER (*q))
4202 {
4203 case 'b':
4204 *qualifier = AARCH64_OPND_QLF_S_B;
4205 break;
4206 case 'h':
4207 *qualifier = AARCH64_OPND_QLF_S_H;
4208 break;
4209 case 's':
4210 *qualifier = AARCH64_OPND_QLF_S_S;
4211 break;
4212 case 'd':
4213 *qualifier = AARCH64_OPND_QLF_S_D;
4214 break;
4215 case 'q':
4216 *qualifier = AARCH64_OPND_QLF_S_Q;
4217 break;
4218 default:
4219 return NULL;
4220 }
4221 q++;
4222
4223 *str = q;
4224 return reg;
4225 }
4226
4227 return NULL;
4228 }
4229
4230 /* Parse SME ZA tile encoded in <ZAda> assembler symbol.
   On success the tile qualifier is written to *QUALIFIER.
4232
4233 Tiles are in example format: za[0-9]\.[bhsd]
4234
4235 Function returns <ZAda> register number or PARSE_FAIL.
4236 */
4237 static int
4238 parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
4239 {
4240 int regno;
4241 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZA, qualifier);
4242
4243 if (reg == NULL)
4244 return PARSE_FAIL;
4245 regno = reg->number;
4246
4247 switch (*qualifier)
4248 {
4249 case AARCH64_OPND_QLF_S_B:
4250 if (regno != 0x00)
4251 {
4252 set_syntax_error (_("invalid ZA tile register number, expected za0"));
4253 return PARSE_FAIL;
4254 }
4255 break;
4256 case AARCH64_OPND_QLF_S_H:
4257 if (regno > 0x01)
4258 {
4259 set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
4260 return PARSE_FAIL;
4261 }
4262 break;
4263 case AARCH64_OPND_QLF_S_S:
4264 if (regno > 0x03)
4265 {
4266 /* For the 32-bit variant: is the name of the ZA tile ZA0-ZA3. */
4267 set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
4268 return PARSE_FAIL;
4269 }
4270 break;
4271 case AARCH64_OPND_QLF_S_D:
4272 if (regno > 0x07)
4273 {
4274 /* For the 64-bit variant: is the name of the ZA tile ZA0-ZA7 */
4275 set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
4276 return PARSE_FAIL;
4277 }
4278 break;
4279 default:
4280 set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
4281 return PARSE_FAIL;
4282 }
4283
4284 return regno;
4285 }
4286
4287 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4288
4289 #<imm>
4290 <imm>
4291
4292 Function return TRUE if immediate was found, or FALSE.
4293 */
4294 static bool
4295 parse_sme_immediate (char **str, int64_t *imm)
4296 {
4297 int64_t val;
4298 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4299 return false;
4300
4301 *imm = val;
4302 return true;
4303 }
4304
4305 /* Parse index with vector select register and immediate:
4306
4307 [<Wv>, <imm>]
4308 [<Wv>, #<imm>]
4309 where <Wv> is in W12-W15 range and # is optional for immediate.
4310
4311 Function performs extra check for mandatory immediate value if REQUIRE_IMM
4312 is set to true.
4313
4314 On success function returns TRUE and populated VECTOR_SELECT_REGISTER and
4315 IMM output.
4316 */
static bool
parse_sme_za_hv_tiles_operand_index (char **str,
				     int *vector_select_register,
				     int64_t *imm)
{
  const reg_entry *reg;

  /* The whole index is bracketed: [<Wv>, <imm>].  */
  if (!skip_past_char (str, '['))
    {
      set_syntax_error (_("expected '['"));
      return false;
    }

  /* Vector select register W12-W15 encoded in the 2-bit Rv field.  */
  reg = parse_reg (str);
  if (reg == NULL || reg->type != REG_TYPE_R_32
      || reg->number < 12 || reg->number > 15)
    {
      set_syntax_error (_("expected vector select register W12-W15"));
      return false;
    }
  *vector_select_register = reg->number;

  /* The ',' between <Wv> and the immediate is mandatory; only the '#'
     prefix on the immediate is optional.  */
  if (!skip_past_char (str, ','))
    {
      set_syntax_error (_("expected ','"));
      return false;
    }

  if (!parse_sme_immediate (str, imm))
    {
      set_syntax_error (_("index offset immediate expected"));
      return false;
    }

  if (!skip_past_char (str, ']'))
    {
      set_syntax_error (_("expected ']'"));
      return false;
    }

  return true;
}
4360
4361 /* Parse SME ZA horizontal or vertical vector access to tiles.
4362 Function extracts from STR to SLICE_INDICATOR <HV> horizontal (0) or
4363 vertical (1) ZA tile vector orientation. VECTOR_SELECT_REGISTER
4364 contains <Wv> select register and corresponding optional IMMEDIATE.
4365 In addition QUALIFIER is extracted.
4366
4367 Field format examples:
4368
4369 ZA0<HV>.B[<Wv>, #<imm>]
4370 <ZAn><HV>.H[<Wv>, #<imm>]
4371 <ZAn><HV>.S[<Wv>, #<imm>]
4372 <ZAn><HV>.D[<Wv>, #<imm>]
4373 <ZAn><HV>.Q[<Wv>, #<imm>]
4374
4375 Function returns <ZAda> register number or PARSE_FAIL.
4376 */
static int
parse_sme_za_hv_tiles_operand (char **str,
			       enum sme_hv_slice *slice_indicator,
			       int *vector_select_register,
			       int *imm,
			       aarch64_opnd_qualifier_t *qualifier)
{
  char *qh, *qv;
  int regno;
  int regno_limit;
  int64_t imm_limit;
  int64_t imm_value;
  const reg_entry *reg;

  /* Try the horizontal (ZAnH) tile name first, then the vertical (ZAnV)
     one; each attempt uses its own copy of the parse position so a
     failed attempt does not move *STR.  */
  qh = qv = *str;
  if ((reg = parse_reg_with_qual (&qh, REG_TYPE_ZAH, qualifier)) != NULL)
    {
      *slice_indicator = HV_horizontal;
      *str = qh;
    }
  else if ((reg = parse_reg_with_qual (&qv, REG_TYPE_ZAV, qualifier)) != NULL)
    {
      *slice_indicator = HV_vertical;
      *str = qv;
    }
  else
    return PARSE_FAIL;
  regno = reg->number;

  /* The element size fixes both the highest valid tile number and the
     highest valid index offset.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      regno_limit = 0;
      imm_limit = 15;
      break;
    case AARCH64_OPND_QLF_S_H:
      regno_limit = 1;
      imm_limit = 7;
      break;
    case AARCH64_OPND_QLF_S_S:
      regno_limit = 3;
      imm_limit = 3;
      break;
    case AARCH64_OPND_QLF_S_D:
      regno_limit = 7;
      imm_limit = 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      regno_limit = 15;
      imm_limit = 0;
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
      return PARSE_FAIL;
    }

  /* Check if destination register ZA tile vector is in range for given
     instruction variant.  */
  if (regno < 0 || regno > regno_limit)
    {
      set_syntax_error (_("ZA tile vector out of range"));
      return PARSE_FAIL;
    }

  /* Parse the mandatory [<Wv>, <imm>] index.  */
  if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
					    &imm_value))
    return PARSE_FAIL;

  /* Check if optional index offset is in the range for instruction
     variant.  */
  if (imm_value < 0 || imm_value > imm_limit)
    {
      set_syntax_error (_("index offset out of range"));
      return PARSE_FAIL;
    }

  *imm = imm_value;

  return regno;
}
4457
4458
4459 static int
4460 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4461 enum sme_hv_slice *slice_indicator,
4462 int *vector_select_register,
4463 int *imm,
4464 aarch64_opnd_qualifier_t *qualifier)
4465 {
4466 int regno;
4467
4468 if (!skip_past_char (str, '{'))
4469 {
4470 set_syntax_error (_("expected '{'"));
4471 return PARSE_FAIL;
4472 }
4473
4474 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4475 vector_select_register, imm,
4476 qualifier);
4477
4478 if (regno == PARSE_FAIL)
4479 return PARSE_FAIL;
4480
4481 if (!skip_past_char (str, '}'))
4482 {
4483 set_syntax_error (_("expected '}'"));
4484 return PARSE_FAIL;
4485 }
4486
4487 return regno;
4488 }
4489
4490 /* Parse list of up to eight 64-bit element tile names separated by commas in
4491 SME's ZERO instruction:
4492
4493 ZERO { <mask> }
4494
4495 Function returns <mask>:
4496
4497 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4498 */
static int
parse_sme_zero_mask(char **str)
{
  char *q;
  int mask;
  aarch64_opnd_qualifier_t qualifier;

  /* The mask is expressed in units of 64-bit tiles: bit n set means
     ZAn.D is zeroed.  Wider-element tiles overlay several .D tiles, so
     their mask patterns set multiple bits.  */
  mask = 0x00;
  q = *str;
  do
    {
      const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA, &qualifier);
      if (reg)
	{
	  int regno = reg->number;
	  if (qualifier == AARCH64_OPND_QLF_S_B && regno == 0)
	    {
	      /* { ZA0.B } is assembled as all-ones immediate.  */
	      mask = 0xff;
	    }
	  else if (qualifier == AARCH64_OPND_QLF_S_H && regno < 2)
	    /* A .H tile overlays every second .D tile: 0x55 is
	       0b01010101 for ZA0.H, shifted left by one for ZA1.H.  */
	    mask |= 0x55 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_S && regno < 4)
	    /* A .S tile overlays every fourth .D tile (0x11 pattern).  */
	    mask |= 0x11 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_D && regno < 8)
	    /* A .D tile is a single mask bit.  */
	    mask |= 0x01 << regno;
	  else
	    {
	      set_syntax_error (_("wrong ZA tile element format"));
	      return PARSE_FAIL;
	    }
	  continue;
	}
      else if (strncasecmp (q, "za", 2) == 0
	       && !ISALNUM (q[2]))
	{
	  /* { ZA } is assembled as all-ones immediate.  */
	  mask = 0xff;
	  q += 2;
	  continue;
	}
      else
	{
	  set_syntax_error (_("wrong ZA tile element format"));
	  return PARSE_FAIL;
	}
    }
  while (skip_past_char (&q, ','));

  *str = q;
  return mask;
}
4551
4552 /* Wraps in curly braces <mask> operand ZERO instruction:
4553
4554 ZERO { <mask> }
4555
4556 Function returns value of <mask> bit-field.
4557 */
4558 static int
4559 parse_sme_list_of_64bit_tiles (char **str)
4560 {
4561 int regno;
4562
4563 if (!skip_past_char (str, '{'))
4564 {
4565 set_syntax_error (_("expected '{'"));
4566 return PARSE_FAIL;
4567 }
4568
4569 /* Empty <mask> list is an all-zeros immediate. */
4570 if (!skip_past_char (str, '}'))
4571 {
4572 regno = parse_sme_zero_mask (str);
4573 if (regno == PARSE_FAIL)
4574 return PARSE_FAIL;
4575
4576 if (!skip_past_char (str, '}'))
4577 {
4578 set_syntax_error (_("expected '}'"));
4579 return PARSE_FAIL;
4580 }
4581 }
4582 else
4583 regno = 0x00;
4584
4585 return regno;
4586 }
4587
4588 /* Parse ZA array operand used in e.g. STR and LDR instruction.
4589 Operand format:
4590
4591 ZA[<Wv>, <imm>]
4592 ZA[<Wv>, #<imm>]
4593
4594 Function returns <Wv> or PARSE_FAIL.
4595 */
4596 static int
4597 parse_sme_za_array (char **str, int *imm)
4598 {
4599 char *p, *q;
4600 int regno;
4601 int64_t imm_value;
4602
4603 p = q = *str;
4604 while (ISALPHA (*q))
4605 q++;
4606
4607 if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
4608 {
4609 set_syntax_error (_("expected ZA array"));
4610 return PARSE_FAIL;
4611 }
4612
4613 if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
4614 return PARSE_FAIL;
4615
4616 if (imm_value < 0 || imm_value > 15)
4617 {
4618 set_syntax_error (_("offset out of range"));
4619 return PARSE_FAIL;
4620 }
4621
4622 *imm = imm_value;
4623 *str = q;
4624 return regno;
4625 }
4626
4627 /* Parse streaming mode operand for SMSTART and SMSTOP.
4628
4629 {SM | ZA}
4630
   Function returns 's' if SM or 'z' if ZA is parsed. Otherwise PARSE_FAIL.
4632 */
4633 static int
4634 parse_sme_sm_za (char **str)
4635 {
4636 char *p, *q;
4637
4638 p = q = *str;
4639 while (ISALPHA (*q))
4640 q++;
4641
4642 if ((q - p != 2)
4643 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4644 {
4645 set_syntax_error (_("expected SM or ZA operand"));
4646 return PARSE_FAIL;
4647 }
4648
4649 *str = q;
4650 return TOLOWER (p[0]);
4651 }
4652
/* Parse the name of the source scalable predicate register, the index base
   register W12-W15 and the element index.  Function performs element index
   limit checks as well as qualifier type checks.

   <Pn>.<T>[<Wv>, <imm>]
   <Pn>.<T>[<Wv>, #<imm>]

   On success function sets <Wv> to INDEX_BASE_REG, <T> to QUALIFIER and
   <imm> to IMM.
   Function returns <Pn>, or PARSE_FAIL.  */
static int
parse_sme_pred_reg_with_index (char **str,
			       int *index_base_reg,
			       int *imm,
			       aarch64_opnd_qualifier_t *qualifier)
{
  int64_t max_index;
  int64_t index;
  const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_PN, qualifier);

  if (reg == NULL)
    return PARSE_FAIL;

  /* The permitted element index range depends on the element size <T>.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      max_index = 15;
      break;
    case AARCH64_OPND_QLF_S_H:
      max_index = 7;
      break;
    case AARCH64_OPND_QLF_S_S:
      max_index = 3;
      break;
    case AARCH64_OPND_QLF_S_D:
      max_index = 1;
      break;
    default:
      set_syntax_error (_("wrong predicate register element size, allowed b, h, s and d"));
      return PARSE_FAIL;
    }

  if (! parse_sme_za_hv_tiles_operand_index (str, index_base_reg, &index))
    return PARSE_FAIL;

  if (index < 0 || index > max_index)
    {
      set_syntax_error (_("element index out of range for given variant"));
      return PARSE_FAIL;
    }

  *imm = index;
  return reg->number;
}
4711
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   FLAGS may be NULL; when non-NULL it receives the register's flag bits
   (zero for an implementation-defined S<...> name).  On success *STR is
   advanced past the register name.  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, truncating if it
     would overflow; Q is left one past the end of the name in *STR.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Range-check each encoding field before packing.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the 16-bit MRS/MSR encoding.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* A known name: diagnose uses not supported by the selected
	 processor, and warn about deprecated names; parsing still
	 succeeds in both cases.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4785
4786 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4787 for the option, or NULL. */
4788
4789 static const aarch64_sys_ins_reg *
4790 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4791 {
4792 char *p, *q;
4793 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4794 const aarch64_sys_ins_reg *o;
4795
4796 p = buf;
4797 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4798 if (p < buf + (sizeof (buf) - 1))
4799 *p++ = TOLOWER (*q);
4800 *p = '\0';
4801
4802 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4803 valid system register. This is enforced by construction of the hash
4804 table. */
4805 if (p - buf != q - *str)
4806 return NULL;
4807
4808 o = str_hash_find (sys_ins_regs, buf);
4809 if (!o)
4810 return NULL;
4811
4812 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4813 o->name, o->value, o->flags, 0))
4814 as_bad (_("selected processor does not support system register "
4815 "name '%s'"), buf);
4816 if (aarch64_sys_reg_deprecated_p (o->flags))
4817 as_warn (_("system register name '%s' is deprecated and may be "
4818 "removed in a future release"), buf);
4819
4820 *str = q;
4821 return o;
4822 }
4823 \f
/* The po_* macros below are helpers for parse_operands.  They assume the
   locals STR, VAL, RTYPE, REG, QUALIFIER, INFO and IMM_REG_TYPE declared
   there, and jump to the `failure' label on any parse error.  */

/* Consume the single character CHR from STR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of type REGTYPE into VAL (and its type into RTYPE),
   or fail with the default error.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE, storing its number
   and qualifier into INFO, or fail with the default error.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and fail (fatally) unless it lies
   in [MIN, MAX].  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse one of the strings in ARRAY into VAL (its index), or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR and fail if it is false/zero.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4875 \f
/* Encode the 12-bit immediate of an add/sub immediate instruction;
   imm12 occupies bits [21:10].  */
static inline uint32_t
encode_addsub_imm (uint32_t value)
{
  return value << 10;
}
4882
/* Encode the shift amount of an add/sub immediate instruction;
   the field occupies bits starting at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t amount)
{
  return amount << 22;
}
4889
4890
/* Encode the 21-bit immediate of an ADR instruction: the low two bits
   form the immlo field (bits [30:29]) and the remaining 19 bits the
   immhi field (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;
  uint32_t immhi = (imm >> 2) & 0x7ffff;

  return (immlo << 29) | (immhi << 5);
}
4898
/* Encode the 16-bit immediate of a move wide instruction;
   imm16 occupies bits [20:5].  */
static inline uint32_t
encode_movw_imm (uint32_t value)
{
  return value << 5;
}
4905
/* Encode the 26-bit offset of an unconditional branch;
   keep only imm26 (bits [25:0]).  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x03ffffff;
}
4912
/* Encode the 19-bit offset of a conditional branch or compare & branch;
   imm19 occupies bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4919
/* Encode the 19-bit offset of a load-literal instruction;
   imm19 occupies bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  uint32_t imm19 = ofs & 0x7ffff;

  return imm19 << 5;
}
4926
/* Encode the 14-bit offset of a test & branch instruction;
   imm14 occupies bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fff) << 5;
}
4933
/* Encode the 16-bit immediate of SVC/HVC/SMC;
   imm16 occupies bits [20:5].  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  return imm << 5;
}
4940
/* Reencode add(s) to sub(s), or sub(s) to add(s): bit 30 selects
   between the two, so flip it.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ 0x40000000u;
}
4947
/* Turn a MOVN/MOVZ-class encoding into MOVZ by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000u;
}
4953
/* Turn a MOVN/MOVZ-class encoding into MOVN by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~0x40000000u;
}
4959
4960 /* Overall per-instruction processing. */
4961
4962 /* We need to be able to fix up arbitrary expressions in some statements.
4963 This is so that we can handle symbols that are an arbitrary distance from
4964 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4965 which returns part of an address in a form which will be valid for
4966 a data instruction. We do this by pushing the expression into a symbol
4967 in the expr_section, and creating a fix for that. */
4968
4969 static fixS *
4970 fix_new_aarch64 (fragS * frag,
4971 int where,
4972 short int size,
4973 expressionS * exp,
4974 int pc_rel,
4975 int reloc)
4976 {
4977 fixS *new_fix;
4978
4979 switch (exp->X_op)
4980 {
4981 case O_constant:
4982 case O_symbol:
4983 case O_add:
4984 case O_subtract:
4985 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4986 break;
4987
4988 default:
4989 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4990 pc_rel, reloc);
4991 break;
4992 }
4993 return new_fix;
4994 }
4995 \f
/* Diagnostics on operand errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;
5001
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The array is indexed by
   enum aarch64_operand_error_kind and must be kept in sync with it; it was
   previously missing the two UNTIED entries, which made the names printed
   for AARCH64_OPDE_OUT_OF_RANGE and later kinds wrong and let
   AARCH64_OPDE_OTHER_ERROR index past the end of the array.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
5019
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The `lhs > rhs' comparison below relies on the enumerators being
     declared in ascending order of severity; these asserts document
     (and, in checking builds, verify) the expected ordering.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
5043
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.

   N.B. returns a pointer to a static buffer, so the result is overwritten
   by the next call and the routine is not reentrant.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5072
/* Re-initialise INSTRUCTION: clear everything and mark the relocation
   as unused.  */
static void
reset_aarch64_instruction (aarch64_instruction *instruction)
{
  memset (instruction, 0, sizeof (*instruction));
  instruction->reloc.type = BFD_RELOC_UNUSED;
}
5079
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  /* Instruction template this error was recorded against.  */
  const aarch64_opcode *opcode;
  /* Details of the operand mismatch.  */
  aarch64_operand_error detail;
  /* Next record; the report is a singly-linked list.  */
  struct operand_error_record *next;
};

typedef struct operand_error_record operand_error_record;

struct operand_errors
{
  /* First record in the list (most recently inserted).  */
  operand_error_record *head;
  /* Last record in the list.  */
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines to avoid
   re-allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5111
5112 /* Initialize the data structure that stores the operand mismatch
5113 information on assembling one line of the assembly code. */
5114 static void
5115 init_operand_error_report (void)
5116 {
5117 if (operand_error_report.head != NULL)
5118 {
5119 gas_assert (operand_error_report.tail != NULL);
5120 operand_error_report.tail->next = free_opnd_error_record_nodes;
5121 free_opnd_error_record_nodes = operand_error_report.head;
5122 operand_error_report.head = NULL;
5123 operand_error_report.tail = NULL;
5124 return;
5125 }
5126 gas_assert (operand_error_report.tail == NULL);
5127 }
5128
/* Return TRUE if some operand error has been recorded during the
   parsing of the current assembly line using the opcode *OPCODE;
   otherwise return FALSE.  */
static inline bool
opcode_has_operand_error_p (const aarch64_opcode *opcode)
{
  /* Records are inserted at the head, so the current opcode's record
     (if any) is always the first one.  */
  operand_error_record *record = operand_error_report.head;
  return record && record->opcode == opcode;
}
5138
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* Initially the list head; if a record for OPCODE already exists it IS
     the head (records are inserted at the head), otherwise RECORD is
     re-pointed at a fresh node below.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Recycle a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Fill in (or overwrite) the details of this opcode's record.  */
  record->detail = new_record->detail;
}
5190
5191 static inline void
5192 record_operand_error_info (const aarch64_opcode *opcode,
5193 aarch64_operand_error *error_info)
5194 {
5195 operand_error_record record;
5196 record.opcode = opcode;
5197 record.detail = *error_info;
5198 add_operand_error_record (&record);
5199 }
5200
5201 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5202 error message *ERROR, for operand IDX (count from 0). */
5203
5204 static void
5205 record_operand_error (const aarch64_opcode *opcode, int idx,
5206 enum aarch64_operand_error_kind kind,
5207 const char* error)
5208 {
5209 aarch64_operand_error info;
5210 memset(&info, 0, sizeof (info));
5211 info.index = idx;
5212 info.kind = kind;
5213 info.error = error;
5214 info.non_fatal = false;
5215 record_operand_error_info (opcode, &info);
5216 }
5217
/* Like record_operand_error, but additionally carrying up to three integer
   payload values in EXTRA_DATA (e.g. range bounds for out-of-range
   diagnostics).  */
static void
record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
				enum aarch64_operand_error_kind kind,
				const char* error, const int *extra_data)
{
  aarch64_operand_error info;

  /* Zero the whole record first, for consistency with record_operand_error;
     previously any field of aarch64_operand_error not explicitly assigned
     below was left indeterminate.  */
  memset (&info, 0, sizeof (info));
  info.index = idx;
  info.kind = kind;
  info.error = error;
  info.data[0].i = extra_data[0];
  info.data[1].i = extra_data[1];
  info.data[2].i = extra_data[2];
  info.non_fatal = false;
  record_operand_error_info (opcode, &info);
}
5233
5234 static void
5235 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5236 const char* error, int lower_bound,
5237 int upper_bound)
5238 {
5239 int data[3] = {lower_bound, upper_bound, 0};
5240 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5241 error, data);
5242 }
5243
5244 /* Remove the operand error record for *OPCODE. */
5245 static void ATTRIBUTE_UNUSED
5246 remove_operand_error_record (const aarch64_opcode *opcode)
5247 {
5248 if (opcode_has_operand_error_p (opcode))
5249 {
5250 operand_error_record* record = operand_error_report.head;
5251 gas_assert (record != NULL && operand_error_report.tail != NULL);
5252 operand_error_report.head = record->next;
5253 record->next = free_opnd_error_record_nodes;
5254 free_opnd_error_record_nodes = record;
5255 if (operand_error_report.head == NULL)
5256 {
5257 gas_assert (operand_error_report.tail == record);
5258 operand_error_report.tail = NULL;
5259 }
5260 }
5261 }
5262
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many of the instruction's operand qualifiers agree
	 with this candidate sequence.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Keep the first sequence with the highest match count.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5312
5313 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5314 corresponding operands in *INSTR. */
5315
5316 static inline void
5317 assign_qualifier_sequence (aarch64_inst *instr,
5318 const aarch64_opnd_qualifier_t *qualifiers)
5319 {
5320 int i = 0;
5321 int num_opnds = aarch64_num_of_operands (instr->opcode);
5322 gas_assert (num_opnds);
5323 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5324 instr->operands[i].qualifier = *qualifiers;
5325 }
5326
/* Print operands for the diagnosis purpose.  Appends the textual form of
   OPNDS (per OPCODE) to BUF.
   NOTE(review): BUF is appended to with unbounded strcat; callers pass
   2048-byte buffers -- confirm that remains ample if operand printing
   grows.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }
}
5370
/* Send to stderr a string as information, prefixed with the current
   file/line position (when known) and "Info: ".  */

static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file = as_where (&line);
  va_list ap;

  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }
  fprintf (stderr, _("Info: "));
  va_start (ap, format);
  vfprintf (stderr, format, ap);
  va_end (ap);
  (void) putc ('\n', stderr);
}
5394
/* Output one operand error record.  RECORD is the error to report and STR
   the original assembly line, which is echoed in the diagnostic.  Non-fatal
   errors are emitted as warnings, everything else as errors.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX may be -1 (unknown operand); only index the operand table when
     it is valid.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Non-fatal problems are reported as warnings, fatal ones as errors.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parse the operands into the global INST; this is
	     expected to succeed, while encoding is expected to fail (we are
	     here precisely because the variant is invalid).  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] carry the inclusive bounds.  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0].i == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5590
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.

   STR is the assembly line echoed in the diagnostics; NON_FATAL_ONLY
   restricts the report to warnings.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5688 \f
/* Store the 32-bit instruction INSN at BUF in little-endian byte
   order, independently of the host's endianness.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  for (i = 0; i < 4; i++)
    p[i] = (insn >> (i * 8)) & 0xff;
}
5699
/* Read a 32-bit little-endian instruction word from BUF and return it
   as a host-order value.  Inverse of put_aarch64_insn.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  const unsigned char *p = (const unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
5709
/* Emit the instruction currently encoded in the global INST to the
   output: allocate INSN_SIZE bytes in the current frag, write the
   encoded value, and create a fixup if a relocation is pending in
   inst.reloc.  NEW_INST, when non-NULL, is attached to the fixup so the
   instruction can be re-encoded when the fixup is applied.  Also emits
   DWARF2 line-number information for the instruction.  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  to = frag_more (INSN_SIZE);

  /* NOTE(review): presumably flags this frag as containing code for
     later mapping-symbol/alignment bookkeeping — confirm against
     tc-aarch64.h's tc_frag_data definition.  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand kind and flags so the
	     back-end can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
5743
/* Link together opcodes of the same name: a singly linked chain of
   opcode-table entries sharing one mnemonic.  */

struct templates
{
  /* One opcode-table entry for the mnemonic.  */
  const aarch64_opcode *opcode;
  /* Next entry with the same mnemonic (presumably NULL-terminated —
     the chain is built elsewhere in this file).  */
  struct templates *next;
};

typedef struct templates templates;
5753
5754 static templates *
5755 lookup_mnemonic (const char *start, int len)
5756 {
5757 templates *templ = NULL;
5758
5759 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5760 return templ;
5761 }
5762
5763 /* Subroutine of md_assemble, responsible for looking up the primary
5764 opcode from the mnemonic the user wrote. BASE points to the beginning
5765 of the mnemonic, DOT points to the first '.' within the mnemonic
5766 (if any) and END points to the end of the mnemonic. */
5767
5768 static templates *
5769 opcode_lookup (char *base, char *dot, char *end)
5770 {
5771 const aarch64_cond *cond;
5772 char condname[16];
5773 int len;
5774
5775 if (dot == end)
5776 return 0;
5777
5778 inst.cond = COND_ALWAYS;
5779
5780 /* Handle a possible condition. */
5781 if (dot)
5782 {
5783 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5784 if (!cond)
5785 return 0;
5786 inst.cond = cond->value;
5787 len = dot - base;
5788 }
5789 else
5790 len = end - base;
5791
5792 if (inst.cond == COND_ALWAYS)
5793 {
5794 /* Look for unaffixed mnemonic. */
5795 return lookup_mnemonic (base, len);
5796 }
5797 else if (len <= 13)
5798 {
5799 /* append ".c" to mnemonic if conditional */
5800 memcpy (condname, base, len);
5801 memcpy (condname + len, ".c", 2);
5802 base = condname;
5803 len += 2;
5804 return lookup_mnemonic (base, len);
5805 }
5806
5807 return NULL;
5808 }
5809
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   (after recording a diagnostic via first_error) if the arrangement is
   invalid.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the width-derived offset is
     added to these below.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate qualifiers: /z (zeroing) and /m (merging).  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  Relies on AARCH64_OPND_QLF_S_B..S_Q
	 being laid out in the same order as NT_b..NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  Only full 128-bit, 64-bit and (4-byte) vector
	 sizes are representable.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type. The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount. */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      /* E.g. NT_s with width 4 yields V_2S + (4 >> 2) == V_4S.  */
      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5884
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.

   The default value comes from the opcode table entry; which field of
   *OPERAND receives it depends on the operand's class.  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  /* Only operands marked optional in the opcode table may be omitted.  */
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element (register lane) operands.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      /* An omitted scaled pattern defaults to "<pattern>, MUL #1".  */
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      /* Drop any relocation pending for the omitted immediate.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Table-driven operands: the default indexes an option table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      /* Operand kinds with no meaningful default are left untouched.  */
      break;
    }
}
5983
/* Process the relocation type for move wide instructions (MOVZ/MOVN/MOVK):
   validate that the relocation written by the user is permitted for the
   instruction and register width, and derive the implied shift amount
   (0/16/32/48) for operand 1 from the relocation's "G" group number.
   Return TRUE on success; otherwise return FALSE (with a syntax error
   recorded).  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  if (inst.base.opcode->op == OP_MOVK)
    /* These relocation types are rejected for MOVK.  NOTE(review):
       presumably because signed/overflow-checked groups make no sense
       when inserting into an existing value — confirm against the
       AArch64 ELF relocation specification.  */
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  switch (inst.reloc.type)
    {
    /* G0 relocations: bits [15:0], no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1 relocations: bits [31:16].  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2 relocations: bits [47:32] — meaningless for a W register.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* G3 relocations: bits [63:48] — 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6085
/* A primitive log calculator: return log2 of SIZE for the power-of-two
   sizes 1, 2, 4, 8 and 16.  Any other SIZE is a caller bug: we assert
   and return (unsigned int) -1.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* Lookup table indexed by SIZE - 1; 0xff marks non-powers-of-two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as SIZE > 16: the former would otherwise
     index ls[-1], an out-of-bounds read.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6101
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12 (or one
   of its TLS DTPREL/TPREL variants), by combining the pseudo reloc's
   class with the access size implied by the qualifier of operand 1.
   Returns BFD_RELOC_AARCH64_NONE (with a fatal syntax error recorded)
   if the access size is not representable for that reloc class.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: reloc class (plain, DTPREL, DTPREL_NC, TPREL, TPREL_NC);
     columns: log2 of the access size in bytes (1/2/4/8/16).  The TLS
     rows have no 128-bit entry.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1's qualifier is still unknown, deduce it from operand
     0's via the opcode's qualifier list.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* TLS reloc classes have no 128-bit variant (see table above).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this. */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6189
/* Check whether a register list REGINFO is valid.  The registers must
   be numbered in increasing order (modulo 32), in increments of one, or
   of two when ACCEPT_ALTERNATE is non-zero.

   REGINFO encoding: bits [1:0] hold the register count minus one; each
   subsequent 5-bit field holds a register number, first register in the
   lowest field.

   Return FALSE if such a register list is invalid, otherwise return
   TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t nb_regs = (reginfo & 0x3) + 1;
  uint32_t step = accept_alternate ? 2 : 1;
  uint32_t expected, i;

  reginfo >>= 2;
  expected = reginfo & 0x1f;

  for (i = 1; i < nb_regs; i++)
    {
      reginfo >>= 5;
      expected = (expected + step) & 0x1f;
      if ((reginfo & 0x1f) != expected)
	return false;
    }

  return true;
}
6220
6221 /* Generic instruction operand parser. This does no encoding and no
6222 semantic validation; it merely squirrels values away in the inst
6223 structure. Returns TRUE or FALSE depending on whether the
6224 specified grammar matched. */
6225
6226 static bool
6227 parse_operands (char *str, const aarch64_opcode *opcode)
6228 {
6229 int i;
6230 char *backtrack_pos = 0;
6231 const enum aarch64_opnd *operands = opcode->operands;
6232 aarch64_reg_type imm_reg_type;
6233
6234 clear_error ();
6235 skip_whitespace (str);
6236
6237 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
6238 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6239 else
6240 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6241
6242 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6243 {
6244 int64_t val;
6245 const reg_entry *reg;
6246 int comma_skipped_p = 0;
6247 aarch64_reg_type rtype;
6248 struct vector_type_el vectype;
6249 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6250 aarch64_opnd_info *info = &inst.base.operands[i];
6251 aarch64_reg_type reg_type;
6252
6253 DEBUG_TRACE ("parse operand %d", i);
6254
6255 /* Assign the operand code. */
6256 info->type = operands[i];
6257
6258 if (optional_operand_p (opcode, i))
6259 {
6260 /* Remember where we are in case we need to backtrack. */
6261 gas_assert (!backtrack_pos);
6262 backtrack_pos = str;
6263 }
6264
6265 /* Expect comma between operands; the backtrack mechanism will take
6266 care of cases of omitted optional operand. */
6267 if (i > 0 && ! skip_past_char (&str, ','))
6268 {
6269 set_syntax_error (_("comma expected between operands"));
6270 goto failure;
6271 }
6272 else
6273 comma_skipped_p = 1;
6274
6275 switch (operands[i])
6276 {
6277 case AARCH64_OPND_Rd:
6278 case AARCH64_OPND_Rn:
6279 case AARCH64_OPND_Rm:
6280 case AARCH64_OPND_Rt:
6281 case AARCH64_OPND_Rt2:
6282 case AARCH64_OPND_Rs:
6283 case AARCH64_OPND_Ra:
6284 case AARCH64_OPND_Rt_LS64:
6285 case AARCH64_OPND_Rt_SYS:
6286 case AARCH64_OPND_PAIRREG:
6287 case AARCH64_OPND_SVE_Rm:
6288 po_int_reg_or_fail (REG_TYPE_R_Z);
6289
6290 /* In LS64 load/store instructions Rt register number must be even
6291 and <=22. */
6292 if (operands[i] == AARCH64_OPND_Rt_LS64)
6293 {
6294 /* We've already checked if this is valid register.
6295 This will check if register number (Rt) is not undefined for LS64
6296 instructions:
6297 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6298 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6299 {
6300 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6301 goto failure;
6302 }
6303 }
6304 break;
6305
6306 case AARCH64_OPND_Rd_SP:
6307 case AARCH64_OPND_Rn_SP:
6308 case AARCH64_OPND_Rt_SP:
6309 case AARCH64_OPND_SVE_Rn_SP:
6310 case AARCH64_OPND_Rm_SP:
6311 po_int_reg_or_fail (REG_TYPE_R_SP);
6312 break;
6313
6314 case AARCH64_OPND_Rm_EXT:
6315 case AARCH64_OPND_Rm_SFT:
6316 po_misc_or_fail (parse_shifter_operand
6317 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6318 ? SHIFTED_ARITH_IMM
6319 : SHIFTED_LOGIC_IMM)));
6320 if (!info->shifter.operator_present)
6321 {
6322 /* Default to LSL if not present. Libopcodes prefers shifter
6323 kind to be explicit. */
6324 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6325 info->shifter.kind = AARCH64_MOD_LSL;
6326 /* For Rm_EXT, libopcodes will carry out further check on whether
6327 or not stack pointer is used in the instruction (Recall that
6328 "the extend operator is not optional unless at least one of
6329 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6330 }
6331 break;
6332
6333 case AARCH64_OPND_Fd:
6334 case AARCH64_OPND_Fn:
6335 case AARCH64_OPND_Fm:
6336 case AARCH64_OPND_Fa:
6337 case AARCH64_OPND_Ft:
6338 case AARCH64_OPND_Ft2:
6339 case AARCH64_OPND_Sd:
6340 case AARCH64_OPND_Sn:
6341 case AARCH64_OPND_Sm:
6342 case AARCH64_OPND_SVE_VZn:
6343 case AARCH64_OPND_SVE_Vd:
6344 case AARCH64_OPND_SVE_Vm:
6345 case AARCH64_OPND_SVE_Vn:
6346 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
6347 if (val == PARSE_FAIL)
6348 {
6349 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6350 goto failure;
6351 }
6352 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
6353
6354 info->reg.regno = val;
6355 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
6356 break;
6357
6358 case AARCH64_OPND_SVE_Pd:
6359 case AARCH64_OPND_SVE_Pg3:
6360 case AARCH64_OPND_SVE_Pg4_5:
6361 case AARCH64_OPND_SVE_Pg4_10:
6362 case AARCH64_OPND_SVE_Pg4_16:
6363 case AARCH64_OPND_SVE_Pm:
6364 case AARCH64_OPND_SVE_Pn:
6365 case AARCH64_OPND_SVE_Pt:
6366 case AARCH64_OPND_SME_Pm:
6367 reg_type = REG_TYPE_PN;
6368 goto vector_reg;
6369
6370 case AARCH64_OPND_SVE_Za_5:
6371 case AARCH64_OPND_SVE_Za_16:
6372 case AARCH64_OPND_SVE_Zd:
6373 case AARCH64_OPND_SVE_Zm_5:
6374 case AARCH64_OPND_SVE_Zm_16:
6375 case AARCH64_OPND_SVE_Zn:
6376 case AARCH64_OPND_SVE_Zt:
6377 reg_type = REG_TYPE_ZN;
6378 goto vector_reg;
6379
6380 case AARCH64_OPND_Va:
6381 case AARCH64_OPND_Vd:
6382 case AARCH64_OPND_Vn:
6383 case AARCH64_OPND_Vm:
6384 reg_type = REG_TYPE_VN;
6385 vector_reg:
6386 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6387 if (val == PARSE_FAIL)
6388 {
6389 first_error (_(get_reg_expected_msg (reg_type)));
6390 goto failure;
6391 }
6392 if (vectype.defined & NTA_HASINDEX)
6393 goto failure;
6394
6395 info->reg.regno = val;
6396 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6397 && vectype.type == NT_invtype)
6398 /* Unqualified Pn and Zn registers are allowed in certain
6399 contexts. Rely on F_STRICT qualifier checking to catch
6400 invalid uses. */
6401 info->qualifier = AARCH64_OPND_QLF_NIL;
6402 else
6403 {
6404 info->qualifier = vectype_to_qualifier (&vectype);
6405 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6406 goto failure;
6407 }
6408 break;
6409
6410 case AARCH64_OPND_VdD1:
6411 case AARCH64_OPND_VnD1:
6412 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
6413 if (val == PARSE_FAIL)
6414 {
6415 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6416 goto failure;
6417 }
6418 if (vectype.type != NT_d || vectype.index != 1)
6419 {
6420 set_fatal_syntax_error
6421 (_("the top half of a 128-bit FP/SIMD register is expected"));
6422 goto failure;
6423 }
6424 info->reg.regno = val;
6425 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6426 here; it is correct for the purpose of encoding/decoding since
6427 only the register number is explicitly encoded in the related
6428 instructions, although this appears a bit hacky. */
6429 info->qualifier = AARCH64_OPND_QLF_S_D;
6430 break;
6431
6432 case AARCH64_OPND_SVE_Zm3_INDEX:
6433 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6434 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6435 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6436 case AARCH64_OPND_SVE_Zm4_INDEX:
6437 case AARCH64_OPND_SVE_Zn_INDEX:
6438 reg_type = REG_TYPE_ZN;
6439 goto vector_reg_index;
6440
6441 case AARCH64_OPND_Ed:
6442 case AARCH64_OPND_En:
6443 case AARCH64_OPND_Em:
6444 case AARCH64_OPND_Em16:
6445 case AARCH64_OPND_SM3_IMM2:
6446 reg_type = REG_TYPE_VN;
6447 vector_reg_index:
6448 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6449 if (val == PARSE_FAIL)
6450 {
6451 first_error (_(get_reg_expected_msg (reg_type)));
6452 goto failure;
6453 }
6454 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6455 goto failure;
6456
6457 info->reglane.regno = val;
6458 info->reglane.index = vectype.index;
6459 info->qualifier = vectype_to_qualifier (&vectype);
6460 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6461 goto failure;
6462 break;
6463
6464 case AARCH64_OPND_SVE_ZnxN:
6465 case AARCH64_OPND_SVE_ZtxN:
6466 reg_type = REG_TYPE_ZN;
6467 goto vector_reg_list;
6468
6469 case AARCH64_OPND_LVn:
6470 case AARCH64_OPND_LVt:
6471 case AARCH64_OPND_LVt_AL:
6472 case AARCH64_OPND_LEt:
6473 reg_type = REG_TYPE_VN;
6474 vector_reg_list:
6475 if (reg_type == REG_TYPE_ZN
6476 && get_opcode_dependent_value (opcode) == 1
6477 && *str != '{')
6478 {
6479 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6480 if (val == PARSE_FAIL)
6481 {
6482 first_error (_(get_reg_expected_msg (reg_type)));
6483 goto failure;
6484 }
6485 info->reglist.first_regno = val;
6486 info->reglist.num_regs = 1;
6487 }
6488 else
6489 {
6490 val = parse_vector_reg_list (&str, reg_type, &vectype);
6491 if (val == PARSE_FAIL)
6492 goto failure;
6493
6494 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6495 {
6496 set_fatal_syntax_error (_("invalid register list"));
6497 goto failure;
6498 }
6499
6500 if (vectype.width != 0 && *str != ',')
6501 {
6502 set_fatal_syntax_error
6503 (_("expected element type rather than vector type"));
6504 goto failure;
6505 }
6506
6507 info->reglist.first_regno = (val >> 2) & 0x1f;
6508 info->reglist.num_regs = (val & 0x3) + 1;
6509 }
6510 if (operands[i] == AARCH64_OPND_LEt)
6511 {
6512 if (!(vectype.defined & NTA_HASINDEX))
6513 goto failure;
6514 info->reglist.has_index = 1;
6515 info->reglist.index = vectype.index;
6516 }
6517 else
6518 {
6519 if (vectype.defined & NTA_HASINDEX)
6520 goto failure;
6521 if (!(vectype.defined & NTA_HASTYPE))
6522 {
6523 if (reg_type == REG_TYPE_ZN)
6524 set_fatal_syntax_error (_("missing type suffix"));
6525 goto failure;
6526 }
6527 }
6528 info->qualifier = vectype_to_qualifier (&vectype);
6529 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6530 goto failure;
6531 break;
6532
6533 case AARCH64_OPND_CRn:
6534 case AARCH64_OPND_CRm:
6535 {
6536 char prefix = *(str++);
6537 if (prefix != 'c' && prefix != 'C')
6538 goto failure;
6539
6540 po_imm_nc_or_fail ();
6541 if (val > 15)
6542 {
6543 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6544 goto failure;
6545 }
6546 info->qualifier = AARCH64_OPND_QLF_CR;
6547 info->imm.value = val;
6548 break;
6549 }
6550
6551 case AARCH64_OPND_SHLL_IMM:
6552 case AARCH64_OPND_IMM_VLSR:
6553 po_imm_or_fail (1, 64);
6554 info->imm.value = val;
6555 break;
6556
6557 case AARCH64_OPND_CCMP_IMM:
6558 case AARCH64_OPND_SIMM5:
6559 case AARCH64_OPND_FBITS:
6560 case AARCH64_OPND_TME_UIMM16:
6561 case AARCH64_OPND_UIMM4:
6562 case AARCH64_OPND_UIMM4_ADDG:
6563 case AARCH64_OPND_UIMM10:
6564 case AARCH64_OPND_UIMM3_OP1:
6565 case AARCH64_OPND_UIMM3_OP2:
6566 case AARCH64_OPND_IMM_VLSL:
6567 case AARCH64_OPND_IMM:
6568 case AARCH64_OPND_IMM_2:
6569 case AARCH64_OPND_WIDTH:
6570 case AARCH64_OPND_SVE_INV_LIMM:
6571 case AARCH64_OPND_SVE_LIMM:
6572 case AARCH64_OPND_SVE_LIMM_MOV:
6573 case AARCH64_OPND_SVE_SHLIMM_PRED:
6574 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6575 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6576 case AARCH64_OPND_SVE_SHRIMM_PRED:
6577 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6578 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6579 case AARCH64_OPND_SVE_SIMM5:
6580 case AARCH64_OPND_SVE_SIMM5B:
6581 case AARCH64_OPND_SVE_SIMM6:
6582 case AARCH64_OPND_SVE_SIMM8:
6583 case AARCH64_OPND_SVE_UIMM3:
6584 case AARCH64_OPND_SVE_UIMM7:
6585 case AARCH64_OPND_SVE_UIMM8:
6586 case AARCH64_OPND_SVE_UIMM8_53:
6587 case AARCH64_OPND_IMM_ROT1:
6588 case AARCH64_OPND_IMM_ROT2:
6589 case AARCH64_OPND_IMM_ROT3:
6590 case AARCH64_OPND_SVE_IMM_ROT1:
6591 case AARCH64_OPND_SVE_IMM_ROT2:
6592 case AARCH64_OPND_SVE_IMM_ROT3:
6593 po_imm_nc_or_fail ();
6594 info->imm.value = val;
6595 break;
6596
6597 case AARCH64_OPND_SVE_AIMM:
6598 case AARCH64_OPND_SVE_ASIMM:
6599 po_imm_nc_or_fail ();
6600 info->imm.value = val;
6601 skip_whitespace (str);
6602 if (skip_past_comma (&str))
6603 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6604 else
6605 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6606 break;
6607
6608 case AARCH64_OPND_SVE_PATTERN:
6609 po_enum_or_fail (aarch64_sve_pattern_array);
6610 info->imm.value = val;
6611 break;
6612
6613 case AARCH64_OPND_SVE_PATTERN_SCALED:
6614 po_enum_or_fail (aarch64_sve_pattern_array);
6615 info->imm.value = val;
6616 if (skip_past_comma (&str)
6617 && !parse_shift (&str, info, SHIFTED_MUL))
6618 goto failure;
6619 if (!info->shifter.operator_present)
6620 {
6621 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6622 info->shifter.kind = AARCH64_MOD_MUL;
6623 info->shifter.amount = 1;
6624 }
6625 break;
6626
6627 case AARCH64_OPND_SVE_PRFOP:
6628 po_enum_or_fail (aarch64_sve_prfop_array);
6629 info->imm.value = val;
6630 break;
6631
6632 case AARCH64_OPND_UIMM7:
6633 po_imm_or_fail (0, 127);
6634 info->imm.value = val;
6635 break;
6636
6637 case AARCH64_OPND_IDX:
6638 case AARCH64_OPND_MASK:
6639 case AARCH64_OPND_BIT_NUM:
6640 case AARCH64_OPND_IMMR:
6641 case AARCH64_OPND_IMMS:
6642 po_imm_or_fail (0, 63);
6643 info->imm.value = val;
6644 break;
6645
6646 case AARCH64_OPND_IMM0:
6647 po_imm_nc_or_fail ();
6648 if (val != 0)
6649 {
6650 set_fatal_syntax_error (_("immediate zero expected"));
6651 goto failure;
6652 }
6653 info->imm.value = 0;
6654 break;
6655
6656 case AARCH64_OPND_FPIMM0:
6657 {
6658 int qfloat;
6659 bool res1 = false, res2 = false;
6660 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6661 it is probably not worth the effort to support it. */
6662 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6663 imm_reg_type))
6664 && (error_p ()
6665 || !(res2 = parse_constant_immediate (&str, &val,
6666 imm_reg_type))))
6667 goto failure;
6668 if ((res1 && qfloat == 0) || (res2 && val == 0))
6669 {
6670 info->imm.value = 0;
6671 info->imm.is_fp = 1;
6672 break;
6673 }
6674 set_fatal_syntax_error (_("immediate zero expected"));
6675 goto failure;
6676 }
6677
6678 case AARCH64_OPND_IMM_MOV:
6679 {
6680 char *saved = str;
6681 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6682 reg_name_p (str, REG_TYPE_VN))
6683 goto failure;
6684 str = saved;
6685 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6686 GE_OPT_PREFIX, REJECT_ABSENT));
6687 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6688 later. fix_mov_imm_insn will try to determine a machine
6689 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6690 message if the immediate cannot be moved by a single
6691 instruction. */
6692 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6693 inst.base.operands[i].skip = 1;
6694 }
6695 break;
6696
6697 case AARCH64_OPND_SIMD_IMM:
6698 case AARCH64_OPND_SIMD_IMM_SFT:
6699 if (! parse_big_immediate (&str, &val, imm_reg_type))
6700 goto failure;
6701 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6702 /* addr_off_p */ 0,
6703 /* need_libopcodes_p */ 1,
6704 /* skip_p */ 1);
6705 /* Parse shift.
6706 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6707 shift, we don't check it here; we leave the checking to
6708 the libopcodes (operand_general_constraint_met_p). By
6709 doing this, we achieve better diagnostics. */
6710 if (skip_past_comma (&str)
6711 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6712 goto failure;
6713 if (!info->shifter.operator_present
6714 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6715 {
6716 /* Default to LSL if not present. Libopcodes prefers shifter
6717 kind to be explicit. */
6718 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6719 info->shifter.kind = AARCH64_MOD_LSL;
6720 }
6721 break;
6722
6723 case AARCH64_OPND_FPIMM:
6724 case AARCH64_OPND_SIMD_FPIMM:
6725 case AARCH64_OPND_SVE_FPIMM8:
6726 {
6727 int qfloat;
6728 bool dp_p;
6729
6730 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6731 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6732 || !aarch64_imm_float_p (qfloat))
6733 {
6734 if (!error_p ())
6735 set_fatal_syntax_error (_("invalid floating-point"
6736 " constant"));
6737 goto failure;
6738 }
6739 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6740 inst.base.operands[i].imm.is_fp = 1;
6741 }
6742 break;
6743
6744 case AARCH64_OPND_SVE_I1_HALF_ONE:
6745 case AARCH64_OPND_SVE_I1_HALF_TWO:
6746 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6747 {
6748 int qfloat;
6749 bool dp_p;
6750
6751 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6752 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6753 {
6754 if (!error_p ())
6755 set_fatal_syntax_error (_("invalid floating-point"
6756 " constant"));
6757 goto failure;
6758 }
6759 inst.base.operands[i].imm.value = qfloat;
6760 inst.base.operands[i].imm.is_fp = 1;
6761 }
6762 break;
6763
6764 case AARCH64_OPND_LIMM:
6765 po_misc_or_fail (parse_shifter_operand (&str, info,
6766 SHIFTED_LOGIC_IMM));
6767 if (info->shifter.operator_present)
6768 {
6769 set_fatal_syntax_error
6770 (_("shift not allowed for bitmask immediate"));
6771 goto failure;
6772 }
6773 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6774 /* addr_off_p */ 0,
6775 /* need_libopcodes_p */ 1,
6776 /* skip_p */ 1);
6777 break;
6778
6779 case AARCH64_OPND_AIMM:
6780 if (opcode->op == OP_ADD)
6781 /* ADD may have relocation types. */
6782 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6783 SHIFTED_ARITH_IMM));
6784 else
6785 po_misc_or_fail (parse_shifter_operand (&str, info,
6786 SHIFTED_ARITH_IMM));
6787 switch (inst.reloc.type)
6788 {
6789 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6790 info->shifter.amount = 12;
6791 break;
6792 case BFD_RELOC_UNUSED:
6793 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6794 if (info->shifter.kind != AARCH64_MOD_NONE)
6795 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6796 inst.reloc.pc_rel = 0;
6797 break;
6798 default:
6799 break;
6800 }
6801 info->imm.value = 0;
6802 if (!info->shifter.operator_present)
6803 {
6804 /* Default to LSL if not present. Libopcodes prefers shifter
6805 kind to be explicit. */
6806 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6807 info->shifter.kind = AARCH64_MOD_LSL;
6808 }
6809 break;
6810
6811 case AARCH64_OPND_HALF:
6812 {
6813 /* #<imm16> or relocation. */
6814 int internal_fixup_p;
6815 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6816 if (internal_fixup_p)
6817 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6818 skip_whitespace (str);
6819 if (skip_past_comma (&str))
6820 {
6821 /* {, LSL #<shift>} */
6822 if (! aarch64_gas_internal_fixup_p ())
6823 {
6824 set_fatal_syntax_error (_("can't mix relocation modifier "
6825 "with explicit shift"));
6826 goto failure;
6827 }
6828 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6829 }
6830 else
6831 inst.base.operands[i].shifter.amount = 0;
6832 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6833 inst.base.operands[i].imm.value = 0;
6834 if (! process_movw_reloc_info ())
6835 goto failure;
6836 }
6837 break;
6838
6839 case AARCH64_OPND_EXCEPTION:
6840 case AARCH64_OPND_UNDEFINED:
6841 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6842 imm_reg_type));
6843 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6844 /* addr_off_p */ 0,
6845 /* need_libopcodes_p */ 0,
6846 /* skip_p */ 1);
6847 break;
6848
6849 case AARCH64_OPND_NZCV:
6850 {
6851 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6852 if (nzcv != NULL)
6853 {
6854 str += 4;
6855 info->imm.value = nzcv->value;
6856 break;
6857 }
6858 po_imm_or_fail (0, 15);
6859 info->imm.value = val;
6860 }
6861 break;
6862
6863 case AARCH64_OPND_COND:
6864 case AARCH64_OPND_COND1:
6865 {
6866 char *start = str;
6867 do
6868 str++;
6869 while (ISALPHA (*str));
6870 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6871 if (info->cond == NULL)
6872 {
6873 set_syntax_error (_("invalid condition"));
6874 goto failure;
6875 }
6876 else if (operands[i] == AARCH64_OPND_COND1
6877 && (info->cond->value & 0xe) == 0xe)
6878 {
6879 /* Do not allow AL or NV. */
6880 set_default_error ();
6881 goto failure;
6882 }
6883 }
6884 break;
6885
6886 case AARCH64_OPND_ADDR_ADRP:
6887 po_misc_or_fail (parse_adrp (&str));
6888 /* Clear the value as operand needs to be relocated. */
6889 info->imm.value = 0;
6890 break;
6891
6892 case AARCH64_OPND_ADDR_PCREL14:
6893 case AARCH64_OPND_ADDR_PCREL19:
6894 case AARCH64_OPND_ADDR_PCREL21:
6895 case AARCH64_OPND_ADDR_PCREL26:
6896 po_misc_or_fail (parse_address (&str, info));
6897 if (!info->addr.pcrel)
6898 {
6899 set_syntax_error (_("invalid pc-relative address"));
6900 goto failure;
6901 }
6902 if (inst.gen_lit_pool
6903 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6904 {
6905 /* Only permit "=value" in the literal load instructions.
6906 The literal will be generated by programmer_friendly_fixup. */
6907 set_syntax_error (_("invalid use of \"=immediate\""));
6908 goto failure;
6909 }
6910 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6911 {
6912 set_syntax_error (_("unrecognized relocation suffix"));
6913 goto failure;
6914 }
6915 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6916 {
6917 info->imm.value = inst.reloc.exp.X_add_number;
6918 inst.reloc.type = BFD_RELOC_UNUSED;
6919 }
6920 else
6921 {
6922 info->imm.value = 0;
6923 if (inst.reloc.type == BFD_RELOC_UNUSED)
6924 switch (opcode->iclass)
6925 {
6926 case compbranch:
6927 case condbranch:
6928 /* e.g. CBZ or B.COND */
6929 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6930 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6931 break;
6932 case testbranch:
6933 /* e.g. TBZ */
6934 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6935 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6936 break;
6937 case branch_imm:
6938 /* e.g. B or BL */
6939 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6940 inst.reloc.type =
6941 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6942 : BFD_RELOC_AARCH64_JUMP26;
6943 break;
6944 case loadlit:
6945 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6946 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6947 break;
6948 case pcreladdr:
6949 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6950 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6951 break;
6952 default:
6953 gas_assert (0);
6954 abort ();
6955 }
6956 inst.reloc.pc_rel = 1;
6957 }
6958 break;
6959
6960 case AARCH64_OPND_ADDR_SIMPLE:
6961 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6962 {
6963 /* [<Xn|SP>{, #<simm>}] */
6964 char *start = str;
6965 /* First use the normal address-parsing routines, to get
6966 the usual syntax errors. */
6967 po_misc_or_fail (parse_address (&str, info));
6968 if (info->addr.pcrel || info->addr.offset.is_reg
6969 || !info->addr.preind || info->addr.postind
6970 || info->addr.writeback)
6971 {
6972 set_syntax_error (_("invalid addressing mode"));
6973 goto failure;
6974 }
6975
6976 /* Then retry, matching the specific syntax of these addresses. */
6977 str = start;
6978 po_char_or_fail ('[');
6979 po_reg_or_fail (REG_TYPE_R64_SP);
6980 /* Accept optional ", #0". */
6981 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6982 && skip_past_char (&str, ','))
6983 {
6984 skip_past_char (&str, '#');
6985 if (! skip_past_char (&str, '0'))
6986 {
6987 set_fatal_syntax_error
6988 (_("the optional immediate offset can only be 0"));
6989 goto failure;
6990 }
6991 }
6992 po_char_or_fail (']');
6993 break;
6994 }
6995
6996 case AARCH64_OPND_ADDR_REGOFF:
6997 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6998 po_misc_or_fail (parse_address (&str, info));
6999 regoff_addr:
7000 if (info->addr.pcrel || !info->addr.offset.is_reg
7001 || !info->addr.preind || info->addr.postind
7002 || info->addr.writeback)
7003 {
7004 set_syntax_error (_("invalid addressing mode"));
7005 goto failure;
7006 }
7007 if (!info->shifter.operator_present)
7008 {
7009 /* Default to LSL if not present. Libopcodes prefers shifter
7010 kind to be explicit. */
7011 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7012 info->shifter.kind = AARCH64_MOD_LSL;
7013 }
7014 /* Qualifier to be deduced by libopcodes. */
7015 break;
7016
7017 case AARCH64_OPND_ADDR_SIMM7:
7018 po_misc_or_fail (parse_address (&str, info));
7019 if (info->addr.pcrel || info->addr.offset.is_reg
7020 || (!info->addr.preind && !info->addr.postind))
7021 {
7022 set_syntax_error (_("invalid addressing mode"));
7023 goto failure;
7024 }
7025 if (inst.reloc.type != BFD_RELOC_UNUSED)
7026 {
7027 set_syntax_error (_("relocation not allowed"));
7028 goto failure;
7029 }
7030 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7031 /* addr_off_p */ 1,
7032 /* need_libopcodes_p */ 1,
7033 /* skip_p */ 0);
7034 break;
7035
7036 case AARCH64_OPND_ADDR_SIMM9:
7037 case AARCH64_OPND_ADDR_SIMM9_2:
7038 case AARCH64_OPND_ADDR_SIMM11:
7039 case AARCH64_OPND_ADDR_SIMM13:
7040 po_misc_or_fail (parse_address (&str, info));
7041 if (info->addr.pcrel || info->addr.offset.is_reg
7042 || (!info->addr.preind && !info->addr.postind)
7043 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7044 && info->addr.writeback))
7045 {
7046 set_syntax_error (_("invalid addressing mode"));
7047 goto failure;
7048 }
7049 if (inst.reloc.type != BFD_RELOC_UNUSED)
7050 {
7051 set_syntax_error (_("relocation not allowed"));
7052 goto failure;
7053 }
7054 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7055 /* addr_off_p */ 1,
7056 /* need_libopcodes_p */ 1,
7057 /* skip_p */ 0);
7058 break;
7059
7060 case AARCH64_OPND_ADDR_SIMM10:
7061 case AARCH64_OPND_ADDR_OFFSET:
7062 po_misc_or_fail (parse_address (&str, info));
7063 if (info->addr.pcrel || info->addr.offset.is_reg
7064 || !info->addr.preind || info->addr.postind)
7065 {
7066 set_syntax_error (_("invalid addressing mode"));
7067 goto failure;
7068 }
7069 if (inst.reloc.type != BFD_RELOC_UNUSED)
7070 {
7071 set_syntax_error (_("relocation not allowed"));
7072 goto failure;
7073 }
7074 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7075 /* addr_off_p */ 1,
7076 /* need_libopcodes_p */ 1,
7077 /* skip_p */ 0);
7078 break;
7079
7080 case AARCH64_OPND_ADDR_UIMM12:
7081 po_misc_or_fail (parse_address (&str, info));
7082 if (info->addr.pcrel || info->addr.offset.is_reg
7083 || !info->addr.preind || info->addr.writeback)
7084 {
7085 set_syntax_error (_("invalid addressing mode"));
7086 goto failure;
7087 }
7088 if (inst.reloc.type == BFD_RELOC_UNUSED)
7089 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7090 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7091 || (inst.reloc.type
7092 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7093 || (inst.reloc.type
7094 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7095 || (inst.reloc.type
7096 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7097 || (inst.reloc.type
7098 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7099 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7100 /* Leave qualifier to be determined by libopcodes. */
7101 break;
7102
7103 case AARCH64_OPND_SIMD_ADDR_POST:
7104 /* [<Xn|SP>], <Xm|#<amount>> */
7105 po_misc_or_fail (parse_address (&str, info));
7106 if (!info->addr.postind || !info->addr.writeback)
7107 {
7108 set_syntax_error (_("invalid addressing mode"));
7109 goto failure;
7110 }
7111 if (!info->addr.offset.is_reg)
7112 {
7113 if (inst.reloc.exp.X_op == O_constant)
7114 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7115 else
7116 {
7117 set_fatal_syntax_error
7118 (_("writeback value must be an immediate constant"));
7119 goto failure;
7120 }
7121 }
7122 /* No qualifier. */
7123 break;
7124
7125 case AARCH64_OPND_SME_SM_ZA:
7126 /* { SM | ZA } */
7127 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7128 {
7129 set_syntax_error (_("unknown or missing PSTATE field name"));
7130 goto failure;
7131 }
7132 info->reg.regno = val;
7133 break;
7134
7135 case AARCH64_OPND_SME_PnT_Wm_imm:
7136 /* <Pn>.<T>[<Wm>, #<imm>] */
7137 {
7138 int index_base_reg;
7139 int imm;
7140 val = parse_sme_pred_reg_with_index (&str,
7141 &index_base_reg,
7142 &imm,
7143 &qualifier);
7144 if (val == PARSE_FAIL)
7145 goto failure;
7146
7147 info->za_tile_vector.regno = val;
7148 info->za_tile_vector.index.regno = index_base_reg;
7149 info->za_tile_vector.index.imm = imm;
7150 info->qualifier = qualifier;
7151 break;
7152 }
7153
7154 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7155 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7156 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7157 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7158 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7159 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7160 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7161 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7162 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7163 case AARCH64_OPND_SVE_ADDR_RI_U6:
7164 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7165 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7166 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7167 /* [X<n>{, #imm, MUL VL}]
7168 [X<n>{, #imm}]
7169 but recognizing SVE registers. */
7170 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7171 &offset_qualifier));
7172 if (base_qualifier != AARCH64_OPND_QLF_X)
7173 {
7174 set_syntax_error (_("invalid addressing mode"));
7175 goto failure;
7176 }
7177 sve_regimm:
7178 if (info->addr.pcrel || info->addr.offset.is_reg
7179 || !info->addr.preind || info->addr.writeback)
7180 {
7181 set_syntax_error (_("invalid addressing mode"));
7182 goto failure;
7183 }
7184 if (inst.reloc.type != BFD_RELOC_UNUSED
7185 || inst.reloc.exp.X_op != O_constant)
7186 {
7187 /* Make sure this has priority over
7188 "invalid addressing mode". */
7189 set_fatal_syntax_error (_("constant offset required"));
7190 goto failure;
7191 }
7192 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7193 break;
7194
7195 case AARCH64_OPND_SVE_ADDR_R:
7196 /* [<Xn|SP>{, <R><m>}]
7197 but recognizing SVE registers. */
7198 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7199 &offset_qualifier));
7200 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7201 {
7202 offset_qualifier = AARCH64_OPND_QLF_X;
7203 info->addr.offset.is_reg = 1;
7204 info->addr.offset.regno = 31;
7205 }
7206 else if (base_qualifier != AARCH64_OPND_QLF_X
7207 || offset_qualifier != AARCH64_OPND_QLF_X)
7208 {
7209 set_syntax_error (_("invalid addressing mode"));
7210 goto failure;
7211 }
7212 goto regoff_addr;
7213
7214 case AARCH64_OPND_SVE_ADDR_RR:
7215 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7216 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7217 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7218 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7219 case AARCH64_OPND_SVE_ADDR_RX:
7220 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7221 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7222 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7223 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7224 but recognizing SVE registers. */
7225 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7226 &offset_qualifier));
7227 if (base_qualifier != AARCH64_OPND_QLF_X
7228 || offset_qualifier != AARCH64_OPND_QLF_X)
7229 {
7230 set_syntax_error (_("invalid addressing mode"));
7231 goto failure;
7232 }
7233 goto regoff_addr;
7234
7235 case AARCH64_OPND_SVE_ADDR_RZ:
7236 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7237 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7238 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7239 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7240 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7241 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7242 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7243 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7244 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7245 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7246 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7247 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7248 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7249 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7250 &offset_qualifier));
7251 if (base_qualifier != AARCH64_OPND_QLF_X
7252 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7253 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7254 {
7255 set_syntax_error (_("invalid addressing mode"));
7256 goto failure;
7257 }
7258 info->qualifier = offset_qualifier;
7259 goto regoff_addr;
7260
7261 case AARCH64_OPND_SVE_ADDR_ZX:
7262 /* [Zn.<T>{, <Xm>}]. */
7263 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7264 &offset_qualifier));
7265 /* Things to check:
7266 base_qualifier either S_S or S_D
7267 offset_qualifier must be X
7268 */
7269 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7270 && base_qualifier != AARCH64_OPND_QLF_S_D)
7271 || offset_qualifier != AARCH64_OPND_QLF_X)
7272 {
7273 set_syntax_error (_("invalid addressing mode"));
7274 goto failure;
7275 }
7276 info->qualifier = base_qualifier;
7277 if (!info->addr.offset.is_reg || info->addr.pcrel
7278 || !info->addr.preind || info->addr.writeback
7279 || info->shifter.operator_present != 0)
7280 {
7281 set_syntax_error (_("invalid addressing mode"));
7282 goto failure;
7283 }
7284 info->shifter.kind = AARCH64_MOD_LSL;
7285 break;
7286
7287
7288 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7289 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7290 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7291 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7292 /* [Z<n>.<T>{, #imm}] */
7293 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7294 &offset_qualifier));
7295 if (base_qualifier != AARCH64_OPND_QLF_S_S
7296 && base_qualifier != AARCH64_OPND_QLF_S_D)
7297 {
7298 set_syntax_error (_("invalid addressing mode"));
7299 goto failure;
7300 }
7301 info->qualifier = base_qualifier;
7302 goto sve_regimm;
7303
7304 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7305 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7306 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7307 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7308 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7309
7310 We don't reject:
7311
7312 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7313
7314 here since we get better error messages by leaving it to
7315 the qualifier checking routines. */
7316 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7317 &offset_qualifier));
7318 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7319 && base_qualifier != AARCH64_OPND_QLF_S_D)
7320 || offset_qualifier != base_qualifier)
7321 {
7322 set_syntax_error (_("invalid addressing mode"));
7323 goto failure;
7324 }
7325 info->qualifier = base_qualifier;
7326 goto regoff_addr;
7327
7328 case AARCH64_OPND_SYSREG:
7329 {
7330 uint32_t sysreg_flags;
7331 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7332 &sysreg_flags)) == PARSE_FAIL)
7333 {
7334 set_syntax_error (_("unknown or missing system register name"));
7335 goto failure;
7336 }
7337 inst.base.operands[i].sysreg.value = val;
7338 inst.base.operands[i].sysreg.flags = sysreg_flags;
7339 break;
7340 }
7341
7342 case AARCH64_OPND_PSTATEFIELD:
7343 {
7344 uint32_t sysreg_flags;
7345 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7346 &sysreg_flags)) == PARSE_FAIL)
7347 {
7348 set_syntax_error (_("unknown or missing PSTATE field name"));
7349 goto failure;
7350 }
7351 inst.base.operands[i].pstatefield = val;
7352 inst.base.operands[i].sysreg.flags = sysreg_flags;
7353 break;
7354 }
7355
7356 case AARCH64_OPND_SYSREG_IC:
7357 inst.base.operands[i].sysins_op =
7358 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7359 goto sys_reg_ins;
7360
7361 case AARCH64_OPND_SYSREG_DC:
7362 inst.base.operands[i].sysins_op =
7363 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7364 goto sys_reg_ins;
7365
7366 case AARCH64_OPND_SYSREG_AT:
7367 inst.base.operands[i].sysins_op =
7368 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7369 goto sys_reg_ins;
7370
7371 case AARCH64_OPND_SYSREG_SR:
7372 inst.base.operands[i].sysins_op =
7373 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7374 goto sys_reg_ins;
7375
7376 case AARCH64_OPND_SYSREG_TLBI:
7377 inst.base.operands[i].sysins_op =
7378 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7379 sys_reg_ins:
7380 if (inst.base.operands[i].sysins_op == NULL)
7381 {
7382 set_fatal_syntax_error ( _("unknown or missing operation name"));
7383 goto failure;
7384 }
7385 break;
7386
7387 case AARCH64_OPND_BARRIER:
7388 case AARCH64_OPND_BARRIER_ISB:
7389 val = parse_barrier (&str);
7390 if (val != PARSE_FAIL
7391 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7392 {
7393 /* ISB only accepts options name 'sy'. */
7394 set_syntax_error
7395 (_("the specified option is not accepted in ISB"));
7396 /* Turn off backtrack as this optional operand is present. */
7397 backtrack_pos = 0;
7398 goto failure;
7399 }
7400 if (val != PARSE_FAIL
7401 && operands[i] == AARCH64_OPND_BARRIER)
7402 {
7403 /* Regular barriers accept options CRm (C0-C15).
7404 DSB nXS barrier variant accepts values > 15. */
7405 if (val < 0 || val > 15)
7406 {
7407 set_syntax_error (_("the specified option is not accepted in DSB"));
7408 goto failure;
7409 }
7410 }
7411 /* This is an extension to accept a 0..15 immediate. */
7412 if (val == PARSE_FAIL)
7413 po_imm_or_fail (0, 15);
7414 info->barrier = aarch64_barrier_options + val;
7415 break;
7416
7417 case AARCH64_OPND_BARRIER_DSB_NXS:
7418 val = parse_barrier (&str);
7419 if (val != PARSE_FAIL)
7420 {
7421 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7422 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7423 {
7424 set_syntax_error (_("the specified option is not accepted in DSB"));
7425 /* Turn off backtrack as this optional operand is present. */
7426 backtrack_pos = 0;
7427 goto failure;
7428 }
7429 }
7430 else
7431 {
7432 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7433 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7434 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7435 goto failure;
7436 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7437 {
7438 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7439 goto failure;
7440 }
7441 }
7442 /* Option index is encoded as 2-bit value in val<3:2>. */
7443 val = (val >> 2) - 4;
7444 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7445 break;
7446
7447 case AARCH64_OPND_PRFOP:
7448 val = parse_pldop (&str);
7449 /* This is an extension to accept a 0..31 immediate. */
7450 if (val == PARSE_FAIL)
7451 po_imm_or_fail (0, 31);
7452 inst.base.operands[i].prfop = aarch64_prfops + val;
7453 break;
7454
7455 case AARCH64_OPND_BARRIER_PSB:
7456 val = parse_barrier_psb (&str, &(info->hint_option));
7457 if (val == PARSE_FAIL)
7458 goto failure;
7459 break;
7460
7461 case AARCH64_OPND_BTI_TARGET:
7462 val = parse_bti_operand (&str, &(info->hint_option));
7463 if (val == PARSE_FAIL)
7464 goto failure;
7465 break;
7466
7467 case AARCH64_OPND_SME_ZAda_2b:
7468 case AARCH64_OPND_SME_ZAda_3b:
7469 val = parse_sme_zada_operand (&str, &qualifier);
7470 if (val == PARSE_FAIL)
7471 goto failure;
7472 info->reg.regno = val;
7473 info->qualifier = qualifier;
7474 break;
7475
7476 case AARCH64_OPND_SME_ZA_HV_idx_src:
7477 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7478 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7479 {
7480 enum sme_hv_slice slice_indicator;
7481 int vector_select_register;
7482 int imm;
7483
7484 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7485 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7486 &slice_indicator,
7487 &vector_select_register,
7488 &imm,
7489 &qualifier);
7490 else
7491 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7492 &vector_select_register,
7493 &imm,
7494 &qualifier);
7495 if (val == PARSE_FAIL)
7496 goto failure;
7497 info->za_tile_vector.regno = val;
7498 info->za_tile_vector.index.regno = vector_select_register;
7499 info->za_tile_vector.index.imm = imm;
7500 info->za_tile_vector.v = slice_indicator;
7501 info->qualifier = qualifier;
7502 break;
7503 }
7504
7505 case AARCH64_OPND_SME_list_of_64bit_tiles:
7506 val = parse_sme_list_of_64bit_tiles (&str);
7507 if (val == PARSE_FAIL)
7508 goto failure;
7509 info->imm.value = val;
7510 break;
7511
7512 case AARCH64_OPND_SME_ZA_array:
7513 {
7514 int imm;
7515 val = parse_sme_za_array (&str, &imm);
7516 if (val == PARSE_FAIL)
7517 goto failure;
7518 info->za_tile_vector.index.regno = val;
7519 info->za_tile_vector.index.imm = imm;
7520 break;
7521 }
7522
7523 case AARCH64_OPND_MOPS_ADDR_Rd:
7524 case AARCH64_OPND_MOPS_ADDR_Rs:
7525 po_char_or_fail ('[');
7526 if (!parse_x0_to_x30 (&str, info))
7527 goto failure;
7528 po_char_or_fail (']');
7529 po_char_or_fail ('!');
7530 break;
7531
7532 case AARCH64_OPND_MOPS_WB_Rn:
7533 if (!parse_x0_to_x30 (&str, info))
7534 goto failure;
7535 po_char_or_fail ('!');
7536 break;
7537
7538 default:
7539 as_fatal (_("unhandled operand code %d"), operands[i]);
7540 }
7541
7542 /* If we get here, this operand was successfully parsed. */
7543 inst.base.operands[i].present = 1;
7544 continue;
7545
7546 failure:
7547 /* The parse routine should already have set the error, but in case
7548 not, set a default one here. */
7549 if (! error_p ())
7550 set_default_error ();
7551
7552 if (! backtrack_pos)
7553 goto parse_operands_return;
7554
7555 {
7556 /* We reach here because this operand is marked as optional, and
7557 either no operand was supplied or the operand was supplied but it
7558 was syntactically incorrect. In the latter case we report an
7559 error. In the former case we perform a few more checks before
7560 dropping through to the code to insert the default operand. */
7561
7562 char *tmp = backtrack_pos;
7563 char endchar = END_OF_INSN;
7564
7565 if (i != (aarch64_num_of_operands (opcode) - 1))
7566 endchar = ',';
7567 skip_past_char (&tmp, ',');
7568
7569 if (*tmp != endchar)
7570 /* The user has supplied an operand in the wrong format. */
7571 goto parse_operands_return;
7572
7573 /* Make sure there is not a comma before the optional operand.
7574 For example the fifth operand of 'sys' is optional:
7575
7576 sys #0,c0,c0,#0, <--- wrong
7577 sys #0,c0,c0,#0 <--- correct. */
7578 if (comma_skipped_p && i && endchar == END_OF_INSN)
7579 {
7580 set_fatal_syntax_error
7581 (_("unexpected comma before the omitted optional operand"));
7582 goto parse_operands_return;
7583 }
7584 }
7585
7586 /* Reaching here means we are dealing with an optional operand that is
7587 omitted from the assembly line. */
7588 gas_assert (optional_operand_p (opcode, i));
7589 info->present = 0;
7590 process_omitted_operand (operands[i], opcode, i, info);
7591
7592 /* Try again, skipping the optional operand at backtrack_pos. */
7593 str = backtrack_pos;
7594 backtrack_pos = 0;
7595
7596 /* Clear any error record after the omitted optional operand has been
7597 successfully handled. */
7598 clear_error ();
7599 }
7600
7601 /* Check if we have parsed all the operands. */
7602 if (*str != '\0' && ! error_p ())
7603 {
7604 /* Set I to the index of the last present operand; this is
7605 for the purpose of diagnostics. */
7606 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7607 ;
7608 set_fatal_syntax_error
7609 (_("unexpected characters following instruction"));
7610 }
7611
7612 parse_operands_return:
7613
7614 if (error_p ())
7615 {
7616 DEBUG_TRACE ("parsing FAIL: %s - %s",
7617 operand_mismatch_kind_names[get_error_kind ()],
7618 get_error_message ());
7619 /* Record the operand error properly; this is useful when there
7620 are multiple instruction templates for a mnemonic name, so that
7621 later on, we can select the error that most closely describes
7622 the problem. */
7623 record_operand_error (opcode, i, get_error_kind (),
7624 get_error_message ());
7625 return false;
7626 }
7627 else
7628 {
7629 DEBUG_TRACE ("parsing SUCCESS");
7630 return true;
7631 }
7632 }
7633
/* Apply fix-ups that accept programmer-friendly input forms while
   keeping libopcodes happy, i.e. libopcodes only accepts the
   preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */
7638
7639 static bool
7640 programmer_friendly_fixup (aarch64_instruction *instr)
7641 {
7642 aarch64_inst *base = &instr->base;
7643 const aarch64_opcode *opcode = base->opcode;
7644 enum aarch64_op op = opcode->op;
7645 aarch64_opnd_info *operands = base->operands;
7646
7647 DEBUG_TRACE ("enter");
7648
7649 switch (opcode->iclass)
7650 {
7651 case testbranch:
7652 /* TBNZ Xn|Wn, #uimm6, label
7653 Test and Branch Not Zero: conditionally jumps to label if bit number
7654 uimm6 in register Xn is not zero. The bit number implies the width of
7655 the register, which may be written and should be disassembled as Wn if
7656 uimm is less than 32. */
7657 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7658 {
7659 if (operands[1].imm.value >= 32)
7660 {
7661 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7662 0, 31);
7663 return false;
7664 }
7665 operands[0].qualifier = AARCH64_OPND_QLF_X;
7666 }
7667 break;
7668 case loadlit:
7669 /* LDR Wt, label | =value
7670 As a convenience assemblers will typically permit the notation
7671 "=value" in conjunction with the pc-relative literal load instructions
7672 to automatically place an immediate value or symbolic address in a
7673 nearby literal pool and generate a hidden label which references it.
7674 ISREG has been set to 0 in the case of =value. */
7675 if (instr->gen_lit_pool
7676 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
7677 {
7678 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7679 if (op == OP_LDRSW_LIT)
7680 size = 4;
7681 if (instr->reloc.exp.X_op != O_constant
7682 && instr->reloc.exp.X_op != O_big
7683 && instr->reloc.exp.X_op != O_symbol)
7684 {
7685 record_operand_error (opcode, 1,
7686 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7687 _("constant expression expected"));
7688 return false;
7689 }
7690 if (! add_to_lit_pool (&instr->reloc.exp, size))
7691 {
7692 record_operand_error (opcode, 1,
7693 AARCH64_OPDE_OTHER_ERROR,
7694 _("literal pool insertion failed"));
7695 return false;
7696 }
7697 }
7698 break;
7699 case log_shift:
7700 case bitfield:
7701 /* UXT[BHW] Wd, Wn
7702 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
7703 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
7704 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7705 A programmer-friendly assembler should accept a destination Xd in
7706 place of Wd, however that is not the preferred form for disassembly.
7707 */
7708 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7709 && operands[1].qualifier == AARCH64_OPND_QLF_W
7710 && operands[0].qualifier == AARCH64_OPND_QLF_X)
7711 operands[0].qualifier = AARCH64_OPND_QLF_W;
7712 break;
7713
7714 case addsub_ext:
7715 {
7716 /* In the 64-bit form, the final register operand is written as Wm
7717 for all but the (possibly omitted) UXTX/LSL and SXTX
7718 operators.
7719 As a programmer-friendly assembler, we accept e.g.
7720 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7721 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
7722 int idx = aarch64_operand_index (opcode->operands,
7723 AARCH64_OPND_Rm_EXT);
7724 gas_assert (idx == 1 || idx == 2);
7725 if (operands[0].qualifier == AARCH64_OPND_QLF_X
7726 && operands[idx].qualifier == AARCH64_OPND_QLF_X
7727 && operands[idx].shifter.kind != AARCH64_MOD_LSL
7728 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7729 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7730 operands[idx].qualifier = AARCH64_OPND_QLF_W;
7731 }
7732 break;
7733
7734 default:
7735 break;
7736 }
7737
7738 DEBUG_TRACE ("exit with SUCCESS");
7739 return true;
7740 }
7741
7742 /* Check for loads and stores that will cause unpredictable behavior. */
7743
static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  /* NOTE(review): the checks below treat bit 22 of the encoding as the
     load/store selector and bit 21 as the pair/single selector, as the
     per-case comments imply — confirm against the opcode tables.  */
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	    || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7837
7838 static void
7839 force_automatic_sequence_close (void)
7840 {
7841 struct aarch64_segment_info_type *tc_seg_info;
7842
7843 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7844 if (tc_seg_info->insn_sequence.instr)
7845 {
7846 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7847 _("previous `%s' sequence has not been closed"),
7848 tc_seg_info->insn_sequence.instr->opcode->name);
7849 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7850 }
7851 }
7852
7853 /* A wrapper function to interface with libopcodes on encoding and
7854 record the error message if there is any.
7855
7856 Return TRUE on success; otherwise return FALSE. */
7857
7858 static bool
7859 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7860 aarch64_insn *code)
7861 {
7862 aarch64_operand_error error_info;
7863 memset (&error_info, '\0', sizeof (error_info));
7864 error_info.kind = AARCH64_OPDE_NIL;
7865 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7866 && !error_info.non_fatal)
7867 return true;
7868
7869 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7870 record_operand_error_info (opcode, &error_info);
7871 return error_info.non_fatal;
7872 }
7873
7874 #ifdef DEBUG_AARCH64
7875 static inline void
7876 dump_opcode_operands (const aarch64_opcode *opcode)
7877 {
7878 int i = 0;
7879 while (opcode->operands[i] != AARCH64_OPND_NIL)
7880 {
7881 aarch64_verbose ("\t\t opnd%d: %s", i,
7882 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7883 ? aarch64_get_operand_name (opcode->operands[i])
7884 : aarch64_get_operand_desc (opcode->operands[i]));
7885 ++i;
7886 }
7887 }
7888 #endif /* DEBUG_AARCH64 */
7889
7890 /* This is the guts of the machine-dependent assembler. STR points to a
7891 machine dependent instruction. This function is supposed to emit
7892 the frags/bytes it assembles to. */
7893
void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* A dot-less identifier may instead be a register alias created with
     .req; try that before treating it as an opcode.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, apply friendly fix-ups and encode; only on full success do
	 we commit the instruction to the output.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This candidate failed; reset the instruction state before trying
	 the next entry with the same mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8045
8046 /* Various frobbings of labels and their addresses. */
8047
void
aarch64_start_line_hook (void)
{
  /* Forget any label seen on a previous line; md_assemble only re-anchors
     the most recently seen label.  */
  last_label_seen = NULL;
}
8053
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the label so md_assemble can update its frag and value to
     match the address of the next instruction.  */
  last_label_seen = sym;

  /* Let the DWARF-2 machinery note the label as well.  */
  dwarf2_emit_label (sym);
}
8061
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close: an instruction sequence
     left open in the current segment is warned about and discarded.  */
  force_automatic_sequence_close ();
}
8068
8069 int
8070 aarch64_data_in_code (void)
8071 {
8072 if (startswith (input_line_pointer + 1, "data:"))
8073 {
8074 *input_line_pointer = '/';
8075 input_line_pointer += 5;
8076 *input_line_pointer = 0;
8077 return 1;
8078 }
8079
8080 return 0;
8081 }
8082
char *
aarch64_canonicalize_symbol_name (char *name)
{
  /* Strip a trailing "/data" marker from NAME, in place.  A name that is
     exactly "/data" (length 5) is left untouched.  */
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8093 \f
8094 /* Table of all register names defined by default. The user can
8095 define additional names with .req. Note that all register names
8096 should appear in both upper and lowercase variants. Some registers
8097 also have mixed-case names. */
8098
8099 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8100 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8101 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8102 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8103 #define REGSET16(p,t) \
8104 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8105 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8106 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8107 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8108 #define REGSET16S(p,s,t) \
8109 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8110 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8111 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8112 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8113 #define REGSET31(p,t) \
8114 REGSET16(p, t), \
8115 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8116 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8117 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8118 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8119 #define REGSET(p,t) \
8120 REGSET31(p,t), REGNUM(p,31,t)
8121
8122 /* These go into aarch64_reg_hsh hash-table. */
8123 static const reg_entry reg_names[] = {
8124 /* Integer registers. */
8125 REGSET31 (x, R_64), REGSET31 (X, R_64),
8126 REGSET31 (w, R_32), REGSET31 (W, R_32),
8127
8128 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8129 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8130 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8131 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8132 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8133 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8134
8135 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
8136 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
8137
8138 /* Floating-point single precision registers. */
8139 REGSET (s, FP_S), REGSET (S, FP_S),
8140
8141 /* Floating-point double precision registers. */
8142 REGSET (d, FP_D), REGSET (D, FP_D),
8143
8144 /* Floating-point half precision registers. */
8145 REGSET (h, FP_H), REGSET (H, FP_H),
8146
8147 /* Floating-point byte precision registers. */
8148 REGSET (b, FP_B), REGSET (B, FP_B),
8149
8150 /* Floating-point quad precision registers. */
8151 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8152
8153 /* FP/SIMD registers. */
8154 REGSET (v, VN), REGSET (V, VN),
8155
8156 /* SVE vector registers. */
8157 REGSET (z, ZN), REGSET (Z, ZN),
8158
8159 /* SVE predicate registers. */
8160 REGSET16 (p, PN), REGSET16 (P, PN),
8161
8162 /* SME ZA tile registers. */
8163 REGSET16 (za, ZA), REGSET16 (ZA, ZA),
8164
8165 /* SME ZA tile registers (horizontal slice). */
8166 REGSET16S (za, h, ZAH), REGSET16S (ZA, H, ZAH),
8167
8168 /* SME ZA tile registers (vertical slice). */
8169 REGSET16S (za, v, ZAV), REGSET16S (ZA, V, ZAV)
8170 };
8171
8172 #undef REGDEF
8173 #undef REGDEF_ALIAS
8174 #undef REGNUM
8175 #undef REGSET16
8176 #undef REGSET31
8177 #undef REGSET
8178
/* Single-bit flag values used to build the 4-bit NZCV immediate below;
   an upper-case letter means the corresponding condition flag is set.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four flag bits into an immediate, N being the most
   significant bit.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All 16 case-sensitive spellings of the NZCV flag-set operand.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8216 \f
8217 /* MD interface: bits in the object file. */
8218
8219 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8220 for use in the a.out file, and stores them in the array pointed to by buf.
8221 This knows about the endian-ness of the target machine and does
8222 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8223 2 (short) and 4 (long) Floating numbers are put out as a series of
8224 LITTLENUMS (shorts, here at least). */
8225
8226 void
8227 md_number_to_chars (char *buf, valueT val, int n)
8228 {
8229 if (target_big_endian)
8230 number_to_chars_bigendian (buf, val, n);
8231 else
8232 number_to_chars_littleendian (buf, val, n);
8233 }
8234
8235 /* MD interface: Sections. */
8236
8237 /* Estimate the size of a frag before relaxing. Assume everything fits in
8238 4 bytes. */
8239
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Both the estimate and the frag's variable part are 4 bytes, per the
     assumption documented above.  */
  fragp->fr_var = 4;
  return 4;
}
8246
8247 /* Round up a section size to the appropriate boundary. */
8248
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No rounding is applied; section sizes are used as-is.  */
  return size;
}
8254
8255 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8256 of an rs_align_code fragment.
8257
8258 Here we fill the frag with the appropriate info for padding the
8259 output stream. The resulting frag will consist of a fixed (fr_fix)
8260 and of a repeating (fr_var) part.
8261
8262 The fixed content is always emitted before the repeating content and
8263 these two parts are used as follows in constructing the output:
8264 - the fixed part will be used to align to a valid instruction word
8265 boundary, in case that we start at a misaligned address; as no
8266 executable instruction can live at the misaligned location, we
8267 simply fill with zeros;
8268 - the variable part will be used to cover the remaining padding and
8269 we fill using the AArch64 NOP instruction.
8270
8271 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
8272 enough storage space for up to 3 bytes for padding the back to a valid
8273 instruction alignment and exactly 4 bytes to store the NOP pattern. */
8274
8275 void
8276 aarch64_handle_align (fragS * fragP)
8277 {
8278 /* NOP = d503201f */
8279 /* AArch64 instructions are always little-endian. */
8280 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
8281
8282 int bytes, fix, noop_size;
8283 char *p;
8284
8285 if (fragP->fr_type != rs_align_code)
8286 return;
8287
8288 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
8289 p = fragP->fr_literal + fragP->fr_fix;
8290
8291 #ifdef OBJ_ELF
8292 gas_assert (fragP->tc_frag_data.recorded);
8293 #endif
8294
8295 noop_size = sizeof (aarch64_noop);
8296
8297 fix = bytes & (noop_size - 1);
8298 if (fix)
8299 {
8300 #ifdef OBJ_ELF
8301 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
8302 #endif
8303 memset (p, 0, fix);
8304 p += fix;
8305 fragP->fr_fix += fix;
8306 }
8307
8308 if (noop_size)
8309 memcpy (p, aarch64_noop, noop_size);
8310 fragP->fr_var = noop_size;
8311 }
8312
8313 /* Perform target specific initialisation of a frag.
8314 Note - despite the name this initialisation is not done when the frag
8315 is created, but only when its type is assigned. A frag can be created
8316 and used a long time before its type is set, so beware of assuming that
8317 this initialisation is performed first. */
8318
#ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets.  */
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state from the frag type.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
8358 \f
8359 /* Initialize the DWARF-2 unwind information for this procedure. */
8360
void
tc_aarch64_frame_initial_instructions (void)
{
  /* At procedure entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8366 #endif /* OBJ_ELF */
8367
8368 /* Convert REGNAME to a DWARF-2 register number. */
8369
8370 int
8371 tc_aarch64_regname_to_dw2regnum (char *regname)
8372 {
8373 const reg_entry *reg = parse_reg (&regname);
8374 if (reg == NULL)
8375 return -1;
8376
8377 switch (reg->type)
8378 {
8379 case REG_TYPE_SP_32:
8380 case REG_TYPE_SP_64:
8381 case REG_TYPE_R_32:
8382 case REG_TYPE_R_64:
8383 return reg->number;
8384
8385 case REG_TYPE_FP_B:
8386 case REG_TYPE_FP_H:
8387 case REG_TYPE_FP_S:
8388 case REG_TYPE_FP_D:
8389 case REG_TYPE_FP_Q:
8390 return reg->number + 64;
8391
8392 default:
8393 break;
8394 }
8395 return -1;
8396 }
8397
8398 /* Implement DWARF2_ADDR_SIZE. */
8399
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 4-byte addresses regardless of the target word size.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
8409
8410 /* MD interface: Symbol and relocation handling. */
8411
8412 /* Return the address within the segment that a PC-relative fixup is
8413 relative to. For AArch64 PC-relative fixups applied to instructions
8414 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8415
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Absolute address of the fixup within the output section.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
8432
8433 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8434 Otherwise we have no need to default values of symbols. */
8435
symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character filter before the full string comparison.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      /* Create the GOT symbol lazily, exactly once.  */
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
8458
8459 /* Return non-zero if the indicated VALUE has overflowed the maximum
8460 range expressible by a unsigned number with the indicated number of
8461 BITS. */
8462
8463 static bool
8464 unsigned_overflow (valueT value, unsigned bits)
8465 {
8466 valueT lim;
8467 if (bits >= sizeof (valueT) * 8)
8468 return false;
8469 lim = (valueT) 1 << bits;
8470 return (value >= lim);
8471 }
8472
8473
8474 /* Return non-zero if the indicated VALUE has overflowed the maximum
8475 range expressible by an signed number with the indicated number of
8476 BITS. */
8477
8478 static bool
8479 signed_overflow (offsetT value, unsigned bits)
8480 {
8481 offsetT lim;
8482 if (bits >= sizeof (offsetT) * 8)
8483 return false;
8484 lim = (offsetT) 1 << (bits - 1);
8485 return (value < -lim || value >= lim);
8486 }
8487
8488 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
8489 unsigned immediate offset load/store instruction, try to encode it as
8490 an unscaled, 9-bit, signed immediate offset load/store instruction.
8491 Return TRUE if it is successful; otherwise return FALSE.
8492
8493 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
8494 in response to the standard LDR/STR mnemonics when the immediate offset is
8495 unambiguous, i.e. when it is negative or unaligned. */
8496
static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled, unsigned-offset form to its unscaled counterpart;
     OP_NIL when the mnemonic has no unscaled equivalent.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure means the offset does
     not fit the unscaled form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8550
8551 /* Called by fix_insn to fix a MOV immediate alias instruction.
8552
8553 Operand for a generic move immediate instruction, which is an alias
8554 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
8555 a 32-bit/64-bit immediate value into general register. An assembler error
8556 shall result if the immediate cannot be created by a single one of these
8557 instructions. If there is a choice, then to ensure reversability an
8558 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
8559
static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  (The header comment above documents the
	 preference order as MOVZ, then MOVN, then ORR; OP_MOV_IMM_WIDEN
	 is this second, MOVN-based attempt.)  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8611
8612 /* An instruction operand which is immediate related may have symbol used
8613 in the assembly, e.g.
8614
8615 mov w0, u32
8616 .set u32, 0x00ffff00
8617
8618 At the time when the assembly instruction is parsed, a referenced symbol,
8619 like 'u32' in the above example may not have been seen; a fixS is created
8620 in such a case and is handled here after symbols have been resolved.
8621 Instruction is fixed up with VALUE using the information in *FIXP plus
8622 extra information in FLAGS.
8623
8624 This function is called by md_apply_fix to fix up instructions that need
8625 a fix-up described above but does not involve any linker-time relocation. */
8626
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Location of the instruction bytes being patched.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate patched directly into the instruction word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2  2 21111 111111
		  1  098|7654|3 2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2  221111111111
		  1  098|7654|3 2  109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12      Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12      Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12      Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12      Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      /* Scaled encoding failed; fall back to the unscaled LDUR/STUR form
	 when the opcode has one.  */
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8790
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  SEG is the
   segment containing the fixup.  On return fixP->fx_done is non-zero
   iff the fixup was fully resolved here and no relocation needs to be
   emitted for it.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Address of the instruction (or data) being patched.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* Fixup flags stashed by the code that created the fixup
     (e.g. FIXUP_F_HAS_EXPLICIT_SHIFT), passed through fx_addnumber.  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    /* Plain data relocations: patch the bytes in place only when the
       fixup is resolved or the target uses REL-style in-place addends.  */
    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
         in fact we generate an error because we do not have a reloc
         for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* LDR (literal): 19-bit word-aligned pc-relative offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit signed byte offset, no alignment requirement.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* B.cond: 19-bit word offset, i.e. +/-1MiB range.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ/TBNZ: 14-bit word offset, i.e. +/-32KiB range.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* B/BL: 26-bit word offset, i.e. +/-128MiB range.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVW-family relocations: SCALE selects which 16-bit slice of the
       value is inserted into the hw field of the MOVZ/MOVN/MOVK.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale. */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn. */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Retarget the generic reloc onto the ABI-specific (LP64 vs
	 ILP32) variant now that the ABI is known.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* TLS relocations are always emitted into the object file for the
       linker to resolve; just mark the symbol thread-local here.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated the struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal .  */
  free (fixP->tc_fix_data.inst);

  return;
}
9178
9179 /* Translate internal representation of relocation info to BFD target
9180 format. */
9181
9182 arelent *
9183 tc_gen_reloc (asection * section, fixS * fixp)
9184 {
9185 arelent *reloc;
9186 bfd_reloc_code_real_type code;
9187
9188 reloc = XNEW (arelent);
9189
9190 reloc->sym_ptr_ptr = XNEW (asymbol *);
9191 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9192 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9193
9194 if (fixp->fx_pcrel)
9195 {
9196 if (section->use_rela_p)
9197 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9198 else
9199 fixp->fx_offset = reloc->address;
9200 }
9201 reloc->addend = fixp->fx_offset;
9202
9203 code = fixp->fx_r_type;
9204 switch (code)
9205 {
9206 case BFD_RELOC_16:
9207 if (fixp->fx_pcrel)
9208 code = BFD_RELOC_16_PCREL;
9209 break;
9210
9211 case BFD_RELOC_32:
9212 if (fixp->fx_pcrel)
9213 code = BFD_RELOC_32_PCREL;
9214 break;
9215
9216 case BFD_RELOC_64:
9217 if (fixp->fx_pcrel)
9218 code = BFD_RELOC_64_PCREL;
9219 break;
9220
9221 default:
9222 break;
9223 }
9224
9225 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9226 if (reloc->howto == NULL)
9227 {
9228 as_bad_where (fixp->fx_file, fixp->fx_line,
9229 _
9230 ("cannot represent %s relocation in this object file format"),
9231 bfd_get_reloc_code_name (code));
9232 return NULL;
9233 }
9234
9235 return reloc;
9236 }
9237
9238 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9239
9240 void
9241 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9242 {
9243 bfd_reloc_code_real_type type;
9244 int pcrel = 0;
9245
9246 /* Pick a reloc.
9247 FIXME: @@ Should look at CPU word size. */
9248 switch (size)
9249 {
9250 case 1:
9251 type = BFD_RELOC_8;
9252 break;
9253 case 2:
9254 type = BFD_RELOC_16;
9255 break;
9256 case 4:
9257 type = BFD_RELOC_32;
9258 break;
9259 case 8:
9260 type = BFD_RELOC_64;
9261 break;
9262 default:
9263 as_bad (_("cannot do %u-byte relocation"), size);
9264 type = BFD_RELOC_UNUSED;
9265 break;
9266 }
9267
9268 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9269 }
9270
9271 #ifdef OBJ_ELF
9272
9273 /* Implement md_after_parse_args. This is the earliest time we need to decide
9274 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9275
9276 void
9277 aarch64_after_parse_args (void)
9278 {
9279 if (aarch64_abi != AARCH64_ABI_NONE)
9280 return;
9281
9282 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9283 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9284 aarch64_abi = AARCH64_ABI_ILP32;
9285 else
9286 aarch64_abi = AARCH64_ABI_LP64;
9287 }
9288
9289 const char *
9290 elf64_aarch64_target_format (void)
9291 {
9292 #ifdef TE_CLOUDABI
9293 /* FIXME: What to do for ilp32_p ? */
9294 if (target_big_endian)
9295 return "elf64-bigaarch64-cloudabi";
9296 else
9297 return "elf64-littleaarch64-cloudabi";
9298 #else
9299 if (target_big_endian)
9300 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9301 else
9302 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9303 #endif
9304 }
9305
/* Implement TC_SYMFIELD hook obj_frob_symbol: hand SYMP straight to the
   generic ELF symbol frobber.  *PUNTP may be set by it to drop the
   symbol from the output.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9311 #endif
9312
9313 /* MD interface: Finalization. */
9314
9315 /* A good place to do this, although this was probably not intended
9316 for this kind of use. We need to dump the literal pool before
9317 references are made to a null symbol pointer. */
9318
9319 void
9320 aarch64_cleanup (void)
9321 {
9322 literal_pool *pool;
9323
9324 for (pool = list_of_pools; pool; pool = pool->next)
9325 {
9326 /* Put it at the end of the relevant section. */
9327 subseg_set (pool->section, pool->sub_section);
9328 s_ltorg (0);
9329 }
9330 }
9331
9332 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections from
   aarch64_adjust_symtab; ABFD and DUMMY are unused.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections gas did not emit frags into.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the following frag(s);
	 decide whether a later mapping symbol makes it redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
	         one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
	         it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9396 #endif
9397
/* Adjust the symbol table.  Implement the md hook called late in
   assembly: drop redundant mapping symbols, then apply the generic
   ELF symbol-table adjustments.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9410
/* Insert (KEY, VALUE) into TABLE.  The final 0 argument requests
   no replacement, so the first entry inserted for a given key is
   the one that is kept.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9416
/* Insert (KEY, VALUE) into the system-register table TABLE, asserting
   first that the register name fits the fixed-size parse buffer.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9423
9424 static void
9425 fill_instruction_hash_table (void)
9426 {
9427 const aarch64_opcode *opcode = aarch64_opcode_table;
9428
9429 while (opcode->name != NULL)
9430 {
9431 templates *templ, *new_templ;
9432 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9433
9434 new_templ = XNEW (templates);
9435 new_templ->opcode = opcode;
9436 new_templ->next = NULL;
9437
9438 if (!templ)
9439 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9440 else
9441 {
9442 new_templ->next = templ->next;
9443 templ->next = new_templ;
9444 }
9445 ++opcode;
9446 }
9447 }
9448
/* Copy at most NUM characters of the NUL-terminated string SRC into DST,
   upper-casing each one, and NUL-terminate DST.  DST must have room for
   NUM + 1 bytes.  The index is size_t so the loop bound matches NUM's
   type instead of silently truncating through unsigned int.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t i;
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
9457
9458 /* Assume STR point to a lower-case string, allocate, convert and return
9459 the corresponding upper-case string. */
9460 static inline const char*
9461 get_upper_str (const char *str)
9462 {
9463 char *ret;
9464 size_t len = strlen (str);
9465 ret = XNEWVEC (char, len + 1);
9466 convert_to_upper (ret, str, len);
9467 return ret;
9468 }
9469
/* MD interface: Initialization.  */

/* Create and populate every lookup table the AArch64 parser uses
   (opcodes, system registers, condition codes, shift modifiers,
   barrier/prefetch/hint operands, register names), then settle the CPU
   feature set from the command-line options and record the machine type
   in the output BFD.  Registration order matters: checked_hash_insert
   passes replace==0, so for duplicate keys the entry inserted first is
   the one kept.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Allocate the (initially empty) hash tables.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and PSTATE fields; the source arrays are
     NULL-name terminated.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  /* IC/DC/AT/TLBI/SR operation name tables for the SYS aliases.  */
  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers are accepted in both lower and upper case.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.
     -mcpu takes precedence over -march; fall back to the built-in
     default when neither was given.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
9630
/* Command line processing.  */

/* Short options: -m<...> takes an argument.  */
const char *md_shortopts = "m:";

/* -EB / -EL endianness switches: both are available on a bi-endian
   assembler; otherwise only the one matching the configured target
   byte order is defined.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Long options recognised by this backend.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9657
/* Description of a simple boolean-flag command-line option: matching
   OPTION sets *VAR to VALUE.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
9666
/* Table of the simple flag options, terminated by an all-NULL entry.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9680
/* A named CPU and the feature set it enables, for -mcpu= parsing.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
9689
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* NOTE: the first ("all") entry is deliberately skipped by the .cpu
   directive handler (s_aarch64_cpu starts at aarch64_cpus + 1).  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
			    "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				 "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
			      "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
				   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
				   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
				   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
			       "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
			       "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
			      "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9849
/* Associates an architecture name accepted by -march= (and the .arch
   directive) with the feature set it enables.  */
struct aarch64_arch_option_table
{
  /* Name as written on the command line, e.g. "armv8.2-a".  */
  const char *name;
  /* Feature set implied by this architecture.  */
  const aarch64_feature_set value;
};
9855
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
/* NOTE: the first ("all") entry is deliberately skipped by the .arch
   directive handler (s_aarch64_arch starts at aarch64_archs + 1).  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r",	AARCH64_ARCH_V8_R},
  {"armv9-a",	AARCH64_ARCH_V9},
  {"armv9.1-a",	AARCH64_ARCH_V9_1},
  {"armv9.2-a",	AARCH64_ARCH_V9_2},
  {"armv9.3-a",	AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
9876
/* ISA extensions.  */
/* Associates a "+ext" extension name with the features it turns on and
   the features it depends on.  The dependency sets drive the transitive
   closures computed by aarch64_feature_enable_set (enabling an extension
   pulls in REQUIRE) and aarch64_feature_disable_set (disabling a feature
   also disables everything that requires it).  */
struct aarch64_option_cpu_value_table
{
  /* Extension name, as written after '+' (or after "+no" to remove).  */
  const char *name;
  /* Features enabled by this extension.  */
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};
9884
/* Table of all architectural extension names understood in "+ext" /
   "+noext" suffixes and by .arch_extension.  Each entry lists the
   features the extension enables and the features it requires.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME
					 | AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME
					 | AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_ARCH_NONE},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_ARCH_NONE},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {"mops",		AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
			AARCH64_ARCH_NONE},
  {"hbc",		AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
9986
/* Describes a long command-line option of the form "-m<opt>=<arg>",
   e.g. -mcpu=, -march=, -mabi=; the option text is matched as a prefix
   and the remainder is handed to FUNC.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
9994
9995 /* Transitive closure of features depending on set. */
9996 static aarch64_feature_set
9997 aarch64_feature_disable_set (aarch64_feature_set set)
9998 {
9999 const struct aarch64_option_cpu_value_table *opt;
10000 aarch64_feature_set prev = 0;
10001
10002 while (prev != set) {
10003 prev = set;
10004 for (opt = aarch64_features; opt->name != NULL; opt++)
10005 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10006 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10007 }
10008 return set;
10009 }
10010
10011 /* Transitive closure of dependencies of set. */
10012 static aarch64_feature_set
10013 aarch64_feature_enable_set (aarch64_feature_set set)
10014 {
10015 const struct aarch64_option_cpu_value_table *opt;
10016 aarch64_feature_set prev = 0;
10017
10018 while (prev != set) {
10019 prev = set;
10020 for (opt = aarch64_features; opt->name != NULL; opt++)
10021 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10022 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10023 }
10024 return set;
10025 }
10026
10027 static int
10028 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10029 bool ext_only)
10030 {
10031 /* We insist on extensions being added before being removed. We achieve
10032 this by using the ADDING_VALUE variable to indicate whether we are
10033 adding an extension (1) or removing it (0) and only allowing it to
10034 change in the order -1 -> 1 -> 0. */
10035 int adding_value = -1;
10036 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10037
10038 /* Copy the feature set, so that we can modify it. */
10039 *ext_set = **opt_p;
10040 *opt_p = ext_set;
10041
10042 while (str != NULL && *str != 0)
10043 {
10044 const struct aarch64_option_cpu_value_table *opt;
10045 const char *ext = NULL;
10046 int optlen;
10047
10048 if (!ext_only)
10049 {
10050 if (*str != '+')
10051 {
10052 as_bad (_("invalid architectural extension"));
10053 return 0;
10054 }
10055
10056 ext = strchr (++str, '+');
10057 }
10058
10059 if (ext != NULL)
10060 optlen = ext - str;
10061 else
10062 optlen = strlen (str);
10063
10064 if (optlen >= 2 && startswith (str, "no"))
10065 {
10066 if (adding_value != 0)
10067 adding_value = 0;
10068 optlen -= 2;
10069 str += 2;
10070 }
10071 else if (optlen > 0)
10072 {
10073 if (adding_value == -1)
10074 adding_value = 1;
10075 else if (adding_value != 1)
10076 {
10077 as_bad (_("must specify extensions to add before specifying "
10078 "those to remove"));
10079 return false;
10080 }
10081 }
10082
10083 if (optlen == 0)
10084 {
10085 as_bad (_("missing architectural extension"));
10086 return 0;
10087 }
10088
10089 gas_assert (adding_value != -1);
10090
10091 for (opt = aarch64_features; opt->name != NULL; opt++)
10092 if (strncmp (opt->name, str, optlen) == 0)
10093 {
10094 aarch64_feature_set set;
10095
10096 /* Add or remove the extension. */
10097 if (adding_value)
10098 {
10099 set = aarch64_feature_enable_set (opt->value);
10100 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10101 }
10102 else
10103 {
10104 set = aarch64_feature_disable_set (opt->value);
10105 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10106 }
10107 break;
10108 }
10109
10110 if (opt->name == NULL)
10111 {
10112 as_bad (_("unknown architectural extension `%s'"), str);
10113 return 0;
10114 }
10115
10116 str = ext;
10117 };
10118
10119 return 1;
10120 }
10121
10122 static int
10123 aarch64_parse_cpu (const char *str)
10124 {
10125 const struct aarch64_cpu_option_table *opt;
10126 const char *ext = strchr (str, '+');
10127 size_t optlen;
10128
10129 if (ext != NULL)
10130 optlen = ext - str;
10131 else
10132 optlen = strlen (str);
10133
10134 if (optlen == 0)
10135 {
10136 as_bad (_("missing cpu name `%s'"), str);
10137 return 0;
10138 }
10139
10140 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10141 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10142 {
10143 mcpu_cpu_opt = &opt->value;
10144 if (ext != NULL)
10145 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10146
10147 return 1;
10148 }
10149
10150 as_bad (_("unknown cpu `%s'"), str);
10151 return 0;
10152 }
10153
10154 static int
10155 aarch64_parse_arch (const char *str)
10156 {
10157 const struct aarch64_arch_option_table *opt;
10158 const char *ext = strchr (str, '+');
10159 size_t optlen;
10160
10161 if (ext != NULL)
10162 optlen = ext - str;
10163 else
10164 optlen = strlen (str);
10165
10166 if (optlen == 0)
10167 {
10168 as_bad (_("missing architecture name `%s'"), str);
10169 return 0;
10170 }
10171
10172 for (opt = aarch64_archs; opt->name != NULL; opt++)
10173 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10174 {
10175 march_cpu_opt = &opt->value;
10176 if (ext != NULL)
10177 return aarch64_parse_features (ext, &march_cpu_opt, false);
10178
10179 return 1;
10180 }
10181
10182 as_bad (_("unknown architecture `%s'\n"), str);
10183 return 0;
10184 }
10185
/* ABIs.  */
/* Associates an ABI name accepted by -mabi= with its enum value.  */
struct aarch64_option_abi_value_table
{
  /* ABI name as written on the command line.  */
  const char *name;
  /* Corresponding ABI enumerator.  */
  enum aarch64_abi_type value;
};
10192
/* The ABIs selectable with -mabi= (ELF only); no NULL terminator —
   callers iterate with ARRAY_SIZE.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32",	AARCH64_ABI_ILP32},
  {"lp64",	AARCH64_ABI_LP64},
};
10197
10198 static int
10199 aarch64_parse_abi (const char *str)
10200 {
10201 unsigned int i;
10202
10203 if (str[0] == '\0')
10204 {
10205 as_bad (_("missing abi name `%s'"), str);
10206 return 0;
10207 }
10208
10209 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10210 if (strcmp (str, aarch64_abis[i].name) == 0)
10211 {
10212 aarch64_abi = aarch64_abis[i].value;
10213 return 1;
10214 }
10215
10216 as_bad (_("unknown abi `%s'\n"), str);
10217 return 0;
10218 }
10219
10220 static struct aarch64_long_option_table aarch64_long_opts[] = {
10221 #ifdef OBJ_ELF
10222 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
10223 aarch64_parse_abi, NULL},
10224 #endif /* OBJ_ELF */
10225 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
10226 aarch64_parse_cpu, NULL},
10227 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
10228 aarch64_parse_arch, NULL},
10229 {NULL, NULL, 0, NULL}
10230 };
10231
/* GAS hook: handle target-specific command-line option C with argument
   ARG (may be NULL).  Return 1 if the option was consumed, 0 if it is
   unknown to this backend.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple flag options: C must match the option's
	 first character and ARG (if any) the remainder.  Note the
	 short-circuit keeps streq from being called with a NULL ARG.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the long "-m<opt>=<arg>" options, matched by prefix.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text after '='.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10298
10299 void
10300 md_show_usage (FILE * fp)
10301 {
10302 struct aarch64_option_table *opt;
10303 struct aarch64_long_option_table *lopt;
10304
10305 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10306
10307 for (opt = aarch64_opts; opt->option != NULL; opt++)
10308 if (opt->help != NULL)
10309 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10310
10311 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10312 if (lopt->help != NULL)
10313 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10314
10315 #ifdef OPTION_EB
10316 fprintf (fp, _("\
10317 -EB assemble code for a big-endian cpu\n"));
10318 #endif
10319
10320 #ifdef OPTION_EL
10321 fprintf (fp, _("\
10322 -EL assemble code for a little-endian cpu\n"));
10323 #endif
10324 }
10325
10326 /* Parse a .cpu directive. */
10327
10328 static void
10329 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10330 {
10331 const struct aarch64_cpu_option_table *opt;
10332 char saved_char;
10333 char *name;
10334 char *ext;
10335 size_t optlen;
10336
10337 name = input_line_pointer;
10338 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10339 input_line_pointer++;
10340 saved_char = *input_line_pointer;
10341 *input_line_pointer = 0;
10342
10343 ext = strchr (name, '+');
10344
10345 if (ext != NULL)
10346 optlen = ext - name;
10347 else
10348 optlen = strlen (name);
10349
10350 /* Skip the first "all" entry. */
10351 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10352 if (strlen (opt->name) == optlen
10353 && strncmp (name, opt->name, optlen) == 0)
10354 {
10355 mcpu_cpu_opt = &opt->value;
10356 if (ext != NULL)
10357 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10358 return;
10359
10360 cpu_variant = *mcpu_cpu_opt;
10361
10362 *input_line_pointer = saved_char;
10363 demand_empty_rest_of_line ();
10364 return;
10365 }
10366 as_bad (_("unknown cpu `%s'"), name);
10367 *input_line_pointer = saved_char;
10368 ignore_rest_of_line ();
10369 }
10370
10371
10372 /* Parse a .arch directive. */
10373
10374 static void
10375 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10376 {
10377 const struct aarch64_arch_option_table *opt;
10378 char saved_char;
10379 char *name;
10380 char *ext;
10381 size_t optlen;
10382
10383 name = input_line_pointer;
10384 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10385 input_line_pointer++;
10386 saved_char = *input_line_pointer;
10387 *input_line_pointer = 0;
10388
10389 ext = strchr (name, '+');
10390
10391 if (ext != NULL)
10392 optlen = ext - name;
10393 else
10394 optlen = strlen (name);
10395
10396 /* Skip the first "all" entry. */
10397 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10398 if (strlen (opt->name) == optlen
10399 && strncmp (name, opt->name, optlen) == 0)
10400 {
10401 mcpu_cpu_opt = &opt->value;
10402 if (ext != NULL)
10403 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10404 return;
10405
10406 cpu_variant = *mcpu_cpu_opt;
10407
10408 *input_line_pointer = saved_char;
10409 demand_empty_rest_of_line ();
10410 return;
10411 }
10412
10413 as_bad (_("unknown architecture `%s'\n"), name);
10414 *input_line_pointer = saved_char;
10415 ignore_rest_of_line ();
10416 }
10417
10418 /* Parse a .arch_extension directive. */
10419
10420 static void
10421 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10422 {
10423 char saved_char;
10424 char *ext = input_line_pointer;;
10425
10426 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10427 input_line_pointer++;
10428 saved_char = *input_line_pointer;
10429 *input_line_pointer = 0;
10430
10431 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10432 return;
10433
10434 cpu_variant = *mcpu_cpu_opt;
10435
10436 *input_line_pointer = saved_char;
10437 demand_empty_rest_of_line ();
10438 }
10439
/* Copy symbol information.  */

/* Copy the AArch64-specific symbol flags from SRC to DEST.  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10447
10448 #ifdef OBJ_ELF
10449 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10450 This is needed so AArch64 specific st_other values can be independently
10451 specified for an IFUNC resolver (that is called by the dynamic linker)
10452 and the symbol it resolves (aliased to the resolver). In particular,
10453 if a function symbol has special st_other value set via directives,
10454 then attaching an IFUNC resolver to that symbol should not override
10455 the st_other setting. Requiring the directive on the IFUNC resolver
10456 symbol would be unexpected and problematic in C code, where the two
10457 symbols appear as two independent function declarations. */
10458
10459 void
10460 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10461 {
10462 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10463 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10464 /* If size is unset, copy size from src. Because we don't track whether
10465 .size has been used, we can't differentiate .size dest, 0 from the case
10466 where dest's size is unset. */
10467 if (!destelf->size && S_GET_SIZE (dest) == 0)
10468 {
10469 if (srcelf->size)
10470 {
10471 destelf->size = XNEW (expressionS);
10472 *destelf->size = *srcelf->size;
10473 }
10474 S_SET_SIZE (dest, S_GET_SIZE (src));
10475 }
10476 }
10477 #endif