e9f7ee9fc964c3716474b04316d992be846dfbb5
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2022 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* SME horizontal or vertical slice indicator, encoded in "V".
103 Values:
104 0 - Horizontal
105 1 - vertical
106 */
107 enum sme_hv_slice
108 {
109 HV_horizontal = 0,
110 HV_vertical = 1
111 };
112
113 /* Bits for DEFINED field in vector_type_el. */
114 #define NTA_HASTYPE 1
115 #define NTA_HASINDEX 2
116 #define NTA_HASVARWIDTH 4
117
118 struct vector_type_el
119 {
120 enum vector_el_type type;
121 unsigned char defined;
122 unsigned width;
123 int64_t index;
124 };
125
126 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
127
128 struct reloc
129 {
130 bfd_reloc_code_real_type type;
131 expressionS exp;
132 int pc_rel;
133 enum aarch64_opnd opnd;
134 uint32_t flags;
135 unsigned need_libopcodes_p : 1;
136 };
137
138 struct aarch64_instruction
139 {
140 /* libopcodes structure for instruction intermediate representation. */
141 aarch64_inst base;
142 /* Record assembly errors found during the parsing. */
143 struct
144 {
145 enum aarch64_operand_error_kind kind;
146 const char *error;
147 } parsing_error;
148 /* The condition that appears in the assembly line. */
149 int cond;
150 /* Relocation information (including the GAS internal fixup). */
151 struct reloc reloc;
152 /* Need to generate an immediate in the literal pool. */
153 unsigned gen_lit_pool : 1;
154 };
155
156 typedef struct aarch64_instruction aarch64_instruction;
157
158 static aarch64_instruction inst;
159
160 static bool parse_operands (char *, const aarch64_opcode *);
161 static bool programmer_friendly_fixup (aarch64_instruction *);
162
163 /* Diagnostics inline function utilities.
164
165 These are lightweight utilities which should only be called by parse_operands
166 and other parsers. GAS processes each assembly line by parsing it against
167 instruction template(s), in the case of multiple templates (for the same
168 mnemonic name), those templates are tried one by one until one succeeds or
169 all fail. An assembly line may fail a few templates before being
170 successfully parsed; an error saved here in most cases is not a user error
171 but an error indicating the current template is not the right template.
172 Therefore it is very important that errors can be saved at a low cost during
173 the parsing; we don't want to slow down the whole parsing by recording
174 non-user errors in detail.
175
176 Remember that the objective is to help GAS pick up the most appropriate
177 error message in the case of multiple templates, e.g. FMOV which has 8
178 templates. */
179
180 static inline void
181 clear_error (void)
182 {
183 inst.parsing_error.kind = AARCH64_OPDE_NIL;
184 inst.parsing_error.error = NULL;
185 }
186
187 static inline bool
188 error_p (void)
189 {
190 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
191 }
192
193 static inline const char *
194 get_error_message (void)
195 {
196 return inst.parsing_error.error;
197 }
198
199 static inline enum aarch64_operand_error_kind
200 get_error_kind (void)
201 {
202 return inst.parsing_error.kind;
203 }
204
205 static inline void
206 set_error (enum aarch64_operand_error_kind kind, const char *error)
207 {
208 inst.parsing_error.kind = kind;
209 inst.parsing_error.error = error;
210 }
211
/* Record ERROR with kind AARCH64_OPDE_RECOVERABLE.
   NOTE(review): presumably this marks an operand that was recognized
   but not in an acceptable form, so another template may still match —
   confirm against aarch64_operand_error_kind in libopcodes.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
217
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  */
static inline void
set_default_error (void)
{
  /* A NULL message requests the generic per-operand description.  */
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
225
/* Record ERROR as a syntax error, overwriting any previously saved
   diagnostic (contrast set_first_syntax_error).  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
231
232 static inline void
233 set_first_syntax_error (const char *error)
234 {
235 if (! error_p ())
236 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
237 }
238
/* Record ERROR with the fatal syntax-error kind; unlike
   set_first_syntax_error this overwrites any previously saved
   diagnostic.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
244 \f
245 /* Return value for certain parsers when the parsing fails; those parsers
246 return the information of the parsed result, e.g. register number, on
247 success. */
248 #define PARSE_FAIL -1
249
250 /* This is an invalid condition code that means no conditional field is
251 present. */
252 #define COND_ALWAYS 0x10
253
254 typedef struct
255 {
256 const char *template;
257 uint32_t value;
258 } asm_nzcv;
259
260 struct reloc_entry
261 {
262 char *name;
263 bfd_reloc_code_real_type reloc;
264 };
265
266 /* Macros to define the register types and masks for the purpose
267 of parsing. */
268
269 #undef AARCH64_REG_TYPES
270 #define AARCH64_REG_TYPES \
271 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
272 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
273 BASIC_REG_TYPE(SP_32) /* wsp */ \
274 BASIC_REG_TYPE(SP_64) /* sp */ \
275 BASIC_REG_TYPE(Z_32) /* wzr */ \
276 BASIC_REG_TYPE(Z_64) /* xzr */ \
277 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
278 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
279 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
280 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
281 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
282 BASIC_REG_TYPE(VN) /* v[0-31] */ \
283 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
284 BASIC_REG_TYPE(PN) /* p[0-15] */ \
285 BASIC_REG_TYPE(ZA) /* za[0-15] */ \
286 BASIC_REG_TYPE(ZAH) /* za[0-15]h */ \
287 BASIC_REG_TYPE(ZAV) /* za[0-15]v */ \
288 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
289 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
290 /* Typecheck: same, plus SVE registers. */ \
291 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
292 | REG_TYPE(ZN)) \
293 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
294 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
295 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
296 /* Typecheck: same, plus SVE registers. */ \
297 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
298 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
299 | REG_TYPE(ZN)) \
300 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
301 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
303 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
304 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
306 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
307 /* Typecheck: any [BHSDQ]P FP. */ \
308 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
309 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
310 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
311 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
313 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
314 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
315 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
316 be used for SVE instructions, since Zn and Pn are valid symbols \
317 in other contexts. */ \
318 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
320 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
321 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
322 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
323 | REG_TYPE(ZN) | REG_TYPE(PN)) \
324 /* Any integer register; used for error messages only. */ \
325 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
326 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
327 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
328 /* Pseudo type to mark the end of the enumerator sequence. */ \
329 BASIC_REG_TYPE(MAX)
330
331 #undef BASIC_REG_TYPE
332 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
333 #undef MULTI_REG_TYPE
334 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
335
336 /* Register type enumerators. */
337 typedef enum aarch64_reg_type_
338 {
339 /* A list of REG_TYPE_*. */
340 AARCH64_REG_TYPES
341 } aarch64_reg_type;
342
343 #undef BASIC_REG_TYPE
344 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
345 #undef REG_TYPE
346 #define REG_TYPE(T) (1 << REG_TYPE_##T)
347 #undef MULTI_REG_TYPE
348 #define MULTI_REG_TYPE(T,V) V,
349
350 /* Structure for a hash table entry for a register. */
351 typedef struct
352 {
353 const char *name;
354 unsigned char number;
355 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
356 unsigned char builtin;
357 } reg_entry;
358
359 /* Values indexed by aarch64_reg_type to assist the type checking. */
360 static const unsigned reg_type_masks[] =
361 {
362 AARCH64_REG_TYPES
363 };
364
365 #undef BASIC_REG_TYPE
366 #undef REG_TYPE
367 #undef MULTI_REG_TYPE
368 #undef AARCH64_REG_TYPES
369
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be synchronized with the aarch64_reg_type definitions
   above.  */
373 static const char *
374 get_reg_expected_msg (aarch64_reg_type reg_type)
375 {
376 const char *msg;
377
378 switch (reg_type)
379 {
380 case REG_TYPE_R_32:
381 msg = N_("integer 32-bit register expected");
382 break;
383 case REG_TYPE_R_64:
384 msg = N_("integer 64-bit register expected");
385 break;
386 case REG_TYPE_R_N:
387 msg = N_("integer register expected");
388 break;
389 case REG_TYPE_R64_SP:
390 msg = N_("64-bit integer or SP register expected");
391 break;
392 case REG_TYPE_SVE_BASE:
393 msg = N_("base register expected");
394 break;
395 case REG_TYPE_R_Z:
396 msg = N_("integer or zero register expected");
397 break;
398 case REG_TYPE_SVE_OFFSET:
399 msg = N_("offset register expected");
400 break;
401 case REG_TYPE_R_SP:
402 msg = N_("integer or SP register expected");
403 break;
404 case REG_TYPE_R_Z_SP:
405 msg = N_("integer, zero or SP register expected");
406 break;
407 case REG_TYPE_FP_B:
408 msg = N_("8-bit SIMD scalar register expected");
409 break;
410 case REG_TYPE_FP_H:
411 msg = N_("16-bit SIMD scalar or floating-point half precision "
412 "register expected");
413 break;
414 case REG_TYPE_FP_S:
415 msg = N_("32-bit SIMD scalar or floating-point single precision "
416 "register expected");
417 break;
418 case REG_TYPE_FP_D:
419 msg = N_("64-bit SIMD scalar or floating-point double precision "
420 "register expected");
421 break;
422 case REG_TYPE_FP_Q:
423 msg = N_("128-bit SIMD scalar or floating-point quad precision "
424 "register expected");
425 break;
426 case REG_TYPE_R_Z_BHSDQ_V:
427 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
428 msg = N_("register expected");
429 break;
430 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
431 msg = N_("SIMD scalar or floating-point register expected");
432 break;
433 case REG_TYPE_VN: /* any V reg */
434 msg = N_("vector register expected");
435 break;
436 case REG_TYPE_ZN:
437 msg = N_("SVE vector register expected");
438 break;
439 case REG_TYPE_PN:
440 msg = N_("SVE predicate register expected");
441 break;
442 default:
443 as_fatal (_("invalid register type %d"), reg_type);
444 }
445 return msg;
446 }
447
448 /* Some well known registers that we refer to directly elsewhere. */
449 #define REG_SP 31
450 #define REG_ZR 31
451
452 /* Instructions take 4 bytes in the object file. */
453 #define INSN_SIZE 4
454
455 static htab_t aarch64_ops_hsh;
456 static htab_t aarch64_cond_hsh;
457 static htab_t aarch64_shift_hsh;
458 static htab_t aarch64_sys_regs_hsh;
459 static htab_t aarch64_pstatefield_hsh;
460 static htab_t aarch64_sys_regs_ic_hsh;
461 static htab_t aarch64_sys_regs_dc_hsh;
462 static htab_t aarch64_sys_regs_at_hsh;
463 static htab_t aarch64_sys_regs_tlbi_hsh;
464 static htab_t aarch64_sys_regs_sr_hsh;
465 static htab_t aarch64_reg_hsh;
466 static htab_t aarch64_barrier_opt_hsh;
467 static htab_t aarch64_nzcv_hsh;
468 static htab_t aarch64_pldop_hsh;
469 static htab_t aarch64_hint_opt_hsh;
470
471 /* Stuff needed to resolve the label ambiguity
472 As:
473 ...
474 label: <insn>
475 may differ from:
476 ...
477 label:
478 <insn> */
479
480 static symbolS *last_label_seen;
481
482 /* Literal pool structure. Held on a per-section
483 and per-sub-section basis. */
484
485 #define MAX_LITERAL_POOL_SIZE 1024
486 typedef struct literal_expression
487 {
488 expressionS exp;
489 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
490 LITTLENUM_TYPE * bignum;
491 } literal_expression;
492
493 typedef struct literal_pool
494 {
495 literal_expression literals[MAX_LITERAL_POOL_SIZE];
496 unsigned int next_free_entry;
497 unsigned int id;
498 symbolS *symbol;
499 segT section;
500 subsegT sub_section;
501 int size;
502 struct literal_pool *next;
503 } literal_pool;
504
505 /* Pointer to a linked list of literal pools. */
506 static literal_pool *list_of_pools = NULL;
507 \f
508 /* Pure syntax. */
509
510 /* This array holds the chars that always start a comment. If the
511 pre-processor is disabled, these aren't very useful. */
512 const char comment_chars[] = "";
513
514 /* This array holds the chars that only start a comment at the beginning of
515 a line. If the line seems to have the form '# 123 filename'
516 .line and .file directives will appear in the pre-processed output. */
517 /* Note that input_file.c hand checks for '#' at the beginning of the
518 first line of the input file. This is because the compiler outputs
519 #NO_APP at the beginning of its output. */
520 /* Also note that comments like this one will always work. */
521 const char line_comment_chars[] = "#";
522
523 const char line_separator_chars[] = ";";
524
525 /* Chars that can be used to separate mant
526 from exp in floating point numbers. */
527 const char EXP_CHARS[] = "eE";
528
529 /* Chars that mean this number is a floating point constant. */
530 /* As in 0f12.456 */
531 /* or 0d1.2345e12 */
532
533 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
534
535 /* Prefix character that indicates the start of an immediate value. */
536 #define is_immediate_prefix(C) ((C) == '#')
537
538 /* Separator character handling. */
539
540 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
541
/* If **STR is the character C, consume it (advancing *STR) and return
   TRUE; otherwise leave *STR unchanged and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
553
554 #define skip_past_comma(str) skip_past_char (str, ',')
555
556 /* Arithmetic expressions (possibly involving symbols). */
557
558 static bool in_aarch64_get_expression = false;
559
560 /* Third argument to aarch64_get_expression. */
561 #define GE_NO_PREFIX false
562 #define GE_OPT_PREFIX true
563
564 /* Fourth argument to aarch64_get_expression. */
565 #define ALLOW_ABSENT false
566 #define REJECT_ABSENT true
567
568 /* Fifth argument to aarch64_get_expression. */
569 #define NORMAL_RESOLUTION false
570
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.
   If DEFER_RESOLUTION is true, then do not resolve expressions against
   constant symbols.  Necessary if the expression is part of a fixup
   that uses a reloc that must be emitted.  */
580
static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent,
			bool defer_resolution)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  /* Optionally consume a leading '#' immediate marker; remember whether
     one was seen so the error below can be made fatal.  */
  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* GAS's expression () parses from the global input_line_pointer, so
     temporarily redirect it at *STR and restore it afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* The flag lets md_operand () know it may mark bad operands as
     O_illegal on our behalf.  */
  in_aarch64_get_expression = true;
  if (defer_resolution)
    seg = deferred_expression (ep);
  else
    seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* After an explicit '#' the operand must be an expression, so a
	 parse failure cannot be recovered by trying another template.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  /* Success: advance the caller's pointer past the expression.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
644
645 /* Turn a string in input_line_pointer into a floating point constant
646 of type TYPE, and store the appropriate bytes in *LITP. The number
647 of LITTLENUMS emitted is stored in *SIZEP. An error message is
648 returned, or NULL on OK. */
649
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Delegate to the generic IEEE encoder, honouring the target's
     endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
655
656 /* We handle all bad expressions here, so that we can report the faulty
657 instruction in the error message. */
658 void
659 md_operand (expressionS * exp)
660 {
661 if (in_aarch64_get_expression)
662 exp->X_op = O_illegal;
663 }
664
665 /* Immediate values. */
666
667 /* Errors may be set multiple times during parsing or bit encoding
668 (particularly in the Neon bits), but usually the earliest error which is set
669 will be the most meaningful. Avoid overwriting it with later (cascading)
670 errors by calling this function. */
671
/* Keep the earliest diagnostic: record ERROR only when nothing has
   been saved for the current line yet.  */
static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
678
679 /* Similar to first_error, but this function accepts formatted error
680 message. */
681 static void
682 first_error_fmt (const char *format, ...)
683 {
684 va_list args;
685 enum
686 { size = 100 };
687 /* N.B. this single buffer will not cause error messages for different
688 instructions to pollute each other; this is because at the end of
689 processing of each assembly line, error message if any will be
690 collected by as_bad. */
691 static char buffer[size];
692
693 if (! error_p ())
694 {
695 int ret ATTRIBUTE_UNUSED;
696 va_start (args, format);
697 ret = vsnprintf (buffer, size, format, args);
698 know (ret <= size - 1 && ret >= 0);
699 va_end (args);
700 set_syntax_error (buffer);
701 }
702 }
703
704 /* Register parsing. */
705
706 /* Generic register parser which is called by other specialized
707 register parsers.
708 CCP points to what should be the beginning of a register name.
709 If it is indeed a valid register name, advance CCP over it and
710 return the reg_entry structure; otherwise return NULL.
711 It does not issue diagnostics. */
712
713 static reg_entry *
714 parse_reg (char **ccp)
715 {
716 char *start = *ccp;
717 char *p;
718 reg_entry *reg;
719
720 #ifdef REGISTER_PREFIX
721 if (*start != REGISTER_PREFIX)
722 return NULL;
723 start++;
724 #endif
725
726 p = start;
727 if (!ISALPHA (*p) || !is_name_beginner (*p))
728 return NULL;
729
730 do
731 p++;
732 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
733
734 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
735
736 if (!reg)
737 return NULL;
738
739 *ccp = p;
740 return reg;
741 }
742
743 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
744 return FALSE. */
745 static bool
746 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
747 {
748 return (reg_type_masks[type] & (1 << reg->type)) != 0;
749 }
750
751 /* Try to parse a base or offset register. Allow SVE base and offset
752 registers if REG_TYPE includes SVE registers. Return the register
753 entry on success, setting *QUALIFIER to the register qualifier.
754 Return null otherwise.
755
756 Note that this function does not issue any diagnostics. */
757
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit GPR, WSP or WZR -> W qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit GPR, SP or XZR -> X qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE Z register is only accepted when REG_TYPE allows it, and
	 only with an explicit '.s' or '.d' element-size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the two-character size suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  /* Only advance the caller's pointer on success.  */
  *ccp = str;

  return reg;
}
808
809 /* Try to parse a base or offset register. Return the register entry
810 on success, setting *QUALIFIER to the register qualifier. Return null
811 otherwise.
812
813 Note that this function does not issue any diagnostics. */
814
815 static const reg_entry *
816 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
817 {
818 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
819 }
820
821 /* Parse the qualifier of a vector register or vector element of type
822 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
823 succeeds; otherwise return FALSE.
824
825 Accept only one occurrence of:
826 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
827 b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE registers (Zn/Pn) never take a numeric lane count; for other
     registers, a missing leading digit likewise means "no count".  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  /* Decode the element-size letter following the optional count.  */
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only valid for SVE vectors or the 1q form.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* A counted vector must describe a full 64- or 128-bit register, or
     one of the two narrow forms 2h and 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      /* NOTE(review): the arguments pass WIDTH (the lane count) for the
	 "element size" slot and the size letter for the "vector size"
	 slot — looks transposed relative to the wording; confirm.  */
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
906
907 /* *STR contains an SVE zero/merge predication suffix. Parse it into
908 *PARSED_TYPE and point *STR at the end of the suffix. */
909
910 static bool
911 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
912 {
913 char *ptr = *str;
914
915 /* Skip '/'. */
916 gas_assert (*ptr == '/');
917 ptr++;
918 switch (TOLOWER (*ptr))
919 {
920 case 'z':
921 parsed_type->type = NT_zero;
922 break;
923 case 'm':
924 parsed_type->type = NT_merge;
925 break;
926 default:
927 if (*ptr != '\0' && *ptr != ',')
928 first_error_fmt (_("unexpected character `%c' in predication type"),
929 *ptr);
930 else
931 first_error (_("missing predication type"));
932 return false;
933 }
934 parsed_type->width = 0;
935 *str = ptr + 1;
936 return true;
937 }
938
939 /* Parse a register of the type TYPE.
940
941 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
942 name or the parsed register is not of TYPE.
943
944 Otherwise return the register number, and optionally fill in the actual
945 type of the register in *RTYPE when multiple alternatives were given, and
946 return the register shape and element index information in *TYPEINFO.
947
948 IN_REG_LIST should be set with TRUE if the caller is parsing a register
949 list. */
950
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bool in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start from an "untyped, unindexed" shape.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the concrete class of the register we found.  */
  type = reg->type;

  /* Only V/Z/P registers may carry a '.<shape>' suffix, and only P
     registers a '/<pred>' suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* Optional '[<index>]' element selector.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
			      NORMAL_RESOLUTION);

      /* Only literal constant indices are accepted.  */
      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      /* NOTE(review): the error is recorded but the register number is
	 still returned below; presumably callers consult error_p () —
	 confirm.  */
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1075
1076 /* Parse register.
1077
1078 Return the register number on success; return PARSE_FAIL otherwise.
1079
1080 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1081 the register (e.g. NEON double or quad reg when either has been requested).
1082
1083 If this is a NEON vector register with additional type information, fill
1084 in the struct pointed to by VECTYPE (if non-NULL).
1085
1086 This parser does not handle register list. */
1087
1088 static int
1089 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1090 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1091 {
1092 struct vector_type_el atype;
1093 char *str = *ccp;
1094 int reg = parse_typed_reg (&str, type, rtype, &atype,
1095 /*in_reg_list= */ false);
1096
1097 if (reg == PARSE_FAIL)
1098 return PARSE_FAIL;
1099
1100 if (vectype)
1101 *vectype = atype;
1102
1103 *ccp = str;
1104
1105 return reg;
1106 }
1107
1108 static inline bool
1109 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1110 {
1111 return
1112 e1.type == e2.type
1113 && e1.defined == e2.defined
1114 && e1.width == e2.width && e1.index == e2.index;
1115 }
1116
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |     0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  /* Each iteration parses one register; a '-' between two registers
     (detected at the bottom of the loop) denotes an inclusive range.  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;	/* previous register starts the range */
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ true);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  /* Expand from the register after the range start.  */
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  /* All registers in the list must share the first one's
	     shape/index information.  */
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      if (! error)
	/* Pack each register number into the next 5-bit slot of
	   RET_VAL (see the encoding table above).  */
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* A comma continues the list; otherwise a '-' continues a range
     (the comma operator records that for the next iteration).  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* If any register carried an index, a shared [<index>] must follow
     the closing brace.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
				  NORMAL_RESOLUTION);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low 2 bits encode the count minus one; register numbers start at
     bit 2 (see the table in the function header).  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1278
1279 /* Directives: register aliases. */
1280
1281 static reg_entry *
1282 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1283 {
1284 reg_entry *new;
1285 const char *name;
1286
1287 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1288 {
1289 if (new->builtin)
1290 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1291 str);
1292
1293 /* Only warn about a redefinition if it's not defined as the
1294 same register. */
1295 else if (new->number != number || new->type != type)
1296 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1297
1298 return NULL;
1299 }
1300
1301 name = xstrdup (str);
1302 new = XNEW (reg_entry);
1303
1304 new->name = name;
1305 new->number = number;
1306 new->type = type;
1307 new->builtin = false;
1308
1309 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1310
1311 return new;
1312 }
1313
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   NEWNAME points to the alias being defined and P to the text that
   should contain " .req <reg>".

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;			/* skip over " .req " */
  if (*oldname == '\0')
    return false;

  /* The right-hand side must already be a known register or alias.  */
  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the upper-case variant if it differs from what the
	 user wrote.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1393
/* Handler for a ".req" pseudo-op at the start of a line.  Should never
   be called for valid input, as .req goes between the alias and the
   register name, not at the beginning of the line; reaching here means
   the directive was mis-placed, so report it.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1401
1402 /* The .unreq directive deletes an alias which was previously defined
1403 by .req. For example:
1404
1405 my_alias .req r11
1406 .unreq my_alias */
1407
1408 static void
1409 s_unreq (int a ATTRIBUTE_UNUSED)
1410 {
1411 char *name;
1412 char saved_char;
1413
1414 name = input_line_pointer;
1415
1416 while (*input_line_pointer != 0
1417 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1418 ++input_line_pointer;
1419
1420 saved_char = *input_line_pointer;
1421 *input_line_pointer = 0;
1422
1423 if (!*name)
1424 as_bad (_("invalid syntax for .unreq directive"));
1425 else
1426 {
1427 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1428
1429 if (!reg)
1430 as_bad (_("unknown register alias '%s'"), name);
1431 else if (reg->builtin)
1432 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1433 name);
1434 else
1435 {
1436 char *p;
1437 char *nbuf;
1438
1439 str_hash_delete (aarch64_reg_hsh, name);
1440 free ((char *) reg->name);
1441 free (reg);
1442
1443 /* Also locate the all upper case and all lower case versions.
1444 Do not complain if we cannot find one or the other as it
1445 was probably deleted above. */
1446
1447 nbuf = strdup (name);
1448 for (p = nbuf; *p; p++)
1449 *p = TOUPPER (*p);
1450 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1451 if (reg)
1452 {
1453 str_hash_delete (aarch64_reg_hsh, nbuf);
1454 free ((char *) reg->name);
1455 free (reg);
1456 }
1457
1458 for (p = nbuf; *p; p++)
1459 *p = TOLOWER (*p);
1460 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1461 if (reg)
1462 {
1463 str_hash_delete (aarch64_reg_hsh, nbuf);
1464 free ((char *) reg->name);
1465 free (reg);
1466 }
1467
1468 free (nbuf);
1469 }
1470 }
1471
1472 *input_line_pointer = saved_char;
1473 demand_empty_rest_of_line ();
1474 }
1475
1476 /* Directives: Instruction set selection. */
1477
1478 #ifdef OBJ_ELF
1479 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1480 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1481 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1482 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1483
/* Create a new mapping symbol for the transition to STATE, at offset
   VALUE within FRAG.  Per the AArch64 ELF spec, "$x" marks the start
   of code and "$d" the start of data.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the symbol name and BFD flags for the new state.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Drop the stale frag-start symbol; the new one supersedes it.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Two mapping symbols at the same offset: keep only the new one.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1539
1540 /* We must sometimes convert a region marked as code to data during
1541 code alignment, if an odd number of bytes have to be padded. The
1542 code mapping symbol is pushed to an aligned address. */
1543
1544 static void
1545 insert_data_mapping_symbol (enum mstate state,
1546 valueT value, fragS * frag, offsetT bytes)
1547 {
1548 /* If there was already a mapping symbol, remove it. */
1549 if (frag->tc_frag_data.last_map != NULL
1550 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1551 frag->fr_address + value)
1552 {
1553 symbolS *symp = frag->tc_frag_data.last_map;
1554
1555 if (value == 0)
1556 {
1557 know (frag->tc_frag_data.first_map == symp);
1558 frag->tc_frag_data.first_map = NULL;
1559 }
1560 frag->tc_frag_data.last_map = NULL;
1561 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1562 }
1563
1564 make_mapping_symbol (MAP_DATA, value, frag);
1565 make_mapping_symbol (state, value + bytes, frag);
1566 }
1567
1568 static void mapping_state_2 (enum mstate state, int max_chars);
1569
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  Emits a mapping symbol when
   the section's state actually changes.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	/* Mark everything before this first instruction as data.  */
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1611
1612 /* Same as mapping_state, but MAX_CHARS bytes have already been
1613 allocated. Put the mapping symbol that far back. */
1614
1615 static void
1616 mapping_state_2 (enum mstate state, int max_chars)
1617 {
1618 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1619
1620 if (!SEG_NORMAL (now_seg))
1621 return;
1622
1623 if (mapstate == state)
1624 /* The mapping symbol has already been emitted.
1625 There is nothing else to do. */
1626 return;
1627
1628 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1629 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1630 }
1631 #else
1632 #define mapping_state(x) /* nothing */
1633 #define mapping_state_2(x, y) /* nothing */
1634 #endif
1635
1636 /* Directives: sectioning and alignment. */
1637
/* Handle the ".bss" directive: switch to the BSS section and record
   that subsequent bytes are data for mapping-symbol purposes.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1647
/* Handle the ".even" directive: align the current location to a
   2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* Remember the alignment so the section header reflects it.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1659
1660 /* Directives: Literal pools. */
1661
1662 static literal_pool *
1663 find_literal_pool (int size)
1664 {
1665 literal_pool *pool;
1666
1667 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1668 {
1669 if (pool->section == now_seg
1670 && pool->sub_section == now_subseg && pool->size == size)
1671 break;
1672 }
1673
1674 return pool;
1675 }
1676
1677 static literal_pool *
1678 find_or_make_literal_pool (int size)
1679 {
1680 /* Next literal pool ID number. */
1681 static unsigned int latest_pool_num = 1;
1682 literal_pool *pool;
1683
1684 pool = find_literal_pool (size);
1685
1686 if (pool == NULL)
1687 {
1688 /* Create a new pool. */
1689 pool = XNEW (literal_pool);
1690 if (!pool)
1691 return NULL;
1692
1693 /* Currently we always put the literal pool in the current text
1694 section. If we were generating "small" model code where we
1695 knew that all code and initialised data was within 1MB then
1696 we could output literals to mergeable, read-only data
1697 sections. */
1698
1699 pool->next_free_entry = 0;
1700 pool->section = now_seg;
1701 pool->sub_section = now_subseg;
1702 pool->size = size;
1703 pool->next = list_of_pools;
1704 pool->symbol = NULL;
1705
1706 /* Add it to the list. */
1707 list_of_pools = pool;
1708 }
1709
1710 /* New pools, and emptied pools, will have a NULL symbol. */
1711 if (pool->symbol == NULL)
1712 {
1713 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1714 &zero_address_frag, 0);
1715 pool->id = latest_pool_num++;
1716 }
1717
1718 /* Done. */
1719 return pool;
1720 }
1721
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   On success *EXP is rewritten to refer to the pool slot (an O_symbol
   expression against the pool's label plus the slot offset).
   Return TRUE on success, otherwise return FALSE.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool; if so, reuse
     that slot rather than adding a duplicate.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match on symbol and addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Redirect the caller's expression at the pool slot.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1781
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Fill in the previously-created SYMBOLP with NAME (copied), SEGMENT,
   value VALU and owning FRAG, then append it to the global symbol
   chain and run the object-format and target new-symbol hooks.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1832
1833
/* Handle the ".ltorg"/".pool" directive: dump every non-empty literal
   pool (4-, 8- and 16-byte entry sizes) for the current section at the
   current location, then mark the pools empty.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Walk the possible entry sizes: 1 << 2 .. 1 << 4 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* The pool contents are data, not instructions.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* Build the pool label; the embedded \002 byte presumably keeps
	 the name from clashing with any user symbol -- the label is
	 internal only.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Give the pool's placeholder symbol its real location.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      /* The saved copy is no longer needed once emitted.  */
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1892
1893 #ifdef OBJ_ELF
1894 /* Forward declarations for functions below, in the MD interface
1895 section. */
1896 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1897 static struct reloc_table_entry * find_reloc_table_entry (char **);
1898
1899 /* Directives: Data. */
1900 /* N.B. the support for relocation suffix in this directive needs to be
1901 implemented properly. */
1902
/* Handle .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8):
   emit a comma-separated list of NBYTES-wide data values.  A value may
   carry a ":reloc_suffix:" prefix, which is recognized but (as noted
   above) not yet implemented.  */
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* Everything emitted here is data.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:reloc:" or ":reloc:" prefix on a
	     symbolic value.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1954
/* Handle ".variant_pcs <symbol>": mark SYMBOL as following a variant
   PCS convention by setting STO_AARCH64_VARIANT_PCS in its st_other
   field.  */

static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  /* NOTE(review): on an empty name the error above is reported but we
     still fall through and create/mark a symbol for it -- confirm this
     is intended rather than returning early.  */
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
1977 #endif /* OBJ_ELF */
1978
/* Handle ".inst": output each comma-separated 32-bit constant as raw
   instruction words (marked as code, with DWARF line info).  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;		/* Number of instruction words emitted.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instructions are always little-endian; byte-swap the word when
	 assembling for a big-endian target.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Record the emitted words as one instruction group for DWARF.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2035
2036 static void
2037 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2038 {
2039 demand_empty_rest_of_line ();
2040 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2041 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2042 }
2043
2044 #ifdef OBJ_ELF
2045 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2046
2047 static void
2048 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2049 {
2050 expressionS exp;
2051
2052 expression (&exp);
2053 frag_grow (4);
2054 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2055 BFD_RELOC_AARCH64_TLSDESC_ADD);
2056
2057 demand_empty_rest_of_line ();
2058 }
2059
2060 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2061
2062 static void
2063 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2064 {
2065 expressionS exp;
2066
2067 /* Since we're just labelling the code, there's no need to define a
2068 mapping symbol. */
2069 expression (&exp);
2070 /* Make sure there is enough room in this frag for the following
2071 blr. This trick only works if the blr follows immediately after
2072 the .tlsdesc directive. */
2073 frag_grow (4);
2074 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2075 BFD_RELOC_AARCH64_TLSDESC_CALL);
2076
2077 demand_empty_rest_of_line ();
2078 }
2079
2080 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2081
2082 static void
2083 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2084 {
2085 expressionS exp;
2086
2087 expression (&exp);
2088 frag_grow (4);
2089 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2090 BFD_RELOC_AARCH64_TLSDESC_LDR);
2091
2092 demand_empty_rest_of_line ();
2093 }
2094 #endif /* OBJ_ELF */
2095
2096 static void s_aarch64_arch (int);
2097 static void s_aarch64_cpu (int);
2098 static void s_aarch64_arch_extension (int);
2099
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is a synonym for ".ltorg".  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument is the data size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* The character argument selects the float_cons format.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2133 \f
2134
2135 /* Check whether STR points to a register name followed by a comma or the
2136 end of line; REG_TYPE indicates which register types are checked
2137 against. Return TRUE if STR is such a register name; otherwise return
2138 FALSE. The function does not intend to produce any diagnostics, but since
2139 the register parser aarch64_reg_parse, which is called by this function,
2140 does produce diagnostics, we call clear_error to clear any diagnostics
2141 that may be generated by aarch64_reg_parse.
2142 Also, the function returns FALSE directly if there is any user error
2143 present at the function entry. This prevents the existing diagnostics
2144 state from being spoiled.
2145 The function currently serves parse_constant_immediate and
2146 parse_big_immediate only. */
2147 static bool
2148 reg_name_p (char *str, aarch64_reg_type reg_type)
2149 {
2150 int reg;
2151
2152 /* Prevent the diagnostics state from being spoiled. */
2153 if (error_p ())
2154 return false;
2155
2156 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2157
2158 /* Clear the parsing error that may be set by the reg parser. */
2159 clear_error ();
2160
2161 if (reg == PARSE_FAIL)
2162 return false;
2163
2164 skip_whitespace (str);
2165 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2166 return true;
2167
2168 return false;
2169 }
2170
2171 /* Parser functions used exclusively in instruction operands. */
2172
2173 /* Parse an immediate expression which may not be constant.
2174
2175 To prevent the expression parser from pushing a register name
2176 into the symbol table as an undefined symbol, firstly a check is
2177 done to find out whether STR is a register of type REG_TYPE followed
2178 by a comma or the end of line. Return FALSE if STR is such a string. */
2179
2180 static bool
2181 parse_immediate_expression (char **str, expressionS *exp,
2182 aarch64_reg_type reg_type)
2183 {
2184 if (reg_name_p (*str, reg_type))
2185 {
2186 set_recoverable_error (_("immediate operand required"));
2187 return false;
2188 }
2189
2190 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT,
2191 NORMAL_RESOLUTION);
2192
2193 if (exp->X_op == O_absent)
2194 {
2195 set_fatal_syntax_error (_("missing immediate expression"));
2196 return false;
2197 }
2198
2199 return true;
2200 }
2201
2202 /* Constant immediate-value read function for use in insn parsing.
2203 STR points to the beginning of the immediate (with the optional
2204 leading #); *VAL receives the value. REG_TYPE says which register
2205 names should be treated as registers rather than as symbolic immediates.
2206
2207 Return TRUE on success; otherwise return FALSE. */
2208
2209 static bool
2210 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2211 {
2212 expressionS exp;
2213
2214 if (! parse_immediate_expression (str, &exp, reg_type))
2215 return false;
2216
2217 if (exp.X_op != O_constant)
2218 {
2219 set_syntax_error (_("constant expression required"));
2220 return false;
2221 }
2222
2223 *val = exp.X_add_number;
2224 return true;
2225 }
2226
/* Fold the IEEE single-precision encoding IMM down to the 8-bit
   AArch64 floating-point immediate field: fraction/exponent bits
   25:19 of IMM become bits 6:0 of the result, and sign bit 31
   becomes bit 7.  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31]    -> b[7].  */

  return sign | low7;
}
2233
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.

   In bit terms, a representable encoding looks like

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

   where n, e and each x are either 0 or 1 independently, with
   E == ~e.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* The low 19 fraction bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 25-29 must each be the complement of bit 30: either bit 30
     set with bits 25-29 clear, or bit 30 clear with bits 25-29 set.  */
  uint32_t expected = (imm & 0x40000000) != 0 ? 0x40000000 : 0x3e000000;

  return (imm & 0x7e000000) == expected;
}
2266
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* A convertible double has the bit pattern

       6 66655555555 5544 44444444 33333333 33222222 22221111 111111
       3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
       n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t hi = imm >> 32;
  uint32_t lo = (uint32_t) imm;
  uint32_t expected = ((hi >> 30) & 0x1) ? 0x40000000 : 0x38000000;

  /* The 29 lowest mantissa bits would be truncated; they must be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* Bits 27-29 must each be the inverse of bit 30 (the E~~~ pattern).  */
  if ((hi & 0x78000000) != expected)
    return false;

  /* Reject Eeee_eeee == 1111_1111 (the result's exponent would be all
     ones).  */
  if ((hi & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = ((hi & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((hi << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (lo >> 29));		/* 3 S bits.  */
  return true;
}
2314
2315 /* Return true if we should treat OPERAND as a double-precision
2316 floating-point operand rather than a single-precision one. */
2317 static bool
2318 double_precision_operand_p (const aarch64_opnd_info *operand)
2319 {
2320 /* Check for unsuffixed SVE registers, which are allowed
2321 for LDR and STR but not in instructions that require an
2322 immediate. We get better error messages if we arbitrarily
2323 pick one size, parse the immediate normally, and then
2324 report the match failure in the normal way. */
2325 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2326 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2327 }
2328
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A double-precision pattern is only usable if it converts to
	     single precision without losing bits.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name here means the immediate is simply absent; this
	 is recoverable so that other operand parsers may be tried.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Parse a decimal floating-point literal and collect its
	 single-precision ('s') encoding from the littlenum words.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2404
/* Less-generic immediate-value read function with the possibility of loading
   a big (64-bit) immediate, as required by AdvSIMD Modified immediate
   instructions.

   To prevent the expression parser from pushing a register name into the
   symbol table as an undefined symbol, a check is firstly done to find
   out whether STR is a register of type REG_TYPE followed by a comma or
   the end of line.  Return FALSE if STR is such a register.  */

static bool
parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
{
  char *ptr = *str;

  if (reg_name_p (ptr, reg_type))
    {
      set_syntax_error (_("immediate operand required"));
      return false;
    }

  /* The parsed expression is left in inst.reloc.exp for later fixup
     processing.  */
  aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT,
			  NORMAL_RESOLUTION);

  /* NOTE: *IMM is written only when the expression folds to a constant;
     for any other expression kind this function still returns true and
     *IMM keeps its previous contents.  */
  if (inst.reloc.exp.X_op == O_constant)
    *imm = inst.reloc.exp.X_add_number;

  *str = ptr;

  return true;
}
2435
2436 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2437 if NEED_LIBOPCODES is non-zero, the fixup will need
2438 assistance from the libopcodes. */
2439
2440 static inline void
2441 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2442 const aarch64_opnd_info *operand,
2443 int need_libopcodes_p)
2444 {
2445 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2446 reloc->opnd = operand->type;
2447 if (need_libopcodes_p)
2448 reloc->need_libopcodes_p = 1;
2449 };
2450
2451 /* Return TRUE if the instruction needs to be fixed up later internally by
2452 the GAS; otherwise return FALSE. */
2453
2454 static inline bool
2455 aarch64_gas_internal_fixup_p (void)
2456 {
2457 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2458 }
2459
2460 /* Assign the immediate value to the relevant field in *OPERAND if
2461 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2462 needs an internal fixup in a later stage.
2463 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2464 IMM.VALUE that may get assigned with the constant. */
2465 static inline void
2466 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2467 aarch64_opnd_info *operand,
2468 int addr_off_p,
2469 int need_libopcodes_p,
2470 int skip_p)
2471 {
2472 if (reloc->exp.X_op == O_constant)
2473 {
2474 if (addr_off_p)
2475 operand->addr.offset.imm = reloc->exp.X_add_number;
2476 else
2477 operand->imm.value = reloc->exp.X_add_number;
2478 reloc->type = BFD_RELOC_UNUSED;
2479 }
2480 else
2481 {
2482 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2483 /* Tell libopcodes to ignore this operand or not. This is helpful
2484 when one of the operands needs to be fixed up later but we need
2485 libopcodes to check the other operands. */
2486 operand->skip = skip_p;
2487 }
2488 }
2489
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;	/* Modifier name, without the surrounding colons.  */
  int pc_rel;		/* Non-zero if the relocation is PC-relative.  */
  /* BFD reloc to emit for each class of instruction the modifier may
     decorate; 0 where that combination is not supported.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2509
/* Table of all relocation modifiers recognised in operands.  Within an
   entry, a 0 in an instruction-class column means the modifier cannot
   be used with that class of instruction (see struct reloc_table_entry
   for the column order).  */
static struct reloc_table_entry reloc_table[] =
{
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3038
3039 /* Given the address of a pointer pointing to the textual name of a
3040 relocation as may appear in assembler source, attempt to find its
3041 details in reloc_table. The pointer will be updated to the character
3042 after the trailing colon. On failure, NULL will be returned;
3043 otherwise return the reloc_table_entry. */
3044
3045 static struct reloc_table_entry *
3046 find_reloc_table_entry (char **str)
3047 {
3048 unsigned int i;
3049 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3050 {
3051 int length = strlen (reloc_table[i].name);
3052
3053 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3054 && (*str)[length] == ':')
3055 {
3056 *str += (length + 1);
3057 return &reloc_table[i];
3058 }
3059 }
3060
3061 return NULL;
3062 }
3063
/* Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK.  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* No opinion here; the caller applies the generic policy.  */
      return -1;
    }
}
3166
3167 int
3168 aarch64_force_relocation (struct fix *fixp)
3169 {
3170 int res = aarch64_force_reloc (fixp->fx_r_type);
3171
3172 if (res == -1)
3173 return generic_force_reloc (fixp);
3174 return res;
3175 }
3176
/* Mode argument to parse_shift and parser_shifter_operand.  Selects which
   shift/extend operators and operand forms are legal in the context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3191
3192 /* Parse a <shift> operator on an AArch64 data processing instruction.
3193 Return TRUE on success; otherwise return FALSE. */
3194 static bool
3195 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3196 {
3197 const struct aarch64_name_value_pair *shift_op;
3198 enum aarch64_modifier_kind kind;
3199 expressionS exp;
3200 int exp_has_prefix;
3201 char *s = *str;
3202 char *p = s;
3203
3204 for (p = *str; ISALPHA (*p); p++)
3205 ;
3206
3207 if (p == *str)
3208 {
3209 set_syntax_error (_("shift expression expected"));
3210 return false;
3211 }
3212
3213 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3214
3215 if (shift_op == NULL)
3216 {
3217 set_syntax_error (_("shift operator expected"));
3218 return false;
3219 }
3220
3221 kind = aarch64_get_operand_modifier (shift_op);
3222
3223 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3224 {
3225 set_syntax_error (_("invalid use of 'MSL'"));
3226 return false;
3227 }
3228
3229 if (kind == AARCH64_MOD_MUL
3230 && mode != SHIFTED_MUL
3231 && mode != SHIFTED_MUL_VL)
3232 {
3233 set_syntax_error (_("invalid use of 'MUL'"));
3234 return false;
3235 }
3236
3237 switch (mode)
3238 {
3239 case SHIFTED_LOGIC_IMM:
3240 if (aarch64_extend_operator_p (kind))
3241 {
3242 set_syntax_error (_("extending shift is not permitted"));
3243 return false;
3244 }
3245 break;
3246
3247 case SHIFTED_ARITH_IMM:
3248 if (kind == AARCH64_MOD_ROR)
3249 {
3250 set_syntax_error (_("'ROR' shift is not permitted"));
3251 return false;
3252 }
3253 break;
3254
3255 case SHIFTED_LSL:
3256 if (kind != AARCH64_MOD_LSL)
3257 {
3258 set_syntax_error (_("only 'LSL' shift is permitted"));
3259 return false;
3260 }
3261 break;
3262
3263 case SHIFTED_MUL:
3264 if (kind != AARCH64_MOD_MUL)
3265 {
3266 set_syntax_error (_("only 'MUL' is permitted"));
3267 return false;
3268 }
3269 break;
3270
3271 case SHIFTED_MUL_VL:
3272 /* "MUL VL" consists of two separate tokens. Require the first
3273 token to be "MUL" and look for a following "VL". */
3274 if (kind == AARCH64_MOD_MUL)
3275 {
3276 skip_whitespace (p);
3277 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3278 {
3279 p += 2;
3280 kind = AARCH64_MOD_MUL_VL;
3281 break;
3282 }
3283 }
3284 set_syntax_error (_("only 'MUL VL' is permitted"));
3285 return false;
3286
3287 case SHIFTED_REG_OFFSET:
3288 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3289 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3290 {
3291 set_fatal_syntax_error
3292 (_("invalid shift for the register offset addressing mode"));
3293 return false;
3294 }
3295 break;
3296
3297 case SHIFTED_LSL_MSL:
3298 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3299 {
3300 set_syntax_error (_("invalid shift operator"));
3301 return false;
3302 }
3303 break;
3304
3305 default:
3306 abort ();
3307 }
3308
3309 /* Whitespace can appear here if the next thing is a bare digit. */
3310 skip_whitespace (p);
3311
3312 /* Parse shift amount. */
3313 exp_has_prefix = 0;
3314 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3315 exp.X_op = O_absent;
3316 else
3317 {
3318 if (is_immediate_prefix (*p))
3319 {
3320 p++;
3321 exp_has_prefix = 1;
3322 }
3323 (void) aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT,
3324 NORMAL_RESOLUTION);
3325 }
3326 if (kind == AARCH64_MOD_MUL_VL)
3327 /* For consistency, give MUL VL the same shift amount as an implicit
3328 MUL #1. */
3329 operand->shifter.amount = 1;
3330 else if (exp.X_op == O_absent)
3331 {
3332 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3333 {
3334 set_syntax_error (_("missing shift amount"));
3335 return false;
3336 }
3337 operand->shifter.amount = 0;
3338 }
3339 else if (exp.X_op != O_constant)
3340 {
3341 set_syntax_error (_("constant shift amount required"));
3342 return false;
3343 }
3344 /* For parsing purposes, MUL #n has no inherent range. The range
3345 depends on the operand and will be checked by operand-specific
3346 routines. */
3347 else if (kind != AARCH64_MOD_MUL
3348 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3349 {
3350 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3351 return false;
3352 }
3353 else
3354 {
3355 operand->shifter.amount = exp.X_add_number;
3356 operand->shifter.amount_present = 1;
3357 }
3358
3359 operand->shifter.operator_present = 1;
3360 operand->shifter.kind = kind;
3361
3362 *str = p;
3363 return true;
3364 }
3365
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the two immediate-capable modes may reach this parser.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT, NORMAL_RESOLUTION))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Not accept any shifter for logical immediate values.  */
  /* NOTE(review): when the comma is present but parse_shift fails, the
     error it set is kept yet we still return true with the comma
     consumed; presumably the caller then rejects the leftover input —
     confirm.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3407
3408 /* Parse a <shifter_operand> for a data processing instruction:
3409
3410 <Rm>
3411 <Rm>, <shift>
3412 #<immediate>
3413 #<immediate>, LSL #imm
3414
3415 where <shift> is handled by parse_shift above, and the last two
3416 cases are handled by the function above.
3417
3418 Validation of immediate operands is deferred to md_apply_fix.
3419
3420 Return TRUE on success; otherwise return FALSE. */
3421
3422 static bool
3423 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3424 enum parse_shift_mode mode)
3425 {
3426 const reg_entry *reg;
3427 aarch64_opnd_qualifier_t qualifier;
3428 enum aarch64_operand_class opd_class
3429 = aarch64_get_operand_class (operand->type);
3430
3431 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3432 if (reg)
3433 {
3434 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3435 {
3436 set_syntax_error (_("unexpected register in the immediate operand"));
3437 return false;
3438 }
3439
3440 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3441 {
3442 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3443 return false;
3444 }
3445
3446 operand->reg.regno = reg->number;
3447 operand->qualifier = qualifier;
3448
3449 /* Accept optional shift operation on register. */
3450 if (! skip_past_comma (str))
3451 return true;
3452
3453 if (! parse_shift (str, operand, mode))
3454 return false;
3455
3456 return true;
3457 }
3458 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3459 {
3460 set_syntax_error
3461 (_("integer register expected in the extended/shifted operand "
3462 "register"));
3463 return false;
3464 }
3465
3466 /* We have a shifted immediate variable. */
3467 return parse_shifter_operand_imm (str, operand, mode);
3468 }
3469
3470 /* Return TRUE on success; return FALSE otherwise. */
3471
3472 static bool
3473 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3474 enum parse_shift_mode mode)
3475 {
3476 char *p = *str;
3477
3478 /* Determine if we have the sequence of characters #: or just :
3479 coming next. If we do, then we check for a :rello: relocation
3480 modifier. If we don't, punt the whole lot to
3481 parse_shifter_operand. */
3482
3483 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3484 {
3485 struct reloc_table_entry *entry;
3486
3487 if (p[0] == '#')
3488 p += 2;
3489 else
3490 p++;
3491 *str = p;
3492
3493 /* Try to parse a relocation. Anything else is an error. */
3494 if (!(entry = find_reloc_table_entry (str)))
3495 {
3496 set_syntax_error (_("unknown relocation modifier"));
3497 return false;
3498 }
3499
3500 if (entry->add_type == 0)
3501 {
3502 set_syntax_error
3503 (_("this relocation modifier is not allowed on this instruction"));
3504 return false;
3505 }
3506
3507 /* Save str before we decompose it. */
3508 p = *str;
3509
3510 /* Next, we parse the expression. */
3511 if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
3512 REJECT_ABSENT,
3513 aarch64_force_reloc (entry->add_type) == 1))
3514 return false;
3515
3516 /* Record the relocation type (use the ADD variant here). */
3517 inst.reloc.type = entry->add_type;
3518 inst.reloc.pc_rel = entry->pc_rel;
3519
3520 /* If str is empty, we've reached the end, stop here. */
3521 if (**str == '\0')
3522 return true;
3523
3524 /* Otherwise, we have a shifted reloc modifier, so rewind to
3525 recover the variable name and continue parsing for the shifter. */
3526 *str = p;
3527 return parse_shifter_operand_imm (str, operand, mode);
3528 }
3529
3530 return parse_shifter_operand (str, operand, mode);
3531 }
3532
/* Parse all forms of an address expression.  Information is written
   to *OPERAND and/or inst.reloc.

   The A64 instruction set has the following addressing modes:

   Offset
     [base]			// in SIMD ld/st structure
     [base{,#0}]		// in ld/st exclusive
     [base{,#imm}]
     [base,Xm{,LSL #imm}]
     [base,Xm,SXTX {#imm}]
     [base,Wm,(S|U)XTW {#imm}]
   Pre-indexed
     [base]!			// in ldraa/ldrab exclusive
     [base,#imm]!
   Post-indexed
     [base],#imm
     [base],Xm			// in SIMD ld/st structure
   PC-relative (literal)
     label
   SVE:
     [base,#imm,MUL VL]
     [base,Zm.D{,LSL #imm}]
     [base,Zm.S,(S|U)XTW {#imm}]
     [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
     [Zn.S,#imm]
     [Zn.D,#imm]
     [Zn.S{, Xm}]
     [Zn.S,Zm.S{,LSL #imm}]	// in ADR
     [Zn.D,Zm.D{,LSL #imm}]	// in ADR
     [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR

   (As a convenience, the notation "=immediate" is permitted in conjunction
   with the pc-relative literal load instructions to automatically place an
   immediate value or symbolic address in a nearby literal pool and generate
   a hidden label which references it.)

   Upon a successful parsing, the address structure in *OPERAND will be
   filled in the following way:

     .base_regno = <base>
     .offset.is_reg	// 1 if the offset is a register
     .offset.imm = <imm>
     .offset.regno = <Rm>

   For different addressing modes defined in the A64 ISA:

   Offset
     .pcrel=0; .preind=1; .postind=0; .writeback=0
   Pre-indexed
     .pcrel=0; .preind=1; .postind=0; .writeback=1
   Post-indexed
     .pcrel=0; .preind=0; .postind=1; .writeback=1
   PC-relative (literal)
     .pcrel=1; .preind=1; .postind=0; .writeback=0

   The shift/extension information, if any, will be stored in .shifter.
   The base and offset qualifiers will be stored in *BASE_QUALIFIER and
   *OFFSET_QUALIFIER respectively, with NIL being used if there's no
   corresponding register.

   BASE_TYPE says which types of base register should be accepted and
   OFFSET_TYPE says the same for offset registers.  IMM_SHIFT_MODE
   is the type of shifter that is allowed for immediate offsets,
   or SHIFTED_NONE if none.

   In all other respects, it is the caller's responsibility to check
   for addressing modes not supported by the instruction, and to set
   inst.reloc.type.  */

static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  /* No base or offset register seen yet.  */
  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the PC-relative form, i.e. =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* Pick the relocation variant appropriate to the operand.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					aarch64_force_reloc (ty) == 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				       NORMAL_RESOLUTION))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Parse the base register; its class is constrained by BASE_TYPE.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      /* A register offset, if any, is tried first.  */
      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* LSL/SXTX (or no shifter) require a 64-bit offset.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* Base and offset must agree in size, except for the SVE2
		 vector-plus-scalar [Zn.S, Xm] form.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW extensions only apply to 32-bit offsets.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* No register offset: immediate, possibly with a relocation.  */
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					    aarch64_force_reloc (entry->ldst_type) == 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					    NORMAL_RESOLUTION))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  /* Writeback suffixes: '!' marks pre-indexing, ',' after ']' marks
     post-indexing.  */
  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					 NORMAL_RESOLUTION))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!  */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!  */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is shorthand for [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
3906
3907 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3908 on success. */
3909 static bool
3910 parse_address (char **str, aarch64_opnd_info *operand)
3911 {
3912 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3913 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3914 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3915 }
3916
3917 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3918 The arguments have the same meaning as for parse_address_main.
3919 Return TRUE on success. */
3920 static bool
3921 parse_sve_address (char **str, aarch64_opnd_info *operand,
3922 aarch64_opnd_qualifier_t *base_qualifier,
3923 aarch64_opnd_qualifier_t *offset_qualifier)
3924 {
3925 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3926 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3927 SHIFTED_MUL_VL);
3928 }
3929
3930 /* Parse a register X0-X30. The register must be 64-bit and register 31
3931 is unallocated. */
3932 static bool
3933 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
3934 {
3935 const reg_entry *reg = parse_reg (str);
3936 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
3937 {
3938 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3939 return false;
3940 }
3941 operand->reg.regno = reg->number;
3942 operand->qualifier = AARCH64_OPND_QLF_X;
3943 return true;
3944 }
3945
3946 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3947 Return TRUE on success; otherwise return FALSE. */
3948 static bool
3949 parse_half (char **str, int *internal_fixup_p)
3950 {
3951 char *p = *str;
3952
3953 skip_past_char (&p, '#');
3954
3955 gas_assert (internal_fixup_p);
3956 *internal_fixup_p = 0;
3957
3958 if (*p == ':')
3959 {
3960 struct reloc_table_entry *entry;
3961
3962 /* Try to parse a relocation. Anything else is an error. */
3963 ++p;
3964
3965 if (!(entry = find_reloc_table_entry (&p)))
3966 {
3967 set_syntax_error (_("unknown relocation modifier"));
3968 return false;
3969 }
3970
3971 if (entry->movw_type == 0)
3972 {
3973 set_syntax_error
3974 (_("this relocation modifier is not allowed on this instruction"));
3975 return false;
3976 }
3977
3978 inst.reloc.type = entry->movw_type;
3979 }
3980 else
3981 *internal_fixup_p = 1;
3982
3983 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3984 aarch64_force_reloc (inst.reloc.type) == 1))
3985 return false;
3986
3987 *str = p;
3988 return true;
3989 }
3990
3991 /* Parse an operand for an ADRP instruction:
3992 ADRP <Xd>, <label>
3993 Return TRUE on success; otherwise return FALSE. */
3994
3995 static bool
3996 parse_adrp (char **str)
3997 {
3998 char *p;
3999
4000 p = *str;
4001 if (*p == ':')
4002 {
4003 struct reloc_table_entry *entry;
4004
4005 /* Try to parse a relocation. Anything else is an error. */
4006 ++p;
4007 if (!(entry = find_reloc_table_entry (&p)))
4008 {
4009 set_syntax_error (_("unknown relocation modifier"));
4010 return false;
4011 }
4012
4013 if (entry->adrp_type == 0)
4014 {
4015 set_syntax_error
4016 (_("this relocation modifier is not allowed on this instruction"));
4017 return false;
4018 }
4019
4020 inst.reloc.type = entry->adrp_type;
4021 }
4022 else
4023 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4024
4025 inst.reloc.pc_rel = 1;
4026 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
4027 aarch64_force_reloc (inst.reloc.type) == 1))
4028 return false;
4029 *str = p;
4030 return true;
4031 }
4032
4033 /* Miscellaneous. */
4034
4035 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4036 of SIZE tokens in which index I gives the token for field value I,
4037 or is null if field value I is invalid. REG_TYPE says which register
4038 names should be treated as registers rather than as symbolic immediates.
4039
4040 Return true on success, moving *STR past the operand and storing the
4041 field value in *VAL. */
4042
4043 static int
4044 parse_enum_string (char **str, int64_t *val, const char *const *array,
4045 size_t size, aarch64_reg_type reg_type)
4046 {
4047 expressionS exp;
4048 char *p, *q;
4049 size_t i;
4050
4051 /* Match C-like tokens. */
4052 p = q = *str;
4053 while (ISALNUM (*q))
4054 q++;
4055
4056 for (i = 0; i < size; ++i)
4057 if (array[i]
4058 && strncasecmp (array[i], p, q - p) == 0
4059 && array[i][q - p] == 0)
4060 {
4061 *val = i;
4062 *str = q;
4063 return true;
4064 }
4065
4066 if (!parse_immediate_expression (&p, &exp, reg_type))
4067 return false;
4068
4069 if (exp.X_op == O_constant
4070 && (uint64_t) exp.X_add_number < size)
4071 {
4072 *val = exp.X_add_number;
4073 *str = p;
4074 return true;
4075 }
4076
4077 /* Use the default error for this operand. */
4078 return false;
4079 }
4080
4081 /* Parse an option for a preload instruction. Returns the encoding for the
4082 option, or PARSE_FAIL. */
4083
4084 static int
4085 parse_pldop (char **str)
4086 {
4087 char *p, *q;
4088 const struct aarch64_name_value_pair *o;
4089
4090 p = q = *str;
4091 while (ISALNUM (*q))
4092 q++;
4093
4094 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4095 if (!o)
4096 return PARSE_FAIL;
4097
4098 *str = q;
4099 return o->value;
4100 }
4101
4102 /* Parse an option for a barrier instruction. Returns the encoding for the
4103 option, or PARSE_FAIL. */
4104
4105 static int
4106 parse_barrier (char **str)
4107 {
4108 char *p, *q;
4109 const struct aarch64_name_value_pair *o;
4110
4111 p = q = *str;
4112 while (ISALPHA (*q))
4113 q++;
4114
4115 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4116 if (!o)
4117 return PARSE_FAIL;
4118
4119 *str = q;
4120 return o->value;
4121 }
4122
4123 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4124 return 0 if successful. Otherwise return PARSE_FAIL. */
4125
4126 static int
4127 parse_barrier_psb (char **str,
4128 const struct aarch64_name_value_pair ** hint_opt)
4129 {
4130 char *p, *q;
4131 const struct aarch64_name_value_pair *o;
4132
4133 p = q = *str;
4134 while (ISALPHA (*q))
4135 q++;
4136
4137 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4138 if (!o)
4139 {
4140 set_fatal_syntax_error
4141 ( _("unknown or missing option to PSB/TSB"));
4142 return PARSE_FAIL;
4143 }
4144
4145 if (o->value != 0x11)
4146 {
4147 /* PSB only accepts option name 'CSYNC'. */
4148 set_syntax_error
4149 (_("the specified option is not accepted for PSB/TSB"));
4150 return PARSE_FAIL;
4151 }
4152
4153 *str = q;
4154 *hint_opt = o;
4155 return 0;
4156 }
4157
4158 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4159 return 0 if successful. Otherwise return PARSE_FAIL. */
4160
4161 static int
4162 parse_bti_operand (char **str,
4163 const struct aarch64_name_value_pair ** hint_opt)
4164 {
4165 char *p, *q;
4166 const struct aarch64_name_value_pair *o;
4167
4168 p = q = *str;
4169 while (ISALPHA (*q))
4170 q++;
4171
4172 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4173 if (!o)
4174 {
4175 set_fatal_syntax_error
4176 ( _("unknown option to BTI"));
4177 return PARSE_FAIL;
4178 }
4179
4180 switch (o->value)
4181 {
4182 /* Valid BTI operands. */
4183 case HINT_OPD_C:
4184 case HINT_OPD_J:
4185 case HINT_OPD_JC:
4186 break;
4187
4188 default:
4189 set_syntax_error
4190 (_("unknown option to BTI"));
4191 return PARSE_FAIL;
4192 }
4193
4194 *str = q;
4195 *hint_opt = o;
4196 return 0;
4197 }
4198
4199 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4200 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4201 on failure. Format:
4202
4203 REG_TYPE.QUALIFIER
4204
4205 Side effect: Update STR with current parse position of success.
4206 */
4207
4208 static const reg_entry *
4209 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4210 aarch64_opnd_qualifier_t *qualifier)
4211 {
4212 char *q;
4213
4214 reg_entry *reg = parse_reg (str);
4215 if (reg != NULL && reg->type == reg_type)
4216 {
4217 if (!skip_past_char (str, '.'))
4218 {
4219 set_syntax_error (_("missing ZA tile element size separator"));
4220 return NULL;
4221 }
4222
4223 q = *str;
4224 switch (TOLOWER (*q))
4225 {
4226 case 'b':
4227 *qualifier = AARCH64_OPND_QLF_S_B;
4228 break;
4229 case 'h':
4230 *qualifier = AARCH64_OPND_QLF_S_H;
4231 break;
4232 case 's':
4233 *qualifier = AARCH64_OPND_QLF_S_S;
4234 break;
4235 case 'd':
4236 *qualifier = AARCH64_OPND_QLF_S_D;
4237 break;
4238 case 'q':
4239 *qualifier = AARCH64_OPND_QLF_S_Q;
4240 break;
4241 default:
4242 return NULL;
4243 }
4244 q++;
4245
4246 *str = q;
4247 return reg;
4248 }
4249
4250 return NULL;
4251 }
4252
/* Parse SME ZA tile encoded in <ZAda> assembler symbol.
   Function return tile QUALIFIER on success.

   Tiles are in example format: za[0-9]\.[bhsd]

   Function returns <ZAda> register number or PARSE_FAIL.
*/
static int
parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
{
  int regno;
  const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZA, qualifier);

  if (reg == NULL)
    return PARSE_FAIL;
  regno = reg->number;

  /* The element size determines how many ZA tiles exist, so validate the
     tile number against the size parsed above.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* For the 8-bit variant: the only tile is ZA0.  */
      if (regno != 0x00)
	{
	  set_syntax_error (_("invalid ZA tile register number, expected za0"));
	  return PARSE_FAIL;
	}
      break;
    case AARCH64_OPND_QLF_S_H:
      /* For the 16-bit variant: tiles ZA0-ZA1.  */
      if (regno > 0x01)
	{
	  set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
	  return PARSE_FAIL;
	}
      break;
    case AARCH64_OPND_QLF_S_S:
      if (regno > 0x03)
	{
	  /* For the 32-bit variant: is the name of the ZA tile ZA0-ZA3.  */
	  set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
	  return PARSE_FAIL;
	}
      break;
    case AARCH64_OPND_QLF_S_D:
      if (regno > 0x07)
	{
	  /* For the 64-bit variant: is the name of the ZA tile ZA0-ZA7 */
	  set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
	  return PARSE_FAIL;
	}
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
      return PARSE_FAIL;
    }

  return regno;
}
4309
4310 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4311
4312 #<imm>
4313 <imm>
4314
4315 Function return TRUE if immediate was found, or FALSE.
4316 */
4317 static bool
4318 parse_sme_immediate (char **str, int64_t *imm)
4319 {
4320 int64_t val;
4321 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4322 return false;
4323
4324 *imm = val;
4325 return true;
4326 }
4327
4328 /* Parse index with vector select register and immediate:
4329
4330 [<Wv>, <imm>]
4331 [<Wv>, #<imm>]
4332 where <Wv> is in W12-W15 range and # is optional for immediate.
4333
4334 Function performs extra check for mandatory immediate value if REQUIRE_IMM
4335 is set to true.
4336
4337 On success function returns TRUE and populated VECTOR_SELECT_REGISTER and
4338 IMM output.
4339 */
4340 static bool
4341 parse_sme_za_hv_tiles_operand_index (char **str,
4342 int *vector_select_register,
4343 int64_t *imm)
4344 {
4345 const reg_entry *reg;
4346
4347 if (!skip_past_char (str, '['))
4348 {
4349 set_syntax_error (_("expected '['"));
4350 return false;
4351 }
4352
4353 /* Vector select register W12-W15 encoded in the 2-bit Rv field. */
4354 reg = parse_reg (str);
4355 if (reg == NULL || reg->type != REG_TYPE_R_32
4356 || reg->number < 12 || reg->number > 15)
4357 {
4358 set_syntax_error (_("expected vector select register W12-W15"));
4359 return false;
4360 }
4361 *vector_select_register = reg->number;
4362
4363 if (!skip_past_char (str, ',')) /* Optional index offset immediate. */
4364 {
4365 set_syntax_error (_("expected ','"));
4366 return false;
4367 }
4368
4369 if (!parse_sme_immediate (str, imm))
4370 {
4371 set_syntax_error (_("index offset immediate expected"));
4372 return false;
4373 }
4374
4375 if (!skip_past_char (str, ']'))
4376 {
4377 set_syntax_error (_("expected ']'"));
4378 return false;
4379 }
4380
4381 return true;
4382 }
4383
/* Parse SME ZA horizontal or vertical vector access to tiles.
   Function extracts from STR to SLICE_INDICATOR <HV> horizontal (0) or
   vertical (1) ZA tile vector orientation. VECTOR_SELECT_REGISTER
   contains <Wv> select register and corresponding optional IMMEDIATE.
   In addition QUALIFIER is extracted.

   Field format examples:

   ZA0<HV>.B[<Wv>, #<imm>]
   <ZAn><HV>.H[<Wv>, #<imm>]
   <ZAn><HV>.S[<Wv>, #<imm>]
   <ZAn><HV>.D[<Wv>, #<imm>]
   <ZAn><HV>.Q[<Wv>, #<imm>]

   Function returns <ZAda> register number or PARSE_FAIL.
*/
static int
parse_sme_za_hv_tiles_operand (char **str,
			       enum sme_hv_slice *slice_indicator,
			       int *vector_select_register,
			       int *imm,
			       aarch64_opnd_qualifier_t *qualifier)
{
  char *qh, *qv;
  int regno;
  int regno_limit;
  int64_t imm_limit;
  int64_t imm_value;
  const reg_entry *reg;

  /* Try the horizontal (ZAnH) spelling first, then the vertical (ZAnV)
     one.  Each attempt parses on its own copy of the input pointer so a
     failed attempt consumes no input; *str is only advanced past the
     spelling that matched.  */
  qh = qv = *str;
  if ((reg = parse_reg_with_qual (&qh, REG_TYPE_ZAH, qualifier)) != NULL)
    {
      *slice_indicator = HV_horizontal;
      *str = qh;
    }
  else if ((reg = parse_reg_with_qual (&qv, REG_TYPE_ZAV, qualifier)) != NULL)
    {
      *slice_indicator = HV_vertical;
      *str = qv;
    }
  else
    return PARSE_FAIL;
  regno = reg->number;

  /* The element size fixes both the highest valid tile number and the
     largest valid slice index.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      regno_limit = 0;
      imm_limit = 15;
      break;
    case AARCH64_OPND_QLF_S_H:
      regno_limit = 1;
      imm_limit = 7;
      break;
    case AARCH64_OPND_QLF_S_S:
      regno_limit = 3;
      imm_limit = 3;
      break;
    case AARCH64_OPND_QLF_S_D:
      regno_limit = 7;
      imm_limit = 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      regno_limit = 15;
      imm_limit = 0;
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
      return PARSE_FAIL;
    }

  /* Check if destination register ZA tile vector is in range for given
     instruction variant.  */
  if (regno < 0 || regno > regno_limit)
    {
      set_syntax_error (_("ZA tile vector out of range"));
      return PARSE_FAIL;
    }

  if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
					    &imm_value))
    return PARSE_FAIL;

  /* Check if optional index offset is in the range for instruction
     variant.  */
  if (imm_value < 0 || imm_value > imm_limit)
    {
      set_syntax_error (_("index offset out of range"));
      return PARSE_FAIL;
    }

  *imm = imm_value;

  return regno;
}
4480
4481
4482 static int
4483 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4484 enum sme_hv_slice *slice_indicator,
4485 int *vector_select_register,
4486 int *imm,
4487 aarch64_opnd_qualifier_t *qualifier)
4488 {
4489 int regno;
4490
4491 if (!skip_past_char (str, '{'))
4492 {
4493 set_syntax_error (_("expected '{'"));
4494 return PARSE_FAIL;
4495 }
4496
4497 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4498 vector_select_register, imm,
4499 qualifier);
4500
4501 if (regno == PARSE_FAIL)
4502 return PARSE_FAIL;
4503
4504 if (!skip_past_char (str, '}'))
4505 {
4506 set_syntax_error (_("expected '}'"));
4507 return PARSE_FAIL;
4508 }
4509
4510 return regno;
4511 }
4512
/* Parse list of up to eight 64-bit element tile names separated by commas in
   SME's ZERO instruction:

     ZERO { <mask> }

   Function returns <mask>:

     an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
*/
static int
parse_sme_zero_mask(char **str)
{
  char *q;
  int mask;
  aarch64_opnd_qualifier_t qualifier;

  mask = 0x00;
  q = *str;
  do
    {
      const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA, &qualifier);
      if (reg)
	{
	  int regno = reg->number;
	  if (qualifier == AARCH64_OPND_QLF_S_B && regno == 0)
	    {
	      /* { ZA0.B } is assembled as all-ones immediate.  */
	      mask = 0xff;
	    }
	  else if (qualifier == AARCH64_OPND_QLF_S_H && regno < 2)
	    /* ZAn.H covers every second 64-bit tile: ZA0.H -> 0x55,
	       ZA1.H -> 0xaa.  */
	    mask |= 0x55 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_S && regno < 4)
	    /* ZAn.S covers every fourth 64-bit tile, i.e. mask bits n
	       and n+4.  */
	    mask |= 0x11 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_D && regno < 8)
	    /* ZAn.D covers exactly one 64-bit tile: bit n.  */
	    mask |= 0x01 << regno;
	  else
	    {
	      set_syntax_error (_("wrong ZA tile element format"));
	      return PARSE_FAIL;
	    }
	  continue;
	}
      else if (strncasecmp (q, "za", 2) == 0
	       && !ISALNUM (q[2]))
	{
	  /* { ZA } is assembled as all-ones immediate.  */
	  mask = 0xff;
	  q += 2;
	  continue;
	}
      else
	{
	  set_syntax_error (_("wrong ZA tile element format"));
	  return PARSE_FAIL;
	}
    }
  while (skip_past_char (&q, ','));

  *str = q;
  return mask;
}
4574
4575 /* Wraps in curly braces <mask> operand ZERO instruction:
4576
4577 ZERO { <mask> }
4578
4579 Function returns value of <mask> bit-field.
4580 */
4581 static int
4582 parse_sme_list_of_64bit_tiles (char **str)
4583 {
4584 int regno;
4585
4586 if (!skip_past_char (str, '{'))
4587 {
4588 set_syntax_error (_("expected '{'"));
4589 return PARSE_FAIL;
4590 }
4591
4592 /* Empty <mask> list is an all-zeros immediate. */
4593 if (!skip_past_char (str, '}'))
4594 {
4595 regno = parse_sme_zero_mask (str);
4596 if (regno == PARSE_FAIL)
4597 return PARSE_FAIL;
4598
4599 if (!skip_past_char (str, '}'))
4600 {
4601 set_syntax_error (_("expected '}'"));
4602 return PARSE_FAIL;
4603 }
4604 }
4605 else
4606 regno = 0x00;
4607
4608 return regno;
4609 }
4610
4611 /* Parse ZA array operand used in e.g. STR and LDR instruction.
4612 Operand format:
4613
4614 ZA[<Wv>, <imm>]
4615 ZA[<Wv>, #<imm>]
4616
4617 Function returns <Wv> or PARSE_FAIL.
4618 */
4619 static int
4620 parse_sme_za_array (char **str, int *imm)
4621 {
4622 char *p, *q;
4623 int regno;
4624 int64_t imm_value;
4625
4626 p = q = *str;
4627 while (ISALPHA (*q))
4628 q++;
4629
4630 if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
4631 {
4632 set_syntax_error (_("expected ZA array"));
4633 return PARSE_FAIL;
4634 }
4635
4636 if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
4637 return PARSE_FAIL;
4638
4639 if (imm_value < 0 || imm_value > 15)
4640 {
4641 set_syntax_error (_("offset out of range"));
4642 return PARSE_FAIL;
4643 }
4644
4645 *imm = imm_value;
4646 *str = q;
4647 return regno;
4648 }
4649
4650 /* Parse streaming mode operand for SMSTART and SMSTOP.
4651
4652 {SM | ZA}
4653
4654 Function returns 's' if SM or 'z' if ZM is parsed. Otherwise PARSE_FAIL.
4655 */
4656 static int
4657 parse_sme_sm_za (char **str)
4658 {
4659 char *p, *q;
4660
4661 p = q = *str;
4662 while (ISALPHA (*q))
4663 q++;
4664
4665 if ((q - p != 2)
4666 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4667 {
4668 set_syntax_error (_("expected SM or ZA operand"));
4669 return PARSE_FAIL;
4670 }
4671
4672 *str = q;
4673 return TOLOWER (p[0]);
4674 }
4675
4676 /* Parse the name of the source scalable predicate register, the index base
4677 register W12-W15 and the element index. Function performs element index
4678 limit checks as well as qualifier type checks.
4679
4680 <Pn>.<T>[<Wv>, <imm>]
4681 <Pn>.<T>[<Wv>, #<imm>]
4682
4683 On success function sets <Wv> to INDEX_BASE_REG, <T> to QUALIFIER and
4684 <imm> to IMM.
4685 Function returns <Pn>, or PARSE_FAIL.
4686 */
4687 static int
4688 parse_sme_pred_reg_with_index(char **str,
4689 int *index_base_reg,
4690 int *imm,
4691 aarch64_opnd_qualifier_t *qualifier)
4692 {
4693 int regno;
4694 int64_t imm_limit;
4695 int64_t imm_value;
4696 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_PN, qualifier);
4697
4698 if (reg == NULL)
4699 return PARSE_FAIL;
4700 regno = reg->number;
4701
4702 switch (*qualifier)
4703 {
4704 case AARCH64_OPND_QLF_S_B:
4705 imm_limit = 15;
4706 break;
4707 case AARCH64_OPND_QLF_S_H:
4708 imm_limit = 7;
4709 break;
4710 case AARCH64_OPND_QLF_S_S:
4711 imm_limit = 3;
4712 break;
4713 case AARCH64_OPND_QLF_S_D:
4714 imm_limit = 1;
4715 break;
4716 default:
4717 set_syntax_error (_("wrong predicate register element size, allowed b, h, s and d"));
4718 return PARSE_FAIL;
4719 }
4720
4721 if (! parse_sme_za_hv_tiles_operand_index (str, index_base_reg, &imm_value))
4722 return PARSE_FAIL;
4723
4724 if (imm_value < 0 || imm_value > imm_limit)
4725 {
4726 set_syntax_error (_("element index out of range for given variant"));
4727 return PARSE_FAIL;
4728 }
4729
4730 *imm = imm_value;
4731
4732 return regno;
4733 }
4734
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-NULL, *FLAGS receives the register's flag bits (zero for
   an implementation-defined S..._... name).  *STR is advanced past the
   consumed name only on success.  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, truncating at the
     buffer size; over-long names are rejected below.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the 16-bit MSR/MRS system-register
	     encoding: op0[15:14] op1[13:11] Cn[10:7] Cm[6:3] op2[2:0].  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* A known name: diagnose (but still accept) names the selected
	 processor does not support or that are deprecated.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4808
4809 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4810 for the option, or NULL. */
4811
4812 static const aarch64_sys_ins_reg *
4813 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4814 {
4815 char *p, *q;
4816 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4817 const aarch64_sys_ins_reg *o;
4818
4819 p = buf;
4820 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4821 if (p < buf + (sizeof (buf) - 1))
4822 *p++ = TOLOWER (*q);
4823 *p = '\0';
4824
4825 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4826 valid system register. This is enforced by construction of the hash
4827 table. */
4828 if (p - buf != q - *str)
4829 return NULL;
4830
4831 o = str_hash_find (sys_ins_regs, buf);
4832 if (!o)
4833 return NULL;
4834
4835 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4836 o->name, o->value, o->flags, 0))
4837 as_bad (_("selected processor does not support system register "
4838 "name '%s'"), buf);
4839 if (aarch64_sys_reg_deprecated_p (o->flags))
4840 as_warn (_("system register name '%s' is deprecated and may be "
4841 "removed in a future release"), buf);
4842
4843 *str = q;
4844 return o;
4845 }
4846 \f
/* Parsing-helper macros used by parse_operands.  Each consumes one
   syntactic element from the local variable STR and jumps to the
   enclosing `failure' label when the element is absent or malformed.  */

/* Consume the single character CHR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of type REGTYPE into VAL (with its type in RTYPE),
   or fail with the default error.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE, recording its
   number and qualifier in *INFO, or fail with the default error.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range checking, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and fail (fatally) unless
   MIN <= VAL <= MAX.  MIN and MAX are stringized into the message.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse one of the strings in ARRAY, storing its index in VAL, or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR and fail if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4898 \f
/* Encode the 12-bit immediate of an add/sub immediate instruction;
   the field occupies bits [21:10] of the instruction word.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned imm_shift = 10;
  return imm << imm_shift;
}
4905
/* Encode the shift-amount field of an add/sub immediate instruction;
   the field starts at bit 22 of the instruction word.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned shift_field_pos = 22;
  return cnt << shift_field_pos;
}
4912
4913
/* Encode the 21-bit immediate of an Adr instruction: the low two bits
   land in immlo and the rest in immhi.  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;		/* imm[1:0]  -> [30:29].  */
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;	/* imm[20:2] -> [23:5].  */
  return immlo | immhi;
}
4921
/* Encode the 16-bit immediate of a Move wide immediate instruction;
   the field starts at bit 5 of the instruction word.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned imm16_pos = 5;
  return imm << imm16_pos;
}
4928
/* Encode the 26-bit offset of an unconditional branch; the offset is
   masked to 26 bits and occupies the bottom of the instruction word.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t ofs_mask = (1u << 26) - 1;
  return ofs & ofs_mask;
}
4935
/* Encode the 19-bit offset of a conditional branch or compare & branch;
   the masked offset is placed at bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t ofs_mask = (1u << 19) - 1;
  return (ofs & ofs_mask) << 5;
}
4942
/* Encode the 19-bit offset of a load-literal instruction; the masked
   offset is placed at bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t ofs_mask = (1u << 19) - 1;
  return (ofs & ofs_mask) << 5;
}
4949
/* Encode the 14-bit offset of a test & branch instruction; the masked
   offset is placed at bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t ofs_mask = (1u << 14) - 1;
  return (ofs & ofs_mask) << 5;
}
4956
/* Encode the 16-bit immediate of svc/hvc/smc; the field starts at
   bit 5 of the instruction word.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned imm16_pos = 5;
  return imm << imm16_pos;
}
4963
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling the
   op bit (bit 30) of the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
4970
/* Force a MOVZ/MOVN-family opcode to its MOVZ form by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode | movz_bit;
}
4976
/* Force a MOVZ/MOVN-family opcode to its MOVN form by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode & ~movz_bit;
}
4982
4983 /* Overall per-instruction processing. */
4984
4985 /* We need to be able to fix up arbitrary expressions in some statements.
4986 This is so that we can handle symbols that are an arbitrary distance from
4987 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4988 which returns part of an address in a form which will be valid for
4989 a data instruction. We do this by pushing the expression into a symbol
4990 in the expr_section, and creating a fix for that. */
4991
4992 static fixS *
4993 fix_new_aarch64 (fragS * frag,
4994 int where,
4995 short int size,
4996 expressionS * exp,
4997 int pc_rel,
4998 int reloc)
4999 {
5000 fixS *new_fix;
5001
5002 switch (exp->X_op)
5003 {
5004 case O_constant:
5005 case O_symbol:
5006 case O_add:
5007 case O_subtract:
5008 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5009 break;
5010
5011 default:
5012 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5013 pc_rel, reloc);
5014 break;
5015 }
5016 return new_fix;
5017 }
5018 \f
/* Diagnostics on operands errors.  */

/* Non-zero to output a verbose error message by default (e.g. the
   suggested operand variants for AARCH64_OPDE_INVALID_VARIANT).
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;
5024
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.

   Human-readable names for enum aarch64_operand_error_kind, used by the
   DEBUG_TRACE diagnostics.  The table is indexed directly by the enum,
   so the entries must stay in the same order as the enum declaration in
   include/opcode/aarch64.h.  The AARCH64_OPDE_UNTIED_* kinds (handled
   by output_operand_error_record) were missing here, which made every
   later entry print the wrong name and let indexing with the last kinds
   read past the end of the array.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
5042
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enum values being declared in
     ascending order of severity; these asserts document (and, in a
     checking build, enforce) that ordering.  NOTE(review): the
     AARCH64_OPDE_UNTIED_* kinds are not covered by any assert here --
     confirm their position against include/opcode/aarch64.h.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
5066
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *end;

  /* Copy the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  end = mnemonic;
  while (is_part_of_name (*end))
    ++end;
  *end = '\0';

  /* Append '...' to the truncated long name.  */
  if (end - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5095
5096 static void
5097 reset_aarch64_instruction (aarch64_instruction *instruction)
5098 {
5099 memset (instruction, '\0', sizeof (aarch64_instruction));
5100 instruction->reloc.type = BFD_RELOC_UNUSED;
5101 }
5102
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* A single recorded operand error, tagged with the opcode (instruction
   template) it was recorded against; nodes form a singly-linked list.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Template this error applies to.  */
  aarch64_operand_error detail;		/* The error itself.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Head and tail of the list of per-opcode error records collected for
   the current assembly line.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled across assembly lines to avoid repeated
   heap allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5134
5135 /* Initialize the data structure that stores the operand mismatch
5136 information on assembling one line of the assembly code. */
5137 static void
5138 init_operand_error_report (void)
5139 {
5140 if (operand_error_report.head != NULL)
5141 {
5142 gas_assert (operand_error_report.tail != NULL);
5143 operand_error_report.tail->next = free_opnd_error_record_nodes;
5144 free_opnd_error_record_nodes = operand_error_report.head;
5145 operand_error_report.head = NULL;
5146 operand_error_report.tail = NULL;
5147 return;
5148 }
5149 gas_assert (operand_error_report.tail == NULL);
5150 }
5151
5152 /* Return TRUE if some operand error has been recorded during the
5153 parsing of the current assembly line using the opcode *OPCODE;
5154 otherwise return FALSE. */
5155 static inline bool
5156 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5157 {
5158 operand_error_record *record = operand_error_report.head;
5159 return record && record->opcode == opcode;
5160 }
5161
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* RECORD initially aliases the list head; when the head is already the
     record for OPCODE, the else-branch and the final assignment below
     operate on it directly.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Reached on both paths above: store the new detail into either the
     freshly-inserted record or the existing (superseded) head record.  */
  record->detail = new_record->detail;
}
5213
5214 static inline void
5215 record_operand_error_info (const aarch64_opcode *opcode,
5216 aarch64_operand_error *error_info)
5217 {
5218 operand_error_record record;
5219 record.opcode = opcode;
5220 record.detail = *error_info;
5221 add_operand_error_record (&record);
5222 }
5223
5224 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5225 error message *ERROR, for operand IDX (count from 0). */
5226
5227 static void
5228 record_operand_error (const aarch64_opcode *opcode, int idx,
5229 enum aarch64_operand_error_kind kind,
5230 const char* error)
5231 {
5232 aarch64_operand_error info;
5233 memset(&info, 0, sizeof (info));
5234 info.index = idx;
5235 info.kind = kind;
5236 info.error = error;
5237 info.non_fatal = false;
5238 record_operand_error_info (opcode, &info);
5239 }
5240
5241 static void
5242 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5243 enum aarch64_operand_error_kind kind,
5244 const char* error, const int *extra_data)
5245 {
5246 aarch64_operand_error info;
5247 info.index = idx;
5248 info.kind = kind;
5249 info.error = error;
5250 info.data[0].i = extra_data[0];
5251 info.data[1].i = extra_data[1];
5252 info.data[2].i = extra_data[2];
5253 info.non_fatal = false;
5254 record_operand_error_info (opcode, &info);
5255 }
5256
5257 static void
5258 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5259 const char* error, int lower_bound,
5260 int upper_bound)
5261 {
5262 int data[3] = {lower_bound, upper_bound, 0};
5263 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5264 error, data);
5265 }
5266
5267 /* Remove the operand error record for *OPCODE. */
5268 static void ATTRIBUTE_UNUSED
5269 remove_operand_error_record (const aarch64_opcode *opcode)
5270 {
5271 if (opcode_has_operand_error_p (opcode))
5272 {
5273 operand_error_record* record = operand_error_report.head;
5274 gas_assert (record != NULL && operand_error_report.tail != NULL);
5275 operand_error_report.head = record->next;
5276 record->next = free_opnd_error_record_nodes;
5277 free_opnd_error_record_nodes = record;
5278 if (operand_error_report.head == NULL)
5279 {
5280 gas_assert (operand_error_report.tail == record);
5281 operand_error_report.tail = NULL;
5282 }
5283 }
5284 }
5285
5286 /* Given the instruction in *INSTR, return the index of the best matched
5287 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5288
5289 Return -1 if there is no qualifier sequence; return the first match
5290 if there is multiple matches found. */
5291
5292 static int
5293 find_best_match (const aarch64_inst *instr,
5294 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
5295 {
5296 int i, num_opnds, max_num_matched, idx;
5297
5298 num_opnds = aarch64_num_of_operands (instr->opcode);
5299 if (num_opnds == 0)
5300 {
5301 DEBUG_TRACE ("no operand");
5302 return -1;
5303 }
5304
5305 max_num_matched = 0;
5306 idx = 0;
5307
5308 /* For each pattern. */
5309 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5310 {
5311 int j, num_matched;
5312 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
5313
5314 /* Most opcodes has much fewer patterns in the list. */
5315 if (empty_qualifier_sequence_p (qualifiers))
5316 {
5317 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
5318 break;
5319 }
5320
5321 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
5322 if (*qualifiers == instr->operands[j].qualifier)
5323 ++num_matched;
5324
5325 if (num_matched > max_num_matched)
5326 {
5327 max_num_matched = num_matched;
5328 idx = i;
5329 }
5330 }
5331
5332 DEBUG_TRACE ("return with %d", idx);
5333 return idx;
5334 }
5335
5336 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5337 corresponding operands in *INSTR. */
5338
5339 static inline void
5340 assign_qualifier_sequence (aarch64_inst *instr,
5341 const aarch64_opnd_qualifier_t *qualifiers)
5342 {
5343 int i = 0;
5344 int num_opnds = aarch64_num_of_operands (instr->opcode);
5345 gas_assert (num_opnds);
5346 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5347 instr->operands[i].qualifier = *qualifiers;
5348 }
5349
5350 /* Print operands for the diagnosis purpose. */
5351
5352 static void
5353 print_operands (char *buf, const aarch64_opcode *opcode,
5354 const aarch64_opnd_info *opnds)
5355 {
5356 int i;
5357
5358 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
5359 {
5360 char str[128];
5361
5362 /* We regard the opcode operand info more, however we also look into
5363 the inst->operands to support the disassembling of the optional
5364 operand.
5365 The two operand code should be the same in all cases, apart from
5366 when the operand can be optional. */
5367 if (opcode->operands[i] == AARCH64_OPND_NIL
5368 || opnds[i].type == AARCH64_OPND_NIL)
5369 break;
5370
5371 /* Generate the operand string in STR. */
5372 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
5373 NULL, cpu_variant);
5374
5375 /* Delimiter. */
5376 if (str[0] != '\0')
5377 strcat (buf, i == 0 ? " " : ", ");
5378
5379 /* Append the operand string. */
5380 strcat (buf, str);
5381 }
5382 }
5383
/* Send to stderr a string as information, prefixed with the current
   file name and line number when known.  */

static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file = as_where (&line);
  va_list args;

  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
5407
/* Output one operand error record.  STR is the assembly-line text quoted
   in the diagnostics; the message is issued as a warning for non-fatal
   errors and as an error otherwise.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX may be -1 (unknown operand); guard the operand-table access.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Route the message through as_warn or as_bad depending on whether
     the error is non-fatal.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g.  given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* The encoding is expected to fail again here -- we only need
	     the intermediate representation, not a valid encoding.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the permitted lower/upper bounds (see
	 record_operand_out_of_range_error).  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0].i == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5603
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.

   STR is the assembly-line text quoted in the diagnostics.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  /* KIND stays NIL only when filtering left no candidate at all.  */
  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5701 \f
5702 /* Write an AARCH64 instruction to buf - always little-endian. */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Emit the four instruction bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (i * 8)) & 0xff;
}
5712
/* Read a 32-bit instruction from BUF, stored little-endian.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  const unsigned char *p = (const unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Fold the bytes in from the most significant end so each byte
     lands in its little-endian position.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
5722
/* Emit the instruction currently held in the global INST to the output
   frag, creating a relocation fix-up for it if one is pending.
   NEW_INST, if non-NULL, is attached to the fix-up so the instruction
   can be re-encoded when the fix is applied.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve INSN_SIZE bytes in the current frag for this instruction.  */
  to = frag_more (INSN_SIZE);

  /* NOTE(review): presumably marks the frag as containing code for
     later per-frag processing -- confirm against tc_frag_data users.  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* GAS-internal fixups additionally carry the operand index
	     and flags so the fix-up code can finish the encoding.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
5756
5757 /* Link together opcodes of the same name. */
5758
struct templates
{
  /* One opcode entry for this mnemonic.  */
  const aarch64_opcode *opcode;
  /* Next opcode sharing the same mnemonic, or NULL.  */
  struct templates *next;
};

typedef struct templates templates;
5766
5767 static templates *
5768 lookup_mnemonic (const char *start, int len)
5769 {
5770 templates *templ = NULL;
5771
5772 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5773 return templ;
5774 }
5775
5776 /* Subroutine of md_assemble, responsible for looking up the primary
5777 opcode from the mnemonic the user wrote. BASE points to the beginning
5778 of the mnemonic, DOT points to the first '.' within the mnemonic
5779 (if any) and END points to the end of the mnemonic. */
5780
5781 static templates *
5782 opcode_lookup (char *base, char *dot, char *end)
5783 {
5784 const aarch64_cond *cond;
5785 char condname[16];
5786 int len;
5787
5788 if (dot == end)
5789 return 0;
5790
5791 inst.cond = COND_ALWAYS;
5792
5793 /* Handle a possible condition. */
5794 if (dot)
5795 {
5796 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5797 if (!cond)
5798 return 0;
5799 inst.cond = cond->value;
5800 len = dot - base;
5801 }
5802 else
5803 len = end - base;
5804
5805 if (inst.cond == COND_ALWAYS)
5806 {
5807 /* Look for unaffixed mnemonic. */
5808 return lookup_mnemonic (base, len);
5809 }
5810 else if (len <= 13)
5811 {
5812 /* append ".c" to mnemonic if conditional */
5813 memcpy (condname, base, len);
5814 memcpy (condname + len, ".c", 2);
5815 base = condname;
5816 len += 2;
5817 return lookup_mnemonic (base, len);
5818 }
5819
5820 return NULL;
5821 }
5822
5823 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5824 to a corresponding operand qualifier. */
5825
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base (narrowest-width) vector qualifier for each element type; the
     wider variants of a type follow it consecutively in the qualifier
     enumeration, which the offset arithmetic below relies on.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predicate suffixes map directly to their qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  NOTE(review): assumes the
	 AARCH64_OPND_QLF_S_* qualifiers are contiguous and ordered the
	 same as NT_b .. NT_q -- confirm against the qualifier enum.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit total vector sizes are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type. The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5897
5898 /* Process an optional operand that is found omitted from the assembly line.
5899 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5900 instruction's opcode entry while IDX is the index of this omitted operand.
5901 */
5902
static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The opcode table supplies a single default value for an omitted
     optional operand; how it is stored depends on the operand class.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element (register-lane) operands: only the register
       number is defaulted, not the index.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted scaled pattern behaves as "<pattern>, MUL #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate leaves no relocation pending.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Option-table operands: the default indexes the relevant table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5996
5997 /* Process the relocation type for move wide instructions.
5998 Return TRUE on success; otherwise return FALSE. */
5999
static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* Operand 0 is the destination register; W registers limit which
     relocation "groups" (G2/G3) are meaningful.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not change the sign interpretation of the value, so the
     signed and PC-relative group relocations are rejected for it.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Translate the relocation group (G0..G3) into the implicit LSL
     amount of the move-wide instruction.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Groups 2 and 3 address bits 32 and up, which do not exist in a
	 32-bit destination register.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6098
6099 /* A primitive log calculator. */
6100
/* A primitive log2 calculator for operand sizes of 1, 2, 4, 8 and 16
   bytes.  Asserts and returns (unsigned) -1 for any other size.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* ls[SIZE - 1] is log2 (SIZE); (unsigned char) -1 marks sizes that
     are not powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well: it would otherwise pass the range check
     and index ls[SIZE - 1] out of bounds (SIZE is unsigned, so
     SIZE - 1 wraps to UINT_MAX).  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6114
6115 /* Determine and return the real reloc type code for an instruction
6116 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
6117
static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows are indexed by pseudo reloc type (in the same order as the
     BFD_RELOC_AARCH64_*LDST*_LO12 pseudo relocs are declared, see the
     comment before the return), columns by log2 of the access size.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1's qualifier is still undecided, derive it from
     operand 0's so the access size can be determined.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS variants have no 128-bit relocation (the table rows end in
     BFD_RELOC_AARCH64_NONE), so cap the size accordingly.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6202
6203 /* Check whether a register list REGINFO is valid. The registers must be
6204 numbered in increasing order (modulo 32), in increments of one or two.
6205
6206 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
6207 increments of two.
6208
6209 Return FALSE if such a register list is invalid, otherwise return TRUE. */
6210
static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t nb_regs = 1 + (reginfo & 0x3);
  uint32_t stride = accept_alternate ? 2 : 1;
  uint32_t expected, i;

  /* The first register occupies bits [6:2]; each later register adds
     another 5-bit field above it.  */
  reginfo >>= 2;
  expected = reginfo & 0x1f;

  for (i = 1; i < nb_regs; i++)
    {
      reginfo >>= 5;
      /* Register numbers must advance by STRIDE, wrapping modulo 32.  */
      expected = (expected + stride) & 0x1f;
      if ((reginfo & 0x1f) != expected)
	return false;
    }

  return true;
}
6233
6234 /* Generic instruction operand parser. This does no encoding and no
6235 semantic validation; it merely squirrels values away in the inst
6236 structure. Returns TRUE or FALSE depending on whether the
6237 specified grammar matched. */
6238
6239 static bool
6240 parse_operands (char *str, const aarch64_opcode *opcode)
6241 {
6242 int i;
6243 char *backtrack_pos = 0;
6244 const enum aarch64_opnd *operands = opcode->operands;
6245 aarch64_reg_type imm_reg_type;
6246
6247 clear_error ();
6248 skip_whitespace (str);
6249
6250 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
6251 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6252 else
6253 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6254
6255 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6256 {
6257 int64_t val;
6258 const reg_entry *reg;
6259 int comma_skipped_p = 0;
6260 aarch64_reg_type rtype;
6261 struct vector_type_el vectype;
6262 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6263 aarch64_opnd_info *info = &inst.base.operands[i];
6264 aarch64_reg_type reg_type;
6265
6266 DEBUG_TRACE ("parse operand %d", i);
6267
6268 /* Assign the operand code. */
6269 info->type = operands[i];
6270
6271 if (optional_operand_p (opcode, i))
6272 {
6273 /* Remember where we are in case we need to backtrack. */
6274 gas_assert (!backtrack_pos);
6275 backtrack_pos = str;
6276 }
6277
6278 /* Expect comma between operands; the backtrack mechanism will take
6279 care of cases of omitted optional operand. */
6280 if (i > 0 && ! skip_past_char (&str, ','))
6281 {
6282 set_syntax_error (_("comma expected between operands"));
6283 goto failure;
6284 }
6285 else
6286 comma_skipped_p = 1;
6287
6288 switch (operands[i])
6289 {
6290 case AARCH64_OPND_Rd:
6291 case AARCH64_OPND_Rn:
6292 case AARCH64_OPND_Rm:
6293 case AARCH64_OPND_Rt:
6294 case AARCH64_OPND_Rt2:
6295 case AARCH64_OPND_Rs:
6296 case AARCH64_OPND_Ra:
6297 case AARCH64_OPND_Rt_LS64:
6298 case AARCH64_OPND_Rt_SYS:
6299 case AARCH64_OPND_PAIRREG:
6300 case AARCH64_OPND_SVE_Rm:
6301 po_int_reg_or_fail (REG_TYPE_R_Z);
6302
6303 /* In LS64 load/store instructions Rt register number must be even
6304 and <=22. */
6305 if (operands[i] == AARCH64_OPND_Rt_LS64)
6306 {
6307 /* We've already checked if this is valid register.
6308 This will check if register number (Rt) is not undefined for LS64
6309 instructions:
6310 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6311 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6312 {
6313 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6314 goto failure;
6315 }
6316 }
6317 break;
6318
6319 case AARCH64_OPND_Rd_SP:
6320 case AARCH64_OPND_Rn_SP:
6321 case AARCH64_OPND_Rt_SP:
6322 case AARCH64_OPND_SVE_Rn_SP:
6323 case AARCH64_OPND_Rm_SP:
6324 po_int_reg_or_fail (REG_TYPE_R_SP);
6325 break;
6326
6327 case AARCH64_OPND_Rm_EXT:
6328 case AARCH64_OPND_Rm_SFT:
6329 po_misc_or_fail (parse_shifter_operand
6330 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6331 ? SHIFTED_ARITH_IMM
6332 : SHIFTED_LOGIC_IMM)));
6333 if (!info->shifter.operator_present)
6334 {
6335 /* Default to LSL if not present. Libopcodes prefers shifter
6336 kind to be explicit. */
6337 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6338 info->shifter.kind = AARCH64_MOD_LSL;
6339 /* For Rm_EXT, libopcodes will carry out further check on whether
6340 or not stack pointer is used in the instruction (Recall that
6341 "the extend operator is not optional unless at least one of
6342 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6343 }
6344 break;
6345
6346 case AARCH64_OPND_Fd:
6347 case AARCH64_OPND_Fn:
6348 case AARCH64_OPND_Fm:
6349 case AARCH64_OPND_Fa:
6350 case AARCH64_OPND_Ft:
6351 case AARCH64_OPND_Ft2:
6352 case AARCH64_OPND_Sd:
6353 case AARCH64_OPND_Sn:
6354 case AARCH64_OPND_Sm:
6355 case AARCH64_OPND_SVE_VZn:
6356 case AARCH64_OPND_SVE_Vd:
6357 case AARCH64_OPND_SVE_Vm:
6358 case AARCH64_OPND_SVE_Vn:
6359 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
6360 if (val == PARSE_FAIL)
6361 {
6362 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6363 goto failure;
6364 }
6365 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
6366
6367 info->reg.regno = val;
6368 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
6369 break;
6370
6371 case AARCH64_OPND_SVE_Pd:
6372 case AARCH64_OPND_SVE_Pg3:
6373 case AARCH64_OPND_SVE_Pg4_5:
6374 case AARCH64_OPND_SVE_Pg4_10:
6375 case AARCH64_OPND_SVE_Pg4_16:
6376 case AARCH64_OPND_SVE_Pm:
6377 case AARCH64_OPND_SVE_Pn:
6378 case AARCH64_OPND_SVE_Pt:
6379 case AARCH64_OPND_SME_Pm:
6380 reg_type = REG_TYPE_PN;
6381 goto vector_reg;
6382
6383 case AARCH64_OPND_SVE_Za_5:
6384 case AARCH64_OPND_SVE_Za_16:
6385 case AARCH64_OPND_SVE_Zd:
6386 case AARCH64_OPND_SVE_Zm_5:
6387 case AARCH64_OPND_SVE_Zm_16:
6388 case AARCH64_OPND_SVE_Zn:
6389 case AARCH64_OPND_SVE_Zt:
6390 reg_type = REG_TYPE_ZN;
6391 goto vector_reg;
6392
6393 case AARCH64_OPND_Va:
6394 case AARCH64_OPND_Vd:
6395 case AARCH64_OPND_Vn:
6396 case AARCH64_OPND_Vm:
6397 reg_type = REG_TYPE_VN;
6398 vector_reg:
6399 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6400 if (val == PARSE_FAIL)
6401 {
6402 first_error (_(get_reg_expected_msg (reg_type)));
6403 goto failure;
6404 }
6405 if (vectype.defined & NTA_HASINDEX)
6406 goto failure;
6407
6408 info->reg.regno = val;
6409 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6410 && vectype.type == NT_invtype)
6411 /* Unqualified Pn and Zn registers are allowed in certain
6412 contexts. Rely on F_STRICT qualifier checking to catch
6413 invalid uses. */
6414 info->qualifier = AARCH64_OPND_QLF_NIL;
6415 else
6416 {
6417 info->qualifier = vectype_to_qualifier (&vectype);
6418 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6419 goto failure;
6420 }
6421 break;
6422
6423 case AARCH64_OPND_VdD1:
6424 case AARCH64_OPND_VnD1:
6425 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
6426 if (val == PARSE_FAIL)
6427 {
6428 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6429 goto failure;
6430 }
6431 if (vectype.type != NT_d || vectype.index != 1)
6432 {
6433 set_fatal_syntax_error
6434 (_("the top half of a 128-bit FP/SIMD register is expected"));
6435 goto failure;
6436 }
6437 info->reg.regno = val;
6438 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6439 here; it is correct for the purpose of encoding/decoding since
6440 only the register number is explicitly encoded in the related
6441 instructions, although this appears a bit hacky. */
6442 info->qualifier = AARCH64_OPND_QLF_S_D;
6443 break;
6444
6445 case AARCH64_OPND_SVE_Zm3_INDEX:
6446 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6447 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6448 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6449 case AARCH64_OPND_SVE_Zm4_INDEX:
6450 case AARCH64_OPND_SVE_Zn_INDEX:
6451 reg_type = REG_TYPE_ZN;
6452 goto vector_reg_index;
6453
6454 case AARCH64_OPND_Ed:
6455 case AARCH64_OPND_En:
6456 case AARCH64_OPND_Em:
6457 case AARCH64_OPND_Em16:
6458 case AARCH64_OPND_SM3_IMM2:
6459 reg_type = REG_TYPE_VN;
6460 vector_reg_index:
6461 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6462 if (val == PARSE_FAIL)
6463 {
6464 first_error (_(get_reg_expected_msg (reg_type)));
6465 goto failure;
6466 }
6467 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6468 goto failure;
6469
6470 info->reglane.regno = val;
6471 info->reglane.index = vectype.index;
6472 info->qualifier = vectype_to_qualifier (&vectype);
6473 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6474 goto failure;
6475 break;
6476
6477 case AARCH64_OPND_SVE_ZnxN:
6478 case AARCH64_OPND_SVE_ZtxN:
6479 reg_type = REG_TYPE_ZN;
6480 goto vector_reg_list;
6481
6482 case AARCH64_OPND_LVn:
6483 case AARCH64_OPND_LVt:
6484 case AARCH64_OPND_LVt_AL:
6485 case AARCH64_OPND_LEt:
6486 reg_type = REG_TYPE_VN;
6487 vector_reg_list:
6488 if (reg_type == REG_TYPE_ZN
6489 && get_opcode_dependent_value (opcode) == 1
6490 && *str != '{')
6491 {
6492 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6493 if (val == PARSE_FAIL)
6494 {
6495 first_error (_(get_reg_expected_msg (reg_type)));
6496 goto failure;
6497 }
6498 info->reglist.first_regno = val;
6499 info->reglist.num_regs = 1;
6500 }
6501 else
6502 {
6503 val = parse_vector_reg_list (&str, reg_type, &vectype);
6504 if (val == PARSE_FAIL)
6505 goto failure;
6506
6507 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6508 {
6509 set_fatal_syntax_error (_("invalid register list"));
6510 goto failure;
6511 }
6512
6513 if (vectype.width != 0 && *str != ',')
6514 {
6515 set_fatal_syntax_error
6516 (_("expected element type rather than vector type"));
6517 goto failure;
6518 }
6519
6520 info->reglist.first_regno = (val >> 2) & 0x1f;
6521 info->reglist.num_regs = (val & 0x3) + 1;
6522 }
6523 if (operands[i] == AARCH64_OPND_LEt)
6524 {
6525 if (!(vectype.defined & NTA_HASINDEX))
6526 goto failure;
6527 info->reglist.has_index = 1;
6528 info->reglist.index = vectype.index;
6529 }
6530 else
6531 {
6532 if (vectype.defined & NTA_HASINDEX)
6533 goto failure;
6534 if (!(vectype.defined & NTA_HASTYPE))
6535 {
6536 if (reg_type == REG_TYPE_ZN)
6537 set_fatal_syntax_error (_("missing type suffix"));
6538 goto failure;
6539 }
6540 }
6541 info->qualifier = vectype_to_qualifier (&vectype);
6542 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6543 goto failure;
6544 break;
6545
6546 case AARCH64_OPND_CRn:
6547 case AARCH64_OPND_CRm:
6548 {
6549 char prefix = *(str++);
6550 if (prefix != 'c' && prefix != 'C')
6551 goto failure;
6552
6553 po_imm_nc_or_fail ();
6554 if (val > 15)
6555 {
6556 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6557 goto failure;
6558 }
6559 info->qualifier = AARCH64_OPND_QLF_CR;
6560 info->imm.value = val;
6561 break;
6562 }
6563
6564 case AARCH64_OPND_SHLL_IMM:
6565 case AARCH64_OPND_IMM_VLSR:
6566 po_imm_or_fail (1, 64);
6567 info->imm.value = val;
6568 break;
6569
6570 case AARCH64_OPND_CCMP_IMM:
6571 case AARCH64_OPND_SIMM5:
6572 case AARCH64_OPND_FBITS:
6573 case AARCH64_OPND_TME_UIMM16:
6574 case AARCH64_OPND_UIMM4:
6575 case AARCH64_OPND_UIMM4_ADDG:
6576 case AARCH64_OPND_UIMM10:
6577 case AARCH64_OPND_UIMM3_OP1:
6578 case AARCH64_OPND_UIMM3_OP2:
6579 case AARCH64_OPND_IMM_VLSL:
6580 case AARCH64_OPND_IMM:
6581 case AARCH64_OPND_IMM_2:
6582 case AARCH64_OPND_WIDTH:
6583 case AARCH64_OPND_SVE_INV_LIMM:
6584 case AARCH64_OPND_SVE_LIMM:
6585 case AARCH64_OPND_SVE_LIMM_MOV:
6586 case AARCH64_OPND_SVE_SHLIMM_PRED:
6587 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6588 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6589 case AARCH64_OPND_SVE_SHRIMM_PRED:
6590 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6591 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6592 case AARCH64_OPND_SVE_SIMM5:
6593 case AARCH64_OPND_SVE_SIMM5B:
6594 case AARCH64_OPND_SVE_SIMM6:
6595 case AARCH64_OPND_SVE_SIMM8:
6596 case AARCH64_OPND_SVE_UIMM3:
6597 case AARCH64_OPND_SVE_UIMM7:
6598 case AARCH64_OPND_SVE_UIMM8:
6599 case AARCH64_OPND_SVE_UIMM8_53:
6600 case AARCH64_OPND_IMM_ROT1:
6601 case AARCH64_OPND_IMM_ROT2:
6602 case AARCH64_OPND_IMM_ROT3:
6603 case AARCH64_OPND_SVE_IMM_ROT1:
6604 case AARCH64_OPND_SVE_IMM_ROT2:
6605 case AARCH64_OPND_SVE_IMM_ROT3:
6606 po_imm_nc_or_fail ();
6607 info->imm.value = val;
6608 break;
6609
6610 case AARCH64_OPND_SVE_AIMM:
6611 case AARCH64_OPND_SVE_ASIMM:
6612 po_imm_nc_or_fail ();
6613 info->imm.value = val;
6614 skip_whitespace (str);
6615 if (skip_past_comma (&str))
6616 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6617 else
6618 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6619 break;
6620
6621 case AARCH64_OPND_SVE_PATTERN:
6622 po_enum_or_fail (aarch64_sve_pattern_array);
6623 info->imm.value = val;
6624 break;
6625
6626 case AARCH64_OPND_SVE_PATTERN_SCALED:
6627 po_enum_or_fail (aarch64_sve_pattern_array);
6628 info->imm.value = val;
6629 if (skip_past_comma (&str)
6630 && !parse_shift (&str, info, SHIFTED_MUL))
6631 goto failure;
6632 if (!info->shifter.operator_present)
6633 {
6634 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6635 info->shifter.kind = AARCH64_MOD_MUL;
6636 info->shifter.amount = 1;
6637 }
6638 break;
6639
6640 case AARCH64_OPND_SVE_PRFOP:
6641 po_enum_or_fail (aarch64_sve_prfop_array);
6642 info->imm.value = val;
6643 break;
6644
6645 case AARCH64_OPND_UIMM7:
6646 po_imm_or_fail (0, 127);
6647 info->imm.value = val;
6648 break;
6649
6650 case AARCH64_OPND_IDX:
6651 case AARCH64_OPND_MASK:
6652 case AARCH64_OPND_BIT_NUM:
6653 case AARCH64_OPND_IMMR:
6654 case AARCH64_OPND_IMMS:
6655 po_imm_or_fail (0, 63);
6656 info->imm.value = val;
6657 break;
6658
6659 case AARCH64_OPND_IMM0:
6660 po_imm_nc_or_fail ();
6661 if (val != 0)
6662 {
6663 set_fatal_syntax_error (_("immediate zero expected"));
6664 goto failure;
6665 }
6666 info->imm.value = 0;
6667 break;
6668
6669 case AARCH64_OPND_FPIMM0:
6670 {
6671 int qfloat;
6672 bool res1 = false, res2 = false;
6673 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6674 it is probably not worth the effort to support it. */
6675 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6676 imm_reg_type))
6677 && (error_p ()
6678 || !(res2 = parse_constant_immediate (&str, &val,
6679 imm_reg_type))))
6680 goto failure;
6681 if ((res1 && qfloat == 0) || (res2 && val == 0))
6682 {
6683 info->imm.value = 0;
6684 info->imm.is_fp = 1;
6685 break;
6686 }
6687 set_fatal_syntax_error (_("immediate zero expected"));
6688 goto failure;
6689 }
6690
6691 case AARCH64_OPND_IMM_MOV:
6692 {
6693 char *saved = str;
6694 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6695 reg_name_p (str, REG_TYPE_VN))
6696 goto failure;
6697 str = saved;
6698 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6699 GE_OPT_PREFIX, REJECT_ABSENT,
6700 NORMAL_RESOLUTION));
6701 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6702 later. fix_mov_imm_insn will try to determine a machine
6703 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6704 message if the immediate cannot be moved by a single
6705 instruction. */
6706 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6707 inst.base.operands[i].skip = 1;
6708 }
6709 break;
6710
6711 case AARCH64_OPND_SIMD_IMM:
6712 case AARCH64_OPND_SIMD_IMM_SFT:
6713 if (! parse_big_immediate (&str, &val, imm_reg_type))
6714 goto failure;
6715 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6716 /* addr_off_p */ 0,
6717 /* need_libopcodes_p */ 1,
6718 /* skip_p */ 1);
6719 /* Parse shift.
6720 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6721 shift, we don't check it here; we leave the checking to
6722 the libopcodes (operand_general_constraint_met_p). By
6723 doing this, we achieve better diagnostics. */
6724 if (skip_past_comma (&str)
6725 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6726 goto failure;
6727 if (!info->shifter.operator_present
6728 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6729 {
6730 /* Default to LSL if not present. Libopcodes prefers shifter
6731 kind to be explicit. */
6732 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6733 info->shifter.kind = AARCH64_MOD_LSL;
6734 }
6735 break;
6736
6737 case AARCH64_OPND_FPIMM:
6738 case AARCH64_OPND_SIMD_FPIMM:
6739 case AARCH64_OPND_SVE_FPIMM8:
6740 {
6741 int qfloat;
6742 bool dp_p;
6743
6744 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6745 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6746 || !aarch64_imm_float_p (qfloat))
6747 {
6748 if (!error_p ())
6749 set_fatal_syntax_error (_("invalid floating-point"
6750 " constant"));
6751 goto failure;
6752 }
6753 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6754 inst.base.operands[i].imm.is_fp = 1;
6755 }
6756 break;
6757
6758 case AARCH64_OPND_SVE_I1_HALF_ONE:
6759 case AARCH64_OPND_SVE_I1_HALF_TWO:
6760 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6761 {
6762 int qfloat;
6763 bool dp_p;
6764
6765 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6766 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6767 {
6768 if (!error_p ())
6769 set_fatal_syntax_error (_("invalid floating-point"
6770 " constant"));
6771 goto failure;
6772 }
6773 inst.base.operands[i].imm.value = qfloat;
6774 inst.base.operands[i].imm.is_fp = 1;
6775 }
6776 break;
6777
6778 case AARCH64_OPND_LIMM:
6779 po_misc_or_fail (parse_shifter_operand (&str, info,
6780 SHIFTED_LOGIC_IMM));
6781 if (info->shifter.operator_present)
6782 {
6783 set_fatal_syntax_error
6784 (_("shift not allowed for bitmask immediate"));
6785 goto failure;
6786 }
6787 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6788 /* addr_off_p */ 0,
6789 /* need_libopcodes_p */ 1,
6790 /* skip_p */ 1);
6791 break;
6792
6793 case AARCH64_OPND_AIMM:
6794 if (opcode->op == OP_ADD)
6795 /* ADD may have relocation types. */
6796 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6797 SHIFTED_ARITH_IMM));
6798 else
6799 po_misc_or_fail (parse_shifter_operand (&str, info,
6800 SHIFTED_ARITH_IMM));
6801 switch (inst.reloc.type)
6802 {
6803 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6804 info->shifter.amount = 12;
6805 break;
6806 case BFD_RELOC_UNUSED:
6807 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6808 if (info->shifter.kind != AARCH64_MOD_NONE)
6809 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6810 inst.reloc.pc_rel = 0;
6811 break;
6812 default:
6813 break;
6814 }
6815 info->imm.value = 0;
6816 if (!info->shifter.operator_present)
6817 {
6818 /* Default to LSL if not present. Libopcodes prefers shifter
6819 kind to be explicit. */
6820 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6821 info->shifter.kind = AARCH64_MOD_LSL;
6822 }
6823 break;
6824
6825 case AARCH64_OPND_HALF:
6826 {
6827 /* #<imm16> or relocation. */
6828 int internal_fixup_p;
6829 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6830 if (internal_fixup_p)
6831 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6832 skip_whitespace (str);
6833 if (skip_past_comma (&str))
6834 {
6835 /* {, LSL #<shift>} */
6836 if (! aarch64_gas_internal_fixup_p ())
6837 {
6838 set_fatal_syntax_error (_("can't mix relocation modifier "
6839 "with explicit shift"));
6840 goto failure;
6841 }
6842 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6843 }
6844 else
6845 inst.base.operands[i].shifter.amount = 0;
6846 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6847 inst.base.operands[i].imm.value = 0;
6848 if (! process_movw_reloc_info ())
6849 goto failure;
6850 }
6851 break;
6852
6853 case AARCH64_OPND_EXCEPTION:
6854 case AARCH64_OPND_UNDEFINED:
6855 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6856 imm_reg_type));
6857 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6858 /* addr_off_p */ 0,
6859 /* need_libopcodes_p */ 0,
6860 /* skip_p */ 1);
6861 break;
6862
6863 case AARCH64_OPND_NZCV:
6864 {
6865 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6866 if (nzcv != NULL)
6867 {
6868 str += 4;
6869 info->imm.value = nzcv->value;
6870 break;
6871 }
6872 po_imm_or_fail (0, 15);
6873 info->imm.value = val;
6874 }
6875 break;
6876
6877 case AARCH64_OPND_COND:
6878 case AARCH64_OPND_COND1:
6879 {
6880 char *start = str;
6881 do
6882 str++;
6883 while (ISALPHA (*str));
6884 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6885 if (info->cond == NULL)
6886 {
6887 set_syntax_error (_("invalid condition"));
6888 goto failure;
6889 }
6890 else if (operands[i] == AARCH64_OPND_COND1
6891 && (info->cond->value & 0xe) == 0xe)
6892 {
6893 /* Do not allow AL or NV. */
6894 set_default_error ();
6895 goto failure;
6896 }
6897 }
6898 break;
6899
6900 case AARCH64_OPND_ADDR_ADRP:
6901 po_misc_or_fail (parse_adrp (&str));
6902 /* Clear the value as operand needs to be relocated. */
6903 info->imm.value = 0;
6904 break;
6905
6906 case AARCH64_OPND_ADDR_PCREL14:
6907 case AARCH64_OPND_ADDR_PCREL19:
6908 case AARCH64_OPND_ADDR_PCREL21:
6909 case AARCH64_OPND_ADDR_PCREL26:
6910 po_misc_or_fail (parse_address (&str, info));
6911 if (!info->addr.pcrel)
6912 {
6913 set_syntax_error (_("invalid pc-relative address"));
6914 goto failure;
6915 }
6916 if (inst.gen_lit_pool
6917 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6918 {
6919 /* Only permit "=value" in the literal load instructions.
6920 The literal will be generated by programmer_friendly_fixup. */
6921 set_syntax_error (_("invalid use of \"=immediate\""));
6922 goto failure;
6923 }
6924 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6925 {
6926 set_syntax_error (_("unrecognized relocation suffix"));
6927 goto failure;
6928 }
6929 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6930 {
6931 info->imm.value = inst.reloc.exp.X_add_number;
6932 inst.reloc.type = BFD_RELOC_UNUSED;
6933 }
6934 else
6935 {
6936 info->imm.value = 0;
6937 if (inst.reloc.type == BFD_RELOC_UNUSED)
6938 switch (opcode->iclass)
6939 {
6940 case compbranch:
6941 case condbranch:
6942 /* e.g. CBZ or B.COND */
6943 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6944 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6945 break;
6946 case testbranch:
6947 /* e.g. TBZ */
6948 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6949 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6950 break;
6951 case branch_imm:
6952 /* e.g. B or BL */
6953 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6954 inst.reloc.type =
6955 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6956 : BFD_RELOC_AARCH64_JUMP26;
6957 break;
6958 case loadlit:
6959 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6960 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6961 break;
6962 case pcreladdr:
6963 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6964 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6965 break;
6966 default:
6967 gas_assert (0);
6968 abort ();
6969 }
6970 inst.reloc.pc_rel = 1;
6971 }
6972 break;
6973
6974 case AARCH64_OPND_ADDR_SIMPLE:
6975 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6976 {
6977 /* [<Xn|SP>{, #<simm>}] */
6978 char *start = str;
6979 /* First use the normal address-parsing routines, to get
6980 the usual syntax errors. */
6981 po_misc_or_fail (parse_address (&str, info));
6982 if (info->addr.pcrel || info->addr.offset.is_reg
6983 || !info->addr.preind || info->addr.postind
6984 || info->addr.writeback)
6985 {
6986 set_syntax_error (_("invalid addressing mode"));
6987 goto failure;
6988 }
6989
6990 /* Then retry, matching the specific syntax of these addresses. */
6991 str = start;
6992 po_char_or_fail ('[');
6993 po_reg_or_fail (REG_TYPE_R64_SP);
6994 /* Accept optional ", #0". */
6995 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6996 && skip_past_char (&str, ','))
6997 {
6998 skip_past_char (&str, '#');
6999 if (! skip_past_char (&str, '0'))
7000 {
7001 set_fatal_syntax_error
7002 (_("the optional immediate offset can only be 0"));
7003 goto failure;
7004 }
7005 }
7006 po_char_or_fail (']');
7007 break;
7008 }
7009
7010 case AARCH64_OPND_ADDR_REGOFF:
7011 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7012 po_misc_or_fail (parse_address (&str, info));
7013 regoff_addr:
7014 if (info->addr.pcrel || !info->addr.offset.is_reg
7015 || !info->addr.preind || info->addr.postind
7016 || info->addr.writeback)
7017 {
7018 set_syntax_error (_("invalid addressing mode"));
7019 goto failure;
7020 }
7021 if (!info->shifter.operator_present)
7022 {
7023 /* Default to LSL if not present. Libopcodes prefers shifter
7024 kind to be explicit. */
7025 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7026 info->shifter.kind = AARCH64_MOD_LSL;
7027 }
7028 /* Qualifier to be deduced by libopcodes. */
7029 break;
7030
7031 case AARCH64_OPND_ADDR_SIMM7:
7032 po_misc_or_fail (parse_address (&str, info));
7033 if (info->addr.pcrel || info->addr.offset.is_reg
7034 || (!info->addr.preind && !info->addr.postind))
7035 {
7036 set_syntax_error (_("invalid addressing mode"));
7037 goto failure;
7038 }
7039 if (inst.reloc.type != BFD_RELOC_UNUSED)
7040 {
7041 set_syntax_error (_("relocation not allowed"));
7042 goto failure;
7043 }
7044 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7045 /* addr_off_p */ 1,
7046 /* need_libopcodes_p */ 1,
7047 /* skip_p */ 0);
7048 break;
7049
7050 case AARCH64_OPND_ADDR_SIMM9:
7051 case AARCH64_OPND_ADDR_SIMM9_2:
7052 case AARCH64_OPND_ADDR_SIMM11:
7053 case AARCH64_OPND_ADDR_SIMM13:
7054 po_misc_or_fail (parse_address (&str, info));
7055 if (info->addr.pcrel || info->addr.offset.is_reg
7056 || (!info->addr.preind && !info->addr.postind)
7057 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7058 && info->addr.writeback))
7059 {
7060 set_syntax_error (_("invalid addressing mode"));
7061 goto failure;
7062 }
7063 if (inst.reloc.type != BFD_RELOC_UNUSED)
7064 {
7065 set_syntax_error (_("relocation not allowed"));
7066 goto failure;
7067 }
7068 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7069 /* addr_off_p */ 1,
7070 /* need_libopcodes_p */ 1,
7071 /* skip_p */ 0);
7072 break;
7073
7074 case AARCH64_OPND_ADDR_SIMM10:
7075 case AARCH64_OPND_ADDR_OFFSET:
7076 po_misc_or_fail (parse_address (&str, info));
7077 if (info->addr.pcrel || info->addr.offset.is_reg
7078 || !info->addr.preind || info->addr.postind)
7079 {
7080 set_syntax_error (_("invalid addressing mode"));
7081 goto failure;
7082 }
7083 if (inst.reloc.type != BFD_RELOC_UNUSED)
7084 {
7085 set_syntax_error (_("relocation not allowed"));
7086 goto failure;
7087 }
7088 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7089 /* addr_off_p */ 1,
7090 /* need_libopcodes_p */ 1,
7091 /* skip_p */ 0);
7092 break;
7093
7094 case AARCH64_OPND_ADDR_UIMM12:
7095 po_misc_or_fail (parse_address (&str, info));
7096 if (info->addr.pcrel || info->addr.offset.is_reg
7097 || !info->addr.preind || info->addr.writeback)
7098 {
7099 set_syntax_error (_("invalid addressing mode"));
7100 goto failure;
7101 }
7102 if (inst.reloc.type == BFD_RELOC_UNUSED)
7103 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7104 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7105 || (inst.reloc.type
7106 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7107 || (inst.reloc.type
7108 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7109 || (inst.reloc.type
7110 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7111 || (inst.reloc.type
7112 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7113 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7114 /* Leave qualifier to be determined by libopcodes. */
7115 break;
7116
7117 case AARCH64_OPND_SIMD_ADDR_POST:
7118 /* [<Xn|SP>], <Xm|#<amount>> */
7119 po_misc_or_fail (parse_address (&str, info));
7120 if (!info->addr.postind || !info->addr.writeback)
7121 {
7122 set_syntax_error (_("invalid addressing mode"));
7123 goto failure;
7124 }
7125 if (!info->addr.offset.is_reg)
7126 {
7127 if (inst.reloc.exp.X_op == O_constant)
7128 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7129 else
7130 {
7131 set_fatal_syntax_error
7132 (_("writeback value must be an immediate constant"));
7133 goto failure;
7134 }
7135 }
7136 /* No qualifier. */
7137 break;
7138
7139 case AARCH64_OPND_SME_SM_ZA:
7140 /* { SM | ZA } */
7141 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7142 {
7143 set_syntax_error (_("unknown or missing PSTATE field name"));
7144 goto failure;
7145 }
7146 info->reg.regno = val;
7147 break;
7148
7149 case AARCH64_OPND_SME_PnT_Wm_imm:
7150 /* <Pn>.<T>[<Wm>, #<imm>] */
7151 {
7152 int index_base_reg;
7153 int imm;
7154 val = parse_sme_pred_reg_with_index (&str,
7155 &index_base_reg,
7156 &imm,
7157 &qualifier);
7158 if (val == PARSE_FAIL)
7159 goto failure;
7160
7161 info->za_tile_vector.regno = val;
7162 info->za_tile_vector.index.regno = index_base_reg;
7163 info->za_tile_vector.index.imm = imm;
7164 info->qualifier = qualifier;
7165 break;
7166 }
7167
7168 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7169 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7170 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7171 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7172 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7173 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7174 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7175 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7176 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7177 case AARCH64_OPND_SVE_ADDR_RI_U6:
7178 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7179 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7180 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7181 /* [X<n>{, #imm, MUL VL}]
7182 [X<n>{, #imm}]
7183 but recognizing SVE registers. */
7184 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7185 &offset_qualifier));
7186 if (base_qualifier != AARCH64_OPND_QLF_X)
7187 {
7188 set_syntax_error (_("invalid addressing mode"));
7189 goto failure;
7190 }
7191 sve_regimm:
7192 if (info->addr.pcrel || info->addr.offset.is_reg
7193 || !info->addr.preind || info->addr.writeback)
7194 {
7195 set_syntax_error (_("invalid addressing mode"));
7196 goto failure;
7197 }
7198 if (inst.reloc.type != BFD_RELOC_UNUSED
7199 || inst.reloc.exp.X_op != O_constant)
7200 {
7201 /* Make sure this has priority over
7202 "invalid addressing mode". */
7203 set_fatal_syntax_error (_("constant offset required"));
7204 goto failure;
7205 }
7206 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7207 break;
7208
7209 case AARCH64_OPND_SVE_ADDR_R:
7210 /* [<Xn|SP>{, <R><m>}]
7211 but recognizing SVE registers. */
7212 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7213 &offset_qualifier));
7214 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7215 {
7216 offset_qualifier = AARCH64_OPND_QLF_X;
7217 info->addr.offset.is_reg = 1;
7218 info->addr.offset.regno = 31;
7219 }
7220 else if (base_qualifier != AARCH64_OPND_QLF_X
7221 || offset_qualifier != AARCH64_OPND_QLF_X)
7222 {
7223 set_syntax_error (_("invalid addressing mode"));
7224 goto failure;
7225 }
7226 goto regoff_addr;
7227
7228 case AARCH64_OPND_SVE_ADDR_RR:
7229 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7230 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7231 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7232 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7233 case AARCH64_OPND_SVE_ADDR_RX:
7234 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7235 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7236 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7237 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7238 but recognizing SVE registers. */
7239 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7240 &offset_qualifier));
7241 if (base_qualifier != AARCH64_OPND_QLF_X
7242 || offset_qualifier != AARCH64_OPND_QLF_X)
7243 {
7244 set_syntax_error (_("invalid addressing mode"));
7245 goto failure;
7246 }
7247 goto regoff_addr;
7248
7249 case AARCH64_OPND_SVE_ADDR_RZ:
7250 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7251 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7252 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7253 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7254 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7255 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7256 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7257 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7258 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7259 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7260 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7261 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7262 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7263 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7264 &offset_qualifier));
7265 if (base_qualifier != AARCH64_OPND_QLF_X
7266 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7267 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7268 {
7269 set_syntax_error (_("invalid addressing mode"));
7270 goto failure;
7271 }
7272 info->qualifier = offset_qualifier;
7273 goto regoff_addr;
7274
7275 case AARCH64_OPND_SVE_ADDR_ZX:
7276 /* [Zn.<T>{, <Xm>}]. */
7277 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7278 &offset_qualifier));
7279 /* Things to check:
7280 base_qualifier either S_S or S_D
7281 offset_qualifier must be X
7282 */
7283 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7284 && base_qualifier != AARCH64_OPND_QLF_S_D)
7285 || offset_qualifier != AARCH64_OPND_QLF_X)
7286 {
7287 set_syntax_error (_("invalid addressing mode"));
7288 goto failure;
7289 }
7290 info->qualifier = base_qualifier;
7291 if (!info->addr.offset.is_reg || info->addr.pcrel
7292 || !info->addr.preind || info->addr.writeback
7293 || info->shifter.operator_present != 0)
7294 {
7295 set_syntax_error (_("invalid addressing mode"));
7296 goto failure;
7297 }
7298 info->shifter.kind = AARCH64_MOD_LSL;
7299 break;
7300
7301
7302 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7303 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7304 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7305 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7306 /* [Z<n>.<T>{, #imm}] */
7307 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7308 &offset_qualifier));
7309 if (base_qualifier != AARCH64_OPND_QLF_S_S
7310 && base_qualifier != AARCH64_OPND_QLF_S_D)
7311 {
7312 set_syntax_error (_("invalid addressing mode"));
7313 goto failure;
7314 }
7315 info->qualifier = base_qualifier;
7316 goto sve_regimm;
7317
7318 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7319 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7320 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7321 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7322 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7323
7324 We don't reject:
7325
7326 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7327
7328 here since we get better error messages by leaving it to
7329 the qualifier checking routines. */
7330 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7331 &offset_qualifier));
7332 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7333 && base_qualifier != AARCH64_OPND_QLF_S_D)
7334 || offset_qualifier != base_qualifier)
7335 {
7336 set_syntax_error (_("invalid addressing mode"));
7337 goto failure;
7338 }
7339 info->qualifier = base_qualifier;
7340 goto regoff_addr;
7341
7342 case AARCH64_OPND_SYSREG:
7343 {
7344 uint32_t sysreg_flags;
7345 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7346 &sysreg_flags)) == PARSE_FAIL)
7347 {
7348 set_syntax_error (_("unknown or missing system register name"));
7349 goto failure;
7350 }
7351 inst.base.operands[i].sysreg.value = val;
7352 inst.base.operands[i].sysreg.flags = sysreg_flags;
7353 break;
7354 }
7355
7356 case AARCH64_OPND_PSTATEFIELD:
7357 {
7358 uint32_t sysreg_flags;
7359 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7360 &sysreg_flags)) == PARSE_FAIL)
7361 {
7362 set_syntax_error (_("unknown or missing PSTATE field name"));
7363 goto failure;
7364 }
7365 inst.base.operands[i].pstatefield = val;
7366 inst.base.operands[i].sysreg.flags = sysreg_flags;
7367 break;
7368 }
7369
7370 case AARCH64_OPND_SYSREG_IC:
7371 inst.base.operands[i].sysins_op =
7372 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7373 goto sys_reg_ins;
7374
7375 case AARCH64_OPND_SYSREG_DC:
7376 inst.base.operands[i].sysins_op =
7377 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7378 goto sys_reg_ins;
7379
7380 case AARCH64_OPND_SYSREG_AT:
7381 inst.base.operands[i].sysins_op =
7382 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7383 goto sys_reg_ins;
7384
7385 case AARCH64_OPND_SYSREG_SR:
7386 inst.base.operands[i].sysins_op =
7387 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7388 goto sys_reg_ins;
7389
7390 case AARCH64_OPND_SYSREG_TLBI:
7391 inst.base.operands[i].sysins_op =
7392 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7393 sys_reg_ins:
7394 if (inst.base.operands[i].sysins_op == NULL)
7395 {
7396 set_fatal_syntax_error ( _("unknown or missing operation name"));
7397 goto failure;
7398 }
7399 break;
7400
7401 case AARCH64_OPND_BARRIER:
7402 case AARCH64_OPND_BARRIER_ISB:
7403 val = parse_barrier (&str);
7404 if (val != PARSE_FAIL
7405 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7406 {
7407 /* ISB only accepts options name 'sy'. */
7408 set_syntax_error
7409 (_("the specified option is not accepted in ISB"));
7410 /* Turn off backtrack as this optional operand is present. */
7411 backtrack_pos = 0;
7412 goto failure;
7413 }
7414 if (val != PARSE_FAIL
7415 && operands[i] == AARCH64_OPND_BARRIER)
7416 {
7417 /* Regular barriers accept options CRm (C0-C15).
7418 DSB nXS barrier variant accepts values > 15. */
7419 if (val < 0 || val > 15)
7420 {
7421 set_syntax_error (_("the specified option is not accepted in DSB"));
7422 goto failure;
7423 }
7424 }
7425 /* This is an extension to accept a 0..15 immediate. */
7426 if (val == PARSE_FAIL)
7427 po_imm_or_fail (0, 15);
7428 info->barrier = aarch64_barrier_options + val;
7429 break;
7430
7431 case AARCH64_OPND_BARRIER_DSB_NXS:
7432 val = parse_barrier (&str);
7433 if (val != PARSE_FAIL)
7434 {
7435 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7436 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7437 {
7438 set_syntax_error (_("the specified option is not accepted in DSB"));
7439 /* Turn off backtrack as this optional operand is present. */
7440 backtrack_pos = 0;
7441 goto failure;
7442 }
7443 }
7444 else
7445 {
7446 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7447 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7448 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7449 goto failure;
7450 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7451 {
7452 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7453 goto failure;
7454 }
7455 }
7456 /* Option index is encoded as 2-bit value in val<3:2>. */
7457 val = (val >> 2) - 4;
7458 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7459 break;
7460
7461 case AARCH64_OPND_PRFOP:
7462 val = parse_pldop (&str);
7463 /* This is an extension to accept a 0..31 immediate. */
7464 if (val == PARSE_FAIL)
7465 po_imm_or_fail (0, 31);
7466 inst.base.operands[i].prfop = aarch64_prfops + val;
7467 break;
7468
7469 case AARCH64_OPND_BARRIER_PSB:
7470 val = parse_barrier_psb (&str, &(info->hint_option));
7471 if (val == PARSE_FAIL)
7472 goto failure;
7473 break;
7474
7475 case AARCH64_OPND_BTI_TARGET:
7476 val = parse_bti_operand (&str, &(info->hint_option));
7477 if (val == PARSE_FAIL)
7478 goto failure;
7479 break;
7480
7481 case AARCH64_OPND_SME_ZAda_2b:
7482 case AARCH64_OPND_SME_ZAda_3b:
7483 val = parse_sme_zada_operand (&str, &qualifier);
7484 if (val == PARSE_FAIL)
7485 goto failure;
7486 info->reg.regno = val;
7487 info->qualifier = qualifier;
7488 break;
7489
7490 case AARCH64_OPND_SME_ZA_HV_idx_src:
7491 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7492 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7493 {
7494 enum sme_hv_slice slice_indicator;
7495 int vector_select_register;
7496 int imm;
7497
7498 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7499 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7500 &slice_indicator,
7501 &vector_select_register,
7502 &imm,
7503 &qualifier);
7504 else
7505 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7506 &vector_select_register,
7507 &imm,
7508 &qualifier);
7509 if (val == PARSE_FAIL)
7510 goto failure;
7511 info->za_tile_vector.regno = val;
7512 info->za_tile_vector.index.regno = vector_select_register;
7513 info->za_tile_vector.index.imm = imm;
7514 info->za_tile_vector.v = slice_indicator;
7515 info->qualifier = qualifier;
7516 break;
7517 }
7518
7519 case AARCH64_OPND_SME_list_of_64bit_tiles:
7520 val = parse_sme_list_of_64bit_tiles (&str);
7521 if (val == PARSE_FAIL)
7522 goto failure;
7523 info->imm.value = val;
7524 break;
7525
7526 case AARCH64_OPND_SME_ZA_array:
7527 {
7528 int imm;
7529 val = parse_sme_za_array (&str, &imm);
7530 if (val == PARSE_FAIL)
7531 goto failure;
7532 info->za_tile_vector.index.regno = val;
7533 info->za_tile_vector.index.imm = imm;
7534 break;
7535 }
7536
7537 case AARCH64_OPND_MOPS_ADDR_Rd:
7538 case AARCH64_OPND_MOPS_ADDR_Rs:
7539 po_char_or_fail ('[');
7540 if (!parse_x0_to_x30 (&str, info))
7541 goto failure;
7542 po_char_or_fail (']');
7543 po_char_or_fail ('!');
7544 break;
7545
7546 case AARCH64_OPND_MOPS_WB_Rn:
7547 if (!parse_x0_to_x30 (&str, info))
7548 goto failure;
7549 po_char_or_fail ('!');
7550 break;
7551
7552 default:
7553 as_fatal (_("unhandled operand code %d"), operands[i]);
7554 }
7555
7556 /* If we get here, this operand was successfully parsed. */
7557 inst.base.operands[i].present = 1;
7558 continue;
7559
7560 failure:
7561 /* The parse routine should already have set the error, but in case
7562 not, set a default one here. */
7563 if (! error_p ())
7564 set_default_error ();
7565
7566 if (! backtrack_pos)
7567 goto parse_operands_return;
7568
7569 {
7570 /* We reach here because this operand is marked as optional, and
7571 either no operand was supplied or the operand was supplied but it
7572 was syntactically incorrect. In the latter case we report an
7573 error. In the former case we perform a few more checks before
7574 dropping through to the code to insert the default operand. */
7575
7576 char *tmp = backtrack_pos;
7577 char endchar = END_OF_INSN;
7578
7579 if (i != (aarch64_num_of_operands (opcode) - 1))
7580 endchar = ',';
7581 skip_past_char (&tmp, ',');
7582
7583 if (*tmp != endchar)
7584 /* The user has supplied an operand in the wrong format. */
7585 goto parse_operands_return;
7586
7587 /* Make sure there is not a comma before the optional operand.
7588 For example the fifth operand of 'sys' is optional:
7589
7590 sys #0,c0,c0,#0, <--- wrong
7591 sys #0,c0,c0,#0 <--- correct. */
7592 if (comma_skipped_p && i && endchar == END_OF_INSN)
7593 {
7594 set_fatal_syntax_error
7595 (_("unexpected comma before the omitted optional operand"));
7596 goto parse_operands_return;
7597 }
7598 }
7599
7600 /* Reaching here means we are dealing with an optional operand that is
7601 omitted from the assembly line. */
7602 gas_assert (optional_operand_p (opcode, i));
7603 info->present = 0;
7604 process_omitted_operand (operands[i], opcode, i, info);
7605
7606 /* Try again, skipping the optional operand at backtrack_pos. */
7607 str = backtrack_pos;
7608 backtrack_pos = 0;
7609
7610 /* Clear any error record after the omitted optional operand has been
7611 successfully handled. */
7612 clear_error ();
7613 }
7614
7615 /* Check if we have parsed all the operands. */
7616 if (*str != '\0' && ! error_p ())
7617 {
7618 /* Set I to the index of the last present operand; this is
7619 for the purpose of diagnostics. */
7620 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7621 ;
7622 set_fatal_syntax_error
7623 (_("unexpected characters following instruction"));
7624 }
7625
7626 parse_operands_return:
7627
7628 if (error_p ())
7629 {
7630 DEBUG_TRACE ("parsing FAIL: %s - %s",
7631 operand_mismatch_kind_names[get_error_kind ()],
7632 get_error_message ());
7633 /* Record the operand error properly; this is useful when there
7634 are multiple instruction templates for a mnemonic name, so that
7635 later on, we can select the error that most closely describes
7636 the problem. */
7637 record_operand_error (opcode, i, get_error_kind (),
7638 get_error_message ());
7639 return false;
7640 }
7641 else
7642 {
7643 DEBUG_TRACE ("parsing SUCCESS");
7644 return true;
7645 }
7646 }
7647
7648 /* It does some fix-up to provide some programmer friendly feature while
7649 keeping the libopcodes happy, i.e. libopcodes only accepts
7650 the preferred architectural syntax.
7651 Return FALSE if there is any failure; otherwise return TRUE. */
7652
7653 static bool
7654 programmer_friendly_fixup (aarch64_instruction *instr)
7655 {
7656 aarch64_inst *base = &instr->base;
7657 const aarch64_opcode *opcode = base->opcode;
7658 enum aarch64_op op = opcode->op;
7659 aarch64_opnd_info *operands = base->operands;
7660
7661 DEBUG_TRACE ("enter");
7662
7663 switch (opcode->iclass)
7664 {
7665 case testbranch:
7666 /* TBNZ Xn|Wn, #uimm6, label
7667 Test and Branch Not Zero: conditionally jumps to label if bit number
7668 uimm6 in register Xn is not zero. The bit number implies the width of
7669 the register, which may be written and should be disassembled as Wn if
7670 uimm is less than 32. */
7671 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7672 {
7673 if (operands[1].imm.value >= 32)
7674 {
7675 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7676 0, 31);
7677 return false;
7678 }
7679 operands[0].qualifier = AARCH64_OPND_QLF_X;
7680 }
7681 break;
7682 case loadlit:
7683 /* LDR Wt, label | =value
7684 As a convenience assemblers will typically permit the notation
7685 "=value" in conjunction with the pc-relative literal load instructions
7686 to automatically place an immediate value or symbolic address in a
7687 nearby literal pool and generate a hidden label which references it.
7688 ISREG has been set to 0 in the case of =value. */
7689 if (instr->gen_lit_pool
7690 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
7691 {
7692 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7693 if (op == OP_LDRSW_LIT)
7694 size = 4;
7695 if (instr->reloc.exp.X_op != O_constant
7696 && instr->reloc.exp.X_op != O_big
7697 && instr->reloc.exp.X_op != O_symbol)
7698 {
7699 record_operand_error (opcode, 1,
7700 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7701 _("constant expression expected"));
7702 return false;
7703 }
7704 if (! add_to_lit_pool (&instr->reloc.exp, size))
7705 {
7706 record_operand_error (opcode, 1,
7707 AARCH64_OPDE_OTHER_ERROR,
7708 _("literal pool insertion failed"));
7709 return false;
7710 }
7711 }
7712 break;
7713 case log_shift:
7714 case bitfield:
7715 /* UXT[BHW] Wd, Wn
7716 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
7717 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
7718 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7719 A programmer-friendly assembler should accept a destination Xd in
7720 place of Wd, however that is not the preferred form for disassembly.
7721 */
7722 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7723 && operands[1].qualifier == AARCH64_OPND_QLF_W
7724 && operands[0].qualifier == AARCH64_OPND_QLF_X)
7725 operands[0].qualifier = AARCH64_OPND_QLF_W;
7726 break;
7727
7728 case addsub_ext:
7729 {
7730 /* In the 64-bit form, the final register operand is written as Wm
7731 for all but the (possibly omitted) UXTX/LSL and SXTX
7732 operators.
7733 As a programmer-friendly assembler, we accept e.g.
7734 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7735 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
7736 int idx = aarch64_operand_index (opcode->operands,
7737 AARCH64_OPND_Rm_EXT);
7738 gas_assert (idx == 1 || idx == 2);
7739 if (operands[0].qualifier == AARCH64_OPND_QLF_X
7740 && operands[idx].qualifier == AARCH64_OPND_QLF_X
7741 && operands[idx].shifter.kind != AARCH64_MOD_LSL
7742 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7743 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7744 operands[idx].qualifier = AARCH64_OPND_QLF_W;
7745 }
7746 break;
7747
7748 default:
7749 break;
7750 }
7751
7752 DEBUG_TRACE ("exit with SUCCESS");
7753 return true;
7754 }
7755
7756 /* Check for loads and stores that will cause unpredictable behavior. */
7757
static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  /* Issue "unpredictable" warnings for load/store forms whose behavior
     the architecture leaves CONSTRAINED UNPREDICTABLE, e.g. writeback
     to a transfer register or a load pair into the same register.
     STR is the original source line, used verbatim in diagnostics.  */
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 Pair forms have the address as operand 2, so compare the base
	 against both transfer registers.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 Bit 22 set selects the load form of these encodings.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  /* Bit 22 distinguishes load-exclusive from store-exclusive;
	     bit 21 distinguishes the pair forms.  */
          if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /*  Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
	            /*  Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
	            /*  Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7851
7852 static void
7853 force_automatic_sequence_close (void)
7854 {
7855 struct aarch64_segment_info_type *tc_seg_info;
7856
7857 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7858 if (tc_seg_info->insn_sequence.instr)
7859 {
7860 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7861 _("previous `%s' sequence has not been closed"),
7862 tc_seg_info->insn_sequence.instr->opcode->name);
7863 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7864 }
7865 }
7866
7867 /* A wrapper function to interface with libopcodes on encoding and
7868 record the error message if there is any.
7869
7870 Return TRUE on success; otherwise return FALSE. */
7871
7872 static bool
7873 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7874 aarch64_insn *code)
7875 {
7876 aarch64_operand_error error_info;
7877 memset (&error_info, '\0', sizeof (error_info));
7878 error_info.kind = AARCH64_OPDE_NIL;
7879 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7880 && !error_info.non_fatal)
7881 return true;
7882
7883 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7884 record_operand_error_info (opcode, &error_info);
7885 return error_info.non_fatal;
7886 }
7887
7888 #ifdef DEBUG_AARCH64
7889 static inline void
7890 dump_opcode_operands (const aarch64_opcode *opcode)
7891 {
7892 int i = 0;
7893 while (opcode->operands[i] != AARCH64_OPND_NIL)
7894 {
7895 aarch64_verbose ("\t\t opnd%d: %s", i,
7896 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7897 ? aarch64_get_operand_name (opcode->operands[i])
7898 : aarch64_get_operand_desc (opcode->operands[i]));
7899 ++i;
7900 }
7901 }
7902 #endif /* DEBUG_AARCH64 */
7903
7904 /* This is the guts of the machine-dependent assembler. STR points to a
7905 machine dependent instruction. This function is supposed to emit
7906 the frags/bytes it assembles to. */
7907
void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  Only the first '.' is remembered, for
     condition suffixes such as "b.eq".  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* NOTE(review): presumably handles ".req"-style register alias
     definitions, which have no '.' in the leading name -- confirm at
     the helper's definition.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed from the mnemonic suffix across the
     per-template instruction reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, apply programmer-friendly transformations, then encode;
	 any failure falls through to try the next template.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset the partially-filled instruction
	 before trying the next opcode with the same mnemonic, keeping
	 only the already-parsed condition.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8059
8060 /* Various frobbings of labels and their addresses. */
8061
void
aarch64_start_line_hook (void)
{
  /* Forget any label from the previous line; md_assemble only
     re-anchors labels recorded since the last line started.  */
  last_label_seen = NULL;
}
8067
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the most recent label so md_assemble can re-anchor it to
     the instruction's frag and offset.  */
  last_label_seen = sym;

  /* Let the DWARF-2 machinery know about the label as well.  */
  dwarf2_emit_label (sym);
}
8075
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close: any instruction sequence
     still open at this point is diagnosed and reset.  */
  force_automatic_sequence_close ();
}
8082
8083 int
8084 aarch64_data_in_code (void)
8085 {
8086 if (startswith (input_line_pointer + 1, "data:"))
8087 {
8088 *input_line_pointer = '/';
8089 input_line_pointer += 5;
8090 *input_line_pointer = 0;
8091 return 1;
8092 }
8093
8094 return 0;
8095 }
8096
/* Strip a trailing "/data" suffix (added by aarch64_data_in_code) from
   NAME, modifying it in place, and return NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  /* Only strip when something precedes the suffix.  */
  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8107 \f
8108 /* Table of all register names defined by default. The user can
8109 define additional names with .req. Note that all register names
8110 should appear in both upper and lowercase variants. Some registers
8111 also have mixed-case names. */
8112
8113 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8114 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8115 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8116 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8117 #define REGSET16(p,t) \
8118 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8119 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8120 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8121 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8122 #define REGSET16S(p,s,t) \
8123 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8124 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8125 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8126 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8127 #define REGSET31(p,t) \
8128 REGSET16(p, t), \
8129 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8130 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8131 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8132 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8133 #define REGSET(p,t) \
8134 REGSET31(p,t), REGNUM(p,31,t)
8135
8136 /* These go into aarch64_reg_hsh hash-table. */
8137 static const reg_entry reg_names[] = {
8138 /* Integer registers. */
8139 REGSET31 (x, R_64), REGSET31 (X, R_64),
8140 REGSET31 (w, R_32), REGSET31 (W, R_32),
8141
8142 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8143 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8144 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8145 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8146 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8147 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8148
8149 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
8150 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
8151
8152 /* Floating-point single precision registers. */
8153 REGSET (s, FP_S), REGSET (S, FP_S),
8154
8155 /* Floating-point double precision registers. */
8156 REGSET (d, FP_D), REGSET (D, FP_D),
8157
8158 /* Floating-point half precision registers. */
8159 REGSET (h, FP_H), REGSET (H, FP_H),
8160
8161 /* Floating-point byte precision registers. */
8162 REGSET (b, FP_B), REGSET (B, FP_B),
8163
8164 /* Floating-point quad precision registers. */
8165 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8166
8167 /* FP/SIMD registers. */
8168 REGSET (v, VN), REGSET (V, VN),
8169
8170 /* SVE vector registers. */
8171 REGSET (z, ZN), REGSET (Z, ZN),
8172
8173 /* SVE predicate registers. */
8174 REGSET16 (p, PN), REGSET16 (P, PN),
8175
8176 /* SME ZA tile registers. */
8177 REGSET16 (za, ZA), REGSET16 (ZA, ZA),
8178
8179 /* SME ZA tile registers (horizontal slice). */
8180 REGSET16S (za, h, ZAH), REGSET16S (ZA, H, ZAH),
8181
8182 /* SME ZA tile registers (vertical slice). */
8183 REGSET16S (za, v, ZAV), REGSET16S (ZA, V, ZAV)
8184 };
8185
8186 #undef REGDEF
8187 #undef REGDEF_ALIAS
8188 #undef REGNUM
8189 #undef REGSET16
8190 #undef REGSET31
8191 #undef REGSET
8192
/* One-bit values for the N, Z, C and V condition flags: an uppercase
   letter in an entry name below means that flag is set, lowercase means
   it is clear.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four flag bits into a 4-bit immediate, N as the MSB.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* Every spelling of the NZCV flags operand, listed in increasing
   immediate order: 0b0000 ("nzcv") through 0b1111 ("NZCV").  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

/* Remove the single-letter helper macros so that they cannot clash
   with ordinary identifiers later in the file.  */
#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8230 \f
8231 /* MD interface: bits in the object file. */
8232
8233 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8234 for use in the a.out file, and stores them in the array pointed to by buf.
8235 This knows about the endian-ness of the target machine and does
8236 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8237 2 (short) and 4 (long) Floating numbers are put out as a series of
8238 LITTLENUMS (shorts, here at least). */
8239
8240 void
8241 md_number_to_chars (char *buf, valueT val, int n)
8242 {
8243 if (target_big_endian)
8244 number_to_chars_bigendian (buf, val, n);
8245 else
8246 number_to_chars_littleendian (buf, val, n);
8247 }
8248
8249 /* MD interface: Sections. */
8250
8251 /* Estimate the size of a frag before relaxing. Assume everything fits in
8252 4 bytes. */
8253
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Every AArch64 instruction is one 4-byte word, so both the variable
     part of the frag and the size estimate are 4 bytes.  */
  fragp->fr_var = 4;
  return 4;
}
8260
8261 /* Round up a section size to the appropriate boundary. */
8262
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No rounding is applied; section sizes are kept as-is.  */
  return size;
}
8268
8269 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8270 of an rs_align_code fragment.
8271
8272 Here we fill the frag with the appropriate info for padding the
8273 output stream. The resulting frag will consist of a fixed (fr_fix)
8274 and of a repeating (fr_var) part.
8275
8276 The fixed content is always emitted before the repeating content and
8277 these two parts are used as follows in constructing the output:
8278 - the fixed part will be used to align to a valid instruction word
8279 boundary, in case that we start at a misaligned address; as no
8280 executable instruction can live at the misaligned location, we
8281 simply fill with zeros;
8282 - the variable part will be used to cover the remaining padding and
8283 we fill using the AArch64 NOP instruction.
8284
8285 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
8286 enough storage space for up to 3 bytes for padding the back to a valid
8287 instruction alignment and exactly 4 bytes to store the NOP pattern. */
8288
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding this frag must produce.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach instruction-word alignment; they cannot hold
     an instruction, so fill them with zeros (and, for ELF, mark them as
     data with a mapping symbol).  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The variable part repeats the NOP pattern to cover the rest.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8326
8327 /* Perform target specific initialisation of a frag.
8328 Note - despite the name this initialisation is not done when the frag
8329 is created, but only when its type is assigned. A frag can be created
8330 and used a long time before its type is set, so beware of assuming that
8331 this initialisation is performed first. */
8332
8333 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets: mapping symbols are an ELF
     concept, so there is no per-frag state to record.  */
}
8339
8340 #else /* OBJ_ELF is defined. */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      /* These frags emit data bytes, so start them in the MAP_DATA
	 mapping state.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
8372 \f
8373 /* Initialize the DWARF-2 unwind information for this procedure. */
8374
void
tc_aarch64_frame_initial_instructions (void)
{
  /* On function entry the CFA is SP with a zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8380 #endif /* OBJ_ELF */
8381
8382 /* Convert REGNAME to a DWARF-2 register number. */
8383
8384 int
8385 tc_aarch64_regname_to_dw2regnum (char *regname)
8386 {
8387 const reg_entry *reg = parse_reg (&regname);
8388 if (reg == NULL)
8389 return -1;
8390
8391 switch (reg->type)
8392 {
8393 case REG_TYPE_SP_32:
8394 case REG_TYPE_SP_64:
8395 case REG_TYPE_R_32:
8396 case REG_TYPE_R_64:
8397 return reg->number;
8398
8399 case REG_TYPE_FP_B:
8400 case REG_TYPE_FP_H:
8401 case REG_TYPE_FP_S:
8402 case REG_TYPE_FP_D:
8403 case REG_TYPE_FP_Q:
8404 return reg->number + 64;
8405
8406 default:
8407 break;
8408 }
8409 return -1;
8410 }
8411
8412 /* Implement DWARF2_ADDR_SIZE. */
8413
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses even on a 64-bit architecture.  */
  if (ilp32_p)
    return 4;
#endif
  /* Otherwise derive the address size from the output BFD.  */
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
8423
8424 /* MD interface: Symbol and relocation handling. */
8425
8426 /* Return the address within the segment that a PC-relative fixup is
8427 relative to. For AArch64 PC-relative fixups applied to instructions
8428 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8429
8430 long
8431 md_pcrel_from_section (fixS * fixP, segT seg)
8432 {
8433 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8434
8435 /* If this is pc-relative and we are going to emit a relocation
8436 then we just want to put out any pipeline compensation that the linker
8437 will need. Otherwise we want to use the calculated base. */
8438 if (fixP->fx_pcrel
8439 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8440 || aarch64_force_relocation (fixP)))
8441 base = 0;
8442
8443 /* AArch64 should be consistent for all pc-relative relocations. */
8444 return base + AARCH64_PCREL_OFFSET;
8445 }
8446
8447 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8448 Otherwise we have no need to default values of symbols. */
8449
8450 symbolS *
8451 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8452 {
8453 #ifdef OBJ_ELF
8454 if (name[0] == '_' && name[1] == 'G'
8455 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8456 {
8457 if (!GOT_symbol)
8458 {
8459 if (symbol_find (name))
8460 as_bad (_("GOT already in the symbol table"));
8461
8462 GOT_symbol = symbol_new (name, undefined_section,
8463 &zero_address_frag, 0);
8464 }
8465
8466 return GOT_symbol;
8467 }
8468 #endif
8469
8470 return 0;
8471 }
8472
8473 /* Return non-zero if the indicated VALUE has overflowed the maximum
8474 range expressible by a unsigned number with the indicated number of
8475 BITS. */
8476
8477 static bool
8478 unsigned_overflow (valueT value, unsigned bits)
8479 {
8480 valueT lim;
8481 if (bits >= sizeof (valueT) * 8)
8482 return false;
8483 lim = (valueT) 1 << bits;
8484 return (value >= lim);
8485 }
8486
8487
8488 /* Return non-zero if the indicated VALUE has overflowed the maximum
8489 range expressible by an signed number with the indicated number of
8490 BITS. */
8491
8492 static bool
8493 signed_overflow (offsetT value, unsigned bits)
8494 {
8495 offsetT lim;
8496 if (bits >= sizeof (offsetT) * 8)
8497 return false;
8498 lim = (offsetT) 1 << (bits - 1);
8499 return (value < -lim || value >= lim);
8500 }
8501
8502 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
8503 unsigned immediate offset load/store instruction, try to encode it as
8504 an unscaled, 9-bit, signed immediate offset load/store instruction.
8505 Return TRUE if it is successful; otherwise return FALSE.
8506
8507 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
8508 in response to the standard LDR/STR mnemonics when the immediate offset is
8509 unambiguous, i.e. when it is negative or unaligned. */
8510
static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled (LDR/STR family) opcode to its unscaled
     (LDUR/STUR family) counterpart; OP_NIL means no counterpart
     exists and the conversion cannot be attempted.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure here means the
     offset does not fit the unscaled 9-bit form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8564
8565 /* Called by fix_insn to fix a MOV immediate alias instruction.
8566
8567 Operand for a generic move immediate instruction, which is an alias
8568 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
8569 a 32-bit/64-bit immediate value into general register. An assembler error
8570 shall result if the immediate cannot be created by a single one of these
8571 instructions. If there is a choice, then to ensure reversability an
8572 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
8573
8574 static void
8575 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
8576 {
8577 const aarch64_opcode *opcode;
8578
8579 /* Need to check if the destination is SP/ZR. The check has to be done
8580 before any aarch64_replace_opcode. */
8581 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
8582 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
8583
8584 instr->operands[1].imm.value = value;
8585 instr->operands[1].skip = 0;
8586
8587 if (try_mov_wide_p)
8588 {
8589 /* Try the MOVZ alias. */
8590 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
8591 aarch64_replace_opcode (instr, opcode);
8592 if (aarch64_opcode_encode (instr->opcode, instr,
8593 &instr->value, NULL, NULL, insn_sequence))
8594 {
8595 put_aarch64_insn (buf, instr->value);
8596 return;
8597 }
8598 /* Try the MOVK alias. */
8599 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
8600 aarch64_replace_opcode (instr, opcode);
8601 if (aarch64_opcode_encode (instr->opcode, instr,
8602 &instr->value, NULL, NULL, insn_sequence))
8603 {
8604 put_aarch64_insn (buf, instr->value);
8605 return;
8606 }
8607 }
8608
8609 if (try_mov_bitmask_p)
8610 {
8611 /* Try the ORR alias. */
8612 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
8613 aarch64_replace_opcode (instr, opcode);
8614 if (aarch64_opcode_encode (instr->opcode, instr,
8615 &instr->value, NULL, NULL, insn_sequence))
8616 {
8617 put_aarch64_insn (buf, instr->value);
8618 return;
8619 }
8620 }
8621
8622 as_bad_where (fixP->fx_file, fixP->fx_line,
8623 _("immediate cannot be moved by a single instruction"));
8624 }
8625
8626 /* An instruction operand which is immediate related may have symbol used
8627 in the assembly, e.g.
8628
8629 mov w0, u32
8630 .set u32, 0x00ffff00
8631
8632 At the time when the assembly instruction is parsed, a referenced symbol,
8633 like 'u32' in the above example may not have been seen; a fixS is created
8634 in such a case and is handled here after symbols have been resolved.
8635 Instruction is fixed up with VALUE using the information in *FIXP plus
8636 extra information in FLAGS.
8637
8638 This function is called by md_apply_fix to fix up instructions that need
8639 a fix-up described above but does not involve any linker-time relocation. */
8640
8641 static void
8642 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
8643 {
8644 int idx;
8645 uint32_t insn;
8646 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8647 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
8648 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
8649
8650 if (new_inst)
8651 {
8652 /* Now the instruction is about to be fixed-up, so the operand that
8653 was previously marked as 'ignored' needs to be unmarked in order
8654 to get the encoding done properly. */
8655 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
8656 new_inst->operands[idx].skip = 0;
8657 }
8658
8659 gas_assert (opnd != AARCH64_OPND_NIL);
8660
8661 switch (opnd)
8662 {
8663 case AARCH64_OPND_EXCEPTION:
8664 case AARCH64_OPND_UNDEFINED:
8665 if (unsigned_overflow (value, 16))
8666 as_bad_where (fixP->fx_file, fixP->fx_line,
8667 _("immediate out of range"));
8668 insn = get_aarch64_insn (buf);
8669 insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
8670 put_aarch64_insn (buf, insn);
8671 break;
8672
8673 case AARCH64_OPND_AIMM:
8674 /* ADD or SUB with immediate.
8675 NOTE this assumes we come here with a add/sub shifted reg encoding
8676 3 322|2222|2 2 2 21111 111111
8677 1 098|7654|3 2 1 09876 543210 98765 43210
8678 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
8679 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
8680 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
8681 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
8682 ->
8683 3 322|2222|2 2 221111111111
8684 1 098|7654|3 2 109876543210 98765 43210
8685 11000000 sf 001|0001|shift imm12 Rn Rd ADD
8686 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
8687 51000000 sf 101|0001|shift imm12 Rn Rd SUB
8688 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
8689 Fields sf Rn Rd are already set. */
8690 insn = get_aarch64_insn (buf);
8691 if (value < 0)
8692 {
8693 /* Add <-> sub. */
8694 insn = reencode_addsub_switch_add_sub (insn);
8695 value = -value;
8696 }
8697
8698 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
8699 && unsigned_overflow (value, 12))
8700 {
8701 /* Try to shift the value by 12 to make it fit. */
8702 if (((value >> 12) << 12) == value
8703 && ! unsigned_overflow (value, 12 + 12))
8704 {
8705 value >>= 12;
8706 insn |= encode_addsub_imm_shift_amount (1);
8707 }
8708 }
8709
8710 if (unsigned_overflow (value, 12))
8711 as_bad_where (fixP->fx_file, fixP->fx_line,
8712 _("immediate out of range"));
8713
8714 insn |= encode_addsub_imm (value);
8715
8716 put_aarch64_insn (buf, insn);
8717 break;
8718
8719 case AARCH64_OPND_SIMD_IMM:
8720 case AARCH64_OPND_SIMD_IMM_SFT:
8721 case AARCH64_OPND_LIMM:
8722 /* Bit mask immediate. */
8723 gas_assert (new_inst != NULL);
8724 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
8725 new_inst->operands[idx].imm.value = value;
8726 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
8727 &new_inst->value, NULL, NULL, insn_sequence))
8728 put_aarch64_insn (buf, new_inst->value);
8729 else
8730 as_bad_where (fixP->fx_file, fixP->fx_line,
8731 _("invalid immediate"));
8732 break;
8733
8734 case AARCH64_OPND_HALF:
8735 /* 16-bit unsigned immediate. */
8736 if (unsigned_overflow (value, 16))
8737 as_bad_where (fixP->fx_file, fixP->fx_line,
8738 _("immediate out of range"));
8739 insn = get_aarch64_insn (buf);
8740 insn |= encode_movw_imm (value & 0xffff);
8741 put_aarch64_insn (buf, insn);
8742 break;
8743
8744 case AARCH64_OPND_IMM_MOV:
8745 /* Operand for a generic move immediate instruction, which is
8746 an alias instruction that generates a single MOVZ, MOVN or ORR
8747 instruction to loads a 32-bit/64-bit immediate value into general
8748 register. An assembler error shall result if the immediate cannot be
8749 created by a single one of these instructions. If there is a choice,
8750 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
8751 and MOVZ or MOVN to ORR. */
8752 gas_assert (new_inst != NULL);
8753 fix_mov_imm_insn (fixP, buf, new_inst, value);
8754 break;
8755
8756 case AARCH64_OPND_ADDR_SIMM7:
8757 case AARCH64_OPND_ADDR_SIMM9:
8758 case AARCH64_OPND_ADDR_SIMM9_2:
8759 case AARCH64_OPND_ADDR_SIMM10:
8760 case AARCH64_OPND_ADDR_UIMM12:
8761 case AARCH64_OPND_ADDR_SIMM11:
8762 case AARCH64_OPND_ADDR_SIMM13:
8763 /* Immediate offset in an address. */
8764 insn = get_aarch64_insn (buf);
8765
8766 gas_assert (new_inst != NULL && new_inst->value == insn);
8767 gas_assert (new_inst->opcode->operands[1] == opnd
8768 || new_inst->opcode->operands[2] == opnd);
8769
8770 /* Get the index of the address operand. */
8771 if (new_inst->opcode->operands[1] == opnd)
8772 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
8773 idx = 1;
8774 else
8775 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
8776 idx = 2;
8777
8778 /* Update the resolved offset value. */
8779 new_inst->operands[idx].addr.offset.imm = value;
8780
8781 /* Encode/fix-up. */
8782 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
8783 &new_inst->value, NULL, NULL, insn_sequence))
8784 {
8785 put_aarch64_insn (buf, new_inst->value);
8786 break;
8787 }
8788 else if (new_inst->opcode->iclass == ldst_pos
8789 && try_to_encode_as_unscaled_ldst (new_inst))
8790 {
8791 put_aarch64_insn (buf, new_inst->value);
8792 break;
8793 }
8794
8795 as_bad_where (fixP->fx_file, fixP->fx_line,
8796 _("immediate offset out of range"));
8797 break;
8798
8799 default:
8800 gas_assert (0);
8801 as_fatal (_("unhandled operand code %d"), opnd);
8802 }
8803 }
8804
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.

   Fixups that must survive into the object file are left with fx_done
   clear so tc_gen_reloc() will emit them; for RELA targets such fixups
   are not patched into the section contents here, since the addend
   lives in the relocation itself.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Address of the bytes being patched inside the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* Backend-specific fixup flags stashed in fx_addnumber
     (e.g. FIXUP_F_HAS_EXPLICIT_SHIFT).  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  A fixup with no
     symbol and no pc-relative adjustment is fully resolved here and
     needs no reloc in the output.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    /* Generic data relocations: patch the raw bytes when resolved
       locally, or when the target keeps addends in the section
       contents (REL).  */
    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* 19-bit word-scaled pc-relative offset of a literal load.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* 21-bit byte offset for ADR.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* 19-bit word-scaled offset for conditional branch.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* 14-bit word-scaled offset for TBZ/TBNZ.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* 26-bit word-scaled offset for B/BL.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVZ/MOVN/MOVK group relocations.  SCALE selects which 16-bit
       slice of the value is inserted by the shared code at
       movw_common below.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits.  */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction.  */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Narrow the generic reloc to the 32-bit or 64-bit variant
	 according to the selected ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Likewise, select the ABI-specific TLSDESC load reloc.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* TLS relocations that are always left for the linker: mark the
       symbol thread-local and keep the reloc in the output.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      /* Marker relocations; no instruction bits to patch here.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the struct aarch64_inst allocated for this fixup, if any.
     N.B. currently only a very limited number of fix-up types actually
     use this field, so the impact on performance should be minimal.  */
  free (fixP->tc_fix_data.inst);

  return;
}
9191
9192 /* Translate internal representation of relocation info to BFD target
9193 format. */
9194
9195 arelent *
9196 tc_gen_reloc (asection * section, fixS * fixp)
9197 {
9198 arelent *reloc;
9199 bfd_reloc_code_real_type code;
9200
9201 reloc = XNEW (arelent);
9202
9203 reloc->sym_ptr_ptr = XNEW (asymbol *);
9204 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9205 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9206
9207 if (fixp->fx_pcrel)
9208 {
9209 if (section->use_rela_p)
9210 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9211 else
9212 fixp->fx_offset = reloc->address;
9213 }
9214 reloc->addend = fixp->fx_offset;
9215
9216 code = fixp->fx_r_type;
9217 switch (code)
9218 {
9219 case BFD_RELOC_16:
9220 if (fixp->fx_pcrel)
9221 code = BFD_RELOC_16_PCREL;
9222 break;
9223
9224 case BFD_RELOC_32:
9225 if (fixp->fx_pcrel)
9226 code = BFD_RELOC_32_PCREL;
9227 break;
9228
9229 case BFD_RELOC_64:
9230 if (fixp->fx_pcrel)
9231 code = BFD_RELOC_64_PCREL;
9232 break;
9233
9234 default:
9235 break;
9236 }
9237
9238 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9239 if (reloc->howto == NULL)
9240 {
9241 as_bad_where (fixp->fx_file, fixp->fx_line,
9242 _
9243 ("cannot represent %s relocation in this object file format"),
9244 bfd_get_reloc_code_name (code));
9245 return NULL;
9246 }
9247
9248 return reloc;
9249 }
9250
9251 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9252
9253 void
9254 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9255 {
9256 bfd_reloc_code_real_type type;
9257 int pcrel = 0;
9258
9259 /* Pick a reloc.
9260 FIXME: @@ Should look at CPU word size. */
9261 switch (size)
9262 {
9263 case 1:
9264 type = BFD_RELOC_8;
9265 break;
9266 case 2:
9267 type = BFD_RELOC_16;
9268 break;
9269 case 4:
9270 type = BFD_RELOC_32;
9271 break;
9272 case 8:
9273 type = BFD_RELOC_64;
9274 break;
9275 default:
9276 as_bad (_("cannot do %u-byte relocation"), size);
9277 type = BFD_RELOC_UNUSED;
9278 break;
9279 }
9280
9281 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9282 }
9283
9284 #ifdef OBJ_ELF
9285
9286 /* Implement md_after_parse_args. This is the earliest time we need to decide
9287 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9288
9289 void
9290 aarch64_after_parse_args (void)
9291 {
9292 if (aarch64_abi != AARCH64_ABI_NONE)
9293 return;
9294
9295 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9296 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9297 aarch64_abi = AARCH64_ABI_ILP32;
9298 else
9299 aarch64_abi = AARCH64_ABI_LP64;
9300 }
9301
9302 const char *
9303 elf64_aarch64_target_format (void)
9304 {
9305 #ifdef TE_CLOUDABI
9306 /* FIXME: What to do for ilp32_p ? */
9307 if (target_big_endian)
9308 return "elf64-bigaarch64-cloudabi";
9309 else
9310 return "elf64-littleaarch64-cloudabi";
9311 #else
9312 if (target_big_endian)
9313 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9314 else
9315 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9316 #endif
9317 }
9318
/* Target hook run over each symbol at write-out time; simply defer to
   the generic ELF symbol frobbing.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9324 #endif
9325
9326 /* MD interface: Finalization. */
9327
9328 /* A good place to do this, although this was probably not intended
9329 for this kind of use. We need to dump the literal pool before
9330 references are made to a null symbol pointer. */
9331
9332 void
9333 aarch64_cleanup (void)
9334 {
9335 literal_pool *pool;
9336
9337 for (pool = list_of_pools; pool; pool = pool->next)
9338 {
9339 /* Put it at the end of the relevant section. */
9340 subseg_set (pool->section, pool->sub_section);
9341 s_ltorg (0);
9342 }
9343 }
9344
9345 #ifdef OBJ_ELF
9346 /* Remove any excess mapping symbols generated for alignment frags in
9347 SEC. We may have created a mapping symbol before a zero byte
9348 alignment; remove it if there's a mapping symbol after the
9349 alignment. */
9350 static void
9351 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
9352 void *dummy ATTRIBUTE_UNUSED)
9353 {
9354 segment_info_type *seginfo = seg_info (sec);
9355 fragS *fragp;
9356
9357 if (seginfo == NULL || seginfo->frchainP == NULL)
9358 return;
9359
9360 for (fragp = seginfo->frchainP->frch_root;
9361 fragp != NULL; fragp = fragp->fr_next)
9362 {
9363 symbolS *sym = fragp->tc_frag_data.last_map;
9364 fragS *next = fragp->fr_next;
9365
9366 /* Variable-sized frags have been converted to fixed size by
9367 this point. But if this was variable-sized to start with,
9368 there will be a fixed-size frag after it. So don't handle
9369 next == NULL. */
9370 if (sym == NULL || next == NULL)
9371 continue;
9372
9373 if (S_GET_VALUE (sym) < next->fr_address)
9374 /* Not at the end of this frag. */
9375 continue;
9376 know (S_GET_VALUE (sym) == next->fr_address);
9377
9378 do
9379 {
9380 if (next->tc_frag_data.first_map != NULL)
9381 {
9382 /* Next frag starts with a mapping symbol. Discard this
9383 one. */
9384 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
9385 break;
9386 }
9387
9388 if (next->fr_next == NULL)
9389 {
9390 /* This mapping symbol is at the end of the section. Discard
9391 it. */
9392 know (next->fr_fix == 0 && next->fr_var == 0);
9393 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
9394 break;
9395 }
9396
9397 /* As long as we have empty frags without any mapping symbols,
9398 keep looking. */
9399 /* If the next frag is non-empty and does not start with a
9400 mapping symbol, then this mapping symbol is required. */
9401 if (next->fr_address != next->fr_next->fr_address)
9402 break;
9403
9404 next = next->fr_next;
9405 }
9406 while (next != NULL);
9407 }
9408 }
9409 #endif
9410
/* Adjust the symbol table.  Drop redundant mapping symbols, then run
   the generic ELF adjustments.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9423
/* Insert KEY/VALUE into TABLE.  The final 0 argument requests a
   non-replacing insertion (an existing entry for KEY is kept).  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9429
/* Insert a system-register entry into TABLE, asserting that the name
   fits within the fixed parsing buffer of AARCH64_MAX_SYSREG_NAME_LEN.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9436
9437 static void
9438 fill_instruction_hash_table (void)
9439 {
9440 const aarch64_opcode *opcode = aarch64_opcode_table;
9441
9442 while (opcode->name != NULL)
9443 {
9444 templates *templ, *new_templ;
9445 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9446
9447 new_templ = XNEW (templates);
9448 new_templ->opcode = opcode;
9449 new_templ->next = NULL;
9450
9451 if (!templ)
9452 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9453 else
9454 {
9455 new_templ->next = templ->next;
9456 templ->next = new_templ;
9457 }
9458 ++opcode;
9459 }
9460 }
9461
/* Copy at most NUM characters of SRC to DST, upper-casing each one,
   and always NUL-terminate DST (DST must have room for NUM + 1
   characters).  The counter is size_t to match NUM: with the previous
   `unsigned int' counter the loop could never terminate if NUM
   exceeded UINT_MAX, as the counter would wrap before reaching it.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t i;
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
9470
9471 /* Assume STR point to a lower-case string, allocate, convert and return
9472 the corresponding upper-case string. */
9473 static inline const char*
9474 get_upper_str (const char *str)
9475 {
9476 char *ret;
9477 size_t len = strlen (str);
9478 ret = XNEWVEC (char, len + 1);
9479 convert_to_upper (ret, str, len);
9480 return ret;
9481 }
9482
/* MD interface: Initialization.  Build all lookup tables used during
   parsing (mnemonics, condition codes, shifts, system registers,
   barrier/prefetch/hint options), then fix the CPU variant from the
   command-line options and record the target machine.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables for every name class the parser looks up.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and related namespaces (IC/DC/AT/TLBI ops etc.);
     each source table is terminated by a NULL name.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* Register names and NZCV flag combinations.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Operand modifiers (shifts/extends) are matched case-insensitively
     by inserting each name in lower and upper case.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options:
     -mcpu wins over -march; with neither, fall back to the
     configured default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
9643
/* Command line processing.  */

/* -m<...> is the only short option with an argument.  */
const char *md_shortopts = "m:";

/* Only define the endianness-selection options that make sense for
   this configuration: both for bi-endian targets, otherwise just the
   one matching the configured byte order.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9670
/* A simple on/off command-line option: matching OPTION sets *VAR to
   VALUE.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Table of the simple toggle options recognized by this backend.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9693
/* An entry mapping a -mcpu= name to its feature set.  */
struct aarch64_cpu_option_table
{
  const char *name;		/* Name accepted on the command line.  */
  const aarch64_feature_set value;	/* Features enabled by this CPU.  */
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
9702
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Entries are matched in order; note that the
   .cpu directive handler deliberately skips the leading "all" entry
   (see s_aarch64_cpu), and the table is terminated by the NULL
   sentinel entry.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
		  "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
		   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
		  "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
		  "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
		  "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
			    "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
	     "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
				   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
				   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
		  "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
	      "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
	      "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
	       "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
	     "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
		"Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
		"Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9862
/* Entry mapping an architecture name (as accepted by -march= and .arch)
   to the feature set it enables.  */
struct aarch64_arch_option_table
{
  const char *name;		/* Architecture name to match.  */
  const aarch64_feature_set value;	/* Features enabled by it.  */
};
9868
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Matched in order; the .arch directive handler
   skips the leading "all" entry (see s_aarch64_arch).  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
9889
/* ISA extensions.  Each entry names an extension, the feature bits it
   controls, and the features it depends on (used to compute the
   transitive enable/disable closures below).  */
struct aarch64_option_cpu_value_table
{
  const char *name;		/* Extension name.  */
  const aarch64_feature_set value;	/* Feature bits controlled.  */
  const aarch64_feature_set require;	/* Feature dependencies.  */
};
9897
/* Extension table consumed by aarch64_parse_features and the closure
   helpers.  Matched in order; terminated by the NULL sentinel.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
   AARCH64_ARCH_NONE},
  {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
   AARCH64_ARCH_NONE},
  {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
   AARCH64_ARCH_NONE},
  {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
   AARCH64_ARCH_NONE},
  {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
   AARCH64_ARCH_NONE},
  {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
   AARCH64_ARCH_NONE},
  {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_FP
		    | AARCH64_FEATURE_F16, 0)},
  {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
   AARCH64_ARCH_NONE},
  {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16
		    | AARCH64_FEATURE_SIMD
		    | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
   AARCH64_ARCH_NONE},
  {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_F16
		    | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
   AARCH64_ARCH_NONE},
  {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
   AARCH64_ARCH_NONE},
  {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
   AARCH64_ARCH_NONE},
  {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
   AARCH64_ARCH_NONE},
  {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
   AARCH64_ARCH_NONE},
  {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
   AARCH64_ARCH_NONE},
  {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
   AARCH64_ARCH_NONE},
  {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
   AARCH64_ARCH_NONE},
  {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
   AARCH64_ARCH_NONE},
  {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
   AARCH64_ARCH_NONE},
  {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme", AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME
		    | AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SME
		    | AARCH64_FEATURE_SVE2
		    | AARCH64_FEATURE_BFLOAT16, 0)},
  {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
   AARCH64_ARCH_NONE},
  {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
   AARCH64_ARCH_NONE},
  {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
   AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64", AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
   AARCH64_ARCH_NONE},
  {"flagm", AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
   AARCH64_ARCH_NONE},
  {"pauth", AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
   AARCH64_ARCH_NONE},
  {"mops", AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
   AARCH64_ARCH_NONE},
  {"hbc", AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
   AARCH64_ARCH_NONE},
  {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
9999
/* Entry for an option that takes a textual argument, delegated to a
   sub-option parser (see aarch64_long_opts below).  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
10007
10008 /* Transitive closure of features depending on set. */
10009 static aarch64_feature_set
10010 aarch64_feature_disable_set (aarch64_feature_set set)
10011 {
10012 const struct aarch64_option_cpu_value_table *opt;
10013 aarch64_feature_set prev = 0;
10014
10015 while (prev != set) {
10016 prev = set;
10017 for (opt = aarch64_features; opt->name != NULL; opt++)
10018 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10019 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10020 }
10021 return set;
10022 }
10023
10024 /* Transitive closure of dependencies of set. */
10025 static aarch64_feature_set
10026 aarch64_feature_enable_set (aarch64_feature_set set)
10027 {
10028 const struct aarch64_option_cpu_value_table *opt;
10029 aarch64_feature_set prev = 0;
10030
10031 while (prev != set) {
10032 prev = set;
10033 for (opt = aarch64_features; opt->name != NULL; opt++)
10034 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10035 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10036 }
10037 return set;
10038 }
10039
10040 static int
10041 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10042 bool ext_only)
10043 {
10044 /* We insist on extensions being added before being removed. We achieve
10045 this by using the ADDING_VALUE variable to indicate whether we are
10046 adding an extension (1) or removing it (0) and only allowing it to
10047 change in the order -1 -> 1 -> 0. */
10048 int adding_value = -1;
10049 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10050
10051 /* Copy the feature set, so that we can modify it. */
10052 *ext_set = **opt_p;
10053 *opt_p = ext_set;
10054
10055 while (str != NULL && *str != 0)
10056 {
10057 const struct aarch64_option_cpu_value_table *opt;
10058 const char *ext = NULL;
10059 int optlen;
10060
10061 if (!ext_only)
10062 {
10063 if (*str != '+')
10064 {
10065 as_bad (_("invalid architectural extension"));
10066 return 0;
10067 }
10068
10069 ext = strchr (++str, '+');
10070 }
10071
10072 if (ext != NULL)
10073 optlen = ext - str;
10074 else
10075 optlen = strlen (str);
10076
10077 if (optlen >= 2 && startswith (str, "no"))
10078 {
10079 if (adding_value != 0)
10080 adding_value = 0;
10081 optlen -= 2;
10082 str += 2;
10083 }
10084 else if (optlen > 0)
10085 {
10086 if (adding_value == -1)
10087 adding_value = 1;
10088 else if (adding_value != 1)
10089 {
10090 as_bad (_("must specify extensions to add before specifying "
10091 "those to remove"));
10092 return false;
10093 }
10094 }
10095
10096 if (optlen == 0)
10097 {
10098 as_bad (_("missing architectural extension"));
10099 return 0;
10100 }
10101
10102 gas_assert (adding_value != -1);
10103
10104 for (opt = aarch64_features; opt->name != NULL; opt++)
10105 if (strncmp (opt->name, str, optlen) == 0)
10106 {
10107 aarch64_feature_set set;
10108
10109 /* Add or remove the extension. */
10110 if (adding_value)
10111 {
10112 set = aarch64_feature_enable_set (opt->value);
10113 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10114 }
10115 else
10116 {
10117 set = aarch64_feature_disable_set (opt->value);
10118 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10119 }
10120 break;
10121 }
10122
10123 if (opt->name == NULL)
10124 {
10125 as_bad (_("unknown architectural extension `%s'"), str);
10126 return 0;
10127 }
10128
10129 str = ext;
10130 };
10131
10132 return 1;
10133 }
10134
10135 static int
10136 aarch64_parse_cpu (const char *str)
10137 {
10138 const struct aarch64_cpu_option_table *opt;
10139 const char *ext = strchr (str, '+');
10140 size_t optlen;
10141
10142 if (ext != NULL)
10143 optlen = ext - str;
10144 else
10145 optlen = strlen (str);
10146
10147 if (optlen == 0)
10148 {
10149 as_bad (_("missing cpu name `%s'"), str);
10150 return 0;
10151 }
10152
10153 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10154 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10155 {
10156 mcpu_cpu_opt = &opt->value;
10157 if (ext != NULL)
10158 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10159
10160 return 1;
10161 }
10162
10163 as_bad (_("unknown cpu `%s'"), str);
10164 return 0;
10165 }
10166
10167 static int
10168 aarch64_parse_arch (const char *str)
10169 {
10170 const struct aarch64_arch_option_table *opt;
10171 const char *ext = strchr (str, '+');
10172 size_t optlen;
10173
10174 if (ext != NULL)
10175 optlen = ext - str;
10176 else
10177 optlen = strlen (str);
10178
10179 if (optlen == 0)
10180 {
10181 as_bad (_("missing architecture name `%s'"), str);
10182 return 0;
10183 }
10184
10185 for (opt = aarch64_archs; opt->name != NULL; opt++)
10186 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10187 {
10188 march_cpu_opt = &opt->value;
10189 if (ext != NULL)
10190 return aarch64_parse_features (ext, &march_cpu_opt, false);
10191
10192 return 1;
10193 }
10194
10195 as_bad (_("unknown architecture `%s'\n"), str);
10196 return 0;
10197 }
10198
/* ABIs.  Entry mapping an ABI name (argument of -mabi=) to the
   corresponding aarch64_abi_type value.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name to match.  */
  enum aarch64_abi_type value;	/* ABI selected by it.  */
};
10205
/* Recognized -mabi= arguments.  Unlike the other tables this one has
   no sentinel; it is walked with ARRAY_SIZE.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
10210
10211 static int
10212 aarch64_parse_abi (const char *str)
10213 {
10214 unsigned int i;
10215
10216 if (str[0] == '\0')
10217 {
10218 as_bad (_("missing abi name `%s'"), str);
10219 return 0;
10220 }
10221
10222 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10223 if (strcmp (str, aarch64_abis[i].name) == 0)
10224 {
10225 aarch64_abi = aarch64_abis[i].value;
10226 return 1;
10227 }
10228
10229 as_bad (_("unknown abi `%s'\n"), str);
10230 return 0;
10231 }
10232
/* Options taking an argument, dispatched by prefix match in
   md_parse_option to the given sub-option parser.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10244
/* gas hook: handle one command-line option.  C is the option character
   (or the value registered in md_longopts) and ARG its argument, or
   NULL.  Return 1 if the option was consumed, 0 if unrecognized.  */

int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Try the simple flag options first.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the options taking an argument.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10311
/* gas hook: print the AArch64-specific option summary for --help.
   Walks the short- and long-option tables, then the -EB/-EL options
   when they are compiled in.  */

void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
 -EB assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
 -EL assemble code for a little-endian cpu\n"));
#endif
}
10338
10339 /* Parse a .cpu directive. */
10340
10341 static void
10342 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10343 {
10344 const struct aarch64_cpu_option_table *opt;
10345 char saved_char;
10346 char *name;
10347 char *ext;
10348 size_t optlen;
10349
10350 name = input_line_pointer;
10351 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10352 input_line_pointer++;
10353 saved_char = *input_line_pointer;
10354 *input_line_pointer = 0;
10355
10356 ext = strchr (name, '+');
10357
10358 if (ext != NULL)
10359 optlen = ext - name;
10360 else
10361 optlen = strlen (name);
10362
10363 /* Skip the first "all" entry. */
10364 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10365 if (strlen (opt->name) == optlen
10366 && strncmp (name, opt->name, optlen) == 0)
10367 {
10368 mcpu_cpu_opt = &opt->value;
10369 if (ext != NULL)
10370 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10371 return;
10372
10373 cpu_variant = *mcpu_cpu_opt;
10374
10375 *input_line_pointer = saved_char;
10376 demand_empty_rest_of_line ();
10377 return;
10378 }
10379 as_bad (_("unknown cpu `%s'"), name);
10380 *input_line_pointer = saved_char;
10381 ignore_rest_of_line ();
10382 }
10383
10384
10385 /* Parse a .arch directive. */
10386
10387 static void
10388 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10389 {
10390 const struct aarch64_arch_option_table *opt;
10391 char saved_char;
10392 char *name;
10393 char *ext;
10394 size_t optlen;
10395
10396 name = input_line_pointer;
10397 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10398 input_line_pointer++;
10399 saved_char = *input_line_pointer;
10400 *input_line_pointer = 0;
10401
10402 ext = strchr (name, '+');
10403
10404 if (ext != NULL)
10405 optlen = ext - name;
10406 else
10407 optlen = strlen (name);
10408
10409 /* Skip the first "all" entry. */
10410 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10411 if (strlen (opt->name) == optlen
10412 && strncmp (name, opt->name, optlen) == 0)
10413 {
10414 mcpu_cpu_opt = &opt->value;
10415 if (ext != NULL)
10416 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10417 return;
10418
10419 cpu_variant = *mcpu_cpu_opt;
10420
10421 *input_line_pointer = saved_char;
10422 demand_empty_rest_of_line ();
10423 return;
10424 }
10425
10426 as_bad (_("unknown architecture `%s'\n"), name);
10427 *input_line_pointer = saved_char;
10428 ignore_rest_of_line ();
10429 }
10430
10431 /* Parse a .arch_extension directive. */
10432
10433 static void
10434 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10435 {
10436 char saved_char;
10437 char *ext = input_line_pointer;;
10438
10439 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10440 input_line_pointer++;
10441 saved_char = *input_line_pointer;
10442 *input_line_pointer = 0;
10443
10444 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10445 return;
10446
10447 cpu_variant = *mcpu_cpu_opt;
10448
10449 *input_line_pointer = saved_char;
10450 demand_empty_rest_of_line ();
10451 }
10452
/* Copy symbol information.  Propagates the AArch64-specific symbol
   flags from SRC to DEST.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10460
10461 #ifdef OBJ_ELF
10462 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10463 This is needed so AArch64 specific st_other values can be independently
10464 specified for an IFUNC resolver (that is called by the dynamic linker)
10465 and the symbol it resolves (aliased to the resolver). In particular,
10466 if a function symbol has special st_other value set via directives,
10467 then attaching an IFUNC resolver to that symbol should not override
10468 the st_other setting. Requiring the directive on the IFUNC resolver
10469 symbol would be unexpected and problematic in C code, where the two
10470 symbols appear as two independent function declarations. */
10471
10472 void
10473 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10474 {
10475 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10476 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10477 /* If size is unset, copy size from src. Because we don't track whether
10478 .size has been used, we can't differentiate .size dest, 0 from the case
10479 where dest's size is unset. */
10480 if (!destelf->size && S_GET_SIZE (dest) == 0)
10481 {
10482 if (srcelf->size)
10483 {
10484 destelf->size = XNEW (expressionS);
10485 *destelf->size = *srcelf->size;
10486 }
10487 S_SET_SIZE (dest, S_GET_SIZE (src));
10488 }
10489 }
10490 #endif