aarch64: Reuse parse_typed_reg for ZA tiles
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 /* Types of processor to assemble for. */
42 #ifndef CPU_DEFAULT
43 #define CPU_DEFAULT AARCH64_ARCH_V8
44 #endif
45
46 #define streq(a, b) (strcmp (a, b) == 0)
47
48 #define END_OF_INSN '\0'
49
50 static aarch64_feature_set cpu_variant;
51
52 /* Variables that we set while parsing command-line options. Once all
53 options have been read we re-process these values to set the real
54 assembly flags. */
55 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
56 static const aarch64_feature_set *march_cpu_opt = NULL;
57
58 /* Constants for known architecture features. */
59 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
60
61 /* Currently active instruction sequence. */
62 static aarch64_instr_sequence *insn_sequence = NULL;
63
64 #ifdef OBJ_ELF
65 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
66 static symbolS *GOT_symbol;
67 #endif
68
69 /* Which ABI to use. */
70 enum aarch64_abi_type
71 {
72 AARCH64_ABI_NONE = 0,
73 AARCH64_ABI_LP64 = 1,
74 AARCH64_ABI_ILP32 = 2,
75 AARCH64_ABI_LLP64 = 3
76 };
77
78 unsigned int aarch64_sframe_cfa_sp_reg;
79 /* The other CFA base register for SFrame stack trace info. */
80 unsigned int aarch64_sframe_cfa_fp_reg;
81 unsigned int aarch64_sframe_cfa_ra_reg;
82
83 #ifndef DEFAULT_ARCH
84 #define DEFAULT_ARCH "aarch64"
85 #endif
86
87 #ifdef OBJ_ELF
88 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
89 static const char *default_arch = DEFAULT_ARCH;
90 #endif
91
92 /* AArch64 ABI for the output file. */
93 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
94
95 /* When non-zero, program to a 32-bit model, in which the C data types
96 int, long and all pointer types are 32-bit objects (ILP32); or to a
97 64-bit model, in which the C int type is 32-bits but the C long type
98 and all pointer types are 64-bit objects (LP64). */
99 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
100
101 /* When non zero, C types int and long are 32 bit,
102 pointers, however are 64 bit */
103 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
104
/* Element types that can follow a register name, e.g. the "s" in
   "v0.4s", plus the SVE "/z" and "/m" predication markers.  */
enum vector_el_type
{
  NT_invtype = -1,	/* No or invalid element type.  */
  NT_b,			/* 8-bit element.  */
  NT_h,			/* 16-bit element.  */
  NT_s,			/* 32-bit element.  */
  NT_d,			/* 64-bit element.  */
  NT_q,			/* 128-bit element.  */
  NT_zero,		/* SVE "/z" zeroing predication.  */
  NT_merge		/* SVE "/m" merging predication.  */
};
116
/* SME horizontal or vertical slice indicator, encoded in "V".
   Values:
     0 - Horizontal
     1 - Vertical.  */
enum sme_hv_slice
{
  HV_horizontal = 0,
  HV_vertical = 1
};
127
/* Bits for DEFINED field in vector_type_el.  */
#define NTA_HASTYPE 1		/* An element type was parsed.  */
#define NTA_HASINDEX 2		/* An element index was parsed/required.  */
#define NTA_HASVARWIDTH 4	/* Width is variable (no explicit count).  */

/* Parsed shape information for a typed register: the element type and
   arrangement width, and the element index when one was given.  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type (NT_*).  */
  unsigned char defined;	/* Mask of the NTA_* bits that are valid.  */
  unsigned width;		/* Element count, e.g. 4 for ".4s"; 0 when
				   no explicit count was parsed.  */
  int64_t index;		/* Element index; valid when NTA_HASINDEX.  */
};
140
#define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001

/* Relocation/fixup information for the instruction being assembled.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression to be resolved.  */
  int pc_rel;				/* Nonzero for PC-relative fixups.  */
  enum aarch64_opnd opnd;		/* Operand the reloc applies to.  */
  uint32_t flags;			/* FIXUP_F_* flags.  */
  /* NOTE(review): presumably set when libopcodes must be consulted at
     fixup time — confirm at the use sites, which are outside this chunk.  */
  unsigned need_libopcodes_p : 1;
};
152
/* All state for the instruction currently being assembled.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  aarch64_operand_error parsing_error;
  /* The condition that appears in the assembly line; COND_ALWAYS when no
     conditional field is present.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};
166
167 typedef struct aarch64_instruction aarch64_instruction;
168
169 static aarch64_instruction inst;
170
171 static bool parse_operands (char *, const aarch64_opcode *);
172 static bool programmer_friendly_fixup (aarch64_instruction *);
173
174 /* Diagnostics inline function utilities.
175
176 These are lightweight utilities which should only be called by parse_operands
177 and other parsers. GAS processes each assembly line by parsing it against
178 instruction template(s), in the case of multiple templates (for the same
179 mnemonic name), those templates are tried one by one until one succeeds or
180 all fail. An assembly line may fail a few templates before being
181 successfully parsed; an error saved here in most cases is not a user error
182 but an error indicating the current template is not the right template.
183 Therefore it is very important that errors can be saved at a low cost during
184 the parsing; we don't want to slow down the whole parsing by recording
185 non-user errors in detail.
186
187 Remember that the objective is to help GAS pick up the most appropriate
188 error message in the case of multiple templates, e.g. FMOV which has 8
189 templates. */
190
191 static inline void
192 clear_error (void)
193 {
194 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
195 inst.parsing_error.kind = AARCH64_OPDE_NIL;
196 }
197
198 static inline bool
199 error_p (void)
200 {
201 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
202 }
203
204 static inline void
205 set_error (enum aarch64_operand_error_kind kind, const char *error)
206 {
207 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
208 inst.parsing_error.index = -1;
209 inst.parsing_error.kind = kind;
210 inst.parsing_error.error = error;
211 }
212
213 static inline void
214 set_recoverable_error (const char *error)
215 {
216 set_error (AARCH64_OPDE_RECOVERABLE, error);
217 }
218
219 /* Use the DESC field of the corresponding aarch64_operand entry to compose
220 the error message. */
221 static inline void
222 set_default_error (void)
223 {
224 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
225 }
226
227 static inline void
228 set_syntax_error (const char *error)
229 {
230 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
231 }
232
233 static inline void
234 set_first_syntax_error (const char *error)
235 {
236 if (! error_p ())
237 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
238 }
239
240 static inline void
241 set_fatal_syntax_error (const char *error)
242 {
243 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
244 }
245 \f
246 /* Return value for certain parsers when the parsing fails; those parsers
247 return the information of the parsed result, e.g. register number, on
248 success. */
249 #define PARSE_FAIL -1
250
251 /* This is an invalid condition code that means no conditional field is
252 present. */
253 #define COND_ALWAYS 0x10
254
255 typedef struct
256 {
257 const char *template;
258 uint32_t value;
259 } asm_nzcv;
260
261 struct reloc_entry
262 {
263 char *name;
264 bfd_reloc_code_real_type reloc;
265 };
266
267 /* Macros to define the register types and masks for the purpose
268 of parsing. */
269
270 #undef AARCH64_REG_TYPES
271 #define AARCH64_REG_TYPES \
272 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
273 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
274 BASIC_REG_TYPE(SP_32) /* wsp */ \
275 BASIC_REG_TYPE(SP_64) /* sp */ \
276 BASIC_REG_TYPE(Z_32) /* wzr */ \
277 BASIC_REG_TYPE(Z_64) /* xzr */ \
278 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
279 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
280 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
281 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
282 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
283 BASIC_REG_TYPE(VN) /* v[0-31] */ \
284 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
285 BASIC_REG_TYPE(PN) /* p[0-15] */ \
286 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
287 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
288 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
289 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
290 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
291 /* Typecheck: same, plus SVE registers. */ \
292 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
293 | REG_TYPE(ZN)) \
294 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
295 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
296 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
297 /* Typecheck: same, plus SVE registers. */ \
298 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
299 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
300 | REG_TYPE(ZN)) \
301 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
302 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
303 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
304 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
305 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
306 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
307 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
308 /* Typecheck: any [BHSDQ]P FP. */ \
309 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
310 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
311 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
312 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
314 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
315 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
316 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
317 be used for SVE instructions, since Zn and Pn are valid symbols \
318 in other contexts. */ \
319 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
320 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
321 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
322 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
323 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
324 | REG_TYPE(ZN) | REG_TYPE(PN)) \
325 /* Any integer register; used for error messages only. */ \
326 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
327 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
328 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
329 /* A horizontal or vertical slice of a ZA tile. */ \
330 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
331 /* Pseudo type to mark the end of the enumerator sequence. */ \
332 BASIC_REG_TYPE(MAX)
333
334 #undef BASIC_REG_TYPE
335 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
336 #undef MULTI_REG_TYPE
337 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
338
339 /* Register type enumerators. */
340 typedef enum aarch64_reg_type_
341 {
342 /* A list of REG_TYPE_*. */
343 AARCH64_REG_TYPES
344 } aarch64_reg_type;
345
346 #undef BASIC_REG_TYPE
347 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
348 #undef REG_TYPE
349 #define REG_TYPE(T) (1 << REG_TYPE_##T)
350 #undef MULTI_REG_TYPE
351 #define MULTI_REG_TYPE(T,V) V,
352
353 /* Structure for a hash table entry for a register. */
354 typedef struct
355 {
356 const char *name;
357 unsigned char number;
358 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
359 unsigned char builtin;
360 } reg_entry;
361
362 /* Values indexed by aarch64_reg_type to assist the type checking. */
363 static const unsigned reg_type_masks[] =
364 {
365 AARCH64_REG_TYPES
366 };
367
368 #undef BASIC_REG_TYPE
369 #undef REG_TYPE
370 #undef MULTI_REG_TYPE
371 #undef AARCH64_REG_TYPES
372
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be synchronized with the aarch64_reg_type definitions
   above.  */
static const char *
get_reg_expected_msg (aarch64_reg_type reg_type)
{
  const char *msg;

  switch (reg_type)
    {
    case REG_TYPE_R_32:
      msg = N_("integer 32-bit register expected");
      break;
    case REG_TYPE_R_64:
      msg = N_("integer 64-bit register expected");
      break;
    case REG_TYPE_R_N:
      msg = N_("integer register expected");
      break;
    case REG_TYPE_R64_SP:
      msg = N_("64-bit integer or SP register expected");
      break;
    case REG_TYPE_SVE_BASE:
      msg = N_("base register expected");
      break;
    case REG_TYPE_R_Z:
      msg = N_("integer or zero register expected");
      break;
    case REG_TYPE_SVE_OFFSET:
      msg = N_("offset register expected");
      break;
    case REG_TYPE_R_SP:
      msg = N_("integer or SP register expected");
      break;
    case REG_TYPE_R_Z_SP:
      msg = N_("integer, zero or SP register expected");
      break;
    case REG_TYPE_FP_B:
      msg = N_("8-bit SIMD scalar register expected");
      break;
    case REG_TYPE_FP_H:
      msg = N_("16-bit SIMD scalar or floating-point half precision "
	       "register expected");
      break;
    case REG_TYPE_FP_S:
      msg = N_("32-bit SIMD scalar or floating-point single precision "
	       "register expected");
      break;
    case REG_TYPE_FP_D:
      msg = N_("64-bit SIMD scalar or floating-point double precision "
	       "register expected");
      break;
    case REG_TYPE_FP_Q:
      msg = N_("128-bit SIMD scalar or floating-point quad precision "
	       "register expected");
      break;
    case REG_TYPE_R_Z_BHSDQ_V:
    case REG_TYPE_R_Z_SP_BHSDQ_VZP:
      msg = N_("register expected");
      break;
    case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
      msg = N_("SIMD scalar or floating-point register expected");
      break;
    case REG_TYPE_VN:		/* any V reg  */
      msg = N_("vector register expected");
      break;
    case REG_TYPE_ZN:
      msg = N_("SVE vector register expected");
      break;
    case REG_TYPE_PN:
      msg = N_("SVE predicate register expected");
      break;
    default:
      /* Remaining (mostly MULTI_REG_TYPE) values have no dedicated
	 message; asking for one is a programming error.  */
      as_fatal (_("invalid register type %d"), reg_type);
    }
  return msg;
}
450
451 /* Some well known registers that we refer to directly elsewhere. */
452 #define REG_SP 31
453 #define REG_ZR 31
454
455 /* Instructions take 4 bytes in the object file. */
456 #define INSN_SIZE 4
457
458 static htab_t aarch64_ops_hsh;
459 static htab_t aarch64_cond_hsh;
460 static htab_t aarch64_shift_hsh;
461 static htab_t aarch64_sys_regs_hsh;
462 static htab_t aarch64_pstatefield_hsh;
463 static htab_t aarch64_sys_regs_ic_hsh;
464 static htab_t aarch64_sys_regs_dc_hsh;
465 static htab_t aarch64_sys_regs_at_hsh;
466 static htab_t aarch64_sys_regs_tlbi_hsh;
467 static htab_t aarch64_sys_regs_sr_hsh;
468 static htab_t aarch64_reg_hsh;
469 static htab_t aarch64_barrier_opt_hsh;
470 static htab_t aarch64_nzcv_hsh;
471 static htab_t aarch64_pldop_hsh;
472 static htab_t aarch64_hint_opt_hsh;
473
474 /* Stuff needed to resolve the label ambiguity
475 As:
476 ...
477 label: <insn>
478 may differ from:
479 ...
480 label:
481 <insn> */
482
483 static symbolS *last_label_seen;
484
485 /* Literal pool structure. Held on a per-section
486 and per-sub-section basis. */
487
488 #define MAX_LITERAL_POOL_SIZE 1024
489 typedef struct literal_expression
490 {
491 expressionS exp;
492 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
493 LITTLENUM_TYPE * bignum;
494 } literal_expression;
495
496 typedef struct literal_pool
497 {
498 literal_expression literals[MAX_LITERAL_POOL_SIZE];
499 unsigned int next_free_entry;
500 unsigned int id;
501 symbolS *symbol;
502 segT section;
503 subsegT sub_section;
504 int size;
505 struct literal_pool *next;
506 } literal_pool;
507
508 /* Pointer to a linked list of literal pools. */
509 static literal_pool *list_of_pools = NULL;
510 \f
511 /* Pure syntax. */
512
513 /* This array holds the chars that always start a comment. If the
514 pre-processor is disabled, these aren't very useful. */
515 const char comment_chars[] = "";
516
517 /* This array holds the chars that only start a comment at the beginning of
518 a line. If the line seems to have the form '# 123 filename'
519 .line and .file directives will appear in the pre-processed output. */
520 /* Note that input_file.c hand checks for '#' at the beginning of the
521 first line of the input file. This is because the compiler outputs
522 #NO_APP at the beginning of its output. */
523 /* Also note that comments like this one will always work. */
524 const char line_comment_chars[] = "#";
525
526 const char line_separator_chars[] = ";";
527
528 /* Chars that can be used to separate mant
529 from exp in floating point numbers. */
530 const char EXP_CHARS[] = "eE";
531
532 /* Chars that mean this number is a floating point constant. */
533 /* As in 0f12.456 */
534 /* or 0d1.2345e12 */
535
536 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
537
538 /* Prefix character that indicates the start of an immediate value. */
539 #define is_immediate_prefix(C) ((C) == '#')
540
541 /* Separator character handling. */
542
543 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
544
/* If *STR points at character C, consume it and return TRUE;
   otherwise leave *STR unchanged and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
556
557 #define skip_past_comma(str) skip_past_char (str, ',')
558
559 /* Arithmetic expressions (possibly involving symbols). */
560
561 static bool in_aarch64_get_expression = false;
562
563 /* Third argument to aarch64_get_expression. */
564 #define GE_NO_PREFIX false
565 #define GE_OPT_PREFIX true
566
567 /* Fourth argument to aarch64_get_expression. */
568 #define ALLOW_ABSENT false
569 #define REJECT_ABSENT true
570
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */

static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  /* Remember the '#' so a bad expression after it is reported
	     as fatal rather than merely "first" (see below).  */
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* Point the generic expression parser at *STR.  While
     in_aarch64_get_expression is set, md_operand flags bad
     expressions by setting X_op to O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  /* Success: advance *STR past the expression and restore the
     caller's input_line_pointer.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
637
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Delegate to the generic IEEE encoder, honouring the target's
     endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
648
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * exp)
{
  /* Only mark the expression illegal when the parse was initiated by
     aarch64_get_expression, which checks for O_illegal afterwards.  */
  if (in_aarch64_get_expression)
    exp->X_op = O_illegal;
}
657
658 /* Immediate values. */
659
660 /* Errors may be set multiple times during parsing or bit encoding
661 (particularly in the Neon bits), but usually the earliest error which is set
662 will be the most meaningful. Avoid overwriting it with later (cascading)
663 errors by calling this function. */
664
/* Record ERROR as a syntax error unless an earlier (and usually more
   meaningful) error has already been recorded.  */
static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
671
672 /* Similar to first_error, but this function accepts formatted error
673 message. */
674 static void
675 first_error_fmt (const char *format, ...)
676 {
677 va_list args;
678 enum
679 { size = 100 };
680 /* N.B. this single buffer will not cause error messages for different
681 instructions to pollute each other; this is because at the end of
682 processing of each assembly line, error message if any will be
683 collected by as_bad. */
684 static char buffer[size];
685
686 if (! error_p ())
687 {
688 int ret ATTRIBUTE_UNUSED;
689 va_start (args, format);
690 ret = vsnprintf (buffer, size, format, args);
691 know (ret <= size - 1 && ret >= 0);
692 va_end (args);
693 set_syntax_error (buffer);
694 }
695 }
696
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL and
   records an error if the conversion is not possible.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type (NT_b..NT_q).  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Smallest full-vector qualifier for each element type; the final
     qualifier is computed as an offset from this base (see below).  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predication suffixes map directly to qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  Relies on S_B..S_Q being consecutive
	 and in the same order as NT_b..NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  Only 4-, 8- and 16-byte total sizes exist.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
771
772 /* Register parsing. */
773
774 /* Generic register parser which is called by other specialized
775 register parsers.
776 CCP points to what should be the beginning of a register name.
777 If it is indeed a valid register name, advance CCP over it and
778 return the reg_entry structure; otherwise return NULL.
779 It does not issue diagnostics. */
780
781 static reg_entry *
782 parse_reg (char **ccp)
783 {
784 char *start = *ccp;
785 char *p;
786 reg_entry *reg;
787
788 #ifdef REGISTER_PREFIX
789 if (*start != REGISTER_PREFIX)
790 return NULL;
791 start++;
792 #endif
793
794 p = start;
795 if (!ISALPHA (*p) || !is_name_beginner (*p))
796 return NULL;
797
798 do
799 p++;
800 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
801
802 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
803
804 if (!reg)
805 return NULL;
806
807 *ccp = p;
808 return reg;
809 }
810
811 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
812 return FALSE. */
813 static bool
814 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
815 {
816 return (reg_type_masks[type] & (1 << reg->type)) != 0;
817 }
818
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit forms: Wn, WSP and WZR.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit forms: Xn, SP and XZR.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* SVE Zn registers are only accepted when REG_TYPE allows them,
	 and must be followed by a ".s" or ".d" element size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
876
877 /* Try to parse a base or offset register. Return the register entry
878 on success, setting *QUALIFIER to the register qualifier. Return null
879 otherwise.
880
881 Note that this function does not issue any diagnostics. */
882
883 static const reg_entry *
884 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
885 {
886 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
887 }
888
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* Only Advanced SIMD (Vn) registers take an explicit element count;
     other register kinds use a variable width, recorded as 0.  */
  if (reg_type != REG_TYPE_VN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are rejected for Vn except as the "1q"
	 arrangement.  */
      if (reg_type != REG_TYPE_VN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* An explicit width must give a 64- or 128-bit total vector size,
     with 2h and 4b additionally allowed as special cases.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
974
975 /* *STR contains an SVE zero/merge predication suffix. Parse it into
976 *PARSED_TYPE and point *STR at the end of the suffix. */
977
978 static bool
979 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
980 {
981 char *ptr = *str;
982
983 /* Skip '/'. */
984 gas_assert (*ptr == '/');
985 ptr++;
986 switch (TOLOWER (*ptr))
987 {
988 case 'z':
989 parsed_type->type = NT_zero;
990 break;
991 case 'm':
992 parsed_type->type = NT_merge;
993 break;
994 default:
995 if (*ptr != '\0' && *ptr != ',')
996 first_error_fmt (_("unexpected character `%c' in predication type"),
997 *ptr);
998 else
999 first_error (_("missing predication type"));
1000 return false;
1001 }
1002 parsed_type->width = 0;
1003 *str = ptr + 1;
1004 return true;
1005 }
1006
1007 /* Return true if CH is a valid suffix character for registers of
1008 type TYPE. */
1009
1010 static bool
1011 aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
1012 {
1013 switch (type)
1014 {
1015 case REG_TYPE_VN:
1016 case REG_TYPE_ZN:
1017 case REG_TYPE_ZAT:
1018 case REG_TYPE_ZATH:
1019 case REG_TYPE_ZATV:
1020 return ch == '.';
1021
1022 case REG_TYPE_PN:
1023 return ch == '.' || ch == '/';
1024
1025 default:
1026 return false;
1027 }
1028 }
1029
/* Parse a register of the type TYPE.

   Return null if the string pointed to by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register, and optionally return the register
   shape and element index information in *TYPEINFO.

   FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.

   FLAGS includes PTR_FULL_REG if the function should ignore any potential
   register index.  */

#define PTR_IN_REGLIST (1U << 0)
#define PTR_FULL_REG (1U << 1)

static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start from "no shape information" defaults.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return NULL;
    }
  /* From here on use the register's own, more specific, type.  */
  type = reg->type;

  if (aarch64_valid_suffix_char_p (reg->type, *str))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	}
      else
	{
	  /* '/' suffix: SVE zero/merge predication.  */
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type != REG_TYPE_VN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);

      /* Only literal constant indices are accepted.  */
      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return NULL;
	}

      if (! skip_past_char (&str, ']'))
	return NULL;

      atype.index = exp.X_add_number;
    }
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  *ccp = str;

  return reg;
}
1165
1166 /* Parse register.
1167
1168 Return the register on success; return null otherwise.
1169
1170 If this is a NEON vector register with additional type information, fill
1171 in the struct pointed to by VECTYPE (if non-NULL).
1172
1173 This parser does not handle register lists. */
1174
1175 static const reg_entry *
1176 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1177 struct vector_type_el *vectype)
1178 {
1179 return parse_typed_reg (ccp, type, vectype, 0);
1180 }
1181
1182 static inline bool
1183 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1184 {
1185 return
1186 e1.type == e2.type
1187 && e1.defined == e2.defined
1188 && e1.width == e2.width && e1.index == e2.index;
1189 }
1190
1191 /* This function parses a list of vector registers of type TYPE.
1192 On success, it returns the parsed register list information in the
1193 following encoded format:
1194
1195 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1196 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1197
1198 The information of the register shape and/or index is returned in
1199 *VECTYPE.
1200
1201 It returns PARSE_FAIL if the register list is invalid.
1202
1203 The list contains one to four registers.
1204 Each register can be one of:
1205 <Vt>.<T>[<index>]
1206 <Vt>.<T>
1207 All <T> should be identical.
1208 All <index> should be identical.
1209 There are restrictions on <Vt> numbers which are checked later
1210 (by reg_list_valid_p). */
1211
static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;		/* Current regno and start of a Vm-Vn range.  */
  int in_range;			/* Nonzero while processing the "-Vn" part.  */
  int ret_val;			/* Encoded result (see function comment).  */
  int i;
  bool error = false;
  bool expect_index = false;	/* Set when any element carries an index.  */

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++; /* skip over '-' */
	  val_range = val;
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      PTR_IN_REGLIST);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      /* An index on any element means the whole list is indexed; the
	 index itself is parsed after the closing '}'.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* Second half of a range: it must not go backwards.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* All elements must share the first element's type/index
	     qualifier.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      if (! error)
	/* Fold each register number into the result, 5 bits per
	   register, above the 2-bit count field.  */
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* Parse the shared "[<index>]" that must follow the list when any
     element was written with an index.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low two bits hold num_of_reg - 1; register numbers sit in the 5-bit
     fields above, as documented in the function comment.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1352
1353 /* Directives: register aliases. */
1354
1355 static reg_entry *
1356 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1357 {
1358 reg_entry *new;
1359 const char *name;
1360
1361 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1362 {
1363 if (new->builtin)
1364 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1365 str);
1366
1367 /* Only warn about a redefinition if it's not defined as the
1368 same register. */
1369 else if (new->number != number || new->type != type)
1370 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1371
1372 return NULL;
1373 }
1374
1375 name = xstrdup (str);
1376 new = XNEW (reg_entry);
1377
1378 new->name = name;
1379 new->number = number;
1380 new->type = type;
1381 new->builtin = false;
1382
1383 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1384
1385 return new;
1386 }
1387
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Still TRUE: this was a .req, just a bad one.  */
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the case variant if it actually differs from the
	 name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1467
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reaching this handler means ".req" started a statement, which is
     always a syntax error.  */
  as_bad (_("invalid syntax for .req directive"));
}
1475
1476 /* The .unreq directive deletes an alias which was previously defined
1477 by .req. For example:
1478
1479 my_alias .req r11
1480 .unreq my_alias */
1481
1482 static void
1483 s_unreq (int a ATTRIBUTE_UNUSED)
1484 {
1485 char *name;
1486 char saved_char;
1487
1488 name = input_line_pointer;
1489 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1490 saved_char = *input_line_pointer;
1491 *input_line_pointer = 0;
1492
1493 if (!*name)
1494 as_bad (_("invalid syntax for .unreq directive"));
1495 else
1496 {
1497 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1498
1499 if (!reg)
1500 as_bad (_("unknown register alias '%s'"), name);
1501 else if (reg->builtin)
1502 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1503 name);
1504 else
1505 {
1506 char *p;
1507 char *nbuf;
1508
1509 str_hash_delete (aarch64_reg_hsh, name);
1510 free ((char *) reg->name);
1511 free (reg);
1512
1513 /* Also locate the all upper case and all lower case versions.
1514 Do not complain if we cannot find one or the other as it
1515 was probably deleted above. */
1516
1517 nbuf = strdup (name);
1518 for (p = nbuf; *p; p++)
1519 *p = TOUPPER (*p);
1520 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1521 if (reg)
1522 {
1523 str_hash_delete (aarch64_reg_hsh, nbuf);
1524 free ((char *) reg->name);
1525 free (reg);
1526 }
1527
1528 for (p = nbuf; *p; p++)
1529 *p = TOLOWER (*p);
1530 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1531 if (reg)
1532 {
1533 str_hash_delete (aarch64_reg_hsh, nbuf);
1534 free ((char *) reg->name);
1535 free (reg);
1536 }
1537
1538 free (nbuf);
1539 }
1540 }
1541
1542 *input_line_pointer = saved_char;
1543 demand_empty_rest_of_line ();
1544 }
1545
1546 /* Directives: Instruction set selection. */
1547
1548 #if defined OBJ_ELF || defined OBJ_COFF
1549 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1550 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1551 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1552 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1553
/* Create a new mapping symbol for the transition to STATE, placed at
   offset VALUE within FRAG.  "$d" marks data, "$x" marks A64 code.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag must be emitted in address order;
	 a duplicate at the same address supersedes the old one.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1609
1610 /* We must sometimes convert a region marked as code to data during
1611 code alignment, if an odd number of bytes have to be padded. The
1612 code mapping symbol is pushed to an aligned address. */
1613
1614 static void
1615 insert_data_mapping_symbol (enum mstate state,
1616 valueT value, fragS * frag, offsetT bytes)
1617 {
1618 /* If there was already a mapping symbol, remove it. */
1619 if (frag->tc_frag_data.last_map != NULL
1620 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1621 frag->fr_address + value)
1622 {
1623 symbolS *symp = frag->tc_frag_data.last_map;
1624
1625 if (value == 0)
1626 {
1627 know (frag->tc_frag_data.first_map == symp);
1628 frag->tc_frag_data.first_map = NULL;
1629 }
1630 frag->tc_frag_data.last_map = NULL;
1631 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1632 }
1633
1634 make_mapping_symbol (MAP_DATA, value, frag);
1635 make_mapping_symbol (state, value + bytes, frag);
1636 }
1637
static void mapping_state_2 (enum mstate state, int max_chars);

/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* TRANSITION tests for a specific old-state/new-state pair.  */
#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Emit the mapping symbol for STATE itself.  */
  mapping_state_2 (state, 0);
}
1681
1682 /* Same as mapping_state, but MAX_CHARS bytes have already been
1683 allocated. Put the mapping symbol that far back. */
1684
1685 static void
1686 mapping_state_2 (enum mstate state, int max_chars)
1687 {
1688 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1689
1690 if (!SEG_NORMAL (now_seg))
1691 return;
1692
1693 if (mapstate == state)
1694 /* The mapping symbol has already been emitted.
1695 There is nothing else to do. */
1696 return;
1697
1698 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1699 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1700 }
1701 #else
1702 #define mapping_state(x) /* nothing */
1703 #define mapping_state_2(x, y) /* nothing */
1704 #endif
1705
1706 /* Directives: sectioning and alignment. */
1707
/* Handler for the .bss directive: switch to the BSS section and mark
   the transition as data.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1717
/* Handler for the .even directive: align the current position to a
   2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1729
1730 /* Directives: Literal pools. */
1731
1732 static literal_pool *
1733 find_literal_pool (int size)
1734 {
1735 literal_pool *pool;
1736
1737 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1738 {
1739 if (pool->section == now_seg
1740 && pool->sub_section == now_subseg && pool->size == size)
1741 break;
1742 }
1743
1744 return pool;
1745 }
1746
/* Return the literal pool of SIZE-byte entries for the current
   subsection, creating it (and its label symbol) if necessary.  */

static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      /* NOTE(review): XNEW wraps xmalloc, which aborts on allocation
	 failure, so the NULL check below looks unreachable -- confirm
	 before relying on a NULL return from this function.  */
      pool = XNEW (literal_pool);
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections. */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    &zero_address_frag, 0);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}
1791
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   On return, *EXP is rewritten to refer to the pool entry (an O_symbol
   expression based on the pool's label plus the entry's offset).
   Return TRUE on success, otherwise return FALSE.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match on symbol(s) and addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Redirect *EXP at the pool entry: pool label + byte offset.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1851
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.
   Fill in SYMBOLP (an already-created symbol) with NAME, SEGMENT,
   value VALU and owning FRAG, then link it into the symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its storage.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1902
1903
/* Handler for the .ltorg/.pool directives: dump every non-empty
   literal pool (one per supported entry size) at the current
   position, then mark the pools as empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Entry sizes are 4, 8 and 16 bytes (align = 2..4).  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* Pool contents are data, not instructions.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* Give the pool's fake label its final name and location.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1962
1963 #if defined(OBJ_ELF) || defined(OBJ_COFF)
1964 /* Forward declarations for functions below, in the MD interface
1965 section. */
1966 static struct reloc_table_entry * find_reloc_table_entry (char **);
1967
1968 /* Directives: Data. */
1969 /* N.B. the support for relocation suffix in this directive needs to be
1970 implemented properly. */
1971
/* Handler for the .word/.long/.xword/.dword directives; NBYTES is the
   size of each emitted element.  Relocation suffixes (":op:") are
   recognised but not yet implemented here (see the N.B. above).  */

static void
s_aarch64_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* The emitted values are data, not instructions.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:op:" relocation prefix.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2023 #endif
2024
2025 #ifdef OBJ_ELF
2026 /* Forward declarations for functions below, in the MD interface
2027 section. */
2028 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2029
2030 /* Mark symbol that it follows a variant PCS convention. */
2031
2032 static void
2033 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
2034 {
2035 char *name;
2036 char c;
2037 symbolS *sym;
2038 asymbol *bfdsym;
2039 elf_symbol_type *elfsym;
2040
2041 c = get_symbol_name (&name);
2042 if (!*name)
2043 as_bad (_("Missing symbol name in directive"));
2044 sym = symbol_find_or_make (name);
2045 restore_line_pointer (c);
2046 demand_empty_rest_of_line ();
2047 bfdsym = symbol_get_bfdsym (sym);
2048 elfsym = elf_symbol_from (bfdsym);
2049 gas_assert (elfsym);
2050 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2051 }
2052 #endif /* OBJ_ELF */
2053
/* Output a 32-bit word, but mark as an instruction.  Handler for the
   .inst directive; each comma-separated constant is emitted as one
   4-byte instruction word (byte-swapped on big-endian targets).  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;	/* Number of instruction words emitted.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instructions are always little-endian; swap on BE targets.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Record the emitted instructions for line-number information.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2110
2111 static void
2112 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2113 {
2114 demand_empty_rest_of_line ();
2115 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2116 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2117 }
2118
2119 #ifdef OBJ_ELF
2120 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2121
2122 static void
2123 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2124 {
2125 expressionS exp;
2126
2127 expression (&exp);
2128 frag_grow (4);
2129 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2130 BFD_RELOC_AARCH64_TLSDESC_ADD);
2131
2132 demand_empty_rest_of_line ();
2133 }
2134
2135 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2136
2137 static void
2138 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2139 {
2140 expressionS exp;
2141
2142 /* Since we're just labelling the code, there's no need to define a
2143 mapping symbol. */
2144 expression (&exp);
2145 /* Make sure there is enough room in this frag for the following
2146 blr. This trick only works if the blr follows immediately after
2147 the .tlsdesc directive. */
2148 frag_grow (4);
2149 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2150 BFD_RELOC_AARCH64_TLSDESC_CALL);
2151
2152 demand_empty_rest_of_line ();
2153 }
2154
2155 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2156
2157 static void
2158 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2159 {
2160 expressionS exp;
2161
2162 expression (&exp);
2163 frag_grow (4);
2164 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2165 BFD_RELOC_AARCH64_TLSDESC_LDR);
2166
2167 demand_empty_rest_of_line ();
2168 }
2169 #endif /* OBJ_ELF */
2170
2171 #ifdef TE_PE
2172 static void
2173 s_secrel (int dummy ATTRIBUTE_UNUSED)
2174 {
2175 expressionS exp;
2176
2177 do
2178 {
2179 expression (&exp);
2180 if (exp.X_op == O_symbol)
2181 exp.X_op = O_secrel;
2182
2183 emit_expr (&exp, 4);
2184 }
2185 while (*input_line_pointer++ == ',');
2186
2187 input_line_pointer--;
2188 demand_empty_rest_of_line ();
2189 }
2190
2191 void
2192 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
2193 {
2194 expressionS exp;
2195
2196 exp.X_op = O_secrel;
2197 exp.X_add_symbol = symbol;
2198 exp.X_add_number = 0;
2199 emit_expr (&exp, size);
2200 }
2201
2202 static void
2203 s_secidx (int dummy ATTRIBUTE_UNUSED)
2204 {
2205 expressionS exp;
2206
2207 do
2208 {
2209 expression (&exp);
2210 if (exp.X_op == O_symbol)
2211 exp.X_op = O_secidx;
2212
2213 emit_expr (&exp, 2);
2214 }
2215 while (*input_line_pointer++ == ',');
2216
2217 input_line_pointer--;
2218 demand_empty_rest_of_line ();
2219 }
2220 #endif /* TE_PE */
2221
2222 static void s_aarch64_arch (int);
2223 static void s_aarch64_cpu (int);
2224 static void s_aarch64_arch_extension (int);
2225
2226 /* This table describes all the machine specific pseudo-ops the assembler
2227 has to support. The fields are:
2228 pseudo-op name without dot
2229 function to call to execute this pseudo-op
2230 Integer arg to pass to the function. */
2231
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* Literal pool dumping (".pool" is an alias for ".ltorg").  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Architecture / feature selection.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS descriptor relocation markers.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* Data directives with (eventual) relocation-suffix support; the
     integer argument is the element size in bytes.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* Half-precision and bfloat16 float constants.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2265 \f
2266
2267 /* Check whether STR points to a register name followed by a comma or the
2268 end of line; REG_TYPE indicates which register types are checked
2269 against. Return TRUE if STR is such a register name; otherwise return
2270 FALSE. The function does not intend to produce any diagnostics, but since
2271 the register parser aarch64_reg_parse, which is called by this function,
2272 does produce diagnostics, we call clear_error to clear any diagnostics
2273 that may be generated by aarch64_reg_parse.
2274 Also, the function returns FALSE directly if there is any user error
2275 present at the function entry. This prevents the existing diagnostics
2276 state from being spoiled.
2277 The function currently serves parse_constant_immediate and
2278 parse_big_immediate only. */
2279 static bool
2280 reg_name_p (char *str, aarch64_reg_type reg_type)
2281 {
2282 const reg_entry *reg;
2283
2284 /* Prevent the diagnostics state from being spoiled. */
2285 if (error_p ())
2286 return false;
2287
2288 reg = aarch64_reg_parse (&str, reg_type, NULL);
2289
2290 /* Clear the parsing error that may be set by the reg parser. */
2291 clear_error ();
2292
2293 if (!reg)
2294 return false;
2295
2296 skip_whitespace (str);
2297 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2298 return true;
2299
2300 return false;
2301 }
2302
2303 /* Parser functions used exclusively in instruction operands. */
2304
2305 /* Parse an immediate expression which may not be constant.
2306
2307 To prevent the expression parser from pushing a register name
2308 into the symbol table as an undefined symbol, firstly a check is
2309 done to find out whether STR is a register of type REG_TYPE followed
2310 by a comma or the end of line. Return FALSE if STR is such a string. */
2311
2312 static bool
2313 parse_immediate_expression (char **str, expressionS *exp,
2314 aarch64_reg_type reg_type)
2315 {
2316 if (reg_name_p (*str, reg_type))
2317 {
2318 set_recoverable_error (_("immediate operand required"));
2319 return false;
2320 }
2321
2322 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2323
2324 if (exp->X_op == O_absent)
2325 {
2326 set_fatal_syntax_error (_("missing immediate expression"));
2327 return false;
2328 }
2329
2330 return true;
2331 }
2332
2333 /* Constant immediate-value read function for use in insn parsing.
2334 STR points to the beginning of the immediate (with the optional
2335 leading #); *VAL receives the value. REG_TYPE says which register
2336 names should be treated as registers rather than as symbolic immediates.
2337
2338 Return TRUE on success; otherwise return FALSE. */
2339
2340 static bool
2341 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2342 {
2343 expressionS exp;
2344
2345 if (! parse_immediate_expression (str, &exp, reg_type))
2346 return false;
2347
2348 if (exp.X_op != O_constant)
2349 {
2350 set_syntax_error (_("constant expression required"));
2351 return false;
2352 }
2353
2354 *val = exp.X_add_number;
2355 return true;
2356 }
2357
/* Extract the 8-bit AArch64 floating-point immediate encoding from the
   IEEE754 single-precision word IMM: the sign bit b[31] becomes b[7]
   of the result and bits b[25:19] become b[6:0].  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t sign = (imm >> 31) & 0x1;		/* b[31] -> b[7].  */
  uint32_t exp_frac = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  return (sign << 7) | exp_frac;
}
2364
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* Such a value has the single-precision bit pattern

       3 32222222 2221111111111
       1 09876543 21098765432109876543210
       n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~e.  */

  /* Required value of bits 25-30 ('Eeeeee'), chosen so that E is the
     inverse of bit 30.  */
  uint32_t expected = ((imm >> 30) & 0x1) ? 0x40000000 : 0x3e000000;

  /* The lower 19 bits must be zero and bits 25-30 must match EXPECTED.  */
  return (imm & 0x7ffff) == 0
	 && (imm & 0x7e000000) == expected;
}
2397
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the resulting
   single-precision word in *FPWORD if so.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* A convertible double-precision value has the bit pattern

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is
     the inverse of E.  */

  uint32_t top = imm >> 32;
  uint32_t bottom = (uint32_t) imm;

  /* The lowest 29 mantissa bits would be discarded; they must be 0.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* Required value of E~~~, chosen so that each ~ is the inverse of E.  */
  uint32_t expected = ((top >> 30) & 0x1) ? 0x40000000 : 0x38000000;

  /* Check E~~~.  */
  if ((top & 0x78000000) != expected)
    return false;

  /* Check Eeee_eeee != 1111_1111, i.e. the exponent is in float range.  */
  if ((top & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (top & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	    | ((top << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	    | (bottom >> 29);		/* 3 S bits.  */
  return true;
}
2445
2446 /* Return true if we should treat OPERAND as a double-precision
2447 floating-point operand rather than a single-precision one. */
2448 static bool
2449 double_precision_operand_p (const aarch64_opnd_info *operand)
2450 {
2451 /* Check for unsuffixed SVE registers, which are allowed
2452 for LDR and STR but not in instructions that require an
2453 immediate. We get better error messages if we arbitrarily
2454 pick one size, parse the immediate normally, and then
2455 report the match failure in the normal way. */
2456 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2457 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2458 }
2459
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  /* Set when the input was a hexadecimal IEEE754 encoding rather than a
     decimal floating-point literal.  */
  bool hex_p = false;

  /* The leading '#' is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit encoding to 32 bits, rejecting any value
	     that would lose precision.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
   {
     set_recoverable_error (_("immediate operand required"));
     return false;
    }

  if (! hex_p)
    {
      int i;

      /* Parse a decimal literal directly into single-precision form.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2535
2536 /* Less-generic immediate-value read function with the possibility of loading
2537 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2538 instructions.
2539
2540 To prevent the expression parser from pushing a register name into the
2541 symbol table as an undefined symbol, a check is firstly done to find
2542 out whether STR is a register of type REG_TYPE followed by a comma or
2543 the end of line. Return FALSE if STR is such a register. */
2544
2545 static bool
2546 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2547 {
2548 char *ptr = *str;
2549
2550 if (reg_name_p (ptr, reg_type))
2551 {
2552 set_syntax_error (_("immediate operand required"));
2553 return false;
2554 }
2555
2556 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2557
2558 if (inst.reloc.exp.X_op == O_constant)
2559 *imm = inst.reloc.exp.X_add_number;
2560
2561 *str = ptr;
2562
2563 return true;
2564 }
2565
2566 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2567 if NEED_LIBOPCODES is non-zero, the fixup will need
2568 assistance from the libopcodes. */
2569
2570 static inline void
2571 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2572 const aarch64_opnd_info *operand,
2573 int need_libopcodes_p)
2574 {
2575 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2576 reloc->opnd = operand->type;
2577 if (need_libopcodes_p)
2578 reloc->need_libopcodes_p = 1;
2579 };
2580
2581 /* Return TRUE if the instruction needs to be fixed up later internally by
2582 the GAS; otherwise return FALSE. */
2583
2584 static inline bool
2585 aarch64_gas_internal_fixup_p (void)
2586 {
2587 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2588 }
2589
2590 /* Assign the immediate value to the relevant field in *OPERAND if
2591 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2592 needs an internal fixup in a later stage.
2593 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2594 IMM.VALUE that may get assigned with the constant. */
2595 static inline void
2596 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2597 aarch64_opnd_info *operand,
2598 int addr_off_p,
2599 int need_libopcodes_p,
2600 int skip_p)
2601 {
2602 if (reloc->exp.X_op == O_constant)
2603 {
2604 if (addr_off_p)
2605 operand->addr.offset.imm = reloc->exp.X_add_number;
2606 else
2607 operand->imm.value = reloc->exp.X_add_number;
2608 reloc->type = BFD_RELOC_UNUSED;
2609 }
2610 else
2611 {
2612 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2613 /* Tell libopcodes to ignore this operand or not. This is helpful
2614 when one of the operands needs to be fixed up later but we need
2615 libopcodes to check the other operands. */
2616 operand->skip = skip_p;
2617 }
2618 }
2619
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  /* Modifier name, without the surrounding colons.  */
  const char *name;
  /* Non-zero if the relocation is PC-relative.  */
  int pc_rel;
  /* Relocation code to use when the modifier appears on each class of
     instruction; 0 where the combination is not supported.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2639
/* In each entry below, the fields after the name and pc_rel flag give
   the relocation code selected when the modifier is applied to an ADR,
   ADRP, MOVZ/MOVK/MOVN, ADD, load/store or load-literal instruction
   respectively (matching struct reloc_table_entry); a 0 means the
   modifier is not permitted in that context.  */
static struct reloc_table_entry reloc_table[] =
{
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3168
3169 /* Given the address of a pointer pointing to the textual name of a
3170 relocation as may appear in assembler source, attempt to find its
3171 details in reloc_table. The pointer will be updated to the character
3172 after the trailing colon. On failure, NULL will be returned;
3173 otherwise return the reloc_table_entry. */
3174
3175 static struct reloc_table_entry *
3176 find_reloc_table_entry (char **str)
3177 {
3178 unsigned int i;
3179 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3180 {
3181 int length = strlen (reloc_table[i].name);
3182
3183 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3184 && (*str)[length] == ':')
3185 {
3186 *str += (length + 1);
3187 return &reloc_table[i];
3188 }
3189 }
3190
3191 return NULL;
3192 }
3193
/* Classify relocation TYPE (a BFD_RELOC_* code) for force_relocation
   purposes.  Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK.  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      return -1;
    }
}
3296
3297 int
3298 aarch64_force_relocation (struct fix *fixp)
3299 {
3300 int res = aarch64_force_reloc (fixp->fx_r_type);
3301
3302 if (res == -1)
3303 return generic_force_reloc (fixp);
3304 return res;
3305 }
3306
/* Mode argument to parse_shift and parser_shifter_operand.  Selects
   which shift/extend operators are acceptable for the operand being
   parsed.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3321
3322 /* Parse a <shift> operator on an AArch64 data processing instruction.
3323 Return TRUE on success; otherwise return FALSE. */
3324 static bool
3325 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3326 {
3327 const struct aarch64_name_value_pair *shift_op;
3328 enum aarch64_modifier_kind kind;
3329 expressionS exp;
3330 int exp_has_prefix;
3331 char *s = *str;
3332 char *p = s;
3333
3334 for (p = *str; ISALPHA (*p); p++)
3335 ;
3336
3337 if (p == *str)
3338 {
3339 set_syntax_error (_("shift expression expected"));
3340 return false;
3341 }
3342
3343 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3344
3345 if (shift_op == NULL)
3346 {
3347 set_syntax_error (_("shift operator expected"));
3348 return false;
3349 }
3350
3351 kind = aarch64_get_operand_modifier (shift_op);
3352
3353 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3354 {
3355 set_syntax_error (_("invalid use of 'MSL'"));
3356 return false;
3357 }
3358
3359 if (kind == AARCH64_MOD_MUL
3360 && mode != SHIFTED_MUL
3361 && mode != SHIFTED_MUL_VL)
3362 {
3363 set_syntax_error (_("invalid use of 'MUL'"));
3364 return false;
3365 }
3366
3367 switch (mode)
3368 {
3369 case SHIFTED_LOGIC_IMM:
3370 if (aarch64_extend_operator_p (kind))
3371 {
3372 set_syntax_error (_("extending shift is not permitted"));
3373 return false;
3374 }
3375 break;
3376
3377 case SHIFTED_ARITH_IMM:
3378 if (kind == AARCH64_MOD_ROR)
3379 {
3380 set_syntax_error (_("'ROR' shift is not permitted"));
3381 return false;
3382 }
3383 break;
3384
3385 case SHIFTED_LSL:
3386 if (kind != AARCH64_MOD_LSL)
3387 {
3388 set_syntax_error (_("only 'LSL' shift is permitted"));
3389 return false;
3390 }
3391 break;
3392
3393 case SHIFTED_MUL:
3394 if (kind != AARCH64_MOD_MUL)
3395 {
3396 set_syntax_error (_("only 'MUL' is permitted"));
3397 return false;
3398 }
3399 break;
3400
3401 case SHIFTED_MUL_VL:
3402 /* "MUL VL" consists of two separate tokens. Require the first
3403 token to be "MUL" and look for a following "VL". */
3404 if (kind == AARCH64_MOD_MUL)
3405 {
3406 skip_whitespace (p);
3407 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3408 {
3409 p += 2;
3410 kind = AARCH64_MOD_MUL_VL;
3411 break;
3412 }
3413 }
3414 set_syntax_error (_("only 'MUL VL' is permitted"));
3415 return false;
3416
3417 case SHIFTED_REG_OFFSET:
3418 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3419 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3420 {
3421 set_fatal_syntax_error
3422 (_("invalid shift for the register offset addressing mode"));
3423 return false;
3424 }
3425 break;
3426
3427 case SHIFTED_LSL_MSL:
3428 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3429 {
3430 set_syntax_error (_("invalid shift operator"));
3431 return false;
3432 }
3433 break;
3434
3435 default:
3436 abort ();
3437 }
3438
3439 /* Whitespace can appear here if the next thing is a bare digit. */
3440 skip_whitespace (p);
3441
3442 /* Parse shift amount. */
3443 exp_has_prefix = 0;
3444 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3445 exp.X_op = O_absent;
3446 else
3447 {
3448 if (is_immediate_prefix (*p))
3449 {
3450 p++;
3451 exp_has_prefix = 1;
3452 }
3453 aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
3454 }
3455 if (kind == AARCH64_MOD_MUL_VL)
3456 /* For consistency, give MUL VL the same shift amount as an implicit
3457 MUL #1. */
3458 operand->shifter.amount = 1;
3459 else if (exp.X_op == O_absent)
3460 {
3461 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3462 {
3463 set_syntax_error (_("missing shift amount"));
3464 return false;
3465 }
3466 operand->shifter.amount = 0;
3467 }
3468 else if (exp.X_op != O_constant)
3469 {
3470 set_syntax_error (_("constant shift amount required"));
3471 return false;
3472 }
3473 /* For parsing purposes, MUL #n has no inherent range. The range
3474 depends on the operand and will be checked by operand-specific
3475 routines. */
3476 else if (kind != AARCH64_MOD_MUL
3477 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3478 {
3479 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3480 return false;
3481 }
3482 else
3483 {
3484 operand->shifter.amount = exp.X_add_number;
3485 operand->shifter.amount_present = 1;
3486 }
3487
3488 operand->shifter.operator_present = 1;
3489 operand->shifter.kind = kind;
3490
3491 *str = p;
3492 return true;
3493 }
3494
3495 /* Parse a <shifter_operand> for a data processing instruction:
3496
3497 #<immediate>
3498 #<immediate>, LSL #imm
3499
3500 Validation of immediate operands is deferred to md_apply_fix.
3501
3502 Return TRUE on success; otherwise return FALSE. */
3503
3504 static bool
3505 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3506 enum parse_shift_mode mode)
3507 {
3508 char *p;
3509
3510 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3511 return false;
3512
3513 p = *str;
3514
3515 /* Accept an immediate expression. */
3516 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3517 REJECT_ABSENT))
3518 return false;
3519
3520 /* Accept optional LSL for arithmetic immediate values. */
3521 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3522 if (! parse_shift (&p, operand, SHIFTED_LSL))
3523 return false;
3524
3525 /* Not accept any shifter for logical immediate values. */
3526 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3527 && parse_shift (&p, operand, mode))
3528 {
3529 set_syntax_error (_("unexpected shift operator"));
3530 return false;
3531 }
3532
3533 *str = p;
3534 return true;
3535 }
3536
3537 /* Parse a <shifter_operand> for a data processing instruction:
3538
3539 <Rm>
3540 <Rm>, <shift>
3541 #<immediate>
3542 #<immediate>, LSL #imm
3543
3544 where <shift> is handled by parse_shift above, and the last two
3545 cases are handled by the function above.
3546
3547 Validation of immediate operands is deferred to md_apply_fix.
3548
3549 Return TRUE on success; otherwise return FALSE. */
3550
3551 static bool
3552 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3553 enum parse_shift_mode mode)
3554 {
3555 const reg_entry *reg;
3556 aarch64_opnd_qualifier_t qualifier;
3557 enum aarch64_operand_class opd_class
3558 = aarch64_get_operand_class (operand->type);
3559
3560 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3561 if (reg)
3562 {
3563 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3564 {
3565 set_syntax_error (_("unexpected register in the immediate operand"));
3566 return false;
3567 }
3568
3569 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3570 {
3571 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3572 return false;
3573 }
3574
3575 operand->reg.regno = reg->number;
3576 operand->qualifier = qualifier;
3577
3578 /* Accept optional shift operation on register. */
3579 if (! skip_past_comma (str))
3580 return true;
3581
3582 if (! parse_shift (str, operand, mode))
3583 return false;
3584
3585 return true;
3586 }
3587 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3588 {
3589 set_syntax_error
3590 (_("integer register expected in the extended/shifted operand "
3591 "register"));
3592 return false;
3593 }
3594
3595 /* We have a shifted immediate variable. */
3596 return parse_shifter_operand_imm (str, operand, mode);
3597 }
3598
/* Parse a <shifter_operand> that may be prefixed with a relocation
   modifier of the form "#:<reloc_op>:" or ":<reloc_op>:".  If no such
   prefix is present, delegate entirely to parse_shifter_operand.

   On success the relocation type and pc-relative flag are recorded in
   inst.reloc, and the immediate expression in inst.reloc.exp.

   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step over "#:" or ":" so *str points at the modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* add_type == 0 means this modifier has no ADD-instruction
	 variant, so it cannot be used here.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.
	 The reparse is deliberate: parse_shifter_operand_imm consumes
	 the expression again from the saved position.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3660
3661 /* Parse all forms of an address expression. Information is written
3662 to *OPERAND and/or inst.reloc.
3663
3664 The A64 instruction set has the following addressing modes:
3665
3666 Offset
3667 [base] // in SIMD ld/st structure
3668 [base{,#0}] // in ld/st exclusive
3669 [base{,#imm}]
3670 [base,Xm{,LSL #imm}]
3671 [base,Xm,SXTX {#imm}]
3672 [base,Wm,(S|U)XTW {#imm}]
3673 Pre-indexed
3674 [base]! // in ldraa/ldrab exclusive
3675 [base,#imm]!
3676 Post-indexed
3677 [base],#imm
3678 [base],Xm // in SIMD ld/st structure
3679 PC-relative (literal)
3680 label
3681 SVE:
3682 [base,#imm,MUL VL]
3683 [base,Zm.D{,LSL #imm}]
3684 [base,Zm.S,(S|U)XTW {#imm}]
3685 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3686 [Zn.S,#imm]
3687 [Zn.D,#imm]
3688 [Zn.S{, Xm}]
3689 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3690 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3691 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3692
3693 (As a convenience, the notation "=immediate" is permitted in conjunction
3694 with the pc-relative literal load instructions to automatically place an
3695 immediate value or symbolic address in a nearby literal pool and generate
3696 a hidden label which references it.)
3697
3698 Upon a successful parsing, the address structure in *OPERAND will be
3699 filled in the following way:
3700
3701 .base_regno = <base>
3702 .offset.is_reg // 1 if the offset is a register
3703 .offset.imm = <imm>
3704 .offset.regno = <Rm>
3705
3706 For different addressing modes defined in the A64 ISA:
3707
3708 Offset
3709 .pcrel=0; .preind=1; .postind=0; .writeback=0
3710 Pre-indexed
3711 .pcrel=0; .preind=1; .postind=0; .writeback=1
3712 Post-indexed
3713 .pcrel=0; .preind=0; .postind=1; .writeback=1
3714 PC-relative (literal)
3715 .pcrel=1; .preind=1; .postind=0; .writeback=0
3716
3717 The shift/extension information, if any, will be stored in .shifter.
3718 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3719 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3720 corresponding register.
3721
3722 BASE_TYPE says which types of base register should be accepted and
3723 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3724 is the type of shifter that is allowed for immediate offsets,
3725 or SHIFTED_NONE if none.
3726
3727 In all other respects, it is the caller's responsibility to check
3728 for addressing modes not supported by the instruction, and to set
3729 inst.reloc.type. */
3730
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  /* NOTE: EXP aliases inst.reloc.exp, so any immediate parsed below is
     recorded directly in the instruction's relocation expression.  */
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* ADR uses the adr_type variant; every other pc-relative
	     consumer takes the load-literal variant.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* SVE2 [Zn.S, Xm] deliberately mixes a 32-bit element base
		 with a 64-bit scalar offset; exempt it from the size
		 match requirement.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  /* Writeback suffixes: "!" marks pre-indexed, a trailing ","
     introduces a post-indexed offset.  */
  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4029
4030 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4031 on success. */
4032 static bool
4033 parse_address (char **str, aarch64_opnd_info *operand)
4034 {
4035 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4036 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4037 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
4038 }
4039
4040 /* Parse an address in which SVE vector registers and MUL VL are allowed.
4041 The arguments have the same meaning as for parse_address_main.
4042 Return TRUE on success. */
4043 static bool
4044 parse_sve_address (char **str, aarch64_opnd_info *operand,
4045 aarch64_opnd_qualifier_t *base_qualifier,
4046 aarch64_opnd_qualifier_t *offset_qualifier)
4047 {
4048 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
4049 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
4050 SHIFTED_MUL_VL);
4051 }
4052
4053 /* Parse a register X0-X30. The register must be 64-bit and register 31
4054 is unallocated. */
4055 static bool
4056 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4057 {
4058 const reg_entry *reg = parse_reg (str);
4059 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4060 {
4061 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
4062 return false;
4063 }
4064 operand->reg.regno = reg->number;
4065 operand->qualifier = AARCH64_OPND_QLF_X;
4066 return true;
4067 }
4068
4069 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
4070 Return TRUE on success; otherwise return FALSE. */
4071 static bool
4072 parse_half (char **str, int *internal_fixup_p)
4073 {
4074 char *p = *str;
4075
4076 skip_past_char (&p, '#');
4077
4078 gas_assert (internal_fixup_p);
4079 *internal_fixup_p = 0;
4080
4081 if (*p == ':')
4082 {
4083 struct reloc_table_entry *entry;
4084
4085 /* Try to parse a relocation. Anything else is an error. */
4086 ++p;
4087
4088 if (!(entry = find_reloc_table_entry (&p)))
4089 {
4090 set_syntax_error (_("unknown relocation modifier"));
4091 return false;
4092 }
4093
4094 if (entry->movw_type == 0)
4095 {
4096 set_syntax_error
4097 (_("this relocation modifier is not allowed on this instruction"));
4098 return false;
4099 }
4100
4101 inst.reloc.type = entry->movw_type;
4102 }
4103 else
4104 *internal_fixup_p = 1;
4105
4106 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4107 return false;
4108
4109 *str = p;
4110 return true;
4111 }
4112
4113 /* Parse an operand for an ADRP instruction:
4114 ADRP <Xd>, <label>
4115 Return TRUE on success; otherwise return FALSE. */
4116
4117 static bool
4118 parse_adrp (char **str)
4119 {
4120 char *p;
4121
4122 p = *str;
4123 if (*p == ':')
4124 {
4125 struct reloc_table_entry *entry;
4126
4127 /* Try to parse a relocation. Anything else is an error. */
4128 ++p;
4129 if (!(entry = find_reloc_table_entry (&p)))
4130 {
4131 set_syntax_error (_("unknown relocation modifier"));
4132 return false;
4133 }
4134
4135 if (entry->adrp_type == 0)
4136 {
4137 set_syntax_error
4138 (_("this relocation modifier is not allowed on this instruction"));
4139 return false;
4140 }
4141
4142 inst.reloc.type = entry->adrp_type;
4143 }
4144 else
4145 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4146
4147 inst.reloc.pc_rel = 1;
4148 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4149 return false;
4150 *str = p;
4151 return true;
4152 }
4153
4154 /* Miscellaneous. */
4155
4156 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4157 of SIZE tokens in which index I gives the token for field value I,
4158 or is null if field value I is invalid. REG_TYPE says which register
4159 names should be treated as registers rather than as symbolic immediates.
4160
4161 Return true on success, moving *STR past the operand and storing the
4162 field value in *VAL. */
4163
4164 static int
4165 parse_enum_string (char **str, int64_t *val, const char *const *array,
4166 size_t size, aarch64_reg_type reg_type)
4167 {
4168 expressionS exp;
4169 char *p, *q;
4170 size_t i;
4171
4172 /* Match C-like tokens. */
4173 p = q = *str;
4174 while (ISALNUM (*q))
4175 q++;
4176
4177 for (i = 0; i < size; ++i)
4178 if (array[i]
4179 && strncasecmp (array[i], p, q - p) == 0
4180 && array[i][q - p] == 0)
4181 {
4182 *val = i;
4183 *str = q;
4184 return true;
4185 }
4186
4187 if (!parse_immediate_expression (&p, &exp, reg_type))
4188 return false;
4189
4190 if (exp.X_op == O_constant
4191 && (uint64_t) exp.X_add_number < size)
4192 {
4193 *val = exp.X_add_number;
4194 *str = p;
4195 return true;
4196 }
4197
4198 /* Use the default error for this operand. */
4199 return false;
4200 }
4201
4202 /* Parse an option for a preload instruction. Returns the encoding for the
4203 option, or PARSE_FAIL. */
4204
4205 static int
4206 parse_pldop (char **str)
4207 {
4208 char *p, *q;
4209 const struct aarch64_name_value_pair *o;
4210
4211 p = q = *str;
4212 while (ISALNUM (*q))
4213 q++;
4214
4215 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4216 if (!o)
4217 return PARSE_FAIL;
4218
4219 *str = q;
4220 return o->value;
4221 }
4222
4223 /* Parse an option for a barrier instruction. Returns the encoding for the
4224 option, or PARSE_FAIL. */
4225
4226 static int
4227 parse_barrier (char **str)
4228 {
4229 char *p, *q;
4230 const struct aarch64_name_value_pair *o;
4231
4232 p = q = *str;
4233 while (ISALPHA (*q))
4234 q++;
4235
4236 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4237 if (!o)
4238 return PARSE_FAIL;
4239
4240 *str = q;
4241 return o->value;
4242 }
4243
4244 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4245 return 0 if successful. Otherwise return PARSE_FAIL. */
4246
4247 static int
4248 parse_barrier_psb (char **str,
4249 const struct aarch64_name_value_pair ** hint_opt)
4250 {
4251 char *p, *q;
4252 const struct aarch64_name_value_pair *o;
4253
4254 p = q = *str;
4255 while (ISALPHA (*q))
4256 q++;
4257
4258 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4259 if (!o)
4260 {
4261 set_fatal_syntax_error
4262 ( _("unknown or missing option to PSB/TSB"));
4263 return PARSE_FAIL;
4264 }
4265
4266 if (o->value != 0x11)
4267 {
4268 /* PSB only accepts option name 'CSYNC'. */
4269 set_syntax_error
4270 (_("the specified option is not accepted for PSB/TSB"));
4271 return PARSE_FAIL;
4272 }
4273
4274 *str = q;
4275 *hint_opt = o;
4276 return 0;
4277 }
4278
4279 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4280 return 0 if successful. Otherwise return PARSE_FAIL. */
4281
4282 static int
4283 parse_bti_operand (char **str,
4284 const struct aarch64_name_value_pair ** hint_opt)
4285 {
4286 char *p, *q;
4287 const struct aarch64_name_value_pair *o;
4288
4289 p = q = *str;
4290 while (ISALPHA (*q))
4291 q++;
4292
4293 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4294 if (!o)
4295 {
4296 set_fatal_syntax_error
4297 ( _("unknown option to BTI"));
4298 return PARSE_FAIL;
4299 }
4300
4301 switch (o->value)
4302 {
4303 /* Valid BTI operands. */
4304 case HINT_OPD_C:
4305 case HINT_OPD_J:
4306 case HINT_OPD_JC:
4307 break;
4308
4309 default:
4310 set_syntax_error
4311 (_("unknown option to BTI"));
4312 return PARSE_FAIL;
4313 }
4314
4315 *str = q;
4316 *hint_opt = o;
4317 return 0;
4318 }
4319
4320 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4321 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4322 on failure. Format:
4323
4324 REG_TYPE.QUALIFIER
4325
4326 Side effect: Update STR with current parse position of success.
4327 */
4328
4329 static const reg_entry *
4330 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4331 aarch64_opnd_qualifier_t *qualifier)
4332 {
4333 struct vector_type_el vectype;
4334 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4335 PTR_FULL_REG);
4336 if (!reg)
4337 return NULL;
4338
4339 *qualifier = vectype_to_qualifier (&vectype);
4340 if (*qualifier == AARCH64_OPND_QLF_NIL)
4341 return NULL;
4342
4343 return reg;
4344 }
4345
4346 /* Parse SME ZA tile encoded in <ZAda> assembler symbol.
4347 Function return tile QUALIFIER on success.
4348
4349 Tiles are in example format: za[0-9]\.[bhsd]
4350
4351 Function returns <ZAda> register number or PARSE_FAIL.
4352 */
4353 static int
4354 parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
4355 {
4356 int regno;
4357 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZAT, qualifier);
4358
4359 if (reg == NULL)
4360 return PARSE_FAIL;
4361 regno = reg->number;
4362
4363 switch (*qualifier)
4364 {
4365 case AARCH64_OPND_QLF_S_B:
4366 if (regno != 0x00)
4367 {
4368 set_syntax_error (_("invalid ZA tile register number, expected za0"));
4369 return PARSE_FAIL;
4370 }
4371 break;
4372 case AARCH64_OPND_QLF_S_H:
4373 if (regno > 0x01)
4374 {
4375 set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
4376 return PARSE_FAIL;
4377 }
4378 break;
4379 case AARCH64_OPND_QLF_S_S:
4380 if (regno > 0x03)
4381 {
4382 /* For the 32-bit variant: is the name of the ZA tile ZA0-ZA3. */
4383 set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
4384 return PARSE_FAIL;
4385 }
4386 break;
4387 case AARCH64_OPND_QLF_S_D:
4388 if (regno > 0x07)
4389 {
4390 /* For the 64-bit variant: is the name of the ZA tile ZA0-ZA7 */
4391 set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
4392 return PARSE_FAIL;
4393 }
4394 break;
4395 default:
4396 set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
4397 return PARSE_FAIL;
4398 }
4399
4400 return regno;
4401 }
4402
4403 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4404
4405 #<imm>
4406 <imm>
4407
4408 Function return TRUE if immediate was found, or FALSE.
4409 */
4410 static bool
4411 parse_sme_immediate (char **str, int64_t *imm)
4412 {
4413 int64_t val;
4414 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4415 return false;
4416
4417 *imm = val;
4418 return true;
4419 }
4420
4421 /* Parse index with vector select register and immediate:
4422
4423 [<Wv>, <imm>]
4424 [<Wv>, #<imm>]
4425 where <Wv> is in W12-W15 range and # is optional for immediate.
4426
4427 Function performs extra check for mandatory immediate value if REQUIRE_IMM
4428 is set to true.
4429
4430 On success function returns TRUE and populated VECTOR_SELECT_REGISTER and
4431 IMM output.
4432 */
4433 static bool
4434 parse_sme_za_hv_tiles_operand_index (char **str,
4435 int *vector_select_register,
4436 int64_t *imm)
4437 {
4438 const reg_entry *reg;
4439
4440 if (!skip_past_char (str, '['))
4441 {
4442 set_syntax_error (_("expected '['"));
4443 return false;
4444 }
4445
4446 /* Vector select register W12-W15 encoded in the 2-bit Rv field. */
4447 reg = parse_reg (str);
4448 if (reg == NULL || reg->type != REG_TYPE_R_32
4449 || reg->number < 12 || reg->number > 15)
4450 {
4451 set_syntax_error (_("expected vector select register W12-W15"));
4452 return false;
4453 }
4454 *vector_select_register = reg->number;
4455
4456 if (!skip_past_char (str, ',')) /* Optional index offset immediate. */
4457 {
4458 set_syntax_error (_("expected ','"));
4459 return false;
4460 }
4461
4462 if (!parse_sme_immediate (str, imm))
4463 {
4464 set_syntax_error (_("index offset immediate expected"));
4465 return false;
4466 }
4467
4468 if (!skip_past_char (str, ']'))
4469 {
4470 set_syntax_error (_("expected ']'"));
4471 return false;
4472 }
4473
4474 return true;
4475 }
4476
4477 /* Parse SME ZA horizontal or vertical vector access to tiles.
4478 Function extracts from STR to SLICE_INDICATOR <HV> horizontal (0) or
4479 vertical (1) ZA tile vector orientation. VECTOR_SELECT_REGISTER
4480 contains <Wv> select register and corresponding optional IMMEDIATE.
4481 In addition QUALIFIER is extracted.
4482
4483 Field format examples:
4484
4485 ZA0<HV>.B[<Wv>, #<imm>]
4486 <ZAn><HV>.H[<Wv>, #<imm>]
4487 <ZAn><HV>.S[<Wv>, #<imm>]
4488 <ZAn><HV>.D[<Wv>, #<imm>]
4489 <ZAn><HV>.Q[<Wv>, #<imm>]
4490
4491 Function returns <ZAda> register number or PARSE_FAIL.
4492 */
static int
parse_sme_za_hv_tiles_operand (char **str,
			       enum sme_hv_slice *slice_indicator,
			       int *vector_select_register,
			       int *imm,
			       aarch64_opnd_qualifier_t *qualifier)
{
  int regno;
  int regno_limit;
  int64_t imm_limit;
  int64_t imm_value;
  const reg_entry *reg;

  reg = parse_reg_with_qual (str, REG_TYPE_ZATHV, qualifier);
  if (!reg)
    return PARSE_FAIL;

  /* ZATH names denote horizontal slices; everything else accepted by
     REG_TYPE_ZATHV is vertical.  */
  *slice_indicator = (aarch64_check_reg_type (reg, REG_TYPE_ZATH)
		      ? HV_horizontal
		      : HV_vertical);
  regno = reg->number;

  /* Wider elements mean more tiles but fewer slices per tile, so as
     REGNO_LIMIT grows, IMM_LIMIT shrinks.  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      regno_limit = 0;
      imm_limit = 15;
      break;
    case AARCH64_OPND_QLF_S_H:
      regno_limit = 1;
      imm_limit = 7;
      break;
    case AARCH64_OPND_QLF_S_S:
      regno_limit = 3;
      imm_limit = 3;
      break;
    case AARCH64_OPND_QLF_S_D:
      regno_limit = 7;
      imm_limit = 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      regno_limit = 15;
      imm_limit = 0;
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
      return PARSE_FAIL;
    }

  /* Check if destination register ZA tile vector is in range for given
     instruction variant.  */
  if (regno < 0 || regno > regno_limit)
    {
      set_syntax_error (_("ZA tile vector out of range"));
      return PARSE_FAIL;
    }

  /* The "[<Wv>, <imm>]" index must follow the tile name; its own
     diagnostics are set by the callee.  */
  if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
					    &imm_value))
    return PARSE_FAIL;

  /* Check if optional index offset is in the range for instruction
     variant.  */
  if (imm_value < 0 || imm_value > imm_limit)
    {
      set_syntax_error (_("index offset out of range"));
      return PARSE_FAIL;
    }

  *imm = imm_value;

  return regno;
}
4566
4567
4568 static int
4569 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4570 enum sme_hv_slice *slice_indicator,
4571 int *vector_select_register,
4572 int *imm,
4573 aarch64_opnd_qualifier_t *qualifier)
4574 {
4575 int regno;
4576
4577 if (!skip_past_char (str, '{'))
4578 {
4579 set_syntax_error (_("expected '{'"));
4580 return PARSE_FAIL;
4581 }
4582
4583 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4584 vector_select_register, imm,
4585 qualifier);
4586
4587 if (regno == PARSE_FAIL)
4588 return PARSE_FAIL;
4589
4590 if (!skip_past_char (str, '}'))
4591 {
4592 set_syntax_error (_("expected '}'"));
4593 return PARSE_FAIL;
4594 }
4595
4596 return regno;
4597 }
4598
4599 /* Parse list of up to eight 64-bit element tile names separated by commas in
4600 SME's ZERO instruction:
4601
4602 ZERO { <mask> }
4603
4604 Function returns <mask>:
4605
4606 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4607 */
static int
parse_sme_zero_mask(char **str)
{
  char *q;
  int mask;
  aarch64_opnd_qualifier_t qualifier;

  mask = 0x00;
  q = *str;
  do
    {
      const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZAT,
						  &qualifier);
      if (reg)
	{
	  int regno = reg->number;
	  if (qualifier == AARCH64_OPND_QLF_S_B && regno == 0)
	    {
	      /* { ZA0.B } is assembled as all-ones immediate.  */
	      mask = 0xff;
	    }
	  /* Narrower tiles cover interleaved subsets of the eight
	     64-bit tiles: an H tile maps to every 2nd bit (0x55), an
	     S tile to every 4th bit (0x11), a D tile to one bit.  */
	  else if (qualifier == AARCH64_OPND_QLF_S_H && regno < 2)
	    mask |= 0x55 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_S && regno < 4)
	    mask |= 0x11 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_D && regno < 8)
	    mask |= 0x01 << regno;
	  else
	    {
	      set_syntax_error (_("wrong ZA tile element format"));
	      return PARSE_FAIL;
	    }
	  continue;
	}
      /* parse_reg_with_qual failed and may have left an error recorded;
	 discard it before trying the bare "za" spelling.  */
      clear_error ();
      if (strncasecmp (q, "za", 2) == 0 && !ISALNUM (q[2]))
	{
	  /* { ZA } is assembled as all-ones immediate.  */
	  mask = 0xff;
	  q += 2;
	  continue;
	}

      set_syntax_error (_("wrong ZA tile element format"));
      return PARSE_FAIL;
    }
  while (skip_past_char (&q, ','));

  *str = q;
  return mask;
}
4659
4660 /* Wraps in curly braces <mask> operand ZERO instruction:
4661
4662 ZERO { <mask> }
4663
4664 Function returns value of <mask> bit-field.
4665 */
4666 static int
4667 parse_sme_list_of_64bit_tiles (char **str)
4668 {
4669 int regno;
4670
4671 if (!skip_past_char (str, '{'))
4672 {
4673 set_syntax_error (_("expected '{'"));
4674 return PARSE_FAIL;
4675 }
4676
4677 /* Empty <mask> list is an all-zeros immediate. */
4678 if (!skip_past_char (str, '}'))
4679 {
4680 regno = parse_sme_zero_mask (str);
4681 if (regno == PARSE_FAIL)
4682 return PARSE_FAIL;
4683
4684 if (!skip_past_char (str, '}'))
4685 {
4686 set_syntax_error (_("expected '}'"));
4687 return PARSE_FAIL;
4688 }
4689 }
4690 else
4691 regno = 0x00;
4692
4693 return regno;
4694 }
4695
4696 /* Parse ZA array operand used in e.g. STR and LDR instruction.
4697 Operand format:
4698
4699 ZA[<Wv>, <imm>]
4700 ZA[<Wv>, #<imm>]
4701
4702 Function returns <Wv> or PARSE_FAIL.
4703 */
4704 static int
4705 parse_sme_za_array (char **str, int *imm)
4706 {
4707 char *p, *q;
4708 int regno;
4709 int64_t imm_value;
4710
4711 p = q = *str;
4712 while (ISALPHA (*q))
4713 q++;
4714
4715 if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
4716 {
4717 set_syntax_error (_("expected ZA array"));
4718 return PARSE_FAIL;
4719 }
4720
4721 if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
4722 return PARSE_FAIL;
4723
4724 if (imm_value < 0 || imm_value > 15)
4725 {
4726 set_syntax_error (_("offset out of range"));
4727 return PARSE_FAIL;
4728 }
4729
4730 *imm = imm_value;
4731 *str = q;
4732 return regno;
4733 }
4734
/* Parse streaming mode operand for SMSTART and SMSTOP.

   {SM | ZA}

   Function returns 's' if SM or 'z' if ZA is parsed.  Otherwise PARSE_FAIL.
*/
static int
parse_sme_sm_za (char **str)
{
  char *p, *q;

  /* Scan past the alphabetic operand name.  */
  p = q = *str;
  while (ISALPHA (*q))
    q++;

  if ((q - p != 2)
      || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
    {
      set_syntax_error (_("expected SM or ZA operand"));
      return PARSE_FAIL;
    }

  /* The first letter distinguishes the two operands: 's' or 'z'.  */
  *str = q;
  return TOLOWER (p[0]);
}
4760
4761 /* Parse the name of the source scalable predicate register, the index base
4762 register W12-W15 and the element index. Function performs element index
4763 limit checks as well as qualifier type checks.
4764
4765 <Pn>.<T>[<Wv>, <imm>]
4766 <Pn>.<T>[<Wv>, #<imm>]
4767
4768 On success function sets <Wv> to INDEX_BASE_REG, <T> to QUALIFIER and
4769 <imm> to IMM.
4770 Function returns <Pn>, or PARSE_FAIL.
4771 */
4772 static int
4773 parse_sme_pred_reg_with_index(char **str,
4774 int *index_base_reg,
4775 int *imm,
4776 aarch64_opnd_qualifier_t *qualifier)
4777 {
4778 int regno;
4779 int64_t imm_limit;
4780 int64_t imm_value;
4781 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_PN, qualifier);
4782
4783 if (reg == NULL)
4784 return PARSE_FAIL;
4785 regno = reg->number;
4786
4787 switch (*qualifier)
4788 {
4789 case AARCH64_OPND_QLF_S_B:
4790 imm_limit = 15;
4791 break;
4792 case AARCH64_OPND_QLF_S_H:
4793 imm_limit = 7;
4794 break;
4795 case AARCH64_OPND_QLF_S_S:
4796 imm_limit = 3;
4797 break;
4798 case AARCH64_OPND_QLF_S_D:
4799 imm_limit = 1;
4800 break;
4801 default:
4802 set_syntax_error (_("wrong predicate register element size, allowed b, h, s and d"));
4803 return PARSE_FAIL;
4804 }
4805
4806 if (! parse_sme_za_hv_tiles_operand_index (str, index_base_reg, &imm_value))
4807 return PARSE_FAIL;
4808
4809 if (imm_value < 0 || imm_value > imm_limit)
4810 {
4811 set_syntax_error (_("element index out of range for given variant"));
4812 return PARSE_FAIL;
4813 }
4814
4815 *imm = imm_value;
4816
4817 return regno;
4818 }
4819
4820 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4821 Returns the encoding for the option, or PARSE_FAIL.
4822
4823 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4824 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4825
4826 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4827 field, otherwise as a system register.
4828 */
4829
4830 static int
4831 parse_sys_reg (char **str, htab_t sys_regs,
4832 int imple_defined_p, int pstatefield_p,
4833 uint32_t* flags)
4834 {
4835 char *p, *q;
4836 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4837 const aarch64_sys_reg *o;
4838 int value;
4839
4840 p = buf;
4841 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4842 if (p < buf + (sizeof (buf) - 1))
4843 *p++ = TOLOWER (*q);
4844 *p = '\0';
4845
4846 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4847 valid system register. This is enforced by construction of the hash
4848 table. */
4849 if (p - buf != q - *str)
4850 return PARSE_FAIL;
4851
4852 o = str_hash_find (sys_regs, buf);
4853 if (!o)
4854 {
4855 if (!imple_defined_p)
4856 return PARSE_FAIL;
4857 else
4858 {
4859 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
4860 unsigned int op0, op1, cn, cm, op2;
4861
4862 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
4863 != 5)
4864 return PARSE_FAIL;
4865 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
4866 return PARSE_FAIL;
4867 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
4868 if (flags)
4869 *flags = 0;
4870 }
4871 }
4872 else
4873 {
4874 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
4875 as_bad (_("selected processor does not support PSTATE field "
4876 "name '%s'"), buf);
4877 if (!pstatefield_p
4878 && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
4879 o->value, o->flags, o->features))
4880 as_bad (_("selected processor does not support system register "
4881 "name '%s'"), buf);
4882 if (aarch64_sys_reg_deprecated_p (o->flags))
4883 as_warn (_("system register name '%s' is deprecated and may be "
4884 "removed in a future release"), buf);
4885 value = o->value;
4886 if (flags)
4887 *flags = o->flags;
4888 }
4889
4890 *str = q;
4891 return value;
4892 }
4893
4894 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4895 for the option, or NULL. */
4896
4897 static const aarch64_sys_ins_reg *
4898 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4899 {
4900 char *p, *q;
4901 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4902 const aarch64_sys_ins_reg *o;
4903
4904 p = buf;
4905 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4906 if (p < buf + (sizeof (buf) - 1))
4907 *p++ = TOLOWER (*q);
4908 *p = '\0';
4909
4910 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4911 valid system register. This is enforced by construction of the hash
4912 table. */
4913 if (p - buf != q - *str)
4914 return NULL;
4915
4916 o = str_hash_find (sys_ins_regs, buf);
4917 if (!o)
4918 return NULL;
4919
4920 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4921 o->name, o->value, o->flags, 0))
4922 as_bad (_("selected processor does not support system register "
4923 "name '%s'"), buf);
4924 if (aarch64_sys_reg_deprecated_p (o->flags))
4925 as_warn (_("system register name '%s' is deprecated and may be "
4926 "removed in a future release"), buf);
4927
4928 *str = q;
4929 return o;
4930 }
4931 \f
/* Helper macros used when parsing operands.  STR is the (char *) cursor of
   the enclosing function; on any parse error each macro branches to the
   enclosing function's "failure" label.  */

/* Require the literal character CHR to be next in STR.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into REG.  */
#define po_reg_or_fail(regtype) do {				\
    reg = aarch64_reg_parse (&str, regtype, NULL);		\
    if (!reg)							\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE, recording its
   number and qualifier in INFO.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check ("nc").  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse one of the enumeration strings in ARRAY into VAL.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail the parse if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4983 \f
/* encode the 12-bit imm field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  /* The imm12 field occupies bits [21:10] of the instruction word.  */
  const unsigned int imm12_pos = 10;
  return imm << imm12_pos;
}
4990
/* encode the shift amount field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  /* The shift field lives at bits [23:22].  */
  const unsigned int shift_pos = 22;
  return cnt << shift_pos;
}
4997
4998
/* encode the imm field of Adr instruction */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  /* immlo: imm[1:0] -> instruction bits [30:29].  */
  uint32_t immlo = (imm & 0x3) << 29;
  /* immhi: imm[20:2] -> instruction bits [23:5].  */
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;
  return immlo | immhi;
}
5006
/* encode the immediate field of Move wide immediate */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  /* The imm16 field starts at bit 5.  */
  const unsigned int imm16_pos = 5;
  return imm << imm16_pos;
}
5013
/* encode the 26-bit offset of unconditional branch */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  /* Keep only the low 26 bits; the field starts at bit 0.  */
  const uint32_t imm26_mask = (1u << 26) - 1;
  return ofs & imm26_mask;
}
5020
/* encode the 19-bit offset of conditional branch and compare & branch */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  /* imm19 field: low 19 bits of OFS, placed at bit 5.  */
  const uint32_t imm19_mask = (1u << 19) - 1;
  return (ofs & imm19_mask) << 5;
}
5027
/* encode the 19-bit offset of ld literal */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  /* imm19 field: low 19 bits of OFS, placed at bit 5.  */
  const uint32_t imm19_mask = (1u << 19) - 1;
  return (ofs & imm19_mask) << 5;
}
5034
/* Encode the 14-bit offset of test & branch.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  /* imm14 field: low 14 bits of OFS, placed at bit 5.  */
  const uint32_t imm14_mask = (1u << 14) - 1;
  return (ofs & imm14_mask) << 5;
}
5041
/* Encode the 16-bit imm field of svc/hvc/smc.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  /* The imm16 field starts at bit 5.  */
  const unsigned int imm16_pos = 5;
  return imm << imm16_pos;
}
5048
/* Reencode add(s) to sub(s), or sub(s) to add(s).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  /* Bit 30 selects between the ADD and SUB forms; flip it.  */
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
5055
/* Reencode a MOVZ/MOVN-family opcode as MOVZ by forcing bit 30 on.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode | movz_bit;
}
5061
/* Reencode a MOVZ/MOVN-family opcode as MOVN by forcing bit 30 off.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode & ~movz_bit;
}
5067
5068 /* Overall per-instruction processing. */
5069
5070 /* We need to be able to fix up arbitrary expressions in some statements.
5071 This is so that we can handle symbols that are an arbitrary distance from
5072 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5073 which returns part of an address in a form which will be valid for
5074 a data instruction. We do this by pushing the expression into a symbol
5075 in the expr_section, and creating a fix for that. */
5076
5077 static fixS *
5078 fix_new_aarch64 (fragS * frag,
5079 int where,
5080 short int size,
5081 expressionS * exp,
5082 int pc_rel,
5083 int reloc)
5084 {
5085 fixS *new_fix;
5086
5087 switch (exp->X_op)
5088 {
5089 case O_constant:
5090 case O_symbol:
5091 case O_add:
5092 case O_subtract:
5093 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5094 break;
5095
5096 default:
5097 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5098 pc_rel, reloc);
5099 break;
5100 }
5101 return new_fix;
5102 }
5103 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.
   Nonzero enables the "did you mean this?" hints printed by
   output_operand_error_record.  */
static int verbose_error_p = 1;
5109
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Indexed directly by
   enum aarch64_operand_error_kind (see the DEBUG_TRACE in
   add_operand_error_record), so it must list every enumerator in
   declaration order; previously AARCH64_OPDE_UNTIED_IMMS and
   AARCH64_OPDE_UNTIED_OPERAND were missing, mis-labelling the later
   kinds and reading past the end of the array for the last two.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
5127
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* Severity is encoded in the declaration order of
     enum aarch64_operand_error_kind, so a plain numeric comparison
     suffices.  The asserts document and check the ordering this
     function relies on.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
5151
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.

   N.B. returns a pointer to a static buffer, so the result is only valid
   until the next call.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5180
5181 static void
5182 reset_aarch64_instruction (aarch64_instruction *instruction)
5183 {
5184 memset (instruction, '\0', sizeof (aarch64_instruction));
5185 instruction->reloc.type = BFD_RELOC_UNUSED;
5186 }
5187
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  /* The opcode (instruction template) the error was recorded against.  */
  const aarch64_opcode *opcode;
  /* The error itself: kind, operand index and optional message.  */
  aarch64_operand_error detail;
  /* Next record in the singly-linked list.  */
  struct operand_error_record *next;
};

typedef struct operand_error_record operand_error_record;

struct operand_errors
{
  /* Head and tail of a singly-linked list of error records.  */
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines to avoid repeated
   allocation (see init_operand_error_report and
   remove_operand_error_record).  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5219
5220 /* Initialize the data structure that stores the operand mismatch
5221 information on assembling one line of the assembly code. */
5222 static void
5223 init_operand_error_report (void)
5224 {
5225 if (operand_error_report.head != NULL)
5226 {
5227 gas_assert (operand_error_report.tail != NULL);
5228 operand_error_report.tail->next = free_opnd_error_record_nodes;
5229 free_opnd_error_record_nodes = operand_error_report.head;
5230 operand_error_report.head = NULL;
5231 operand_error_report.tail = NULL;
5232 return;
5233 }
5234 gas_assert (operand_error_report.tail == NULL);
5235 }
5236
5237 /* Return TRUE if some operand error has been recorded during the
5238 parsing of the current assembly line using the opcode *OPCODE;
5239 otherwise return FALSE. */
5240 static inline bool
5241 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5242 {
5243 operand_error_record *record = operand_error_report.head;
5244 return record && record->opcode == opcode;
5245 }
5246
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* If a record for OPCODE already exists it is the list head (records
     are always inserted at the head).  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Either a freshly-prepared record or an existing one being
     overwritten by a more relevant error.  */
  record->detail = new_record->detail;
}
5298
5299 static inline void
5300 record_operand_error_info (const aarch64_opcode *opcode,
5301 aarch64_operand_error *error_info)
5302 {
5303 operand_error_record record;
5304 record.opcode = opcode;
5305 record.detail = *error_info;
5306 add_operand_error_record (&record);
5307 }
5308
5309 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5310 error message *ERROR, for operand IDX (count from 0). */
5311
5312 static void
5313 record_operand_error (const aarch64_opcode *opcode, int idx,
5314 enum aarch64_operand_error_kind kind,
5315 const char* error)
5316 {
5317 aarch64_operand_error info;
5318 memset(&info, 0, sizeof (info));
5319 info.index = idx;
5320 info.kind = kind;
5321 info.error = error;
5322 info.non_fatal = false;
5323 record_operand_error_info (opcode, &info);
5324 }
5325
5326 static void
5327 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5328 enum aarch64_operand_error_kind kind,
5329 const char* error, const int *extra_data)
5330 {
5331 aarch64_operand_error info;
5332 info.index = idx;
5333 info.kind = kind;
5334 info.error = error;
5335 info.data[0].i = extra_data[0];
5336 info.data[1].i = extra_data[1];
5337 info.data[2].i = extra_data[2];
5338 info.non_fatal = false;
5339 record_operand_error_info (opcode, &info);
5340 }
5341
5342 static void
5343 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5344 const char* error, int lower_bound,
5345 int upper_bound)
5346 {
5347 int data[3] = {lower_bound, upper_bound, 0};
5348 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5349 error, data);
5350 }
5351
5352 /* Remove the operand error record for *OPCODE. */
5353 static void ATTRIBUTE_UNUSED
5354 remove_operand_error_record (const aarch64_opcode *opcode)
5355 {
5356 if (opcode_has_operand_error_p (opcode))
5357 {
5358 operand_error_record* record = operand_error_report.head;
5359 gas_assert (record != NULL && operand_error_report.tail != NULL);
5360 operand_error_report.head = record->next;
5361 record->next = free_opnd_error_record_nodes;
5362 free_opnd_error_record_nodes = record;
5363 if (operand_error_report.head == NULL)
5364 {
5365 gas_assert (operand_error_report.tail == record);
5366 operand_error_report.tail = NULL;
5367 }
5368 }
5369 }
5370
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.
	 An all-NIL sequence marks the end of the list.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers in *INSTR agree with this
	 candidate sequence.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Keep the first sequence with the highest match count.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5420
5421 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5422 corresponding operands in *INSTR. */
5423
5424 static inline void
5425 assign_qualifier_sequence (aarch64_inst *instr,
5426 const aarch64_opnd_qualifier_t *qualifiers)
5427 {
5428 int i = 0;
5429 int num_opnds = aarch64_num_of_operands (instr->opcode);
5430 gas_assert (num_opnds);
5431 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5432 instr->operands[i].qualifier = *qualifiers;
5433 }
5434
5435 /* Callback used by aarch64_print_operand to apply STYLE to the
5436 disassembler output created from FMT and ARGS. The STYLER object holds
5437 any required state. Must return a pointer to a string (created from FMT
5438 and ARGS) that will continue to be valid until the complete disassembled
5439 instruction has been printed.
5440
5441 We don't currently add any styling to the output of the disassembler as
5442 used within assembler error messages, and so STYLE is ignored here. A
5443 new string is allocated on the obstack help within STYLER and returned
5444 to the caller. */
5445
5446 static const char *aarch64_apply_style
5447 (struct aarch64_styler *styler,
5448 enum disassembler_style style ATTRIBUTE_UNUSED,
5449 const char *fmt, va_list args)
5450 {
5451 int res;
5452 char *ptr;
5453 struct obstack *stack = (struct obstack *) styler->state;
5454 va_list ap;
5455
5456 /* Calculate the required space. */
5457 va_copy (ap, args);
5458 res = vsnprintf (NULL, 0, fmt, ap);
5459 va_end (ap);
5460 gas_assert (res >= 0);
5461
5462 /* Allocate space on the obstack and format the result. */
5463 ptr = (char *) obstack_alloc (stack, res + 1);
5464 res = vsnprintf (ptr, (res + 1), fmt, args);
5465 gas_assert (res >= 0);
5466
5467 return ptr;
5468 }
5469
/* Print operands for the diagnosis purpose.  The textual form of each
   operand of OPCODE/OPNDS is appended to BUF, comma-separated.  BUF must
   already contain a NUL-terminated prefix (e.g. the mnemonic) and be
   large enough for the result.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Strings produced by aarch64_print_operand are allocated on this
     obstack and freed in one go at the end.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  obstack_free (&content, NULL);
}
5521
/* Send to stderr a string as information.  Output is prefixed with the
   current file name and line (when known) and "Info: ", and terminated
   with a newline.  */

static void
output_info (const char *format, ...)
{
  unsigned int lineno;
  const char *filename;
  va_list ap;

  filename = as_where (&lineno);
  if (filename != NULL)
    {
      if (lineno != 0)
	fprintf (stderr, "%s:%u: ", filename, lineno);
      else
	fprintf (stderr, "%s: ", filename);
    }

  fprintf (stderr, _("Info: "));
  va_start (ap, format);
  vfprintf (stderr, format, ap);
  va_end (ap);
  (void) putc ('\n', stderr);
}
5545
5546 /* Output one operand error record. */
5547
5548 static void
5549 output_operand_error_record (const operand_error_record *record, char *str)
5550 {
5551 const aarch64_operand_error *detail = &record->detail;
5552 int idx = detail->index;
5553 const aarch64_opcode *opcode = record->opcode;
5554 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
5555 : AARCH64_OPND_NIL);
5556
5557 typedef void (*handler_t)(const char *format, ...);
5558 handler_t handler = detail->non_fatal ? as_warn : as_bad;
5559
5560 switch (detail->kind)
5561 {
5562 case AARCH64_OPDE_NIL:
5563 gas_assert (0);
5564 break;
5565
5566 case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
5567 handler (_("this `%s' should have an immediately preceding `%s'"
5568 " -- `%s'"),
5569 detail->data[0].s, detail->data[1].s, str);
5570 break;
5571
5572 case AARCH64_OPDE_EXPECTED_A_AFTER_B:
5573 handler (_("the preceding `%s' should be followed by `%s` rather"
5574 " than `%s` -- `%s'"),
5575 detail->data[1].s, detail->data[0].s, opcode->name, str);
5576 break;
5577
5578 case AARCH64_OPDE_SYNTAX_ERROR:
5579 case AARCH64_OPDE_RECOVERABLE:
5580 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
5581 case AARCH64_OPDE_OTHER_ERROR:
5582 /* Use the prepared error message if there is, otherwise use the
5583 operand description string to describe the error. */
5584 if (detail->error != NULL)
5585 {
5586 if (idx < 0)
5587 handler (_("%s -- `%s'"), detail->error, str);
5588 else
5589 handler (_("%s at operand %d -- `%s'"),
5590 detail->error, idx + 1, str);
5591 }
5592 else
5593 {
5594 gas_assert (idx >= 0);
5595 handler (_("operand %d must be %s -- `%s'"), idx + 1,
5596 aarch64_get_operand_desc (opd_code), str);
5597 }
5598 break;
5599
5600 case AARCH64_OPDE_INVALID_VARIANT:
5601 handler (_("operand mismatch -- `%s'"), str);
5602 if (verbose_error_p)
5603 {
5604 /* We will try to correct the erroneous instruction and also provide
5605 more information e.g. all other valid variants.
5606
5607 The string representation of the corrected instruction and other
5608 valid variants are generated by
5609
5610 1) obtaining the intermediate representation of the erroneous
5611 instruction;
5612 2) manipulating the IR, e.g. replacing the operand qualifier;
5613 3) printing out the instruction by calling the printer functions
5614 shared with the disassembler.
5615
5616 The limitation of this method is that the exact input assembly
5617 line cannot be accurately reproduced in some cases, for example an
5618 optional operand present in the actual assembly line will be
5619 omitted in the output; likewise for the optional syntax rules,
5620 e.g. the # before the immediate. Another limitation is that the
5621 assembly symbols and relocation operations in the assembly line
5622 currently cannot be printed out in the error report. Last but not
5623 least, when there is other error(s) co-exist with this error, the
5624 'corrected' instruction may be still incorrect, e.g. given
5625 'ldnp h0,h1,[x0,#6]!'
5626 this diagnosis will provide the version:
5627 'ldnp s0,s1,[x0,#6]!'
5628 which is still not right. */
5629 size_t len = strlen (get_mnemonic_name (str));
5630 int i, qlf_idx;
5631 bool result;
5632 char buf[2048];
5633 aarch64_inst *inst_base = &inst.base;
5634 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
5635
5636 /* Init inst. */
5637 reset_aarch64_instruction (&inst);
5638 inst_base->opcode = opcode;
5639
5640 /* Reset the error report so that there is no side effect on the
5641 following operand parsing. */
5642 init_operand_error_report ();
5643
5644 /* Fill inst. */
5645 result = parse_operands (str + len, opcode)
5646 && programmer_friendly_fixup (&inst);
5647 gas_assert (result);
5648 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
5649 NULL, NULL, insn_sequence);
5650 gas_assert (!result);
5651
5652 /* Find the most matched qualifier sequence. */
5653 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
5654 gas_assert (qlf_idx > -1);
5655
5656 /* Assign the qualifiers. */
5657 assign_qualifier_sequence (inst_base,
5658 opcode->qualifiers_list[qlf_idx]);
5659
5660 /* Print the hint. */
5661 output_info (_(" did you mean this?"));
5662 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5663 print_operands (buf, opcode, inst_base->operands);
5664 output_info (_(" %s"), buf);
5665
5666 /* Print out other variant(s) if there is any. */
5667 if (qlf_idx != 0 ||
5668 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
5669 output_info (_(" other valid variant(s):"));
5670
5671 /* For each pattern. */
5672 qualifiers_list = opcode->qualifiers_list;
5673 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5674 {
5675 /* Most opcodes has much fewer patterns in the list.
5676 First NIL qualifier indicates the end in the list. */
5677 if (empty_qualifier_sequence_p (*qualifiers_list))
5678 break;
5679
5680 if (i != qlf_idx)
5681 {
5682 /* Mnemonics name. */
5683 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5684
5685 /* Assign the qualifiers. */
5686 assign_qualifier_sequence (inst_base, *qualifiers_list);
5687
5688 /* Print instruction. */
5689 print_operands (buf, opcode, inst_base->operands);
5690
5691 output_info (_(" %s"), buf);
5692 }
5693 }
5694 }
5695 break;
5696
5697 case AARCH64_OPDE_UNTIED_IMMS:
5698 handler (_("operand %d must have the same immediate value "
5699 "as operand 1 -- `%s'"),
5700 detail->index + 1, str);
5701 break;
5702
5703 case AARCH64_OPDE_UNTIED_OPERAND:
5704 handler (_("operand %d must be the same register as operand 1 -- `%s'"),
5705 detail->index + 1, str);
5706 break;
5707
5708 case AARCH64_OPDE_OUT_OF_RANGE:
5709 if (detail->data[0].i != detail->data[1].i)
5710 handler (_("%s out of range %d to %d at operand %d -- `%s'"),
5711 detail->error ? detail->error : _("immediate value"),
5712 detail->data[0].i, detail->data[1].i, idx + 1, str);
5713 else
5714 handler (_("%s must be %d at operand %d -- `%s'"),
5715 detail->error ? detail->error : _("immediate value"),
5716 detail->data[0].i, idx + 1, str);
5717 break;
5718
5719 case AARCH64_OPDE_REG_LIST:
5720 if (detail->data[0].i == 1)
5721 handler (_("invalid number of registers in the list; "
5722 "only 1 register is expected at operand %d -- `%s'"),
5723 idx + 1, str);
5724 else
5725 handler (_("invalid number of registers in the list; "
5726 "%d registers are expected at operand %d -- `%s'"),
5727 detail->data[0].i, idx + 1, str);
5728 break;
5729
5730 case AARCH64_OPDE_UNALIGNED:
5731 handler (_("immediate value must be a multiple of "
5732 "%d at operand %d -- `%s'"),
5733 detail->data[0].i, idx + 1, str);
5734 break;
5735
5736 default:
5737 gas_assert (0);
5738 break;
5739 }
5740 }
5741
5742 /* Process and output the error message about the operand mismatching.
5743
5744 When this function is called, the operand error information had
5745 been collected for an assembly line and there will be multiple
5746 errors in the case of multiple instruction templates; output the
5747 error message that most closely describes the problem.
5748
5749 The errors to be printed can be filtered on printing all errors
5750 or only non-fatal errors. This distinction has to be made because
5751 the error buffer may already be filled with fatal errors we don't want to
5752 print due to the different instruction templates. */
5753
5754 static void
5755 output_operand_error_report (char *str, bool non_fatal_only)
5756 {
5757 int largest_error_pos;
5758 const char *msg = NULL;
5759 enum aarch64_operand_error_kind kind;
5760 operand_error_record *curr;
5761 operand_error_record *head = operand_error_report.head;
5762 operand_error_record *record = NULL;
5763
5764 /* No error to report. */
5765 if (head == NULL)
5766 return;
5767
5768 gas_assert (head != NULL && operand_error_report.tail != NULL);
5769
5770 /* Only one error. */
5771 if (head == operand_error_report.tail)
5772 {
5773 /* If the only error is a non-fatal one and we don't want to print it,
5774 just exit. */
5775 if (!non_fatal_only || head->detail.non_fatal)
5776 {
5777 DEBUG_TRACE ("single opcode entry with error kind: %s",
5778 operand_mismatch_kind_names[head->detail.kind]);
5779 output_operand_error_record (head, str);
5780 }
5781 return;
5782 }
5783
5784 /* Find the error kind of the highest severity. */
5785 DEBUG_TRACE ("multiple opcode entries with error kind");
5786 kind = AARCH64_OPDE_NIL;
5787 for (curr = head; curr != NULL; curr = curr->next)
5788 {
5789 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
5790 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
5791 if (operand_error_higher_severity_p (curr->detail.kind, kind)
5792 && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
5793 kind = curr->detail.kind;
5794 }
5795
5796 gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
5797
5798 /* Pick up one of errors of KIND to report. */
5799 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
5800 for (curr = head; curr != NULL; curr = curr->next)
5801 {
5802 /* If we don't want to print non-fatal errors then don't consider them
5803 at all. */
5804 if (curr->detail.kind != kind
5805 || (non_fatal_only && !curr->detail.non_fatal))
5806 continue;
5807 /* If there are multiple errors, pick up the one with the highest
5808 mismatching operand index. In the case of multiple errors with
5809 the equally highest operand index, pick up the first one or the
5810 first one with non-NULL error message. */
5811 if (curr->detail.index > largest_error_pos
5812 || (curr->detail.index == largest_error_pos && msg == NULL
5813 && curr->detail.error != NULL))
5814 {
5815 largest_error_pos = curr->detail.index;
5816 record = curr;
5817 msg = record->detail.error;
5818 }
5819 }
5820
5821 /* The way errors are collected in the back-end is a bit non-intuitive. But
5822 essentially, because each operand template is tried recursively you may
5823 always have errors collected from the previous tried OPND. These are
5824 usually skipped if there is one successful match. However now with the
5825 non-fatal errors we have to ignore those previously collected hard errors
5826 when we're only interested in printing the non-fatal ones. This condition
5827 prevents us from printing errors that are not appropriate, since we did
5828 match a condition, but it also has warnings that it wants to print. */
5829 if (non_fatal_only && !record)
5830 return;
5831
5832 gas_assert (largest_error_pos != -2 && record != NULL);
5833 DEBUG_TRACE ("Pick up error kind %s to report",
5834 operand_mismatch_kind_names[record->detail.kind]);
5835
5836 /* Output. */
5837 output_operand_error_record (record, str);
5838 }
5839 \f
/* Store the 32-bit instruction word INSN into BUF in little-endian
   byte order, irrespective of host endianness.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  for (i = 0; i < 4; i++)
    p[i] = (insn >> (i * 8)) & 0xff;
}
5850
/* Read a 32-bit little-endian instruction word from BUF, regardless of
   host endianness.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
5860
5861 static void
5862 output_inst (struct aarch64_inst *new_inst)
5863 {
5864 char *to = NULL;
5865
5866 to = frag_more (INSN_SIZE);
5867
5868 frag_now->tc_frag_data.recorded = 1;
5869
5870 put_aarch64_insn (to, inst.base.value);
5871
5872 if (inst.reloc.type != BFD_RELOC_UNUSED)
5873 {
5874 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
5875 INSN_SIZE, &inst.reloc.exp,
5876 inst.reloc.pc_rel,
5877 inst.reloc.type);
5878 DEBUG_TRACE ("Prepared relocation fix up");
5879 /* Don't check the addend value against the instruction size,
5880 that's the job of our code in md_apply_fix(). */
5881 fixp->fx_no_overflow = 1;
5882 if (new_inst != NULL)
5883 fixp->tc_fix_data.inst = new_inst;
5884 if (aarch64_gas_internal_fixup_p ())
5885 {
5886 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
5887 fixp->tc_fix_data.opnd = inst.reloc.opnd;
5888 fixp->fx_addnumber = inst.reloc.flags;
5889 }
5890 }
5891
5892 dwarf2_emit_insn (INSN_SIZE);
5893 }
5894
/* Link together opcodes of the same name.  Each node carries one
   aarch64_opcode entry; NEXT chains the remaining templates that share
   the same mnemonic.  */

struct templates
{
  const aarch64_opcode *opcode;
  struct templates *next;
};

typedef struct templates templates;
5904
5905 static templates *
5906 lookup_mnemonic (const char *start, int len)
5907 {
5908 templates *templ = NULL;
5909
5910 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5911 return templ;
5912 }
5913
5914 /* Subroutine of md_assemble, responsible for looking up the primary
5915 opcode from the mnemonic the user wrote. BASE points to the beginning
5916 of the mnemonic, DOT points to the first '.' within the mnemonic
5917 (if any) and END points to the end of the mnemonic. */
5918
5919 static templates *
5920 opcode_lookup (char *base, char *dot, char *end)
5921 {
5922 const aarch64_cond *cond;
5923 char condname[16];
5924 int len;
5925
5926 if (dot == end)
5927 return 0;
5928
5929 inst.cond = COND_ALWAYS;
5930
5931 /* Handle a possible condition. */
5932 if (dot)
5933 {
5934 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5935 if (!cond)
5936 return 0;
5937 inst.cond = cond->value;
5938 len = dot - base;
5939 }
5940 else
5941 len = end - base;
5942
5943 if (inst.cond == COND_ALWAYS)
5944 {
5945 /* Look for unaffixed mnemonic. */
5946 return lookup_mnemonic (base, len);
5947 }
5948 else if (len <= 13)
5949 {
5950 /* append ".c" to mnemonic if conditional */
5951 memcpy (condname, base, len);
5952 memcpy (condname + len, ".c", 2);
5953 base = condname;
5954 len += 2;
5955 return lookup_mnemonic (base, len);
5956 }
5957
5958 return NULL;
5959 }
5960
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   The value used is the opcode entry's default, obtained via
   get_optional_operand_default_value.  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  /* Only an optional operand that the user actually omitted may be
     defaulted here.  */
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default value is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element (register lane) operands: the default value is the
       register number of the lane operand.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default value is the immediate itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted scaled pattern defaults to "<pattern>, MUL #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* No immediate was parsed, so make sure no relocation is pending.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Option operands: the default value indexes the corresponding
       option table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6059
/* Process the relocation type for move wide instructions.
   Set the shifter amount on operand 1 according to the relocation's
   16-bit group (G0/G1/G2/G3), and reject relocation types that are
   invalid for the instruction form.
   Return TRUE on success; otherwise return FALSE.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* Operand 0 qualified as W means a 32-bit destination register.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK inserts bits without affecting the rest of the register, so
     the signed (_S) and PC-relative (PREL) forms, as well as the
     TLS forms listed below, make no sense for it.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Translate the relocation's 16-bit group into a shift amount.  */
  switch (inst.reloc.type)
    {
    /* G0: bits [15:0], no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1: bits [31:16], LSL #16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2: bits [47:32], LSL #32 -- only valid for 64-bit registers.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* G3: bits [63:48], LSL #48 -- only valid for 64-bit registers.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6161
/* A primitive log calculator.  Return log2 of SIZE for the power-of-two
   transfer sizes 1, 2, 4, 8 and 16; assert (and return -1) otherwise.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Also guard SIZE == 0: ls[size - 1] would otherwise read out of
     bounds before the assertion below could catch it.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6177
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.
   The real type is selected by the pseudo reloc's family (plain LO12,
   TLSLD DTPREL, TLSLE TPREL, each with and without _NC) and by the
   transfer size implied by operand 1's qualifier.  Return
   BFD_RELOC_AARCH64_NONE if the qualifier's size is too large for the
   reloc family.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows are the pseudo reloc families in the same order as the pseudo
     reloc codes; columns are indexed by log2 of the transfer size
     (8/16/32/64/128 bits).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1 was left unqualified, deduce its qualifier from
     operand 0's.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS families have no 128-bit variant (see the NONE entries in
     the table above).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6265
/* Check whether a register list REGINFO is valid.  The registers must be
   numbered in increasing order (modulo 32), in increments of one or two.

   If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
   increments of two.

   REGINFO packs the register count minus one in its low 2 bits, with
   each register number in successive 5-bit fields above that.

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t nb_regs = (reginfo & 0x3) + 1;
  uint32_t step = accept_alternate ? 2 : 1;
  uint32_t expected, i;

  reginfo >>= 2;
  expected = reginfo & 0x1f;

  for (i = 1; i < nb_regs; i++)
    {
      reginfo >>= 5;
      expected = (expected + step) & 0x1f;
      if ((reginfo & 0x1f) != expected)
	return false;
    }

  return true;
}
6296
6297 /* Generic instruction operand parser. This does no encoding and no
6298 semantic validation; it merely squirrels values away in the inst
6299 structure. Returns TRUE or FALSE depending on whether the
6300 specified grammar matched. */
6301
6302 static bool
6303 parse_operands (char *str, const aarch64_opcode *opcode)
6304 {
6305 int i;
6306 char *backtrack_pos = 0;
6307 const enum aarch64_opnd *operands = opcode->operands;
6308 aarch64_reg_type imm_reg_type;
6309
6310 clear_error ();
6311 skip_whitespace (str);
6312
6313 if (AARCH64_CPU_HAS_ANY_FEATURES (*opcode->avariant,
6314 AARCH64_FEATURE_SVE
6315 | AARCH64_FEATURE_SVE2))
6316 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6317 else
6318 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6319
6320 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6321 {
6322 int64_t val;
6323 const reg_entry *reg;
6324 int comma_skipped_p = 0;
6325 struct vector_type_el vectype;
6326 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6327 aarch64_opnd_info *info = &inst.base.operands[i];
6328 aarch64_reg_type reg_type;
6329
6330 DEBUG_TRACE ("parse operand %d", i);
6331
6332 /* Assign the operand code. */
6333 info->type = operands[i];
6334
6335 if (optional_operand_p (opcode, i))
6336 {
6337 /* Remember where we are in case we need to backtrack. */
6338 gas_assert (!backtrack_pos);
6339 backtrack_pos = str;
6340 }
6341
6342 /* Expect comma between operands; the backtrack mechanism will take
6343 care of cases of omitted optional operand. */
6344 if (i > 0 && ! skip_past_char (&str, ','))
6345 {
6346 set_syntax_error (_("comma expected between operands"));
6347 goto failure;
6348 }
6349 else
6350 comma_skipped_p = 1;
6351
6352 switch (operands[i])
6353 {
6354 case AARCH64_OPND_Rd:
6355 case AARCH64_OPND_Rn:
6356 case AARCH64_OPND_Rm:
6357 case AARCH64_OPND_Rt:
6358 case AARCH64_OPND_Rt2:
6359 case AARCH64_OPND_Rs:
6360 case AARCH64_OPND_Ra:
6361 case AARCH64_OPND_Rt_LS64:
6362 case AARCH64_OPND_Rt_SYS:
6363 case AARCH64_OPND_PAIRREG:
6364 case AARCH64_OPND_SVE_Rm:
6365 po_int_reg_or_fail (REG_TYPE_R_Z);
6366
6367 /* In LS64 load/store instructions Rt register number must be even
6368 and <=22. */
6369 if (operands[i] == AARCH64_OPND_Rt_LS64)
6370 {
6371 /* We've already checked if this is valid register.
6372 This will check if register number (Rt) is not undefined for LS64
6373 instructions:
6374 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6375 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6376 {
6377 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6378 goto failure;
6379 }
6380 }
6381 break;
6382
6383 case AARCH64_OPND_Rd_SP:
6384 case AARCH64_OPND_Rn_SP:
6385 case AARCH64_OPND_Rt_SP:
6386 case AARCH64_OPND_SVE_Rn_SP:
6387 case AARCH64_OPND_Rm_SP:
6388 po_int_reg_or_fail (REG_TYPE_R_SP);
6389 break;
6390
6391 case AARCH64_OPND_Rm_EXT:
6392 case AARCH64_OPND_Rm_SFT:
6393 po_misc_or_fail (parse_shifter_operand
6394 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6395 ? SHIFTED_ARITH_IMM
6396 : SHIFTED_LOGIC_IMM)));
6397 if (!info->shifter.operator_present)
6398 {
6399 /* Default to LSL if not present. Libopcodes prefers shifter
6400 kind to be explicit. */
6401 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6402 info->shifter.kind = AARCH64_MOD_LSL;
6403 /* For Rm_EXT, libopcodes will carry out further check on whether
6404 or not stack pointer is used in the instruction (Recall that
6405 "the extend operator is not optional unless at least one of
6406 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6407 }
6408 break;
6409
6410 case AARCH64_OPND_Fd:
6411 case AARCH64_OPND_Fn:
6412 case AARCH64_OPND_Fm:
6413 case AARCH64_OPND_Fa:
6414 case AARCH64_OPND_Ft:
6415 case AARCH64_OPND_Ft2:
6416 case AARCH64_OPND_Sd:
6417 case AARCH64_OPND_Sn:
6418 case AARCH64_OPND_Sm:
6419 case AARCH64_OPND_SVE_VZn:
6420 case AARCH64_OPND_SVE_Vd:
6421 case AARCH64_OPND_SVE_Vm:
6422 case AARCH64_OPND_SVE_Vn:
6423 reg = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, NULL);
6424 if (!reg)
6425 {
6426 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6427 goto failure;
6428 }
6429 gas_assert (reg->type >= REG_TYPE_FP_B
6430 && reg->type <= REG_TYPE_FP_Q);
6431
6432 info->reg.regno = reg->number;
6433 info->qualifier = AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);
6434 break;
6435
6436 case AARCH64_OPND_SVE_Pd:
6437 case AARCH64_OPND_SVE_Pg3:
6438 case AARCH64_OPND_SVE_Pg4_5:
6439 case AARCH64_OPND_SVE_Pg4_10:
6440 case AARCH64_OPND_SVE_Pg4_16:
6441 case AARCH64_OPND_SVE_Pm:
6442 case AARCH64_OPND_SVE_Pn:
6443 case AARCH64_OPND_SVE_Pt:
6444 case AARCH64_OPND_SME_Pm:
6445 reg_type = REG_TYPE_PN;
6446 goto vector_reg;
6447
6448 case AARCH64_OPND_SVE_Za_5:
6449 case AARCH64_OPND_SVE_Za_16:
6450 case AARCH64_OPND_SVE_Zd:
6451 case AARCH64_OPND_SVE_Zm_5:
6452 case AARCH64_OPND_SVE_Zm_16:
6453 case AARCH64_OPND_SVE_Zn:
6454 case AARCH64_OPND_SVE_Zt:
6455 reg_type = REG_TYPE_ZN;
6456 goto vector_reg;
6457
6458 case AARCH64_OPND_Va:
6459 case AARCH64_OPND_Vd:
6460 case AARCH64_OPND_Vn:
6461 case AARCH64_OPND_Vm:
6462 reg_type = REG_TYPE_VN;
6463 vector_reg:
6464 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6465 if (!reg)
6466 {
6467 first_error (_(get_reg_expected_msg (reg_type)));
6468 goto failure;
6469 }
6470 if (vectype.defined & NTA_HASINDEX)
6471 goto failure;
6472
6473 info->reg.regno = reg->number;
6474 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6475 && vectype.type == NT_invtype)
6476 /* Unqualified Pn and Zn registers are allowed in certain
6477 contexts. Rely on F_STRICT qualifier checking to catch
6478 invalid uses. */
6479 info->qualifier = AARCH64_OPND_QLF_NIL;
6480 else
6481 {
6482 info->qualifier = vectype_to_qualifier (&vectype);
6483 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6484 goto failure;
6485 }
6486 break;
6487
6488 case AARCH64_OPND_VdD1:
6489 case AARCH64_OPND_VnD1:
6490 reg = aarch64_reg_parse (&str, REG_TYPE_VN, &vectype);
6491 if (!reg)
6492 {
6493 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6494 goto failure;
6495 }
6496 if (vectype.type != NT_d || vectype.index != 1)
6497 {
6498 set_fatal_syntax_error
6499 (_("the top half of a 128-bit FP/SIMD register is expected"));
6500 goto failure;
6501 }
6502 info->reg.regno = reg->number;
6503 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6504 here; it is correct for the purpose of encoding/decoding since
6505 only the register number is explicitly encoded in the related
6506 instructions, although this appears a bit hacky. */
6507 info->qualifier = AARCH64_OPND_QLF_S_D;
6508 break;
6509
6510 case AARCH64_OPND_SVE_Zm3_INDEX:
6511 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6512 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6513 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6514 case AARCH64_OPND_SVE_Zm4_INDEX:
6515 case AARCH64_OPND_SVE_Zn_INDEX:
6516 reg_type = REG_TYPE_ZN;
6517 goto vector_reg_index;
6518
6519 case AARCH64_OPND_Ed:
6520 case AARCH64_OPND_En:
6521 case AARCH64_OPND_Em:
6522 case AARCH64_OPND_Em16:
6523 case AARCH64_OPND_SM3_IMM2:
6524 reg_type = REG_TYPE_VN;
6525 vector_reg_index:
6526 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6527 if (!reg)
6528 {
6529 first_error (_(get_reg_expected_msg (reg_type)));
6530 goto failure;
6531 }
6532 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6533 goto failure;
6534
6535 info->reglane.regno = reg->number;
6536 info->reglane.index = vectype.index;
6537 info->qualifier = vectype_to_qualifier (&vectype);
6538 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6539 goto failure;
6540 break;
6541
6542 case AARCH64_OPND_SVE_ZnxN:
6543 case AARCH64_OPND_SVE_ZtxN:
6544 reg_type = REG_TYPE_ZN;
6545 goto vector_reg_list;
6546
6547 case AARCH64_OPND_LVn:
6548 case AARCH64_OPND_LVt:
6549 case AARCH64_OPND_LVt_AL:
6550 case AARCH64_OPND_LEt:
6551 reg_type = REG_TYPE_VN;
6552 vector_reg_list:
6553 if (reg_type == REG_TYPE_ZN
6554 && get_opcode_dependent_value (opcode) == 1
6555 && *str != '{')
6556 {
6557 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6558 if (!reg)
6559 {
6560 first_error (_(get_reg_expected_msg (reg_type)));
6561 goto failure;
6562 }
6563 info->reglist.first_regno = reg->number;
6564 info->reglist.num_regs = 1;
6565 }
6566 else
6567 {
6568 val = parse_vector_reg_list (&str, reg_type, &vectype);
6569 if (val == PARSE_FAIL)
6570 goto failure;
6571
6572 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6573 {
6574 set_fatal_syntax_error (_("invalid register list"));
6575 goto failure;
6576 }
6577
6578 if (vectype.width != 0 && *str != ',')
6579 {
6580 set_fatal_syntax_error
6581 (_("expected element type rather than vector type"));
6582 goto failure;
6583 }
6584
6585 info->reglist.first_regno = (val >> 2) & 0x1f;
6586 info->reglist.num_regs = (val & 0x3) + 1;
6587 }
6588 if (operands[i] == AARCH64_OPND_LEt)
6589 {
6590 if (!(vectype.defined & NTA_HASINDEX))
6591 goto failure;
6592 info->reglist.has_index = 1;
6593 info->reglist.index = vectype.index;
6594 }
6595 else
6596 {
6597 if (vectype.defined & NTA_HASINDEX)
6598 goto failure;
6599 if (!(vectype.defined & NTA_HASTYPE))
6600 {
6601 if (reg_type == REG_TYPE_ZN)
6602 set_fatal_syntax_error (_("missing type suffix"));
6603 goto failure;
6604 }
6605 }
6606 info->qualifier = vectype_to_qualifier (&vectype);
6607 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6608 goto failure;
6609 break;
6610
6611 case AARCH64_OPND_CRn:
6612 case AARCH64_OPND_CRm:
6613 {
6614 char prefix = *(str++);
6615 if (prefix != 'c' && prefix != 'C')
6616 goto failure;
6617
6618 po_imm_nc_or_fail ();
6619 if (val > 15)
6620 {
6621 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6622 goto failure;
6623 }
6624 info->qualifier = AARCH64_OPND_QLF_CR;
6625 info->imm.value = val;
6626 break;
6627 }
6628
6629 case AARCH64_OPND_SHLL_IMM:
6630 case AARCH64_OPND_IMM_VLSR:
6631 po_imm_or_fail (1, 64);
6632 info->imm.value = val;
6633 break;
6634
6635 case AARCH64_OPND_CCMP_IMM:
6636 case AARCH64_OPND_SIMM5:
6637 case AARCH64_OPND_FBITS:
6638 case AARCH64_OPND_TME_UIMM16:
6639 case AARCH64_OPND_UIMM4:
6640 case AARCH64_OPND_UIMM4_ADDG:
6641 case AARCH64_OPND_UIMM10:
6642 case AARCH64_OPND_UIMM3_OP1:
6643 case AARCH64_OPND_UIMM3_OP2:
6644 case AARCH64_OPND_IMM_VLSL:
6645 case AARCH64_OPND_IMM:
6646 case AARCH64_OPND_IMM_2:
6647 case AARCH64_OPND_WIDTH:
6648 case AARCH64_OPND_SVE_INV_LIMM:
6649 case AARCH64_OPND_SVE_LIMM:
6650 case AARCH64_OPND_SVE_LIMM_MOV:
6651 case AARCH64_OPND_SVE_SHLIMM_PRED:
6652 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6653 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6654 case AARCH64_OPND_SVE_SHRIMM_PRED:
6655 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6656 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6657 case AARCH64_OPND_SVE_SIMM5:
6658 case AARCH64_OPND_SVE_SIMM5B:
6659 case AARCH64_OPND_SVE_SIMM6:
6660 case AARCH64_OPND_SVE_SIMM8:
6661 case AARCH64_OPND_SVE_UIMM3:
6662 case AARCH64_OPND_SVE_UIMM7:
6663 case AARCH64_OPND_SVE_UIMM8:
6664 case AARCH64_OPND_SVE_UIMM8_53:
6665 case AARCH64_OPND_IMM_ROT1:
6666 case AARCH64_OPND_IMM_ROT2:
6667 case AARCH64_OPND_IMM_ROT3:
6668 case AARCH64_OPND_SVE_IMM_ROT1:
6669 case AARCH64_OPND_SVE_IMM_ROT2:
6670 case AARCH64_OPND_SVE_IMM_ROT3:
6671 case AARCH64_OPND_CSSC_SIMM8:
6672 case AARCH64_OPND_CSSC_UIMM8:
6673 po_imm_nc_or_fail ();
6674 info->imm.value = val;
6675 break;
6676
6677 case AARCH64_OPND_SVE_AIMM:
6678 case AARCH64_OPND_SVE_ASIMM:
6679 po_imm_nc_or_fail ();
6680 info->imm.value = val;
6681 skip_whitespace (str);
6682 if (skip_past_comma (&str))
6683 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6684 else
6685 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6686 break;
6687
6688 case AARCH64_OPND_SVE_PATTERN:
6689 po_enum_or_fail (aarch64_sve_pattern_array);
6690 info->imm.value = val;
6691 break;
6692
6693 case AARCH64_OPND_SVE_PATTERN_SCALED:
6694 po_enum_or_fail (aarch64_sve_pattern_array);
6695 info->imm.value = val;
6696 if (skip_past_comma (&str)
6697 && !parse_shift (&str, info, SHIFTED_MUL))
6698 goto failure;
6699 if (!info->shifter.operator_present)
6700 {
6701 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6702 info->shifter.kind = AARCH64_MOD_MUL;
6703 info->shifter.amount = 1;
6704 }
6705 break;
6706
6707 case AARCH64_OPND_SVE_PRFOP:
6708 po_enum_or_fail (aarch64_sve_prfop_array);
6709 info->imm.value = val;
6710 break;
6711
6712 case AARCH64_OPND_UIMM7:
6713 po_imm_or_fail (0, 127);
6714 info->imm.value = val;
6715 break;
6716
6717 case AARCH64_OPND_IDX:
6718 case AARCH64_OPND_MASK:
6719 case AARCH64_OPND_BIT_NUM:
6720 case AARCH64_OPND_IMMR:
6721 case AARCH64_OPND_IMMS:
6722 po_imm_or_fail (0, 63);
6723 info->imm.value = val;
6724 break;
6725
6726 case AARCH64_OPND_IMM0:
6727 po_imm_nc_or_fail ();
6728 if (val != 0)
6729 {
6730 set_fatal_syntax_error (_("immediate zero expected"));
6731 goto failure;
6732 }
6733 info->imm.value = 0;
6734 break;
6735
6736 case AARCH64_OPND_FPIMM0:
6737 {
6738 int qfloat;
6739 bool res1 = false, res2 = false;
6740 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6741 it is probably not worth the effort to support it. */
6742 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6743 imm_reg_type))
6744 && (error_p ()
6745 || !(res2 = parse_constant_immediate (&str, &val,
6746 imm_reg_type))))
6747 goto failure;
6748 if ((res1 && qfloat == 0) || (res2 && val == 0))
6749 {
6750 info->imm.value = 0;
6751 info->imm.is_fp = 1;
6752 break;
6753 }
6754 set_fatal_syntax_error (_("immediate zero expected"));
6755 goto failure;
6756 }
6757
6758 case AARCH64_OPND_IMM_MOV:
6759 {
6760 char *saved = str;
6761 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6762 reg_name_p (str, REG_TYPE_VN))
6763 goto failure;
6764 str = saved;
6765 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6766 GE_OPT_PREFIX, REJECT_ABSENT));
6767 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6768 later. fix_mov_imm_insn will try to determine a machine
6769 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6770 message if the immediate cannot be moved by a single
6771 instruction. */
6772 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6773 inst.base.operands[i].skip = 1;
6774 }
6775 break;
6776
6777 case AARCH64_OPND_SIMD_IMM:
6778 case AARCH64_OPND_SIMD_IMM_SFT:
6779 if (! parse_big_immediate (&str, &val, imm_reg_type))
6780 goto failure;
6781 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6782 /* addr_off_p */ 0,
6783 /* need_libopcodes_p */ 1,
6784 /* skip_p */ 1);
6785 /* Parse shift.
6786 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6787 shift, we don't check it here; we leave the checking to
6788 the libopcodes (operand_general_constraint_met_p). By
6789 doing this, we achieve better diagnostics. */
6790 if (skip_past_comma (&str)
6791 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6792 goto failure;
6793 if (!info->shifter.operator_present
6794 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6795 {
6796 /* Default to LSL if not present. Libopcodes prefers shifter
6797 kind to be explicit. */
6798 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6799 info->shifter.kind = AARCH64_MOD_LSL;
6800 }
6801 break;
6802
6803 case AARCH64_OPND_FPIMM:
6804 case AARCH64_OPND_SIMD_FPIMM:
6805 case AARCH64_OPND_SVE_FPIMM8:
6806 {
6807 int qfloat;
6808 bool dp_p;
6809
6810 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6811 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6812 || !aarch64_imm_float_p (qfloat))
6813 {
6814 if (!error_p ())
6815 set_fatal_syntax_error (_("invalid floating-point"
6816 " constant"));
6817 goto failure;
6818 }
6819 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6820 inst.base.operands[i].imm.is_fp = 1;
6821 }
6822 break;
6823
6824 case AARCH64_OPND_SVE_I1_HALF_ONE:
6825 case AARCH64_OPND_SVE_I1_HALF_TWO:
6826 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6827 {
6828 int qfloat;
6829 bool dp_p;
6830
6831 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6832 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6833 {
6834 if (!error_p ())
6835 set_fatal_syntax_error (_("invalid floating-point"
6836 " constant"));
6837 goto failure;
6838 }
6839 inst.base.operands[i].imm.value = qfloat;
6840 inst.base.operands[i].imm.is_fp = 1;
6841 }
6842 break;
6843
6844 case AARCH64_OPND_LIMM:
6845 po_misc_or_fail (parse_shifter_operand (&str, info,
6846 SHIFTED_LOGIC_IMM));
6847 if (info->shifter.operator_present)
6848 {
6849 set_fatal_syntax_error
6850 (_("shift not allowed for bitmask immediate"));
6851 goto failure;
6852 }
6853 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6854 /* addr_off_p */ 0,
6855 /* need_libopcodes_p */ 1,
6856 /* skip_p */ 1);
6857 break;
6858
6859 case AARCH64_OPND_AIMM:
6860 if (opcode->op == OP_ADD)
6861 /* ADD may have relocation types. */
6862 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6863 SHIFTED_ARITH_IMM));
6864 else
6865 po_misc_or_fail (parse_shifter_operand (&str, info,
6866 SHIFTED_ARITH_IMM));
6867 switch (inst.reloc.type)
6868 {
6869 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6870 info->shifter.amount = 12;
6871 break;
6872 case BFD_RELOC_UNUSED:
6873 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6874 if (info->shifter.kind != AARCH64_MOD_NONE)
6875 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6876 inst.reloc.pc_rel = 0;
6877 break;
6878 default:
6879 break;
6880 }
6881 info->imm.value = 0;
6882 if (!info->shifter.operator_present)
6883 {
6884 /* Default to LSL if not present. Libopcodes prefers shifter
6885 kind to be explicit. */
6886 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6887 info->shifter.kind = AARCH64_MOD_LSL;
6888 }
6889 break;
6890
6891 case AARCH64_OPND_HALF:
6892 {
6893 /* #<imm16> or relocation. */
6894 int internal_fixup_p;
6895 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6896 if (internal_fixup_p)
6897 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6898 skip_whitespace (str);
6899 if (skip_past_comma (&str))
6900 {
6901 /* {, LSL #<shift>} */
6902 if (! aarch64_gas_internal_fixup_p ())
6903 {
6904 set_fatal_syntax_error (_("can't mix relocation modifier "
6905 "with explicit shift"));
6906 goto failure;
6907 }
6908 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6909 }
6910 else
6911 inst.base.operands[i].shifter.amount = 0;
6912 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6913 inst.base.operands[i].imm.value = 0;
6914 if (! process_movw_reloc_info ())
6915 goto failure;
6916 }
6917 break;
6918
6919 case AARCH64_OPND_EXCEPTION:
6920 case AARCH64_OPND_UNDEFINED:
6921 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6922 imm_reg_type));
6923 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6924 /* addr_off_p */ 0,
6925 /* need_libopcodes_p */ 0,
6926 /* skip_p */ 1);
6927 break;
6928
6929 case AARCH64_OPND_NZCV:
6930 {
6931 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6932 if (nzcv != NULL)
6933 {
6934 str += 4;
6935 info->imm.value = nzcv->value;
6936 break;
6937 }
6938 po_imm_or_fail (0, 15);
6939 info->imm.value = val;
6940 }
6941 break;
6942
6943 case AARCH64_OPND_COND:
6944 case AARCH64_OPND_COND1:
6945 {
6946 char *start = str;
6947 do
6948 str++;
6949 while (ISALPHA (*str));
6950 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6951 if (info->cond == NULL)
6952 {
6953 set_syntax_error (_("invalid condition"));
6954 goto failure;
6955 }
6956 else if (operands[i] == AARCH64_OPND_COND1
6957 && (info->cond->value & 0xe) == 0xe)
6958 {
6959 /* Do not allow AL or NV. */
6960 set_default_error ();
6961 goto failure;
6962 }
6963 }
6964 break;
6965
6966 case AARCH64_OPND_ADDR_ADRP:
6967 po_misc_or_fail (parse_adrp (&str));
6968 /* Clear the value as operand needs to be relocated. */
6969 info->imm.value = 0;
6970 break;
6971
6972 case AARCH64_OPND_ADDR_PCREL14:
6973 case AARCH64_OPND_ADDR_PCREL19:
6974 case AARCH64_OPND_ADDR_PCREL21:
6975 case AARCH64_OPND_ADDR_PCREL26:
6976 po_misc_or_fail (parse_address (&str, info));
6977 if (!info->addr.pcrel)
6978 {
6979 set_syntax_error (_("invalid pc-relative address"));
6980 goto failure;
6981 }
6982 if (inst.gen_lit_pool
6983 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6984 {
6985 /* Only permit "=value" in the literal load instructions.
6986 The literal will be generated by programmer_friendly_fixup. */
6987 set_syntax_error (_("invalid use of \"=immediate\""));
6988 goto failure;
6989 }
6990 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6991 {
6992 set_syntax_error (_("unrecognized relocation suffix"));
6993 goto failure;
6994 }
6995 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6996 {
6997 info->imm.value = inst.reloc.exp.X_add_number;
6998 inst.reloc.type = BFD_RELOC_UNUSED;
6999 }
7000 else
7001 {
7002 info->imm.value = 0;
7003 if (inst.reloc.type == BFD_RELOC_UNUSED)
7004 switch (opcode->iclass)
7005 {
7006 case compbranch:
7007 case condbranch:
7008 /* e.g. CBZ or B.COND */
7009 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7010 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
7011 break;
7012 case testbranch:
7013 /* e.g. TBZ */
7014 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
7015 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
7016 break;
7017 case branch_imm:
7018 /* e.g. B or BL */
7019 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
7020 inst.reloc.type =
7021 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
7022 : BFD_RELOC_AARCH64_JUMP26;
7023 break;
7024 case loadlit:
7025 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7026 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
7027 break;
7028 case pcreladdr:
7029 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
7030 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
7031 break;
7032 default:
7033 gas_assert (0);
7034 abort ();
7035 }
7036 inst.reloc.pc_rel = 1;
7037 }
7038 break;
7039
7040 case AARCH64_OPND_ADDR_SIMPLE:
7041 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7042 {
7043 /* [<Xn|SP>{, #<simm>}] */
7044 char *start = str;
7045 /* First use the normal address-parsing routines, to get
7046 the usual syntax errors. */
7047 po_misc_or_fail (parse_address (&str, info));
7048 if (info->addr.pcrel || info->addr.offset.is_reg
7049 || !info->addr.preind || info->addr.postind
7050 || info->addr.writeback)
7051 {
7052 set_syntax_error (_("invalid addressing mode"));
7053 goto failure;
7054 }
7055
7056 /* Then retry, matching the specific syntax of these addresses. */
7057 str = start;
7058 po_char_or_fail ('[');
7059 po_reg_or_fail (REG_TYPE_R64_SP);
7060 /* Accept optional ", #0". */
7061 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7062 && skip_past_char (&str, ','))
7063 {
7064 skip_past_char (&str, '#');
7065 if (! skip_past_char (&str, '0'))
7066 {
7067 set_fatal_syntax_error
7068 (_("the optional immediate offset can only be 0"));
7069 goto failure;
7070 }
7071 }
7072 po_char_or_fail (']');
7073 break;
7074 }
7075
7076 case AARCH64_OPND_ADDR_REGOFF:
7077 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7078 po_misc_or_fail (parse_address (&str, info));
7079 regoff_addr:
7080 if (info->addr.pcrel || !info->addr.offset.is_reg
7081 || !info->addr.preind || info->addr.postind
7082 || info->addr.writeback)
7083 {
7084 set_syntax_error (_("invalid addressing mode"));
7085 goto failure;
7086 }
7087 if (!info->shifter.operator_present)
7088 {
7089 /* Default to LSL if not present. Libopcodes prefers shifter
7090 kind to be explicit. */
7091 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7092 info->shifter.kind = AARCH64_MOD_LSL;
7093 }
7094 /* Qualifier to be deduced by libopcodes. */
7095 break;
7096
7097 case AARCH64_OPND_ADDR_SIMM7:
7098 po_misc_or_fail (parse_address (&str, info));
7099 if (info->addr.pcrel || info->addr.offset.is_reg
7100 || (!info->addr.preind && !info->addr.postind))
7101 {
7102 set_syntax_error (_("invalid addressing mode"));
7103 goto failure;
7104 }
7105 if (inst.reloc.type != BFD_RELOC_UNUSED)
7106 {
7107 set_syntax_error (_("relocation not allowed"));
7108 goto failure;
7109 }
7110 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7111 /* addr_off_p */ 1,
7112 /* need_libopcodes_p */ 1,
7113 /* skip_p */ 0);
7114 break;
7115
7116 case AARCH64_OPND_ADDR_SIMM9:
7117 case AARCH64_OPND_ADDR_SIMM9_2:
7118 case AARCH64_OPND_ADDR_SIMM11:
7119 case AARCH64_OPND_ADDR_SIMM13:
7120 po_misc_or_fail (parse_address (&str, info));
7121 if (info->addr.pcrel || info->addr.offset.is_reg
7122 || (!info->addr.preind && !info->addr.postind)
7123 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7124 && info->addr.writeback))
7125 {
7126 set_syntax_error (_("invalid addressing mode"));
7127 goto failure;
7128 }
7129 if (inst.reloc.type != BFD_RELOC_UNUSED)
7130 {
7131 set_syntax_error (_("relocation not allowed"));
7132 goto failure;
7133 }
7134 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7135 /* addr_off_p */ 1,
7136 /* need_libopcodes_p */ 1,
7137 /* skip_p */ 0);
7138 break;
7139
7140 case AARCH64_OPND_ADDR_SIMM10:
7141 case AARCH64_OPND_ADDR_OFFSET:
7142 po_misc_or_fail (parse_address (&str, info));
7143 if (info->addr.pcrel || info->addr.offset.is_reg
7144 || !info->addr.preind || info->addr.postind)
7145 {
7146 set_syntax_error (_("invalid addressing mode"));
7147 goto failure;
7148 }
7149 if (inst.reloc.type != BFD_RELOC_UNUSED)
7150 {
7151 set_syntax_error (_("relocation not allowed"));
7152 goto failure;
7153 }
7154 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7155 /* addr_off_p */ 1,
7156 /* need_libopcodes_p */ 1,
7157 /* skip_p */ 0);
7158 break;
7159
7160 case AARCH64_OPND_ADDR_UIMM12:
7161 po_misc_or_fail (parse_address (&str, info));
7162 if (info->addr.pcrel || info->addr.offset.is_reg
7163 || !info->addr.preind || info->addr.writeback)
7164 {
7165 set_syntax_error (_("invalid addressing mode"));
7166 goto failure;
7167 }
7168 if (inst.reloc.type == BFD_RELOC_UNUSED)
7169 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7170 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7171 || (inst.reloc.type
7172 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7173 || (inst.reloc.type
7174 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7175 || (inst.reloc.type
7176 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7177 || (inst.reloc.type
7178 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7179 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7180 /* Leave qualifier to be determined by libopcodes. */
7181 break;
7182
7183 case AARCH64_OPND_SIMD_ADDR_POST:
7184 /* [<Xn|SP>], <Xm|#<amount>> */
7185 po_misc_or_fail (parse_address (&str, info));
7186 if (!info->addr.postind || !info->addr.writeback)
7187 {
7188 set_syntax_error (_("invalid addressing mode"));
7189 goto failure;
7190 }
7191 if (!info->addr.offset.is_reg)
7192 {
7193 if (inst.reloc.exp.X_op == O_constant)
7194 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7195 else
7196 {
7197 set_fatal_syntax_error
7198 (_("writeback value must be an immediate constant"));
7199 goto failure;
7200 }
7201 }
7202 /* No qualifier. */
7203 break;
7204
7205 case AARCH64_OPND_SME_SM_ZA:
7206 /* { SM | ZA } */
7207 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7208 {
7209 set_syntax_error (_("unknown or missing PSTATE field name"));
7210 goto failure;
7211 }
7212 info->reg.regno = val;
7213 break;
7214
7215 case AARCH64_OPND_SME_PnT_Wm_imm:
7216 /* <Pn>.<T>[<Wm>, #<imm>] */
7217 {
7218 int index_base_reg;
7219 int imm;
7220 val = parse_sme_pred_reg_with_index (&str,
7221 &index_base_reg,
7222 &imm,
7223 &qualifier);
7224 if (val == PARSE_FAIL)
7225 goto failure;
7226
7227 info->za_tile_vector.regno = val;
7228 info->za_tile_vector.index.regno = index_base_reg;
7229 info->za_tile_vector.index.imm = imm;
7230 info->qualifier = qualifier;
7231 break;
7232 }
7233
7234 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7235 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7236 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7237 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7238 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7239 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7240 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7241 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7242 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7243 case AARCH64_OPND_SVE_ADDR_RI_U6:
7244 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7245 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7246 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7247 /* [X<n>{, #imm, MUL VL}]
7248 [X<n>{, #imm}]
7249 but recognizing SVE registers. */
7250 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7251 &offset_qualifier));
7252 if (base_qualifier != AARCH64_OPND_QLF_X)
7253 {
7254 set_syntax_error (_("invalid addressing mode"));
7255 goto failure;
7256 }
7257 sve_regimm:
7258 if (info->addr.pcrel || info->addr.offset.is_reg
7259 || !info->addr.preind || info->addr.writeback)
7260 {
7261 set_syntax_error (_("invalid addressing mode"));
7262 goto failure;
7263 }
7264 if (inst.reloc.type != BFD_RELOC_UNUSED
7265 || inst.reloc.exp.X_op != O_constant)
7266 {
7267 /* Make sure this has priority over
7268 "invalid addressing mode". */
7269 set_fatal_syntax_error (_("constant offset required"));
7270 goto failure;
7271 }
7272 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7273 break;
7274
7275 case AARCH64_OPND_SVE_ADDR_R:
7276 /* [<Xn|SP>{, <R><m>}]
7277 but recognizing SVE registers. */
7278 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7279 &offset_qualifier));
7280 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7281 {
7282 offset_qualifier = AARCH64_OPND_QLF_X;
7283 info->addr.offset.is_reg = 1;
7284 info->addr.offset.regno = 31;
7285 }
7286 else if (base_qualifier != AARCH64_OPND_QLF_X
7287 || offset_qualifier != AARCH64_OPND_QLF_X)
7288 {
7289 set_syntax_error (_("invalid addressing mode"));
7290 goto failure;
7291 }
7292 goto regoff_addr;
7293
7294 case AARCH64_OPND_SVE_ADDR_RR:
7295 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7296 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7297 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7298 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7299 case AARCH64_OPND_SVE_ADDR_RX:
7300 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7301 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7302 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7303 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7304 but recognizing SVE registers. */
7305 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7306 &offset_qualifier));
7307 if (base_qualifier != AARCH64_OPND_QLF_X
7308 || offset_qualifier != AARCH64_OPND_QLF_X)
7309 {
7310 set_syntax_error (_("invalid addressing mode"));
7311 goto failure;
7312 }
7313 goto regoff_addr;
7314
7315 case AARCH64_OPND_SVE_ADDR_RZ:
7316 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7317 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7318 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7319 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7320 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7321 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7322 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7323 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7324 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7325 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7326 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7327 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7328 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7329 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7330 &offset_qualifier));
7331 if (base_qualifier != AARCH64_OPND_QLF_X
7332 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7333 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7334 {
7335 set_syntax_error (_("invalid addressing mode"));
7336 goto failure;
7337 }
7338 info->qualifier = offset_qualifier;
7339 goto regoff_addr;
7340
7341 case AARCH64_OPND_SVE_ADDR_ZX:
7342 /* [Zn.<T>{, <Xm>}]. */
7343 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7344 &offset_qualifier));
7345 /* Things to check:
7346 base_qualifier either S_S or S_D
7347 offset_qualifier must be X
7348 */
7349 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7350 && base_qualifier != AARCH64_OPND_QLF_S_D)
7351 || offset_qualifier != AARCH64_OPND_QLF_X)
7352 {
7353 set_syntax_error (_("invalid addressing mode"));
7354 goto failure;
7355 }
7356 info->qualifier = base_qualifier;
7357 if (!info->addr.offset.is_reg || info->addr.pcrel
7358 || !info->addr.preind || info->addr.writeback
7359 || info->shifter.operator_present != 0)
7360 {
7361 set_syntax_error (_("invalid addressing mode"));
7362 goto failure;
7363 }
7364 info->shifter.kind = AARCH64_MOD_LSL;
7365 break;
7366
7367
7368 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7369 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7370 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7371 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7372 /* [Z<n>.<T>{, #imm}] */
7373 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7374 &offset_qualifier));
7375 if (base_qualifier != AARCH64_OPND_QLF_S_S
7376 && base_qualifier != AARCH64_OPND_QLF_S_D)
7377 {
7378 set_syntax_error (_("invalid addressing mode"));
7379 goto failure;
7380 }
7381 info->qualifier = base_qualifier;
7382 goto sve_regimm;
7383
7384 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7385 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7386 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7387 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7388 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7389
7390 We don't reject:
7391
7392 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7393
7394 here since we get better error messages by leaving it to
7395 the qualifier checking routines. */
7396 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7397 &offset_qualifier));
7398 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7399 && base_qualifier != AARCH64_OPND_QLF_S_D)
7400 || offset_qualifier != base_qualifier)
7401 {
7402 set_syntax_error (_("invalid addressing mode"));
7403 goto failure;
7404 }
7405 info->qualifier = base_qualifier;
7406 goto regoff_addr;
7407
7408 case AARCH64_OPND_SYSREG:
7409 {
7410 uint32_t sysreg_flags;
7411 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7412 &sysreg_flags)) == PARSE_FAIL)
7413 {
7414 set_syntax_error (_("unknown or missing system register name"));
7415 goto failure;
7416 }
7417 inst.base.operands[i].sysreg.value = val;
7418 inst.base.operands[i].sysreg.flags = sysreg_flags;
7419 break;
7420 }
7421
7422 case AARCH64_OPND_PSTATEFIELD:
7423 {
7424 uint32_t sysreg_flags;
7425 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7426 &sysreg_flags)) == PARSE_FAIL)
7427 {
7428 set_syntax_error (_("unknown or missing PSTATE field name"));
7429 goto failure;
7430 }
7431 inst.base.operands[i].pstatefield = val;
7432 inst.base.operands[i].sysreg.flags = sysreg_flags;
7433 break;
7434 }
7435
7436 case AARCH64_OPND_SYSREG_IC:
7437 inst.base.operands[i].sysins_op =
7438 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7439 goto sys_reg_ins;
7440
7441 case AARCH64_OPND_SYSREG_DC:
7442 inst.base.operands[i].sysins_op =
7443 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7444 goto sys_reg_ins;
7445
7446 case AARCH64_OPND_SYSREG_AT:
7447 inst.base.operands[i].sysins_op =
7448 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7449 goto sys_reg_ins;
7450
7451 case AARCH64_OPND_SYSREG_SR:
7452 inst.base.operands[i].sysins_op =
7453 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7454 goto sys_reg_ins;
7455
7456 case AARCH64_OPND_SYSREG_TLBI:
7457 inst.base.operands[i].sysins_op =
7458 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7459 sys_reg_ins:
7460 if (inst.base.operands[i].sysins_op == NULL)
7461 {
7462 set_fatal_syntax_error ( _("unknown or missing operation name"));
7463 goto failure;
7464 }
7465 break;
7466
7467 case AARCH64_OPND_BARRIER:
7468 case AARCH64_OPND_BARRIER_ISB:
7469 val = parse_barrier (&str);
7470 if (val != PARSE_FAIL
7471 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7472 {
7473 /* ISB only accepts options name 'sy'. */
7474 set_syntax_error
7475 (_("the specified option is not accepted in ISB"));
7476 /* Turn off backtrack as this optional operand is present. */
7477 backtrack_pos = 0;
7478 goto failure;
7479 }
7480 if (val != PARSE_FAIL
7481 && operands[i] == AARCH64_OPND_BARRIER)
7482 {
7483 /* Regular barriers accept options CRm (C0-C15).
7484 DSB nXS barrier variant accepts values > 15. */
7485 if (val < 0 || val > 15)
7486 {
7487 set_syntax_error (_("the specified option is not accepted in DSB"));
7488 goto failure;
7489 }
7490 }
7491 /* This is an extension to accept a 0..15 immediate. */
7492 if (val == PARSE_FAIL)
7493 po_imm_or_fail (0, 15);
7494 info->barrier = aarch64_barrier_options + val;
7495 break;
7496
7497 case AARCH64_OPND_BARRIER_DSB_NXS:
7498 val = parse_barrier (&str);
7499 if (val != PARSE_FAIL)
7500 {
7501 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7502 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7503 {
7504 set_syntax_error (_("the specified option is not accepted in DSB"));
7505 /* Turn off backtrack as this optional operand is present. */
7506 backtrack_pos = 0;
7507 goto failure;
7508 }
7509 }
7510 else
7511 {
7512 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7513 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7514 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7515 goto failure;
7516 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7517 {
7518 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7519 goto failure;
7520 }
7521 }
7522 /* Option index is encoded as 2-bit value in val<3:2>. */
7523 val = (val >> 2) - 4;
7524 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7525 break;
7526
7527 case AARCH64_OPND_PRFOP:
7528 val = parse_pldop (&str);
7529 /* This is an extension to accept a 0..31 immediate. */
7530 if (val == PARSE_FAIL)
7531 po_imm_or_fail (0, 31);
7532 inst.base.operands[i].prfop = aarch64_prfops + val;
7533 break;
7534
7535 case AARCH64_OPND_BARRIER_PSB:
7536 val = parse_barrier_psb (&str, &(info->hint_option));
7537 if (val == PARSE_FAIL)
7538 goto failure;
7539 break;
7540
7541 case AARCH64_OPND_BTI_TARGET:
7542 val = parse_bti_operand (&str, &(info->hint_option));
7543 if (val == PARSE_FAIL)
7544 goto failure;
7545 break;
7546
7547 case AARCH64_OPND_SME_ZAda_2b:
7548 case AARCH64_OPND_SME_ZAda_3b:
7549 val = parse_sme_zada_operand (&str, &qualifier);
7550 if (val == PARSE_FAIL)
7551 goto failure;
7552 info->reg.regno = val;
7553 info->qualifier = qualifier;
7554 break;
7555
7556 case AARCH64_OPND_SME_ZA_HV_idx_src:
7557 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7558 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7559 {
7560 enum sme_hv_slice slice_indicator;
7561 int vector_select_register;
7562 int imm;
7563
7564 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7565 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7566 &slice_indicator,
7567 &vector_select_register,
7568 &imm,
7569 &qualifier);
7570 else
7571 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7572 &vector_select_register,
7573 &imm,
7574 &qualifier);
7575 if (val == PARSE_FAIL)
7576 goto failure;
7577 info->za_tile_vector.regno = val;
7578 info->za_tile_vector.index.regno = vector_select_register;
7579 info->za_tile_vector.index.imm = imm;
7580 info->za_tile_vector.v = slice_indicator;
7581 info->qualifier = qualifier;
7582 break;
7583 }
7584
7585 case AARCH64_OPND_SME_list_of_64bit_tiles:
7586 val = parse_sme_list_of_64bit_tiles (&str);
7587 if (val == PARSE_FAIL)
7588 goto failure;
7589 info->imm.value = val;
7590 break;
7591
7592 case AARCH64_OPND_SME_ZA_array:
7593 {
7594 int imm;
7595 val = parse_sme_za_array (&str, &imm);
7596 if (val == PARSE_FAIL)
7597 goto failure;
7598 info->za_tile_vector.index.regno = val;
7599 info->za_tile_vector.index.imm = imm;
7600 break;
7601 }
7602
7603 case AARCH64_OPND_MOPS_ADDR_Rd:
7604 case AARCH64_OPND_MOPS_ADDR_Rs:
7605 po_char_or_fail ('[');
7606 if (!parse_x0_to_x30 (&str, info))
7607 goto failure;
7608 po_char_or_fail (']');
7609 po_char_or_fail ('!');
7610 break;
7611
7612 case AARCH64_OPND_MOPS_WB_Rn:
7613 if (!parse_x0_to_x30 (&str, info))
7614 goto failure;
7615 po_char_or_fail ('!');
7616 break;
7617
7618 default:
7619 as_fatal (_("unhandled operand code %d"), operands[i]);
7620 }
7621
7622 /* If we get here, this operand was successfully parsed. */
7623 inst.base.operands[i].present = 1;
7624 continue;
7625
7626 failure:
7627 /* The parse routine should already have set the error, but in case
7628 not, set a default one here. */
7629 if (! error_p ())
7630 set_default_error ();
7631
7632 if (! backtrack_pos)
7633 goto parse_operands_return;
7634
7635 {
7636 /* We reach here because this operand is marked as optional, and
7637 either no operand was supplied or the operand was supplied but it
7638 was syntactically incorrect. In the latter case we report an
7639 error. In the former case we perform a few more checks before
7640 dropping through to the code to insert the default operand. */
7641
7642 char *tmp = backtrack_pos;
7643 char endchar = END_OF_INSN;
7644
7645 if (i != (aarch64_num_of_operands (opcode) - 1))
7646 endchar = ',';
7647 skip_past_char (&tmp, ',');
7648
7649 if (*tmp != endchar)
7650 /* The user has supplied an operand in the wrong format. */
7651 goto parse_operands_return;
7652
7653 /* Make sure there is not a comma before the optional operand.
7654 For example the fifth operand of 'sys' is optional:
7655
7656 sys #0,c0,c0,#0, <--- wrong
7657 sys #0,c0,c0,#0 <--- correct. */
7658 if (comma_skipped_p && i && endchar == END_OF_INSN)
7659 {
7660 set_fatal_syntax_error
7661 (_("unexpected comma before the omitted optional operand"));
7662 goto parse_operands_return;
7663 }
7664 }
7665
7666 /* Reaching here means we are dealing with an optional operand that is
7667 omitted from the assembly line. */
7668 gas_assert (optional_operand_p (opcode, i));
7669 info->present = 0;
7670 process_omitted_operand (operands[i], opcode, i, info);
7671
7672 /* Try again, skipping the optional operand at backtrack_pos. */
7673 str = backtrack_pos;
7674 backtrack_pos = 0;
7675
7676 /* Clear any error record after the omitted optional operand has been
7677 successfully handled. */
7678 clear_error ();
7679 }
7680
7681 /* Check if we have parsed all the operands. */
7682 if (*str != '\0' && ! error_p ())
7683 {
7684 /* Set I to the index of the last present operand; this is
7685 for the purpose of diagnostics. */
7686 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7687 ;
7688 set_fatal_syntax_error
7689 (_("unexpected characters following instruction"));
7690 }
7691
7692 parse_operands_return:
7693
7694 if (error_p ())
7695 {
7696 inst.parsing_error.index = i;
7697 DEBUG_TRACE ("parsing FAIL: %s - %s",
7698 operand_mismatch_kind_names[inst.parsing_error.kind],
7699 inst.parsing_error.error);
7700 /* Record the operand error properly; this is useful when there
7701 are multiple instruction templates for a mnemonic name, so that
7702 later on, we can select the error that most closely describes
7703 the problem. */
7704 record_operand_error_info (opcode, &inst.parsing_error);
7705 return false;
7706 }
7707 else
7708 {
7709 DEBUG_TRACE ("parsing SUCCESS");
7710 return true;
7711 }
7712 }
7713
/* Perform programmer-friendly fix-ups on the parsed instruction *INSTR
   while keeping libopcodes happy, i.e. libopcodes only accepts the
   preferred architectural syntax, so convert accepted-but-nonpreferred
   forms into it here.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 32-bit literal regardless of the 64-bit
	     destination qualifier.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
7821
/* Check for loads and stores that will cause unpredictable behavior
   (per the architecture's CONSTRAINED UNPREDICTABLE rules) and warn,
   quoting the original source line STR.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 Bit 22 of the encoding distinguishes the load forms here.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
          if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
	            /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
	            /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7917
/* If the current segment still has an open instruction sequence (one
   started by an earlier instruction and not yet terminated), warn at the
   location where the opening instruction was assembled and discard the
   sequence state.  */

static void
force_automatic_sequence_close (void)
{
  struct aarch64_segment_info_type *tc_seg_info;

  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  if (tc_seg_info->insn_sequence.instr)
    {
      /* last_file/last_line were recorded by md_assemble when the
	 opening instruction was seen.  */
      as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
		     _("previous `%s' sequence has not been closed"),
		     tc_seg_info->insn_sequence.instr->opcode->name);
      init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
    }
}
7932
7933 /* A wrapper function to interface with libopcodes on encoding and
7934 record the error message if there is any.
7935
7936 Return TRUE on success; otherwise return FALSE. */
7937
7938 static bool
7939 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7940 aarch64_insn *code)
7941 {
7942 aarch64_operand_error error_info;
7943 memset (&error_info, '\0', sizeof (error_info));
7944 error_info.kind = AARCH64_OPDE_NIL;
7945 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7946 && !error_info.non_fatal)
7947 return true;
7948
7949 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7950 record_operand_error_info (opcode, &error_info);
7951 return error_info.non_fatal;
7952 }
7953
#ifdef DEBUG_AARCH64
/* Debug helper: print one line per operand of OPCODE, using the
   operand's name when it has one, otherwise its description.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  for (int i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *text = aarch64_get_operand_name (opcode->operands[i]);
      if (text[0] == '\0')
	text = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, text);
    }
}
#endif /* DEBUG_AARCH64 */
7969
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  Remember the first '.' for the mnemonic's
     condition/qualifier suffix.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* An undotted name may instead be a register alias definition.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition from the mnemonic suffix across the
     per-template reset of the global instruction state.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template did not match; reset the parse state before
	 trying the next candidate.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8125
8126 /* Various frobbings of labels and their addresses. */
8127
/* Hook run at the start of each input line: forget the most recently
   seen label, so md_assemble only re-anchors labels defined on the
   current line.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
8133
/* Hook run when SYM is defined as a label: remember it (md_assemble
   uses last_label_seen to re-anchor the label to the instruction) and
   emit DWARF line information for it.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8141
/* Hook run per section at the end of assembly.  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8148
8149 int
8150 aarch64_data_in_code (void)
8151 {
8152 if (startswith (input_line_pointer + 1, "data:"))
8153 {
8154 *input_line_pointer = '/';
8155 input_line_pointer += 5;
8156 *input_line_pointer = 0;
8157 return 1;
8158 }
8159
8160 return 0;
8161 }
8162
/* Strip a trailing "/data" marker from NAME in place, if present.
   Return NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8173 \f
8174 /* Table of all register names defined by default. The user can
8175 define additional names with .req. Note that all register names
8176 should appear in both upper and lowercase variants. Some registers
8177 also have mixed-case names. */
8178
8179 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8180 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8181 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8182 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8183 #define REGSET16(p,t) \
8184 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8185 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8186 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8187 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8188 #define REGSET16S(p,s,t) \
8189 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8190 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8191 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8192 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8193 #define REGSET31(p,t) \
8194 REGSET16(p, t), \
8195 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8196 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8197 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8198 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8199 #define REGSET(p,t) \
8200 REGSET31(p,t), REGNUM(p,31,t)
8201
8202 /* These go into aarch64_reg_hsh hash-table. */
8203 static const reg_entry reg_names[] = {
8204 /* Integer registers. */
8205 REGSET31 (x, R_64), REGSET31 (X, R_64),
8206 REGSET31 (w, R_32), REGSET31 (W, R_32),
8207
8208 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8209 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8210 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8211 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8212 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8213 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8214
8215 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
8216 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
8217
8218 /* Floating-point single precision registers. */
8219 REGSET (s, FP_S), REGSET (S, FP_S),
8220
8221 /* Floating-point double precision registers. */
8222 REGSET (d, FP_D), REGSET (D, FP_D),
8223
8224 /* Floating-point half precision registers. */
8225 REGSET (h, FP_H), REGSET (H, FP_H),
8226
8227 /* Floating-point byte precision registers. */
8228 REGSET (b, FP_B), REGSET (B, FP_B),
8229
8230 /* Floating-point quad precision registers. */
8231 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8232
8233 /* FP/SIMD registers. */
8234 REGSET (v, VN), REGSET (V, VN),
8235
8236 /* SVE vector registers. */
8237 REGSET (z, ZN), REGSET (Z, ZN),
8238
8239 /* SVE predicate registers. */
8240 REGSET16 (p, PN), REGSET16 (P, PN),
8241
8242 /* SME ZA tile registers. */
8243 REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),
8244
8245 /* SME ZA tile registers (horizontal slice). */
8246 REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),
8247
8248 /* SME ZA tile registers (vertical slice). */
8249 REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV)
8250 };
8251
8252 #undef REGDEF
8253 #undef REGDEF_ALIAS
8254 #undef REGNUM
8255 #undef REGSET16
8256 #undef REGSET31
8257 #undef REGSET
8258
/* B(a,b,c,d) packs the four condition flags into a 4-bit value with
   N in bit 3, Z in bit 2, C in bit 1 and V in bit 0.  An uppercase
   letter means the flag is set (1), lowercase means clear (0); the
   table below enumerates all 16 combinations by name.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8296 \f
8297 /* MD interface: bits in the object file. */
8298
8299 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8300 for use in the a.out file, and stores them in the array pointed to by buf.
8301 This knows about the endian-ness of the target machine and does
8302 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8303 2 (short) and 4 (long) Floating numbers are put out as a series of
8304 LITTLENUMS (shorts, here at least). */
8305
8306 void
8307 md_number_to_chars (char *buf, valueT val, int n)
8308 {
8309 if (target_big_endian)
8310 number_to_chars_bigendian (buf, val, n);
8311 else
8312 number_to_chars_littleendian (buf, val, n);
8313 }
8314
8315 /* MD interface: Sections. */
8316
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes.  FRAGP is the frag being sized; the segment argument is
   unused on AArch64.  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  fragp->fr_var = 4;
  return 4;
}
8326
/* Round up a section size to the appropriate boundary.  AArch64 imposes
   no extra padding, so the size is returned unchanged.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  return size;
}
8334
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding this frag must provide.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach a 4-byte instruction boundary; they cannot
     hold an instruction, so zero-fill them.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating part: a single NOP, replicated as needed.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8392
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets.  */
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
8438
/* Whether SFrame stack trace info is supported.  */

bool
aarch64_support_sframe_p (void)
{
  /* At this time, SFrame is supported for aarch64 only, and only for
     the LP64 ABI.  */
  return (aarch64_abi == AARCH64_ABI_LP64);
}
8447
/* Specify if RA tracking is needed.  Always true on aarch64, where the
   return address lives in a register (LR) rather than at a fixed CFA
   offset.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  return true;
}
8455
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  Since RA tracking is
   always on for aarch64, return the "invalid" marker.  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8464
8465 /* Get the abi/arch indentifier for SFrame. */
8466
8467 unsigned char
8468 aarch64_sframe_get_abi_arch (void)
8469 {
8470 unsigned char sframe_abi_arch = 0;
8471
8472 if (aarch64_support_sframe_p ())
8473 {
8474 sframe_abi_arch = target_big_endian
8475 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8476 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8477 }
8478
8479 return sframe_abi_arch;
8480 }
8481
8482 #endif /* OBJ_ELF */
8483 \f
/* Initialize the DWARF-2 unwind information for this procedure:
   the initial CFA is SP with offset 0.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8491
8492 /* Convert REGNAME to a DWARF-2 register number. */
8493
8494 int
8495 tc_aarch64_regname_to_dw2regnum (char *regname)
8496 {
8497 const reg_entry *reg = parse_reg (&regname);
8498 if (reg == NULL)
8499 return -1;
8500
8501 switch (reg->type)
8502 {
8503 case REG_TYPE_SP_32:
8504 case REG_TYPE_SP_64:
8505 case REG_TYPE_R_32:
8506 case REG_TYPE_R_64:
8507 return reg->number;
8508
8509 case REG_TYPE_FP_B:
8510 case REG_TYPE_FP_H:
8511 case REG_TYPE_FP_S:
8512 case REG_TYPE_FP_D:
8513 case REG_TYPE_FP_Q:
8514 return reg->number + 64;
8515
8516 default:
8517 break;
8518 }
8519 return -1;
8520 }
8521
8522 /* Implement DWARF2_ADDR_SIZE. */
8523
8524 int
8525 aarch64_dwarf2_addr_size (void)
8526 {
8527 if (ilp32_p)
8528 return 4;
8529 else if (llp64_p)
8530 return 8;
8531 return bfd_arch_bits_per_address (stdoutput) / 8;
8532 }
8533
8534 /* MD interface: Symbol and relocation handling. */
8535
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
8556
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character check before the full string comparison.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  /* Create the GOT symbol lazily, on first reference.  */
	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
8582
8583 /* Return non-zero if the indicated VALUE has overflowed the maximum
8584 range expressible by a unsigned number with the indicated number of
8585 BITS. */
8586
8587 static bool
8588 unsigned_overflow (valueT value, unsigned bits)
8589 {
8590 valueT lim;
8591 if (bits >= sizeof (valueT) * 8)
8592 return false;
8593 lim = (valueT) 1 << bits;
8594 return (value >= lim);
8595 }
8596
8597
8598 /* Return non-zero if the indicated VALUE has overflowed the maximum
8599 range expressible by an signed number with the indicated number of
8600 BITS. */
8601
8602 static bool
8603 signed_overflow (offsetT value, unsigned bits)
8604 {
8605 offsetT lim;
8606 if (bits >= sizeof (offsetT) * 8)
8607 return false;
8608 lim = (offsetT) 1 << (bits - 1);
8609 return (value < -lim || value >= lim);
8610 }
8611
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart;
     OP_NIL means there is no such counterpart.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8674
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* None of the single-instruction aliases can materialize VALUE.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8735
8736 /* An instruction operand which is immediate related may have symbol used
8737 in the assembly, e.g.
8738
8739 mov w0, u32
8740 .set u32, 0x00ffff00
8741
8742 At the time when the assembly instruction is parsed, a referenced symbol,
8743 like 'u32' in the above example may not have been seen; a fixS is created
8744 in such a case and is handled here after symbols have been resolved.
8745 Instruction is fixed up with VALUE using the information in *FIXP plus
8746 extra information in FLAGS.
8747
8748 This function is called by md_apply_fix to fix up instructions that need
8749 a fix-up described above but does not involve any linker-time relocation. */
8750
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate for SVC/HVC/etc. or UDF; patched directly into
	 the already-emitted instruction word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
	 3  322|2222|2 2 2 21111 111111
	 1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
	 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
	 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
	 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
	 ->
	 3  322|2222|2 2 221111111111
	 1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12 Rn Rd ADD
	 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
	 51000000 sf 101|0001|shift imm12 Rn Rd SUB
	 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  A negative immediate is representable by
	     flipping the operation and negating the value.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only legal when
	     the low 12 bits are all zero and the user did not already
	     write an explicit shift.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode through the full encoder since
	 logical immediates use the N:immr:imms bitmask format.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* Scaled offset did not fit; an LDUR/STUR form may still.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8914
8915 /* Apply a fixup (fixP) to segment data, once it has been determined
8916 by our caller that we have all the info we need to fix it up.
8917
8918 Parameter valP is the pointer to the value of the bits. */
8919
void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* Extra fix-up flags (e.g. FIXUP_F_HAS_EXPLICIT_SHIFT) stashed in
     fx_addnumber when the fix was created.  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      /* Resolved at assembly time; patch the instruction in place.  */
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* LDR (literal): 19-bit word-scaled PC-relative offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit signed byte offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* B.cond: 19-bit word-scaled offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ/TBNZ: 14-bit word-scaled offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* BL/B: 26-bit word-scaled offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVW-family relocations share one patch path; SCALE selects which
       16-bit group of the value feeds the movw immediate field.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction.  */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Narrow the generic GOTTPREL reloc to the ABI-specific one.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Narrow the generic TLSDESC load reloc to the ABI-specific one.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* TLS relocations: mark the symbol thread-local and leave the reloc
       for the linker.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      /* Marker relocations for linker relaxation; nothing to patch.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32_SECREL:
    case BFD_RELOC_16_SECIDX:
      /* PE/COFF relocations; emitted as-is.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated the struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal .  */
  free (fixP->tc_fix_data.inst);

  return;
}
9307
9308 /* Translate internal representation of relocation info to BFD target
9309 format. */
9310
9311 arelent *
9312 tc_gen_reloc (asection * section, fixS * fixp)
9313 {
9314 arelent *reloc;
9315 bfd_reloc_code_real_type code;
9316
9317 reloc = XNEW (arelent);
9318
9319 reloc->sym_ptr_ptr = XNEW (asymbol *);
9320 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9321 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9322
9323 if (fixp->fx_pcrel)
9324 {
9325 if (section->use_rela_p)
9326 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9327 else
9328 fixp->fx_offset = reloc->address;
9329 }
9330 reloc->addend = fixp->fx_offset;
9331
9332 code = fixp->fx_r_type;
9333 switch (code)
9334 {
9335 case BFD_RELOC_16:
9336 if (fixp->fx_pcrel)
9337 code = BFD_RELOC_16_PCREL;
9338 break;
9339
9340 case BFD_RELOC_32:
9341 if (fixp->fx_pcrel)
9342 code = BFD_RELOC_32_PCREL;
9343 break;
9344
9345 case BFD_RELOC_64:
9346 if (fixp->fx_pcrel)
9347 code = BFD_RELOC_64_PCREL;
9348 break;
9349
9350 default:
9351 break;
9352 }
9353
9354 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9355 if (reloc->howto == NULL)
9356 {
9357 as_bad_where (fixp->fx_file, fixp->fx_line,
9358 _
9359 ("cannot represent %s relocation in this object file format"),
9360 bfd_get_reloc_code_name (code));
9361 return NULL;
9362 }
9363
9364 return reloc;
9365 }
9366
9367 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9368
void
cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
{
  bfd_reloc_code_real_type type;
  /* Data directives never produce PC-relative fixes here.  */
  int pcrel = 0;

#ifdef TE_PE
  if (exp->X_op == O_secrel)
    {
      /* .secrel32-style expression: section-relative 32-bit reloc.  */
      exp->X_op = O_symbol;
      type = BFD_RELOC_32_SECREL;
    }
  else if (exp->X_op == O_secidx)
    {
      /* Section-index expression: 16-bit section index reloc.  */
      exp->X_op = O_symbol;
      type = BFD_RELOC_16_SECIDX;
    }
  else
    {
#endif
  /* Pick a reloc.
     FIXME: @@ Should look at CPU word size.  */
  switch (size)
    {
    case 1:
      type = BFD_RELOC_8;
      break;
    case 2:
      type = BFD_RELOC_16;
      break;
    case 4:
      type = BFD_RELOC_32;
      break;
    case 8:
      type = BFD_RELOC_64;
      break;
    default:
      as_bad (_("cannot do %u-byte relocation"), size);
      type = BFD_RELOC_UNUSED;
      break;
    }
#ifdef TE_PE
    }
#endif

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}
9416
9417 /* Implement md_after_parse_args. This is the earliest time we need to decide
9418 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9419
9420 void
9421 aarch64_after_parse_args (void)
9422 {
9423 if (aarch64_abi != AARCH64_ABI_NONE)
9424 return;
9425
9426 #ifdef OBJ_ELF
9427 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9428 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9429 aarch64_abi = AARCH64_ABI_ILP32;
9430 else
9431 aarch64_abi = AARCH64_ABI_LP64;
9432 #else
9433 aarch64_abi = AARCH64_ABI_LLP64;
9434 #endif
9435 }
9436
9437 #ifdef OBJ_ELF
9438 const char *
9439 elf64_aarch64_target_format (void)
9440 {
9441 #ifdef TE_CLOUDABI
9442 /* FIXME: What to do for ilp32_p ? */
9443 if (target_big_endian)
9444 return "elf64-bigaarch64-cloudabi";
9445 else
9446 return "elf64-littleaarch64-cloudabi";
9447 #else
9448 if (target_big_endian)
9449 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9450 else
9451 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9452 #endif
9453 }
9454
/* Target hook for symbol frobbing; defer entirely to the generic ELF
   handling.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9460 #elif defined OBJ_COFF
/* Return the BFD target name for PE/COFF output (little-endian only).  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9466 #endif
9467
9468 /* MD interface: Finalization. */
9469
9470 /* A good place to do this, although this was probably not intended
9471 for this kind of use. We need to dump the literal pool before
9472 references are made to a null symbol pointer. */
9473
9474 void
9475 aarch64_cleanup (void)
9476 {
9477 literal_pool *pool;
9478
9479 for (pool = list_of_pools; pool; pool = pool->next)
9480 {
9481 /* Put it at the end of the relevant section. */
9482 subseg_set (pool->section, pool->sub_section);
9483 s_ltorg (0);
9484 }
9485 }
9486
9487 #ifdef OBJ_ELF
9488 /* Remove any excess mapping symbols generated for alignment frags in
9489 SEC. We may have created a mapping symbol before a zero byte
9490 alignment; remove it if there's a mapping symbol after the
9491 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to scan for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag; for a mapping symbol sitting exactly at the end of
     its frag, decide whether a later mapping symbol makes it redundant.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9551 #endif
9552
9553 /* Adjust the symbol table. */
9554
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9565
/* Insert KEY/VALUE into TABLE without replacing an existing entry
   (the final 0 argument to str_hash_insert).  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9571
/* Insert a system-register entry, first checking that the name fits in
   the fixed-size buffers used by the sysreg parsers.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9578
9579 static void
9580 fill_instruction_hash_table (void)
9581 {
9582 const aarch64_opcode *opcode = aarch64_opcode_table;
9583
9584 while (opcode->name != NULL)
9585 {
9586 templates *templ, *new_templ;
9587 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9588
9589 new_templ = XNEW (templates);
9590 new_templ->opcode = opcode;
9591 new_templ->next = NULL;
9592
9593 if (!templ)
9594 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9595 else
9596 {
9597 new_templ->next = templ->next;
9598 templ->next = new_templ;
9599 }
9600 ++opcode;
9601 }
9602 }
9603
/* Copy at most NUM characters of SRC into DST, upper-casing each one, and
   NUL-terminate the result.  DST must have room for NUM + 1 bytes.  Stops
   early at SRC's terminating NUL.

   The counter is size_t to match NUM: with the previous 'unsigned int'
   counter, a NUM larger than UINT_MAX could never be reached on LP64
   hosts because the counter wraps before the comparison is satisfied.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t i;
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
9612
9613 /* Assume STR point to a lower-case string, allocate, convert and return
9614 the corresponding upper-case string. */
9615 static inline const char*
9616 get_upper_str (const char *str)
9617 {
9618 char *ret;
9619 size_t len = strlen (str);
9620 ret = XNEWVEC (char, len + 1);
9621 convert_to_upper (ret, str, len);
9622 return ret;
9623 }
9624
9625 /* MD interface: Initialization. */
9626
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create every lookup table first; they must exist before anything is
     registered into them below.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  /* Register all opcode templates keyed by mnemonic.  */
  fill_instruction_hash_table ();

  /* System registers, PSTATE fields and the IC/DC/AT/TLBI/SR operation
     name tables, each keyed by (length-checked) name.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* Register names and NZCV flag-set names.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers, hashed in both lower and upper case.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  The table index
	 doubles as the barrier option encoding, so every fourth entry
	 (low two bits zero) is a hole.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.  -mcpu wins
     over -march; fall back to the configured default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* FIXME - is there a better way to do it ?  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29;	/* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
9796
/* Command line processing.  */

/* Short options: only -m<...> takes an argument here; everything else is
   dispatched through aarch64_opts/aarch64_long_opts in md_parse_option.  */
const char *md_shortopts = "m:";

/* Define -EB/-EL.  A bi-endian target accepts both; otherwise only the
   option matching the configured default byte order exists.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9823
/* Table entry for a simple boolean command-line option that stores an
   integer VALUE into *VAR when matched.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Boolean options recognized by md_parse_option.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9846
/* Table entry mapping a -mcpu=/.cpu name to its full feature set.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
9855
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Each entry pairs the CPU name with its base
   architecture plus the extensions the core enables by default.
   The leading "all" entry is skipped by the .cpu directive handler.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
   "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
10015
/* Table entry mapping a -march=/.arch name to its feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  The leading "all" entry is skipped by the .arch
   directive handler.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
10042
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* Extension names accepted after '+' in -mcpu=/-march= and by
   .arch_extension.  REQUIRE lists the features an extension depends on;
   the enable/disable closures below propagate these dependencies.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-f64f64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i16i64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {"mops",		AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
			AARCH64_ARCH_NONE},
  {"hbc",		AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
			AARCH64_ARCH_NONE},
  {"cssc",		AARCH64_FEATURE (AARCH64_FEATURE_CSSC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
10151
/* Table entry for an option whose argument is decoded by a dedicated
   sub-option parser (e.g. -mcpu=, -march=, -mabi=).  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
10159
10160 /* Transitive closure of features depending on set. */
10161 static aarch64_feature_set
10162 aarch64_feature_disable_set (aarch64_feature_set set)
10163 {
10164 const struct aarch64_option_cpu_value_table *opt;
10165 aarch64_feature_set prev = 0;
10166
10167 while (prev != set) {
10168 prev = set;
10169 for (opt = aarch64_features; opt->name != NULL; opt++)
10170 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10171 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10172 }
10173 return set;
10174 }
10175
10176 /* Transitive closure of dependencies of set. */
10177 static aarch64_feature_set
10178 aarch64_feature_enable_set (aarch64_feature_set set)
10179 {
10180 const struct aarch64_option_cpu_value_table *opt;
10181 aarch64_feature_set prev = 0;
10182
10183 while (prev != set) {
10184 prev = set;
10185 for (opt = aarch64_features; opt->name != NULL; opt++)
10186 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10187 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10188 }
10189 return set;
10190 }
10191
10192 static int
10193 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10194 bool ext_only)
10195 {
10196 /* We insist on extensions being added before being removed. We achieve
10197 this by using the ADDING_VALUE variable to indicate whether we are
10198 adding an extension (1) or removing it (0) and only allowing it to
10199 change in the order -1 -> 1 -> 0. */
10200 int adding_value = -1;
10201 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10202
10203 /* Copy the feature set, so that we can modify it. */
10204 *ext_set = **opt_p;
10205 *opt_p = ext_set;
10206
10207 while (str != NULL && *str != 0)
10208 {
10209 const struct aarch64_option_cpu_value_table *opt;
10210 const char *ext = NULL;
10211 int optlen;
10212
10213 if (!ext_only)
10214 {
10215 if (*str != '+')
10216 {
10217 as_bad (_("invalid architectural extension"));
10218 return 0;
10219 }
10220
10221 ext = strchr (++str, '+');
10222 }
10223
10224 if (ext != NULL)
10225 optlen = ext - str;
10226 else
10227 optlen = strlen (str);
10228
10229 if (optlen >= 2 && startswith (str, "no"))
10230 {
10231 if (adding_value != 0)
10232 adding_value = 0;
10233 optlen -= 2;
10234 str += 2;
10235 }
10236 else if (optlen > 0)
10237 {
10238 if (adding_value == -1)
10239 adding_value = 1;
10240 else if (adding_value != 1)
10241 {
10242 as_bad (_("must specify extensions to add before specifying "
10243 "those to remove"));
10244 return false;
10245 }
10246 }
10247
10248 if (optlen == 0)
10249 {
10250 as_bad (_("missing architectural extension"));
10251 return 0;
10252 }
10253
10254 gas_assert (adding_value != -1);
10255
10256 for (opt = aarch64_features; opt->name != NULL; opt++)
10257 if (strncmp (opt->name, str, optlen) == 0)
10258 {
10259 aarch64_feature_set set;
10260
10261 /* Add or remove the extension. */
10262 if (adding_value)
10263 {
10264 set = aarch64_feature_enable_set (opt->value);
10265 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10266 }
10267 else
10268 {
10269 set = aarch64_feature_disable_set (opt->value);
10270 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10271 }
10272 break;
10273 }
10274
10275 if (opt->name == NULL)
10276 {
10277 as_bad (_("unknown architectural extension `%s'"), str);
10278 return 0;
10279 }
10280
10281 str = ext;
10282 };
10283
10284 return 1;
10285 }
10286
10287 static int
10288 aarch64_parse_cpu (const char *str)
10289 {
10290 const struct aarch64_cpu_option_table *opt;
10291 const char *ext = strchr (str, '+');
10292 size_t optlen;
10293
10294 if (ext != NULL)
10295 optlen = ext - str;
10296 else
10297 optlen = strlen (str);
10298
10299 if (optlen == 0)
10300 {
10301 as_bad (_("missing cpu name `%s'"), str);
10302 return 0;
10303 }
10304
10305 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10306 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10307 {
10308 mcpu_cpu_opt = &opt->value;
10309 if (ext != NULL)
10310 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10311
10312 return 1;
10313 }
10314
10315 as_bad (_("unknown cpu `%s'"), str);
10316 return 0;
10317 }
10318
10319 static int
10320 aarch64_parse_arch (const char *str)
10321 {
10322 const struct aarch64_arch_option_table *opt;
10323 const char *ext = strchr (str, '+');
10324 size_t optlen;
10325
10326 if (ext != NULL)
10327 optlen = ext - str;
10328 else
10329 optlen = strlen (str);
10330
10331 if (optlen == 0)
10332 {
10333 as_bad (_("missing architecture name `%s'"), str);
10334 return 0;
10335 }
10336
10337 for (opt = aarch64_archs; opt->name != NULL; opt++)
10338 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10339 {
10340 march_cpu_opt = &opt->value;
10341 if (ext != NULL)
10342 return aarch64_parse_features (ext, &march_cpu_opt, false);
10343
10344 return 1;
10345 }
10346
10347 as_bad (_("unknown architecture `%s'\n"), str);
10348 return 0;
10349 }
10350
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* ABI names accepted by -mabi=.  ILP32/LP64 are offered for ELF
   targets; LLP64 otherwise.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32",	AARCH64_ABI_ILP32},
  {"lp64",	AARCH64_ABI_LP64},
#else
  {"llp64",	AARCH64_ABI_LLP64},
#endif
};
10366
10367 static int
10368 aarch64_parse_abi (const char *str)
10369 {
10370 unsigned int i;
10371
10372 if (str[0] == '\0')
10373 {
10374 as_bad (_("missing abi name `%s'"), str);
10375 return 0;
10376 }
10377
10378 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10379 if (strcmp (str, aarch64_abis[i].name) == 0)
10380 {
10381 aarch64_abi = aarch64_abis[i].value;
10382 return 1;
10383 }
10384
10385 as_bad (_("unknown abi `%s'\n"), str);
10386 return 0;
10387 }
10388
/* Options taking an argument, each decoded by its own sub-parser.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10398
10399 int
10400 md_parse_option (int c, const char *arg)
10401 {
10402 struct aarch64_option_table *opt;
10403 struct aarch64_long_option_table *lopt;
10404
10405 switch (c)
10406 {
10407 #ifdef OPTION_EB
10408 case OPTION_EB:
10409 target_big_endian = 1;
10410 break;
10411 #endif
10412
10413 #ifdef OPTION_EL
10414 case OPTION_EL:
10415 target_big_endian = 0;
10416 break;
10417 #endif
10418
10419 case 'a':
10420 /* Listing option. Just ignore these, we don't support additional
10421 ones. */
10422 return 0;
10423
10424 default:
10425 for (opt = aarch64_opts; opt->option != NULL; opt++)
10426 {
10427 if (c == opt->option[0]
10428 && ((arg == NULL && opt->option[1] == 0)
10429 || streq (arg, opt->option + 1)))
10430 {
10431 /* If the option is deprecated, tell the user. */
10432 if (opt->deprecated != NULL)
10433 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
10434 arg ? arg : "", _(opt->deprecated));
10435
10436 if (opt->var != NULL)
10437 *opt->var = opt->value;
10438
10439 return 1;
10440 }
10441 }
10442
10443 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10444 {
10445 /* These options are expected to have an argument. */
10446 if (c == lopt->option[0]
10447 && arg != NULL
10448 && startswith (arg, lopt->option + 1))
10449 {
10450 /* If the option is deprecated, tell the user. */
10451 if (lopt->deprecated != NULL)
10452 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
10453 _(lopt->deprecated));
10454
10455 /* Call the sup-option parser. */
10456 return lopt->func (arg + strlen (lopt->option) - 1);
10457 }
10458 }
10459
10460 return 0;
10461 }
10462
10463 return 1;
10464 }
10465
10466 void
10467 md_show_usage (FILE * fp)
10468 {
10469 struct aarch64_option_table *opt;
10470 struct aarch64_long_option_table *lopt;
10471
10472 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10473
10474 for (opt = aarch64_opts; opt->option != NULL; opt++)
10475 if (opt->help != NULL)
10476 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10477
10478 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10479 if (lopt->help != NULL)
10480 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10481
10482 #ifdef OPTION_EB
10483 fprintf (fp, _("\
10484 -EB assemble code for a big-endian cpu\n"));
10485 #endif
10486
10487 #ifdef OPTION_EL
10488 fprintf (fp, _("\
10489 -EL assemble code for a little-endian cpu\n"));
10490 #endif
10491 }
10492
10493 /* Parse a .cpu directive. */
10494
10495 static void
10496 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10497 {
10498 const struct aarch64_cpu_option_table *opt;
10499 char saved_char;
10500 char *name;
10501 char *ext;
10502 size_t optlen;
10503
10504 name = input_line_pointer;
10505 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10506 saved_char = *input_line_pointer;
10507 *input_line_pointer = 0;
10508
10509 ext = strchr (name, '+');
10510
10511 if (ext != NULL)
10512 optlen = ext - name;
10513 else
10514 optlen = strlen (name);
10515
10516 /* Skip the first "all" entry. */
10517 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10518 if (strlen (opt->name) == optlen
10519 && strncmp (name, opt->name, optlen) == 0)
10520 {
10521 mcpu_cpu_opt = &opt->value;
10522 if (ext != NULL)
10523 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10524 return;
10525
10526 cpu_variant = *mcpu_cpu_opt;
10527
10528 *input_line_pointer = saved_char;
10529 demand_empty_rest_of_line ();
10530 return;
10531 }
10532 as_bad (_("unknown cpu `%s'"), name);
10533 *input_line_pointer = saved_char;
10534 ignore_rest_of_line ();
10535 }
10536
10537
10538 /* Parse a .arch directive. */
10539
10540 static void
10541 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10542 {
10543 const struct aarch64_arch_option_table *opt;
10544 char saved_char;
10545 char *name;
10546 char *ext;
10547 size_t optlen;
10548
10549 name = input_line_pointer;
10550 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10551 saved_char = *input_line_pointer;
10552 *input_line_pointer = 0;
10553
10554 ext = strchr (name, '+');
10555
10556 if (ext != NULL)
10557 optlen = ext - name;
10558 else
10559 optlen = strlen (name);
10560
10561 /* Skip the first "all" entry. */
10562 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10563 if (strlen (opt->name) == optlen
10564 && strncmp (name, opt->name, optlen) == 0)
10565 {
10566 mcpu_cpu_opt = &opt->value;
10567 if (ext != NULL)
10568 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10569 return;
10570
10571 cpu_variant = *mcpu_cpu_opt;
10572
10573 *input_line_pointer = saved_char;
10574 demand_empty_rest_of_line ();
10575 return;
10576 }
10577
10578 as_bad (_("unknown architecture `%s'\n"), name);
10579 *input_line_pointer = saved_char;
10580 ignore_rest_of_line ();
10581 }
10582
10583 /* Parse a .arch_extension directive. */
10584
10585 static void
10586 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10587 {
10588 char saved_char;
10589 char *ext = input_line_pointer;
10590
10591 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10592 saved_char = *input_line_pointer;
10593 *input_line_pointer = 0;
10594
10595 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10596 return;
10597
10598 cpu_variant = *mcpu_cpu_opt;
10599
10600 *input_line_pointer = saved_char;
10601 demand_empty_rest_of_line ();
10602 }
10603
/* Copy symbol information.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  /* Propagate the AArch64-specific per-symbol flags from SRC to DEST.  */
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10611
10612 #ifdef OBJ_ELF
10613 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10614 This is needed so AArch64 specific st_other values can be independently
10615 specified for an IFUNC resolver (that is called by the dynamic linker)
10616 and the symbol it resolves (aliased to the resolver). In particular,
10617 if a function symbol has special st_other value set via directives,
10618 then attaching an IFUNC resolver to that symbol should not override
10619 the st_other setting. Requiring the directive on the IFUNC resolver
10620 symbol would be unexpected and problematic in C code, where the two
10621 symbols appear as two independent function declarations. */
10622
void
aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  struct elf_obj_sy *srcelf = symbol_get_obj (src);
  struct elf_obj_sy *destelf = symbol_get_obj (dest);
  /* If size is unset, copy size from src.  Because we don't track whether
     .size has been used, we can't differentiate .size dest, 0 from the case
     where dest's size is unset.  */
  if (!destelf->size && S_GET_SIZE (dest) == 0)
    {
      if (srcelf->size)
	{
	  /* Duplicate the size expression rather than sharing it, so the
	     two symbols' sizes can later be resolved independently.  */
	  destelf->size = XNEW (expressionS);
	  *destelf->size = *srcelf->size;
	}
      S_SET_SIZE (dest, S_GET_SIZE (src));
    }
}
10641 #endif