aarch64: Add BC instruction
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2021 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* SME horizontal or vertical slice indicator, encoded in "V".
103 Values:
104 0 - Horizontal
105 1 - Vertical
106 */
107 enum sme_hv_slice
108 {
109 HV_horizontal = 0,
110 HV_vertical = 1
111 };
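/* Editorial illustration (not part of the original source): in SME assembly
   the slice direction is written as part of the tile-slice operand, e.g.
   "za0h.b" names a horizontal slice (V = 0) and "za0v.b" a vertical slice
   (V = 1).  The operand syntax itself is handled by the SME parsers
   elsewhere in this file.  */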
112
113 /* Bits for DEFINED field in vector_type_el. */
114 #define NTA_HASTYPE 1
115 #define NTA_HASINDEX 2
116 #define NTA_HASVARWIDTH 4
117
118 struct vector_type_el
119 {
120 enum vector_el_type type;
121 unsigned char defined;
122 unsigned width;
123 int64_t index;
124 };
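/* Editorial sketch (not in the original source): after parsing an operand
   such as "v0.4s[2]", the parsers below fill in a vector_type_el roughly as
     type = NT_s, width = 4, index = 2,
     defined = NTA_HASTYPE | NTA_HASINDEX
   whereas an SVE form such as "z3.d" has no fixed width and sets
   NTA_HASVARWIDTH | NTA_HASTYPE with width = 0.  */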
125
126 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
127
128 struct reloc
129 {
130 bfd_reloc_code_real_type type;
131 expressionS exp;
132 int pc_rel;
133 enum aarch64_opnd opnd;
134 uint32_t flags;
135 unsigned need_libopcodes_p : 1;
136 };
137
138 struct aarch64_instruction
139 {
140 /* libopcodes structure for instruction intermediate representation. */
141 aarch64_inst base;
142 /* Record assembly errors found during the parsing. */
143 struct
144 {
145 enum aarch64_operand_error_kind kind;
146 const char *error;
147 } parsing_error;
148 /* The condition that appears in the assembly line. */
149 int cond;
150 /* Relocation information (including the GAS internal fixup). */
151 struct reloc reloc;
152 /* Need to generate an immediate in the literal pool. */
153 unsigned gen_lit_pool : 1;
154 };
155
156 typedef struct aarch64_instruction aarch64_instruction;
157
158 static aarch64_instruction inst;
159
160 static bool parse_operands (char *, const aarch64_opcode *);
161 static bool programmer_friendly_fixup (aarch64_instruction *);
162
163 /* Diagnostics inline function utilities.
164
165 These are lightweight utilities which should only be called by parse_operands
166 and other parsers. GAS processes each assembly line by parsing it against
167 instruction template(s); in the case of multiple templates (for the same
168 mnemonic name), those templates are tried one by one until one succeeds or
169 all fail. An assembly line may fail a few templates before being
170 successfully parsed; an error saved here in most cases is not a user error
171 but an error indicating the current template is not the right template.
172 Therefore it is very important that errors can be saved at a low cost during
173 the parsing; we don't want to slow down the whole parsing by recording
174 non-user errors in detail.
175
176 Remember that the objective is to help GAS pick up the most appropriate
177 error message in the case of multiple templates, e.g. FMOV which has 8
178 templates. */
179
180 static inline void
181 clear_error (void)
182 {
183 inst.parsing_error.kind = AARCH64_OPDE_NIL;
184 inst.parsing_error.error = NULL;
185 }
186
187 static inline bool
188 error_p (void)
189 {
190 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
191 }
192
193 static inline const char *
194 get_error_message (void)
195 {
196 return inst.parsing_error.error;
197 }
198
199 static inline enum aarch64_operand_error_kind
200 get_error_kind (void)
201 {
202 return inst.parsing_error.kind;
203 }
204
205 static inline void
206 set_error (enum aarch64_operand_error_kind kind, const char *error)
207 {
208 inst.parsing_error.kind = kind;
209 inst.parsing_error.error = error;
210 }
211
212 static inline void
213 set_recoverable_error (const char *error)
214 {
215 set_error (AARCH64_OPDE_RECOVERABLE, error);
216 }
217
218 /* Use the DESC field of the corresponding aarch64_operand entry to compose
219 the error message. */
220 static inline void
221 set_default_error (void)
222 {
223 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
224 }
225
226 static inline void
227 set_syntax_error (const char *error)
228 {
229 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
230 }
231
232 static inline void
233 set_first_syntax_error (const char *error)
234 {
235 if (! error_p ())
236 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
237 }
238
239 static inline void
240 set_fatal_syntax_error (const char *error)
241 {
242 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
243 }
244 \f
245 /* Return value for certain parsers when the parsing fails; those parsers
246 return the information of the parsed result, e.g. register number, on
247 success. */
248 #define PARSE_FAIL -1
249
250 /* This is an invalid condition code that means no conditional field is
251 present. */
252 #define COND_ALWAYS 0x10
253
254 typedef struct
255 {
256 const char *template;
257 uint32_t value;
258 } asm_nzcv;
259
260 struct reloc_entry
261 {
262 char *name;
263 bfd_reloc_code_real_type reloc;
264 };
265
266 /* Macros to define the register types and masks for the purpose
267 of parsing. */
268
269 #undef AARCH64_REG_TYPES
270 #define AARCH64_REG_TYPES \
271 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
272 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
273 BASIC_REG_TYPE(SP_32) /* wsp */ \
274 BASIC_REG_TYPE(SP_64) /* sp */ \
275 BASIC_REG_TYPE(Z_32) /* wzr */ \
276 BASIC_REG_TYPE(Z_64) /* xzr */ \
277 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
278 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
279 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
280 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
281 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
282 BASIC_REG_TYPE(VN) /* v[0-31] */ \
283 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
284 BASIC_REG_TYPE(PN) /* p[0-15] */ \
285 BASIC_REG_TYPE(ZA) /* za[0-15] */ \
286 BASIC_REG_TYPE(ZAH) /* za[0-15]h */ \
287 BASIC_REG_TYPE(ZAV) /* za[0-15]v */ \
288 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
289 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
290 /* Typecheck: same, plus SVE registers. */ \
291 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
292 | REG_TYPE(ZN)) \
293 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
294 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
295 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
296 /* Typecheck: same, plus SVE registers. */ \
297 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
298 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
299 | REG_TYPE(ZN)) \
300 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
301 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
303 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
304 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
306 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
307 /* Typecheck: any [BHSDQ]P FP. */ \
308 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
309 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
310 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
311 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
313 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
314 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
315 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
316 be used for SVE instructions, since Zn and Pn are valid symbols \
317 in other contexts. */ \
318 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
320 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
321 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
322 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
323 | REG_TYPE(ZN) | REG_TYPE(PN)) \
324 /* Any integer register; used for error messages only. */ \
325 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
326 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
327 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
328 /* Pseudo type to mark the end of the enumerator sequence. */ \
329 BASIC_REG_TYPE(MAX)
330
331 #undef BASIC_REG_TYPE
332 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
333 #undef MULTI_REG_TYPE
334 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
335
336 /* Register type enumerators. */
337 typedef enum aarch64_reg_type_
338 {
339 /* A list of REG_TYPE_*. */
340 AARCH64_REG_TYPES
341 } aarch64_reg_type;
342
343 #undef BASIC_REG_TYPE
344 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
345 #undef REG_TYPE
346 #define REG_TYPE(T) (1 << REG_TYPE_##T)
347 #undef MULTI_REG_TYPE
348 #define MULTI_REG_TYPE(T,V) V,
349
350 /* Structure for a hash table entry for a register. */
351 typedef struct
352 {
353 const char *name;
354 unsigned char number;
355 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
356 unsigned char builtin;
357 } reg_entry;
358
359 /* Values indexed by aarch64_reg_type to assist the type checking. */
360 static const unsigned reg_type_masks[] =
361 {
362 AARCH64_REG_TYPES
363 };
364
365 #undef BASIC_REG_TYPE
366 #undef REG_TYPE
367 #undef MULTI_REG_TYPE
368 #undef AARCH64_REG_TYPES
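/* Editorial example (not in the original source) of how the X-macro above
   expands: a line such as
     MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64)
                          | REG_TYPE(SP_32) | REG_TYPE(SP_64))
   contributes the enumerator REG_TYPE_R_SP to aarch64_reg_type and the mask
     (1 << REG_TYPE_R_32) | (1 << REG_TYPE_R_64)
     | (1 << REG_TYPE_SP_32) | (1 << REG_TYPE_SP_64)
   to reg_type_masks[REG_TYPE_R_SP], which is what the type checks below
   test against.  */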
369
370 /* Diagnostics used when we don't get a register of the expected type.
371 Note: this has to be kept synchronized with the aarch64_reg_type definitions
372 above. */
373 static const char *
374 get_reg_expected_msg (aarch64_reg_type reg_type)
375 {
376 const char *msg;
377
378 switch (reg_type)
379 {
380 case REG_TYPE_R_32:
381 msg = N_("integer 32-bit register expected");
382 break;
383 case REG_TYPE_R_64:
384 msg = N_("integer 64-bit register expected");
385 break;
386 case REG_TYPE_R_N:
387 msg = N_("integer register expected");
388 break;
389 case REG_TYPE_R64_SP:
390 msg = N_("64-bit integer or SP register expected");
391 break;
392 case REG_TYPE_SVE_BASE:
393 msg = N_("base register expected");
394 break;
395 case REG_TYPE_R_Z:
396 msg = N_("integer or zero register expected");
397 break;
398 case REG_TYPE_SVE_OFFSET:
399 msg = N_("offset register expected");
400 break;
401 case REG_TYPE_R_SP:
402 msg = N_("integer or SP register expected");
403 break;
404 case REG_TYPE_R_Z_SP:
405 msg = N_("integer, zero or SP register expected");
406 break;
407 case REG_TYPE_FP_B:
408 msg = N_("8-bit SIMD scalar register expected");
409 break;
410 case REG_TYPE_FP_H:
411 msg = N_("16-bit SIMD scalar or floating-point half precision "
412 "register expected");
413 break;
414 case REG_TYPE_FP_S:
415 msg = N_("32-bit SIMD scalar or floating-point single precision "
416 "register expected");
417 break;
418 case REG_TYPE_FP_D:
419 msg = N_("64-bit SIMD scalar or floating-point double precision "
420 "register expected");
421 break;
422 case REG_TYPE_FP_Q:
423 msg = N_("128-bit SIMD scalar or floating-point quad precision "
424 "register expected");
425 break;
426 case REG_TYPE_R_Z_BHSDQ_V:
427 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
428 msg = N_("register expected");
429 break;
430 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
431 msg = N_("SIMD scalar or floating-point register expected");
432 break;
433 case REG_TYPE_VN: /* any V reg */
434 msg = N_("vector register expected");
435 break;
436 case REG_TYPE_ZN:
437 msg = N_("SVE vector register expected");
438 break;
439 case REG_TYPE_PN:
440 msg = N_("SVE predicate register expected");
441 break;
442 default:
443 as_fatal (_("invalid register type %d"), reg_type);
444 }
445 return msg;
446 }
447
448 /* Some well known registers that we refer to directly elsewhere. */
449 #define REG_SP 31
450 #define REG_ZR 31
451
452 /* Instructions take 4 bytes in the object file. */
453 #define INSN_SIZE 4
454
455 static htab_t aarch64_ops_hsh;
456 static htab_t aarch64_cond_hsh;
457 static htab_t aarch64_shift_hsh;
458 static htab_t aarch64_sys_regs_hsh;
459 static htab_t aarch64_pstatefield_hsh;
460 static htab_t aarch64_sys_regs_ic_hsh;
461 static htab_t aarch64_sys_regs_dc_hsh;
462 static htab_t aarch64_sys_regs_at_hsh;
463 static htab_t aarch64_sys_regs_tlbi_hsh;
464 static htab_t aarch64_sys_regs_sr_hsh;
465 static htab_t aarch64_reg_hsh;
466 static htab_t aarch64_barrier_opt_hsh;
467 static htab_t aarch64_nzcv_hsh;
468 static htab_t aarch64_pldop_hsh;
469 static htab_t aarch64_hint_opt_hsh;
470
471 /* Stuff needed to resolve the label ambiguity
472 As:
473 ...
474 label: <insn>
475 may differ from:
476 ...
477 label:
478 <insn> */
479
480 static symbolS *last_label_seen;
481
482 /* Literal pool structure. Held on a per-section
483 and per-sub-section basis. */
484
485 #define MAX_LITERAL_POOL_SIZE 1024
486 typedef struct literal_expression
487 {
488 expressionS exp;
489 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
490 LITTLENUM_TYPE * bignum;
491 } literal_expression;
492
493 typedef struct literal_pool
494 {
495 literal_expression literals[MAX_LITERAL_POOL_SIZE];
496 unsigned int next_free_entry;
497 unsigned int id;
498 symbolS *symbol;
499 segT section;
500 subsegT sub_section;
501 int size;
502 struct literal_pool *next;
503 } literal_pool;
504
505 /* Pointer to a linked list of literal pools. */
506 static literal_pool *list_of_pools = NULL;
507 \f
508 /* Pure syntax. */
509
510 /* This array holds the chars that always start a comment. If the
511 pre-processor is disabled, these aren't very useful. */
512 const char comment_chars[] = "";
513
514 /* This array holds the chars that only start a comment at the beginning of
515 a line. If the line seems to have the form '# 123 filename'
516 .line and .file directives will appear in the pre-processed output. */
517 /* Note that input_file.c hand checks for '#' at the beginning of the
518 first line of the input file. This is because the compiler outputs
519 #NO_APP at the beginning of its output. */
520 /* Also note that comments like this one will always work. */
521 const char line_comment_chars[] = "#";
522
523 const char line_separator_chars[] = ";";
524
525 /* Chars that can be used to separate the mantissa
526 from the exponent in floating point numbers. */
527 const char EXP_CHARS[] = "eE";
528
529 /* Chars that mean this number is a floating point constant. */
530 /* As in 0f12.456 */
531 /* or 0d1.2345e12 */
532
533 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
534
535 /* Prefix character that indicates the start of an immediate value. */
536 #define is_immediate_prefix(C) ((C) == '#')
537
538 /* Separator character handling. */
539
540 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
541
542 static inline bool
543 skip_past_char (char **str, char c)
544 {
545 if (**str == c)
546 {
547 (*str)++;
548 return true;
549 }
550 else
551 return false;
552 }
553
554 #define skip_past_comma(str) skip_past_char (str, ',')
555
556 /* Arithmetic expressions (possibly involving symbols). */
557
558 static bool in_aarch64_get_expression = false;
559
560 /* Third argument to aarch64_get_expression. */
561 #define GE_NO_PREFIX false
562 #define GE_OPT_PREFIX true
563
564 /* Fourth argument to aarch64_get_expression. */
565 #define ALLOW_ABSENT false
566 #define REJECT_ABSENT true
567
568 /* Fifth argument to aarch64_get_expression. */
569 #define NORMAL_RESOLUTION false
570
571 /* Return TRUE if the string pointed to by *STR is successfully parsed
572 as a valid expression; *EP will be filled with the information of
573 such an expression. Otherwise return FALSE.
574
575 If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
576 If REJECT_ABSENT is true then treat missing expressions as an error.
577 If DEFER_RESOLUTION is true, then do not resolve expressions against
578 constant symbols. Necessary if the expression is part of a fixup
579 that uses a reloc that must be emitted. */
580
581 static bool
582 aarch64_get_expression (expressionS * ep,
583 char ** str,
584 bool allow_immediate_prefix,
585 bool reject_absent,
586 bool defer_resolution)
587 {
588 char *save_in;
589 segT seg;
590 bool prefix_present = false;
591
592 if (allow_immediate_prefix)
593 {
594 if (is_immediate_prefix (**str))
595 {
596 (*str)++;
597 prefix_present = true;
598 }
599 }
600
601 memset (ep, 0, sizeof (expressionS));
602
603 save_in = input_line_pointer;
604 input_line_pointer = *str;
605 in_aarch64_get_expression = true;
606 if (defer_resolution)
607 seg = deferred_expression (ep);
608 else
609 seg = expression (ep);
610 in_aarch64_get_expression = false;
611
612 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
613 {
614 /* We found a bad expression in md_operand(). */
615 *str = input_line_pointer;
616 input_line_pointer = save_in;
617 if (prefix_present && ! error_p ())
618 set_fatal_syntax_error (_("bad expression"));
619 else
620 set_first_syntax_error (_("bad expression"));
621 return false;
622 }
623
624 #ifdef OBJ_AOUT
625 if (seg != absolute_section
626 && seg != text_section
627 && seg != data_section
628 && seg != bss_section
629 && seg != undefined_section)
630 {
631 set_syntax_error (_("bad segment"));
632 *str = input_line_pointer;
633 input_line_pointer = save_in;
634 return false;
635 }
636 #else
637 (void) seg;
638 #endif
639
640 *str = input_line_pointer;
641 input_line_pointer = save_in;
642 return true;
643 }
644
645 /* Turn a string in input_line_pointer into a floating point constant
646 of type TYPE, and store the appropriate bytes in *LITP. The number
647 of LITTLENUMS emitted is stored in *SIZEP. An error message is
648 returned, or NULL on OK. */
649
650 const char *
651 md_atof (int type, char *litP, int *sizeP)
652 {
653 return ieee_md_atof (type, litP, sizeP, target_big_endian);
654 }
655
656 /* We handle all bad expressions here, so that we can report the faulty
657 instruction in the error message. */
658 void
659 md_operand (expressionS * exp)
660 {
661 if (in_aarch64_get_expression)
662 exp->X_op = O_illegal;
663 }
664
665 /* Immediate values. */
666
667 /* Errors may be set multiple times during parsing or bit encoding
668 (particularly in the Neon bits), but usually the earliest error which is set
669 will be the most meaningful. Avoid overwriting it with later (cascading)
670 errors by calling this function. */
671
672 static void
673 first_error (const char *error)
674 {
675 if (! error_p ())
676 set_syntax_error (error);
677 }
678
679 /* Similar to first_error, but this function accepts a formatted error
680 message. */
681 static void
682 first_error_fmt (const char *format, ...)
683 {
684 va_list args;
685 enum
686 { size = 100 };
687 /* N.B. this single buffer will not cause error messages for different
688 instructions to pollute each other; this is because at the end of
689 processing each assembly line, any error message will be
690 collected by as_bad. */
691 static char buffer[size];
692
693 if (! error_p ())
694 {
695 int ret ATTRIBUTE_UNUSED;
696 va_start (args, format);
697 ret = vsnprintf (buffer, size, format, args);
698 know (ret <= size - 1 && ret >= 0);
699 va_end (args);
700 set_syntax_error (buffer);
701 }
702 }
703
704 /* Register parsing. */
705
706 /* Generic register parser which is called by other specialized
707 register parsers.
708 CCP points to what should be the beginning of a register name.
709 If it is indeed a valid register name, advance CCP over it and
710 return the reg_entry structure; otherwise return NULL.
711 It does not issue diagnostics. */
712
713 static reg_entry *
714 parse_reg (char **ccp)
715 {
716 char *start = *ccp;
717 char *p;
718 reg_entry *reg;
719
720 #ifdef REGISTER_PREFIX
721 if (*start != REGISTER_PREFIX)
722 return NULL;
723 start++;
724 #endif
725
726 p = start;
727 if (!ISALPHA (*p) || !is_name_beginner (*p))
728 return NULL;
729
730 do
731 p++;
732 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
733
734 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
735
736 if (!reg)
737 return NULL;
738
739 *ccp = p;
740 return reg;
741 }
742
743 /* Return TRUE if the register REG is of a type accepted by TYPE; otherwise
744 return FALSE. */
745 static bool
746 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
747 {
748 return (reg_type_masks[type] & (1 << reg->type)) != 0;
749 }
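/* Editorial example (not in the original source): parsing "x3" with
   parse_reg yields the built-in entry { "x3", 3, REG_TYPE_R_64 }, so
   aarch64_check_reg_type (reg, REG_TYPE_R_Z) is true (R_64 is part of the
   R_Z mask) while aarch64_check_reg_type (reg, REG_TYPE_FP_D) is false.
   The built-in register table itself is populated elsewhere in this file,
   outside this excerpt.  */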
750
751 /* Try to parse a base or offset register. Allow SVE base and offset
752 registers if REG_TYPE includes SVE registers. Return the register
753 entry on success, setting *QUALIFIER to the register qualifier.
754 Return null otherwise.
755
756 Note that this function does not issue any diagnostics. */
757
758 static const reg_entry *
759 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
760 aarch64_opnd_qualifier_t *qualifier)
761 {
762 char *str = *ccp;
763 const reg_entry *reg = parse_reg (&str);
764
765 if (reg == NULL)
766 return NULL;
767
768 switch (reg->type)
769 {
770 case REG_TYPE_R_32:
771 case REG_TYPE_SP_32:
772 case REG_TYPE_Z_32:
773 *qualifier = AARCH64_OPND_QLF_W;
774 break;
775
776 case REG_TYPE_R_64:
777 case REG_TYPE_SP_64:
778 case REG_TYPE_Z_64:
779 *qualifier = AARCH64_OPND_QLF_X;
780 break;
781
782 case REG_TYPE_ZN:
783 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
784 || str[0] != '.')
785 return NULL;
786 switch (TOLOWER (str[1]))
787 {
788 case 's':
789 *qualifier = AARCH64_OPND_QLF_S_S;
790 break;
791 case 'd':
792 *qualifier = AARCH64_OPND_QLF_S_D;
793 break;
794 default:
795 return NULL;
796 }
797 str += 2;
798 break;
799
800 default:
801 return NULL;
802 }
803
804 *ccp = str;
805
806 return reg;
807 }
808
809 /* Try to parse a base or offset register. Return the register entry
810 on success, setting *QUALIFIER to the register qualifier. Return null
811 otherwise.
812
813 Note that this function does not issue any diagnostics. */
814
815 static const reg_entry *
816 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
817 {
818 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
819 }
820
821 /* Parse the qualifier of a vector register or vector element of type
822 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
823 succeeds; otherwise return FALSE.
824
825 Accept only one occurrence of:
826 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
827 b h s d q */
828 static bool
829 parse_vector_type_for_operand (aarch64_reg_type reg_type,
830 struct vector_type_el *parsed_type, char **str)
831 {
832 char *ptr = *str;
833 unsigned width;
834 unsigned element_size;
835 enum vector_el_type type;
836
837 /* skip '.' */
838 gas_assert (*ptr == '.');
839 ptr++;
840
841 if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
842 {
843 width = 0;
844 goto elt_size;
845 }
846 width = strtoul (ptr, &ptr, 10);
847 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
848 {
849 first_error_fmt (_("bad size %d in vector width specifier"), width);
850 return false;
851 }
852
853 elt_size:
854 switch (TOLOWER (*ptr))
855 {
856 case 'b':
857 type = NT_b;
858 element_size = 8;
859 break;
860 case 'h':
861 type = NT_h;
862 element_size = 16;
863 break;
864 case 's':
865 type = NT_s;
866 element_size = 32;
867 break;
868 case 'd':
869 type = NT_d;
870 element_size = 64;
871 break;
872 case 'q':
873 if (reg_type == REG_TYPE_ZN || width == 1)
874 {
875 type = NT_q;
876 element_size = 128;
877 break;
878 }
879 /* fall through. */
880 default:
881 if (*ptr != '\0')
882 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
883 else
884 first_error (_("missing element size"));
885 return false;
886 }
887 if (width != 0 && width * element_size != 64
888 && width * element_size != 128
889 && !(width == 2 && element_size == 16)
890 && !(width == 4 && element_size == 8))
891 {
892 first_error_fmt (_
893 ("invalid element size %d and vector size combination %c"),
894 width, *ptr);
895 return false;
896 }
897 ptr++;
898
899 parsed_type->type = type;
900 parsed_type->width = width;
901
902 *str = ptr;
903
904 return true;
905 }
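/* Editorial examples (not in the original source) of suffixes accepted by
   parse_vector_type_for_operand:
     ".8b" -> type NT_b, width 8   (8 x 8-bit  = 64-bit vector)
     ".4s" -> type NT_s, width 4   (4 x 32-bit = 128-bit vector)
     ".2h" -> type NT_h, width 2   (explicitly allowed by the check above)
     ".d"  -> type NT_d, width 0   (no vector width given; Vn forms then
                                    expect an index, SVE Zn/Pn widths are
                                    variable)
   Anything else, e.g. ".3s", is rejected with one of the errors above.  */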
906
907 /* *STR contains an SVE zero/merge predication suffix. Parse it into
908 *PARSED_TYPE and point *STR at the end of the suffix. */
909
910 static bool
911 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
912 {
913 char *ptr = *str;
914
915 /* Skip '/'. */
916 gas_assert (*ptr == '/');
917 ptr++;
918 switch (TOLOWER (*ptr))
919 {
920 case 'z':
921 parsed_type->type = NT_zero;
922 break;
923 case 'm':
924 parsed_type->type = NT_merge;
925 break;
926 default:
927 if (*ptr != '\0' && *ptr != ',')
928 first_error_fmt (_("unexpected character `%c' in predication type"),
929 *ptr);
930 else
931 first_error (_("missing predication type"));
932 return false;
933 }
934 parsed_type->width = 0;
935 *str = ptr + 1;
936 return true;
937 }
938
939 /* Parse a register of the type TYPE.
940
941 Return PARSE_FAIL if the string pointed to by *CCP is not a valid register
942 name or the parsed register is not of TYPE.
943
944 Otherwise return the register number, and optionally fill in the actual
945 type of the register in *RTYPE when multiple alternatives were given, and
946 return the register shape and element index information in *TYPEINFO.
947
948 IN_REG_LIST should be set to TRUE if the caller is parsing a register
949 list. */
950
951 static int
952 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
953 struct vector_type_el *typeinfo, bool in_reg_list)
954 {
955 char *str = *ccp;
956 const reg_entry *reg = parse_reg (&str);
957 struct vector_type_el atype;
958 struct vector_type_el parsetype;
959 bool is_typed_vecreg = false;
960
961 atype.defined = 0;
962 atype.type = NT_invtype;
963 atype.width = -1;
964 atype.index = 0;
965
966 if (reg == NULL)
967 {
968 if (typeinfo)
969 *typeinfo = atype;
970 set_default_error ();
971 return PARSE_FAIL;
972 }
973
974 if (! aarch64_check_reg_type (reg, type))
975 {
976 DEBUG_TRACE ("reg type check failed");
977 set_default_error ();
978 return PARSE_FAIL;
979 }
980 type = reg->type;
981
982 if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
983 && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
984 {
985 if (*str == '.')
986 {
987 if (!parse_vector_type_for_operand (type, &parsetype, &str))
988 return PARSE_FAIL;
989 }
990 else
991 {
992 if (!parse_predication_for_operand (&parsetype, &str))
993 return PARSE_FAIL;
994 }
995
996 /* Register is of the form Vn.[bhsdq]. */
997 is_typed_vecreg = true;
998
999 if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
1000 {
1001 /* The width is always variable; we don't allow an integer width
1002 to be specified. */
1003 gas_assert (parsetype.width == 0);
1004 atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
1005 }
1006 else if (parsetype.width == 0)
1007 /* Expect index. In the new scheme we cannot have
1008 Vn.[bhsdq] represent a scalar. Therefore any
1009 Vn.[bhsdq] should have an index following it.
1010 Except in reglists of course. */
1011 atype.defined |= NTA_HASINDEX;
1012 else
1013 atype.defined |= NTA_HASTYPE;
1014
1015 atype.type = parsetype.type;
1016 atype.width = parsetype.width;
1017 }
1018
1019 if (skip_past_char (&str, '['))
1020 {
1021 expressionS exp;
1022
1023 /* Reject Sn[index] syntax. */
1024 if (!is_typed_vecreg)
1025 {
1026 first_error (_("this type of register can't be indexed"));
1027 return PARSE_FAIL;
1028 }
1029
1030 if (in_reg_list)
1031 {
1032 first_error (_("index not allowed inside register list"));
1033 return PARSE_FAIL;
1034 }
1035
1036 atype.defined |= NTA_HASINDEX;
1037
1038 aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
1039 NORMAL_RESOLUTION);
1040
1041 if (exp.X_op != O_constant)
1042 {
1043 first_error (_("constant expression required"));
1044 return PARSE_FAIL;
1045 }
1046
1047 if (! skip_past_char (&str, ']'))
1048 return PARSE_FAIL;
1049
1050 atype.index = exp.X_add_number;
1051 }
1052 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
1053 {
1054 /* Indexed vector register expected. */
1055 first_error (_("indexed vector register expected"));
1056 return PARSE_FAIL;
1057 }
1058
1059 /* A vector reg Vn should be typed or indexed. */
1060 if (type == REG_TYPE_VN && atype.defined == 0)
1061 {
1062 first_error (_("invalid use of vector register"));
1063 }
1064
1065 if (typeinfo)
1066 *typeinfo = atype;
1067
1068 if (rtype)
1069 *rtype = type;
1070
1071 *ccp = str;
1072
1073 return reg->number;
1074 }
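/* Editorial example (not in the original source): given the input
   "v2.4s[1], ..." parse_typed_reg returns 2, leaves *ccp pointing at the
   comma, and fills *typeinfo with type NT_s, width 4, index 1 and
   defined = NTA_HASTYPE | NTA_HASINDEX.  A bare "v2" with no type or index
   records an "invalid use of vector register" error instead.  */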
1075
1076 /* Parse register.
1077
1078 Return the register number on success; return PARSE_FAIL otherwise.
1079
1080 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1081 the register (e.g. NEON double or quad reg when either has been requested).
1082
1083 If this is a NEON vector register with additional type information, fill
1084 in the struct pointed to by VECTYPE (if non-NULL).
1085
1086 This parser does not handle register lists. */
1087
1088 static int
1089 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1090 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1091 {
1092 struct vector_type_el atype;
1093 char *str = *ccp;
1094 int reg = parse_typed_reg (&str, type, rtype, &atype,
1095 /*in_reg_list= */ false);
1096
1097 if (reg == PARSE_FAIL)
1098 return PARSE_FAIL;
1099
1100 if (vectype)
1101 *vectype = atype;
1102
1103 *ccp = str;
1104
1105 return reg;
1106 }
1107
1108 static inline bool
1109 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1110 {
1111 return
1112 e1.type == e2.type
1113 && e1.defined == e2.defined
1114 && e1.width == e2.width && e1.index == e2.index;
1115 }
1116
1117 /* This function parses a list of vector registers of type TYPE.
1118 On success, it returns the parsed register list information in the
1119 following encoded format:
1120
1121 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1122 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1123
1124 The information of the register shape and/or index is returned in
1125 *VECTYPE.
1126
1127 It returns PARSE_FAIL if the register list is invalid.
1128
1129 The list contains one to four registers.
1130 Each register can be one of:
1131 <Vt>.<T>[<index>]
1132 <Vt>.<T>
1133 All <T> should be identical.
1134 All <index> should be identical.
1135 There are restrictions on <Vt> numbers which are checked later
1136 (by reg_list_valid_p). */
1137
1138 static int
1139 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1140 struct vector_type_el *vectype)
1141 {
1142 char *str = *ccp;
1143 int nb_regs;
1144 struct vector_type_el typeinfo, typeinfo_first;
1145 int val, val_range;
1146 int in_range;
1147 int ret_val;
1148 int i;
1149 bool error = false;
1150 bool expect_index = false;
1151
1152 if (*str != '{')
1153 {
1154 set_syntax_error (_("expecting {"));
1155 return PARSE_FAIL;
1156 }
1157 str++;
1158
1159 nb_regs = 0;
1160 typeinfo_first.defined = 0;
1161 typeinfo_first.type = NT_invtype;
1162 typeinfo_first.width = -1;
1163 typeinfo_first.index = 0;
1164 ret_val = 0;
1165 val = -1;
1166 val_range = -1;
1167 in_range = 0;
1168 do
1169 {
1170 if (in_range)
1171 {
1172 str++; /* skip over '-' */
1173 val_range = val;
1174 }
1175 val = parse_typed_reg (&str, type, NULL, &typeinfo,
1176 /*in_reg_list= */ true);
1177 if (val == PARSE_FAIL)
1178 {
1179 set_first_syntax_error (_("invalid vector register in list"));
1180 error = true;
1181 continue;
1182 }
1183 /* reject [bhsd]n */
1184 if (type == REG_TYPE_VN && typeinfo.defined == 0)
1185 {
1186 set_first_syntax_error (_("invalid scalar register in list"));
1187 error = true;
1188 continue;
1189 }
1190
1191 if (typeinfo.defined & NTA_HASINDEX)
1192 expect_index = true;
1193
1194 if (in_range)
1195 {
1196 if (val < val_range)
1197 {
1198 set_first_syntax_error
1199 (_("invalid range in vector register list"));
1200 error = true;
1201 }
1202 val_range++;
1203 }
1204 else
1205 {
1206 val_range = val;
1207 if (nb_regs == 0)
1208 typeinfo_first = typeinfo;
1209 else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1210 {
1211 set_first_syntax_error
1212 (_("type mismatch in vector register list"));
1213 error = true;
1214 }
1215 }
1216 if (! error)
1217 for (i = val_range; i <= val; i++)
1218 {
1219 ret_val |= i << (5 * nb_regs);
1220 nb_regs++;
1221 }
1222 in_range = 0;
1223 }
1224 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1225
1226 skip_whitespace (str);
1227 if (*str != '}')
1228 {
1229 set_first_syntax_error (_("end of vector register list not found"));
1230 error = true;
1231 }
1232 str++;
1233
1234 skip_whitespace (str);
1235
1236 if (expect_index)
1237 {
1238 if (skip_past_char (&str, '['))
1239 {
1240 expressionS exp;
1241
1242 aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
1243 NORMAL_RESOLUTION);
1244 if (exp.X_op != O_constant)
1245 {
1246 set_first_syntax_error (_("constant expression required."));
1247 error = true;
1248 }
1249 if (! skip_past_char (&str, ']'))
1250 error = true;
1251 else
1252 typeinfo_first.index = exp.X_add_number;
1253 }
1254 else
1255 {
1256 set_first_syntax_error (_("expected index"));
1257 error = true;
1258 }
1259 }
1260
1261 if (nb_regs > 4)
1262 {
1263 set_first_syntax_error (_("too many registers in vector register list"));
1264 error = true;
1265 }
1266 else if (nb_regs == 0)
1267 {
1268 set_first_syntax_error (_("empty vector register list"));
1269 error = true;
1270 }
1271
1272 *ccp = str;
1273 if (! error)
1274 *vectype = typeinfo_first;
1275
1276 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1277 }
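/* Editorial example (not in the original source): for the list
   "{v4.4s - v7.4s}" the function above returns
     (4 << 2) | (5 << 7) | (6 << 12) | (7 << 17) | 3
   i.e. the four register numbers in successive 5-bit fields starting at
   bit 2, with (number of registers - 1) in bits 0-1, and *vectype records
   the shared ".4s" shape.  */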
1278
1279 /* Directives: register aliases. */
1280
1281 static reg_entry *
1282 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1283 {
1284 reg_entry *new;
1285 const char *name;
1286
1287 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1288 {
1289 if (new->builtin)
1290 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1291 str);
1292
1293 /* Only warn about a redefinition if it's not defined as the
1294 same register. */
1295 else if (new->number != number || new->type != type)
1296 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1297
1298 return NULL;
1299 }
1300
1301 name = xstrdup (str);
1302 new = XNEW (reg_entry);
1303
1304 new->name = name;
1305 new->number = number;
1306 new->type = type;
1307 new->builtin = false;
1308
1309 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1310
1311 return new;
1312 }
1313
1314 /* Look for the .req directive. This is of the form:
1315
1316 new_register_name .req existing_register_name
1317
1318 If we find one, or if it looks sufficiently like one that we want to
1319 handle any error here, return TRUE. Otherwise return FALSE. */
1320
1321 static bool
1322 create_register_alias (char *newname, char *p)
1323 {
1324 const reg_entry *old;
1325 char *oldname, *nbuf;
1326 size_t nlen;
1327
1328 /* The input scrubber ensures that whitespace after the mnemonic is
1329 collapsed to single spaces. */
1330 oldname = p;
1331 if (!startswith (oldname, " .req "))
1332 return false;
1333
1334 oldname += 6;
1335 if (*oldname == '\0')
1336 return false;
1337
1338 old = str_hash_find (aarch64_reg_hsh, oldname);
1339 if (!old)
1340 {
1341 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1342 return true;
1343 }
1344
1345 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1346 the desired alias name, and p points to its end. If not, then
1347 the desired alias name is in the global original_case_string. */
1348 #ifdef TC_CASE_SENSITIVE
1349 nlen = p - newname;
1350 #else
1351 newname = original_case_string;
1352 nlen = strlen (newname);
1353 #endif
1354
1355 nbuf = xmemdup0 (newname, nlen);
1356
1357 /* Create aliases under the new name as stated; an all-lowercase
1358 version of the new name; and an all-uppercase version of the new
1359 name. */
1360 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1361 {
1362 for (p = nbuf; *p; p++)
1363 *p = TOUPPER (*p);
1364
1365 if (strncmp (nbuf, newname, nlen))
1366 {
1367 /* If this attempt to create an additional alias fails, do not bother
1368 trying to create the all-lower case alias. We will fail and issue
1369 a second, duplicate error message. This situation arises when the
1370 programmer does something like:
1371 foo .req r0
1372 Foo .req r1
1373 The second .req creates the "Foo" alias but then fails to create
1374 the artificial FOO alias because it has already been created by the
1375 first .req. */
1376 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1377 {
1378 free (nbuf);
1379 return true;
1380 }
1381 }
1382
1383 for (p = nbuf; *p; p++)
1384 *p = TOLOWER (*p);
1385
1386 if (strncmp (nbuf, newname, nlen))
1387 insert_reg_alias (nbuf, old->number, old->type);
1388 }
1389
1390 free (nbuf);
1391 return true;
1392 }
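/* Editorial example (not in the original source): the line
     Foo .req x0
   creates the alias "Foo" exactly as written, plus the all-uppercase "FOO"
   and all-lowercase "foo" variants, all referring to x0 (REG_TYPE_R_64).
   When TC_CASE_SENSITIVE is undefined, the original-case spelling comes
   from original_case_string, as noted above.  */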
1393
1394 /* Should never be called, as .req goes between the alias and the
1395 register name, not at the beginning of the line. */
1396 static void
1397 s_req (int a ATTRIBUTE_UNUSED)
1398 {
1399 as_bad (_("invalid syntax for .req directive"));
1400 }
1401
1402 /* The .unreq directive deletes an alias which was previously defined
1403 by .req. For example:
1404
1405 my_alias .req r11
1406 .unreq my_alias */
1407
1408 static void
1409 s_unreq (int a ATTRIBUTE_UNUSED)
1410 {
1411 char *name;
1412 char saved_char;
1413
1414 name = input_line_pointer;
1415
1416 while (*input_line_pointer != 0
1417 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1418 ++input_line_pointer;
1419
1420 saved_char = *input_line_pointer;
1421 *input_line_pointer = 0;
1422
1423 if (!*name)
1424 as_bad (_("invalid syntax for .unreq directive"));
1425 else
1426 {
1427 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1428
1429 if (!reg)
1430 as_bad (_("unknown register alias '%s'"), name);
1431 else if (reg->builtin)
1432 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1433 name);
1434 else
1435 {
1436 char *p;
1437 char *nbuf;
1438
1439 str_hash_delete (aarch64_reg_hsh, name);
1440 free ((char *) reg->name);
1441 free (reg);
1442
1443 /* Also locate the all upper case and all lower case versions.
1444 Do not complain if we cannot find one or the other as it
1445 was probably deleted above. */
1446
1447 nbuf = strdup (name);
1448 for (p = nbuf; *p; p++)
1449 *p = TOUPPER (*p);
1450 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1451 if (reg)
1452 {
1453 str_hash_delete (aarch64_reg_hsh, nbuf);
1454 free ((char *) reg->name);
1455 free (reg);
1456 }
1457
1458 for (p = nbuf; *p; p++)
1459 *p = TOLOWER (*p);
1460 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1461 if (reg)
1462 {
1463 str_hash_delete (aarch64_reg_hsh, nbuf);
1464 free ((char *) reg->name);
1465 free (reg);
1466 }
1467
1468 free (nbuf);
1469 }
1470 }
1471
1472 *input_line_pointer = saved_char;
1473 demand_empty_rest_of_line ();
1474 }
1475
1476 /* Directives: Instruction set selection. */
1477
1478 #ifdef OBJ_ELF
1479 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1480 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1481 Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
1482 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped.
1483
1484 /* Create a new mapping symbol for the transition to STATE. */
1485
1486 static void
1487 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1488 {
1489 symbolS *symbolP;
1490 const char *symname;
1491 int type;
1492
1493 switch (state)
1494 {
1495 case MAP_DATA:
1496 symname = "$d";
1497 type = BSF_NO_FLAGS;
1498 break;
1499 case MAP_INSN:
1500 symname = "$x";
1501 type = BSF_NO_FLAGS;
1502 break;
1503 default:
1504 abort ();
1505 }
1506
1507 symbolP = symbol_new (symname, now_seg, frag, value);
1508 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1509
1510 /* Save the mapping symbols for future reference. Also check that
1511 we do not place two mapping symbols at the same offset within a
1512 frag. We'll handle overlap between frags in
1513 check_mapping_symbols.
1514
1515 If .fill or other data filling directive generates zero sized data,
1516 the mapping symbol for the following code will have the same value
1517 as the one generated for the data filling directive. In this case,
1518 we replace the old symbol with the new one at the same address. */
1519 if (value == 0)
1520 {
1521 if (frag->tc_frag_data.first_map != NULL)
1522 {
1523 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1524 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1525 &symbol_lastP);
1526 }
1527 frag->tc_frag_data.first_map = symbolP;
1528 }
1529 if (frag->tc_frag_data.last_map != NULL)
1530 {
1531 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1532 S_GET_VALUE (symbolP));
1533 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1534 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1535 &symbol_lastP);
1536 }
1537 frag->tc_frag_data.last_map = symbolP;
1538 }
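/* Editorial illustration (not in the original source): in a section that
   starts with instructions and then switches to data, e.g.
     add  x0, x0, #1
     .word 0x12345678
   the transitions recorded here produce a "$x" mapping symbol at offset 0
   and a "$d" mapping symbol at the start of the .word data, which is what
   disassemblers use to tell code from data.  */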
1539
1540 /* We must sometimes convert a region marked as code to data during
1541 code alignment, if an odd number of bytes have to be padded. The
1542 code mapping symbol is pushed to an aligned address. */
1543
1544 static void
1545 insert_data_mapping_symbol (enum mstate state,
1546 valueT value, fragS * frag, offsetT bytes)
1547 {
1548 /* If there was already a mapping symbol, remove it. */
1549 if (frag->tc_frag_data.last_map != NULL
1550 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1551 frag->fr_address + value)
1552 {
1553 symbolS *symp = frag->tc_frag_data.last_map;
1554
1555 if (value == 0)
1556 {
1557 know (frag->tc_frag_data.first_map == symp);
1558 frag->tc_frag_data.first_map = NULL;
1559 }
1560 frag->tc_frag_data.last_map = NULL;
1561 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1562 }
1563
1564 make_mapping_symbol (MAP_DATA, value, frag);
1565 make_mapping_symbol (state, value + bytes, frag);
1566 }
1567
1568 static void mapping_state_2 (enum mstate state, int max_chars);
1569
1570 /* Set the mapping state to STATE. Only call this when about to
1571 emit some STATE bytes to the file. */
1572
1573 void
1574 mapping_state (enum mstate state)
1575 {
1576 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1577
1578 if (state == MAP_INSN)
1579 /* AArch64 instructions require 4-byte alignment. When emitting
1580 instructions into any section, record the appropriate section
1581 alignment. */
1582 record_alignment (now_seg, 2);
1583
1584 if (mapstate == state)
1585 /* The mapping symbol has already been emitted.
1586 There is nothing else to do. */
1587 return;
1588
1589 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1590 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1591 /* Emit MAP_DATA within executable section in order. Otherwise, it will be
1592 evaluated later in the next else. */
1593 return;
1594 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1595 {
1596 /* Only add the symbol if the offset is > 0:
1597 if we're at the first frag, check its size > 0;
1598 if we're not at the first frag, then for sure
1599 the offset is > 0. */
1600 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1601 const int add_symbol = (frag_now != frag_first)
1602 || (frag_now_fix () > 0);
1603
1604 if (add_symbol)
1605 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1606 }
1607 #undef TRANSITION
1608
1609 mapping_state_2 (state, 0);
1610 }
1611
1612 /* Same as mapping_state, but MAX_CHARS bytes have already been
1613 allocated. Put the mapping symbol that far back. */
1614
1615 static void
1616 mapping_state_2 (enum mstate state, int max_chars)
1617 {
1618 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1619
1620 if (!SEG_NORMAL (now_seg))
1621 return;
1622
1623 if (mapstate == state)
1624 /* The mapping symbol has already been emitted.
1625 There is nothing else to do. */
1626 return;
1627
1628 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1629 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1630 }
1631 #else
1632 #define mapping_state(x) /* nothing */
1633 #define mapping_state_2(x, y) /* nothing */
1634 #endif
1635
1636 /* Directives: sectioning and alignment. */
1637
1638 static void
1639 s_bss (int ignore ATTRIBUTE_UNUSED)
1640 {
1641 /* We don't support putting frags in the BSS segment; we fake it by
1642 marking in_bss, then looking at s_skip for clues. */
1643 subseg_set (bss_section, 0);
1644 demand_empty_rest_of_line ();
1645 mapping_state (MAP_DATA);
1646 }
1647
1648 static void
1649 s_even (int ignore ATTRIBUTE_UNUSED)
1650 {
1651 /* Never make frag if expect extra pass. */
1652 if (!need_pass_2)
1653 frag_align (1, 0, 0);
1654
1655 record_alignment (now_seg, 1);
1656
1657 demand_empty_rest_of_line ();
1658 }
1659
1660 /* Directives: Literal pools. */
1661
1662 static literal_pool *
1663 find_literal_pool (int size)
1664 {
1665 literal_pool *pool;
1666
1667 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1668 {
1669 if (pool->section == now_seg
1670 && pool->sub_section == now_subseg && pool->size == size)
1671 break;
1672 }
1673
1674 return pool;
1675 }
1676
1677 static literal_pool *
1678 find_or_make_literal_pool (int size)
1679 {
1680 /* Next literal pool ID number. */
1681 static unsigned int latest_pool_num = 1;
1682 literal_pool *pool;
1683
1684 pool = find_literal_pool (size);
1685
1686 if (pool == NULL)
1687 {
1688 /* Create a new pool. */
1689 pool = XNEW (literal_pool);
1690 if (!pool)
1691 return NULL;
1692
1693 /* Currently we always put the literal pool in the current text
1694 section. If we were generating "small" model code where we
1695 knew that all code and initialised data was within 1MB then
1696 we could output literals to mergeable, read-only data
1697 sections. */
1698
1699 pool->next_free_entry = 0;
1700 pool->section = now_seg;
1701 pool->sub_section = now_subseg;
1702 pool->size = size;
1703 pool->next = list_of_pools;
1704 pool->symbol = NULL;
1705
1706 /* Add it to the list. */
1707 list_of_pools = pool;
1708 }
1709
1710 /* New pools, and emptied pools, will have a NULL symbol. */
1711 if (pool->symbol == NULL)
1712 {
1713 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1714 &zero_address_frag, 0);
1715 pool->id = latest_pool_num++;
1716 }
1717
1718 /* Done. */
1719 return pool;
1720 }
1721
1722 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1723 Return TRUE on success, otherwise return FALSE. */
1724 static bool
1725 add_to_lit_pool (expressionS *exp, int size)
1726 {
1727 literal_pool *pool;
1728 unsigned int entry;
1729
1730 pool = find_or_make_literal_pool (size);
1731
1732 /* Check if this literal value is already in the pool. */
1733 for (entry = 0; entry < pool->next_free_entry; entry++)
1734 {
1735 expressionS * litexp = & pool->literals[entry].exp;
1736
1737 if ((litexp->X_op == exp->X_op)
1738 && (exp->X_op == O_constant)
1739 && (litexp->X_add_number == exp->X_add_number)
1740 && (litexp->X_unsigned == exp->X_unsigned))
1741 break;
1742
1743 if ((litexp->X_op == exp->X_op)
1744 && (exp->X_op == O_symbol)
1745 && (litexp->X_add_number == exp->X_add_number)
1746 && (litexp->X_add_symbol == exp->X_add_symbol)
1747 && (litexp->X_op_symbol == exp->X_op_symbol))
1748 break;
1749 }
1750
1751 /* Do we need to create a new entry? */
1752 if (entry == pool->next_free_entry)
1753 {
1754 if (entry >= MAX_LITERAL_POOL_SIZE)
1755 {
1756 set_syntax_error (_("literal pool overflow"));
1757 return false;
1758 }
1759
1760 pool->literals[entry].exp = *exp;
1761 pool->next_free_entry += 1;
1762 if (exp->X_op == O_big)
1763 {
1764 /* PR 16688: Bignums are held in a single global array. We must
1765 copy and preserve that value now, before it is overwritten. */
1766 pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1767 exp->X_add_number);
1768 memcpy (pool->literals[entry].bignum, generic_bignum,
1769 CHARS_PER_LITTLENUM * exp->X_add_number);
1770 }
1771 else
1772 pool->literals[entry].bignum = NULL;
1773 }
1774
1775 exp->X_op = O_symbol;
1776 exp->X_add_number = ((int) entry) * size;
1777 exp->X_add_symbol = pool->symbol;
1778
1779 return true;
1780 }
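/* Editorial note (not in the original source): the typical consumer of
   add_to_lit_pool is the "ldr Xt, =<value>" pseudo-instruction, which is
   handled elsewhere in this file; the pooled value is then emitted by
   ".ltorg"/".pool" (s_ltorg below).  For example
     ldr  x0, =0x1122334455667788
     ...
     .ltorg
   places the 64-bit constant in the pool and turns the ldr into a
   PC-relative literal load.  */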
1781
1782 /* Can't use symbol_new here, so have to create a symbol and then at
1783 a later date assign it a value. That's what these functions do. */
1784
1785 static void
1786 symbol_locate (symbolS * symbolP,
1787 const char *name,/* It is copied, the caller can modify. */
1788 segT segment, /* Segment identifier (SEG_<something>). */
1789 valueT valu, /* Symbol value. */
1790 fragS * frag) /* Associated fragment. */
1791 {
1792 size_t name_length;
1793 char *preserved_copy_of_name;
1794
1795 name_length = strlen (name) + 1; /* +1 for \0. */
1796 obstack_grow (&notes, name, name_length);
1797 preserved_copy_of_name = obstack_finish (&notes);
1798
1799 #ifdef tc_canonicalize_symbol_name
1800 preserved_copy_of_name =
1801 tc_canonicalize_symbol_name (preserved_copy_of_name);
1802 #endif
1803
1804 S_SET_NAME (symbolP, preserved_copy_of_name);
1805
1806 S_SET_SEGMENT (symbolP, segment);
1807 S_SET_VALUE (symbolP, valu);
1808 symbol_clear_list_pointers (symbolP);
1809
1810 symbol_set_frag (symbolP, frag);
1811
1812 /* Link to end of symbol chain. */
1813 {
1814 extern int symbol_table_frozen;
1815
1816 if (symbol_table_frozen)
1817 abort ();
1818 }
1819
1820 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1821
1822 obj_symbol_new_hook (symbolP);
1823
1824 #ifdef tc_symbol_new_hook
1825 tc_symbol_new_hook (symbolP);
1826 #endif
1827
1828 #ifdef DEBUG_SYMS
1829 verify_symbol_chain (symbol_rootP, symbol_lastP);
1830 #endif /* DEBUG_SYMS */
1831 }
1832
1833
1834 static void
1835 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1836 {
1837 unsigned int entry;
1838 literal_pool *pool;
1839 char sym_name[20];
1840 int align;
1841
1842 for (align = 2; align <= 4; align++)
1843 {
1844 int size = 1 << align;
1845
1846 pool = find_literal_pool (size);
1847 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1848 continue;
1849
1850 /* Align the pool to its entry size, since entries will be accessed as
1851 words or larger. Only make a frag if we have to. */
1852 if (!need_pass_2)
1853 frag_align (align, 0, 0);
1854
1855 mapping_state (MAP_DATA);
1856
1857 record_alignment (now_seg, align);
1858
1859 sprintf (sym_name, "$$lit_\002%x", pool->id);
1860
1861 symbol_locate (pool->symbol, sym_name, now_seg,
1862 (valueT) frag_now_fix (), frag_now);
1863 symbol_table_insert (pool->symbol);
1864
1865 for (entry = 0; entry < pool->next_free_entry; entry++)
1866 {
1867 expressionS * exp = & pool->literals[entry].exp;
1868
1869 if (exp->X_op == O_big)
1870 {
1871 /* PR 16688: Restore the global bignum value. */
1872 gas_assert (pool->literals[entry].bignum != NULL);
1873 memcpy (generic_bignum, pool->literals[entry].bignum,
1874 CHARS_PER_LITTLENUM * exp->X_add_number);
1875 }
1876
1877 /* First output the expression in the instruction to the pool. */
1878 emit_expr (exp, size); /* .word|.xword */
1879
1880 if (exp->X_op == O_big)
1881 {
1882 free (pool->literals[entry].bignum);
1883 pool->literals[entry].bignum = NULL;
1884 }
1885 }
1886
1887 /* Mark the pool as empty. */
1888 pool->next_free_entry = 0;
1889 pool->symbol = NULL;
1890 }
1891 }
1892
1893 #ifdef OBJ_ELF
1894 /* Forward declarations for functions below, in the MD interface
1895 section. */
1896 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1897 static struct reloc_table_entry * find_reloc_table_entry (char **);
1898
1899 /* Directives: Data. */
1900 /* N.B. the support for relocation suffix in this directive needs to be
1901 implemented properly. */
1902
1903 static void
1904 s_aarch64_elf_cons (int nbytes)
1905 {
1906 expressionS exp;
1907
1908 #ifdef md_flush_pending_output
1909 md_flush_pending_output ();
1910 #endif
1911
1912 if (is_it_end_of_statement ())
1913 {
1914 demand_empty_rest_of_line ();
1915 return;
1916 }
1917
1918 #ifdef md_cons_align
1919 md_cons_align (nbytes);
1920 #endif
1921
1922 mapping_state (MAP_DATA);
1923 do
1924 {
1925 struct reloc_table_entry *reloc;
1926
1927 expression (&exp);
1928
1929 if (exp.X_op != O_symbol)
1930 emit_expr (&exp, (unsigned int) nbytes);
1931 else
1932 {
1933 skip_past_char (&input_line_pointer, '#');
1934 if (skip_past_char (&input_line_pointer, ':'))
1935 {
1936 reloc = find_reloc_table_entry (&input_line_pointer);
1937 if (reloc == NULL)
1938 as_bad (_("unrecognized relocation suffix"));
1939 else
1940 as_bad (_("unimplemented relocation suffix"));
1941 ignore_rest_of_line ();
1942 return;
1943 }
1944 else
1945 emit_expr (&exp, (unsigned int) nbytes);
1946 }
1947 }
1948 while (*input_line_pointer++ == ',');
1949
1950 /* Put terminator back into stream. */
1951 input_line_pointer--;
1952 demand_empty_rest_of_line ();
1953 }
1954
1955 /* Mark a symbol as following the variant PCS convention. */
1956
1957 static void
1958 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1959 {
1960 char *name;
1961 char c;
1962 symbolS *sym;
1963 asymbol *bfdsym;
1964 elf_symbol_type *elfsym;
1965
1966 c = get_symbol_name (&name);
1967 if (!*name)
1968 as_bad (_("Missing symbol name in directive"));
1969 sym = symbol_find_or_make (name);
1970 restore_line_pointer (c);
1971 demand_empty_rest_of_line ();
1972 bfdsym = symbol_get_bfdsym (sym);
1973 elfsym = elf_symbol_from (bfdsym);
1974 gas_assert (elfsym);
1975 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
1976 }
1977 #endif /* OBJ_ELF */
1978
1979 /* Output a 32-bit word, but mark as an instruction. */
1980
1981 static void
1982 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1983 {
1984 expressionS exp;
1985
1986 #ifdef md_flush_pending_output
1987 md_flush_pending_output ();
1988 #endif
1989
1990 if (is_it_end_of_statement ())
1991 {
1992 demand_empty_rest_of_line ();
1993 return;
1994 }
1995
1996 /* Sections are assumed to start aligned. In an executable section there is
1997 no MAP_DATA symbol pending, so we only align the address during the
1998 MAP_DATA --> MAP_INSN transition.
1999 For other sections, this is not guaranteed. */
2000 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2001 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
2002 frag_align_code (2, 0);
2003
2004 #ifdef OBJ_ELF
2005 mapping_state (MAP_INSN);
2006 #endif
2007
2008 do
2009 {
2010 expression (&exp);
2011 if (exp.X_op != O_constant)
2012 {
2013 as_bad (_("constant expression required"));
2014 ignore_rest_of_line ();
2015 return;
2016 }
2017
2018 if (target_big_endian)
2019 {
2020 unsigned int val = exp.X_add_number;
2021 exp.X_add_number = SWAP_32 (val);
2022 }
2023 emit_expr (&exp, 4);
2024 }
2025 while (*input_line_pointer++ == ',');
2026
2027 /* Put terminator back into stream. */
2028 input_line_pointer--;
2029 demand_empty_rest_of_line ();
2030 }
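/* Editorial example (not in the original source):
     .inst 0xd503201f
   emits the four bytes of the NOP encoding, byte-swapped on big-endian
   targets by the code above, and (on ELF) marks them with an "$x" mapping
   symbol so they disassemble as an instruction rather than data.  */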
2031
2032 static void
2033 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2034 {
2035 demand_empty_rest_of_line ();
2036 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2037 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2038 }
2039
2040 #ifdef OBJ_ELF
2041 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2042
2043 static void
2044 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2045 {
2046 expressionS exp;
2047
2048 expression (&exp);
2049 frag_grow (4);
2050 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2051 BFD_RELOC_AARCH64_TLSDESC_ADD);
2052
2053 demand_empty_rest_of_line ();
2054 }
2055
2056 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2057
2058 static void
2059 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2060 {
2061 expressionS exp;
2062
2063 /* Since we're just labelling the code, there's no need to define a
2064 mapping symbol. */
2065 expression (&exp);
2066 /* Make sure there is enough room in this frag for the following
2067 blr. This trick only works if the blr follows immediately after
2068 the .tlsdesccall directive. */
2069 frag_grow (4);
2070 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2071 BFD_RELOC_AARCH64_TLSDESC_CALL);
2072
2073 demand_empty_rest_of_line ();
2074 }
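
/* A representative TLS descriptor call sequence using this directive
   (sketch only; "var" is an arbitrary symbol):

     adrp	x0, :tlsdesc:var
     ldr	x1, [x0, #:tlsdesc_lo12:var]
     add	x0, x0, #:tlsdesc_lo12:var
     .tlsdesccall	var
     blr	x1

   The directive attaches BFD_RELOC_AARCH64_TLSDESC_CALL to the BLR that
   immediately follows it.  */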
2075
2076 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2077
2078 static void
2079 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2080 {
2081 expressionS exp;
2082
2083 expression (&exp);
2084 frag_grow (4);
2085 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2086 BFD_RELOC_AARCH64_TLSDESC_LDR);
2087
2088 demand_empty_rest_of_line ();
2089 }
2090 #endif /* OBJ_ELF */
2091
2092 static void s_aarch64_arch (int);
2093 static void s_aarch64_cpu (int);
2094 static void s_aarch64_arch_extension (int);
2095
2096 /* This table describes all the machine specific pseudo-ops the assembler
2097 has to support. The fields are:
2098 pseudo-op name without dot
2099 function to call to execute this pseudo-op
2100 Integer arg to pass to the function. */
2101
2102 const pseudo_typeS md_pseudo_table[] = {
2103 /* Never called because '.req' does not start a line. */
2104 {"req", s_req, 0},
2105 {"unreq", s_unreq, 0},
2106 {"bss", s_bss, 0},
2107 {"even", s_even, 0},
2108 {"ltorg", s_ltorg, 0},
2109 {"pool", s_ltorg, 0},
2110 {"cpu", s_aarch64_cpu, 0},
2111 {"arch", s_aarch64_arch, 0},
2112 {"arch_extension", s_aarch64_arch_extension, 0},
2113 {"inst", s_aarch64_inst, 0},
2114 {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
2115 #ifdef OBJ_ELF
2116 {"tlsdescadd", s_tlsdescadd, 0},
2117 {"tlsdesccall", s_tlsdesccall, 0},
2118 {"tlsdescldr", s_tlsdescldr, 0},
2119 {"word", s_aarch64_elf_cons, 4},
2120 {"long", s_aarch64_elf_cons, 4},
2121 {"xword", s_aarch64_elf_cons, 8},
2122 {"dword", s_aarch64_elf_cons, 8},
2123 {"variant_pcs", s_variant_pcs, 0},
2124 #endif
2125 {"float16", float_cons, 'h'},
2126 {"bfloat16", float_cons, 'b'},
2127 {0, 0, 0}
2128 };
2129 \f
2130
2131 /* Check whether STR points to a register name followed by a comma or the
2132 end of line; REG_TYPE indicates which register types are checked
2133 against. Return TRUE if STR is such a register name; otherwise return
2134 FALSE. This function is not meant to produce any diagnostics, but the
2135 register parser aarch64_reg_parse, which it calls, does; we therefore
2136 call clear_error to discard any diagnostics that aarch64_reg_parse may
2137 generate.
2138 The function also returns FALSE immediately if a user error is already
2139 pending on entry, so that the existing diagnostics state is not
2140 spoiled.
2141 The function currently serves parse_constant_immediate and
2142 parse_big_immediate only. */
2143 static bool
2144 reg_name_p (char *str, aarch64_reg_type reg_type)
2145 {
2146 int reg;
2147
2148 /* Prevent the diagnostics state from being spoiled. */
2149 if (error_p ())
2150 return false;
2151
2152 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2153
2154 /* Clear the parsing error that may be set by the reg parser. */
2155 clear_error ();
2156
2157 if (reg == PARSE_FAIL)
2158 return false;
2159
2160 skip_whitespace (str);
2161 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2162 return true;
2163
2164 return false;
2165 }
2166
2167 /* Parser functions used exclusively in instruction operands. */
2168
2169 /* Parse an immediate expression, which need not be constant.
2170
2171 To prevent the expression parser from pushing a register name
2172 into the symbol table as an undefined symbol, first check whether STR
2173 is a register of type REG_TYPE followed by a comma or the end of line.
2174 Return FALSE if STR is such a string. */
2175
2176 static bool
2177 parse_immediate_expression (char **str, expressionS *exp,
2178 aarch64_reg_type reg_type)
2179 {
2180 if (reg_name_p (*str, reg_type))
2181 {
2182 set_recoverable_error (_("immediate operand required"));
2183 return false;
2184 }
2185
2186 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT,
2187 NORMAL_RESOLUTION);
2188
2189 if (exp->X_op == O_absent)
2190 {
2191 set_fatal_syntax_error (_("missing immediate expression"));
2192 return false;
2193 }
2194
2195 return true;
2196 }
2197
2198 /* Constant immediate-value read function for use in insn parsing.
2199 STR points to the beginning of the immediate (with the optional
2200 leading #); *VAL receives the value. REG_TYPE says which register
2201 names should be treated as registers rather than as symbolic immediates.
2202
2203 Return TRUE on success; otherwise return FALSE. */
2204
2205 static bool
2206 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2207 {
2208 expressionS exp;
2209
2210 if (! parse_immediate_expression (str, &exp, reg_type))
2211 return false;
2212
2213 if (exp.X_op != O_constant)
2214 {
2215 set_syntax_error (_("constant expression required"));
2216 return false;
2217 }
2218
2219 *val = exp.X_add_number;
2220 return true;
2221 }
2222
2223 static uint32_t
2224 encode_imm_float_bits (uint32_t imm)
2225 {
2226 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2227 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2228 }
2229
2230 /* Return TRUE if the single-precision floating-point value encoded in IMM
2231 can be expressed in the AArch64 8-bit signed floating-point format with
2232 3-bit exponent and normalized 4 bits of precision; in other words, the
2233 floating-point value must be expressible as
2234 (+/-) n / 16 * power (2, r)
2235 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2236
2237 static bool
2238 aarch64_imm_float_p (uint32_t imm)
2239 {
2240 /* If a single-precision floating-point value has the following bit
2241 pattern, it can be expressed in the AArch64 8-bit floating-point
2242 format:
2243
2244 3 32222222 2221111111111
2245 1 09876543 21098765432109876543210
2246 n Eeeeeexx xxxx0000000000000000000
2247
2248 where n, e and each x are either 0 or 1 independently, with
2249 E == ~ e. */
2250
2251 uint32_t pattern;
2252
2253 /* Prepare the pattern for 'Eeeeee'. */
2254 if (((imm >> 30) & 0x1) == 0)
2255 pattern = 0x3e000000;
2256 else
2257 pattern = 0x40000000;
2258
2259 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2260 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2261 }
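
/* Worked example, assuming the standard IEEE 754 single encoding: 2.0f
   is 0x40000000.  Its low 19 bits are zero and bits 29-25 are the
   inverse of bit 30, so aarch64_imm_float_p accepts it, and
   encode_imm_float_bits maps it to the 8-bit immediate 0x00.  */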
2262
2263 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2264 as an IEEE float without any loss of precision. Store the value in
2265 *FPWORD if so. */
2266
2267 static bool
2268 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2269 {
2270 /* If a double-precision floating-point value has the following bit
2271 pattern, it can be expressed in a float:
2272
2273 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2274 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2275 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2276
2277 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2278 if Eeee_eeee != 1111_1111
2279
2280 where n, e, s and S are either 0 or 1 independently and where ~ is the
2281 inverse of E. */
2282
2283 uint32_t pattern;
2284 uint32_t high32 = imm >> 32;
2285 uint32_t low32 = imm;
2286
2287 /* Lower 29 bits need to be 0s. */
2288 if ((imm & 0x1fffffff) != 0)
2289 return false;
2290
2291 /* Prepare the pattern for 'Eeeeeeeee'. */
2292 if (((high32 >> 30) & 0x1) == 0)
2293 pattern = 0x38000000;
2294 else
2295 pattern = 0x40000000;
2296
2297 /* Check E~~~. */
2298 if ((high32 & 0x78000000) != pattern)
2299 return false;
2300
2301 /* Check Eeee_eeee != 1111_1111. */
2302 if ((high32 & 0x7ff00000) == 0x47f00000)
2303 return false;
2304
2305 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2306 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2307 | (low32 >> 29)); /* 3 S bits. */
2308 return true;
2309 }
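
/* Worked example: the IEEE double encoding of 1.5 is 0x3ff8000000000000.
   Its low 29 bits are zero and its exponent is representable in a float,
   so the conversion succeeds and *FPWORD receives 0x3fc00000, the
   single-precision encoding of 1.5f.  */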
2310
2311 /* Return true if we should treat OPERAND as a double-precision
2312 floating-point operand rather than a single-precision one. */
2313 static bool
2314 double_precision_operand_p (const aarch64_opnd_info *operand)
2315 {
2316 /* Check for unsuffixed SVE registers, which are allowed
2317 for LDR and STR but not in instructions that require an
2318 immediate. We get better error messages if we arbitrarily
2319 pick one size, parse the immediate normally, and then
2320 report the match failure in the normal way. */
2321 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2322 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2323 }
2324
2325 /* Parse a floating-point immediate. Return TRUE on success and return the
2326 value in *IMMED as an IEEE 754 single-precision encoding.
2327 *CCP points to the start of the string; DP_P is TRUE when the immediate
2328 is expected to be in double-precision (N.B. this only matters when
2329 hexadecimal representation is involved). REG_TYPE says which register
2330 names should be treated as registers rather than as symbolic immediates.
2331
2332 This routine accepts any IEEE float; it is up to the callers to reject
2333 invalid ones. */
2334
2335 static bool
2336 parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
2337 aarch64_reg_type reg_type)
2338 {
2339 char *str = *ccp;
2340 char *fpnum;
2341 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2342 int64_t val = 0;
2343 unsigned fpword = 0;
2344 bool hex_p = false;
2345
2346 skip_past_char (&str, '#');
2347
2348 fpnum = str;
2349 skip_whitespace (fpnum);
2350
2351 if (startswith (fpnum, "0x"))
2352 {
2353 /* Support the hexadecimal representation of the IEEE754 encoding.
2354 Double-precision is expected when DP_P is TRUE, otherwise the
2355 representation should be in single-precision. */
2356 if (! parse_constant_immediate (&str, &val, reg_type))
2357 goto invalid_fp;
2358
2359 if (dp_p)
2360 {
2361 if (!can_convert_double_to_float (val, &fpword))
2362 goto invalid_fp;
2363 }
2364 else if ((uint64_t) val > 0xffffffff)
2365 goto invalid_fp;
2366 else
2367 fpword = val;
2368
2369 hex_p = true;
2370 }
2371 else if (reg_name_p (str, reg_type))
2372 {
2373 set_recoverable_error (_("immediate operand required"));
2374 return false;
2375 }
2376
2377 if (! hex_p)
2378 {
2379 int i;
2380
2381 if ((str = atof_ieee (str, 's', words)) == NULL)
2382 goto invalid_fp;
2383
2384 /* Our FP word must be 32 bits (single-precision FP). */
2385 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2386 {
2387 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2388 fpword |= words[i];
2389 }
2390 }
2391
2392 *immed = fpword;
2393 *ccp = str;
2394 return true;
2395
2396 invalid_fp:
2397 set_fatal_syntax_error (_("invalid floating-point constant"));
2398 return false;
2399 }
2400
2401 /* Less-generic immediate-value read function with the possibility of loading
2402 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2403 instructions.
2404
2405 To prevent the expression parser from pushing a register name into the
2406 symbol table as an undefined symbol, first check whether STR is a
2407 register of type REG_TYPE followed by a comma or
2408 the end of line. Return FALSE if STR is such a register. */
2409
2410 static bool
2411 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2412 {
2413 char *ptr = *str;
2414
2415 if (reg_name_p (ptr, reg_type))
2416 {
2417 set_syntax_error (_("immediate operand required"));
2418 return false;
2419 }
2420
2421 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT,
2422 NORMAL_RESOLUTION);
2423
2424 if (inst.reloc.exp.X_op == O_constant)
2425 *imm = inst.reloc.exp.X_add_number;
2426
2427 *str = ptr;
2428
2429 return true;
2430 }
2431
2432 /* Record in *RELOC that OPERAND needs a GAS-internal fixup.
2433 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2434 assistance from libopcodes. */
2435
2436 static inline void
2437 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2438 const aarch64_opnd_info *operand,
2439 int need_libopcodes_p)
2440 {
2441 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2442 reloc->opnd = operand->type;
2443 if (need_libopcodes_p)
2444 reloc->need_libopcodes_p = 1;
2445 }
2446
2447 /* Return TRUE if the instruction needs to be fixed up later internally by
2448 the GAS; otherwise return FALSE. */
2449
2450 static inline bool
2451 aarch64_gas_internal_fixup_p (void)
2452 {
2453 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2454 }
2455
2456 /* Assign the immediate value to the relevant field in *OPERAND if
2457 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2458 needs an internal fixup in a later stage.
2459 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2460 IMM.VALUE that may be assigned the constant. */
2461 static inline void
2462 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2463 aarch64_opnd_info *operand,
2464 int addr_off_p,
2465 int need_libopcodes_p,
2466 int skip_p)
2467 {
2468 if (reloc->exp.X_op == O_constant)
2469 {
2470 if (addr_off_p)
2471 operand->addr.offset.imm = reloc->exp.X_add_number;
2472 else
2473 operand->imm.value = reloc->exp.X_add_number;
2474 reloc->type = BFD_RELOC_UNUSED;
2475 }
2476 else
2477 {
2478 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2479 /* Tell libopcodes to ignore this operand or not. This is helpful
2480 when one of the operands needs to be fixed up later but we need
2481 libopcodes to check the other operands. */
2482 operand->skip = skip_p;
2483 }
2484 }
2485
2486 /* Relocation modifiers. Each entry in the table contains the textual
2487 name for the relocation which may be placed before a symbol used as
2488 a load/store offset, or add immediate. It must be surrounded by a
2489 leading and trailing colon, for example:
2490
2491 ldr x0, [x1, #:rello:varsym]
2492 add x0, x1, #:rello:varsym */
2493
2494 struct reloc_table_entry
2495 {
2496 const char *name;
2497 int pc_rel;
2498 bfd_reloc_code_real_type adr_type;
2499 bfd_reloc_code_real_type adrp_type;
2500 bfd_reloc_code_real_type movw_type;
2501 bfd_reloc_code_real_type add_type;
2502 bfd_reloc_code_real_type ldst_type;
2503 bfd_reloc_code_real_type ld_literal_type;
2504 };
2505
2506 static struct reloc_table_entry reloc_table[] =
2507 {
2508 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2509 {"lo12", 0,
2510 0, /* adr_type */
2511 0,
2512 0,
2513 BFD_RELOC_AARCH64_ADD_LO12,
2514 BFD_RELOC_AARCH64_LDST_LO12,
2515 0},
2516
2517 /* Higher 21 bits of pc-relative page offset: ADRP */
2518 {"pg_hi21", 1,
2519 0, /* adr_type */
2520 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2521 0,
2522 0,
2523 0,
2524 0},
2525
2526 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2527 {"pg_hi21_nc", 1,
2528 0, /* adr_type */
2529 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2530 0,
2531 0,
2532 0,
2533 0},
2534
2535 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2536 {"abs_g0", 0,
2537 0, /* adr_type */
2538 0,
2539 BFD_RELOC_AARCH64_MOVW_G0,
2540 0,
2541 0,
2542 0},
2543
2544 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2545 {"abs_g0_s", 0,
2546 0, /* adr_type */
2547 0,
2548 BFD_RELOC_AARCH64_MOVW_G0_S,
2549 0,
2550 0,
2551 0},
2552
2553 /* Less significant bits 0-15 of address/value: MOVK, no check */
2554 {"abs_g0_nc", 0,
2555 0, /* adr_type */
2556 0,
2557 BFD_RELOC_AARCH64_MOVW_G0_NC,
2558 0,
2559 0,
2560 0},
2561
2562 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2563 {"abs_g1", 0,
2564 0, /* adr_type */
2565 0,
2566 BFD_RELOC_AARCH64_MOVW_G1,
2567 0,
2568 0,
2569 0},
2570
2571 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2572 {"abs_g1_s", 0,
2573 0, /* adr_type */
2574 0,
2575 BFD_RELOC_AARCH64_MOVW_G1_S,
2576 0,
2577 0,
2578 0},
2579
2580 /* Less significant bits 16-31 of address/value: MOVK, no check */
2581 {"abs_g1_nc", 0,
2582 0, /* adr_type */
2583 0,
2584 BFD_RELOC_AARCH64_MOVW_G1_NC,
2585 0,
2586 0,
2587 0},
2588
2589 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2590 {"abs_g2", 0,
2591 0, /* adr_type */
2592 0,
2593 BFD_RELOC_AARCH64_MOVW_G2,
2594 0,
2595 0,
2596 0},
2597
2598 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2599 {"abs_g2_s", 0,
2600 0, /* adr_type */
2601 0,
2602 BFD_RELOC_AARCH64_MOVW_G2_S,
2603 0,
2604 0,
2605 0},
2606
2607 /* Less significant bits 32-47 of address/value: MOVK, no check */
2608 {"abs_g2_nc", 0,
2609 0, /* adr_type */
2610 0,
2611 BFD_RELOC_AARCH64_MOVW_G2_NC,
2612 0,
2613 0,
2614 0},
2615
2616 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2617 {"abs_g3", 0,
2618 0, /* adr_type */
2619 0,
2620 BFD_RELOC_AARCH64_MOVW_G3,
2621 0,
2622 0,
2623 0},
2624
2625 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2626 {"prel_g0", 1,
2627 0, /* adr_type */
2628 0,
2629 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2630 0,
2631 0,
2632 0},
2633
2634 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2635 {"prel_g0_nc", 1,
2636 0, /* adr_type */
2637 0,
2638 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2639 0,
2640 0,
2641 0},
2642
2643 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2644 {"prel_g1", 1,
2645 0, /* adr_type */
2646 0,
2647 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2648 0,
2649 0,
2650 0},
2651
2652 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2653 {"prel_g1_nc", 1,
2654 0, /* adr_type */
2655 0,
2656 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2657 0,
2658 0,
2659 0},
2660
2661 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2662 {"prel_g2", 1,
2663 0, /* adr_type */
2664 0,
2665 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2666 0,
2667 0,
2668 0},
2669
2670 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2671 {"prel_g2_nc", 1,
2672 0, /* adr_type */
2673 0,
2674 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2675 0,
2676 0,
2677 0},
2678
2679 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2680 {"prel_g3", 1,
2681 0, /* adr_type */
2682 0,
2683 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2684 0,
2685 0,
2686 0},
2687
2688 /* Get to the page containing GOT entry for a symbol. */
2689 {"got", 1,
2690 0, /* adr_type */
2691 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2692 0,
2693 0,
2694 0,
2695 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2696
2697 /* 12 bit offset into the page containing GOT entry for that symbol. */
2698 {"got_lo12", 0,
2699 0, /* adr_type */
2700 0,
2701 0,
2702 0,
2703 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2704 0},
2705
2706 /* Bits 0-15 of address/value: MOVK, no check. */
2707 {"gotoff_g0_nc", 0,
2708 0, /* adr_type */
2709 0,
2710 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2711 0,
2712 0,
2713 0},
2714
2715 /* Most significant bits 16-31 of address/value: MOVZ. */
2716 {"gotoff_g1", 0,
2717 0, /* adr_type */
2718 0,
2719 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2720 0,
2721 0,
2722 0},
2723
2724 /* 15 bit offset into the page containing GOT entry for that symbol. */
2725 {"gotoff_lo15", 0,
2726 0, /* adr_type */
2727 0,
2728 0,
2729 0,
2730 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2731 0},
2732
2733 /* Get to the page containing GOT TLS entry for a symbol */
2734 {"gottprel_g0_nc", 0,
2735 0, /* adr_type */
2736 0,
2737 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2738 0,
2739 0,
2740 0},
2741
2742 /* Get to the page containing GOT TLS entry for a symbol */
2743 {"gottprel_g1", 0,
2744 0, /* adr_type */
2745 0,
2746 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2747 0,
2748 0,
2749 0},
2750
2751 /* Get to the page containing GOT TLS entry for a symbol */
2752 {"tlsgd", 0,
2753 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2754 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2755 0,
2756 0,
2757 0,
2758 0},
2759
2760 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2761 {"tlsgd_lo12", 0,
2762 0, /* adr_type */
2763 0,
2764 0,
2765 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2766 0,
2767 0},
2768
2769 /* Lower 16 bits of address/value: MOVK. */
2770 {"tlsgd_g0_nc", 0,
2771 0, /* adr_type */
2772 0,
2773 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2774 0,
2775 0,
2776 0},
2777
2778 /* Most significant bits 16-31 of address/value: MOVZ. */
2779 {"tlsgd_g1", 0,
2780 0, /* adr_type */
2781 0,
2782 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2783 0,
2784 0,
2785 0},
2786
2787 /* Get to the page containing GOT TLS entry for a symbol */
2788 {"tlsdesc", 0,
2789 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2790 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2791 0,
2792 0,
2793 0,
2794 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2795
2796 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2797 {"tlsdesc_lo12", 0,
2798 0, /* adr_type */
2799 0,
2800 0,
2801 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2802 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2803 0},
2804
2805 /* Get to the page containing GOT TLS entry for a symbol.
2806 As with GD, we allocate two consecutive GOT slots
2807 for the module index and module offset; the only difference
2808 from GD is that the module offset should be initialized to
2809 zero without any outstanding runtime relocation. */
2810 {"tlsldm", 0,
2811 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2812 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2813 0,
2814 0,
2815 0,
2816 0},
2817
2818 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2819 {"tlsldm_lo12_nc", 0,
2820 0, /* adr_type */
2821 0,
2822 0,
2823 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2824 0,
2825 0},
2826
2827 /* 12 bit offset into the module TLS base address. */
2828 {"dtprel_lo12", 0,
2829 0, /* adr_type */
2830 0,
2831 0,
2832 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2833 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2834 0},
2835
2836 /* Same as dtprel_lo12, no overflow check. */
2837 {"dtprel_lo12_nc", 0,
2838 0, /* adr_type */
2839 0,
2840 0,
2841 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2842 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2843 0},
2844
2845 /* bits[23:12] of offset to the module TLS base address. */
2846 {"dtprel_hi12", 0,
2847 0, /* adr_type */
2848 0,
2849 0,
2850 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2851 0,
2852 0},
2853
2854 /* bits[15:0] of offset to the module TLS base address. */
2855 {"dtprel_g0", 0,
2856 0, /* adr_type */
2857 0,
2858 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2859 0,
2860 0,
2861 0},
2862
2863 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2864 {"dtprel_g0_nc", 0,
2865 0, /* adr_type */
2866 0,
2867 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2868 0,
2869 0,
2870 0},
2871
2872 /* bits[31:16] of offset to the module TLS base address. */
2873 {"dtprel_g1", 0,
2874 0, /* adr_type */
2875 0,
2876 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2877 0,
2878 0,
2879 0},
2880
2881 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2882 {"dtprel_g1_nc", 0,
2883 0, /* adr_type */
2884 0,
2885 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2886 0,
2887 0,
2888 0},
2889
2890 /* bits[47:32] of offset to the module TLS base address. */
2891 {"dtprel_g2", 0,
2892 0, /* adr_type */
2893 0,
2894 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2895 0,
2896 0,
2897 0},
2898
2899 /* Lower 16 bit offset into GOT entry for a symbol */
2900 {"tlsdesc_off_g0_nc", 0,
2901 0, /* adr_type */
2902 0,
2903 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2904 0,
2905 0,
2906 0},
2907
2908 /* Higher 16 bit offset into GOT entry for a symbol */
2909 {"tlsdesc_off_g1", 0,
2910 0, /* adr_type */
2911 0,
2912 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2913 0,
2914 0,
2915 0},
2916
2917 /* Get to the page containing GOT TLS entry for a symbol */
2918 {"gottprel", 0,
2919 0, /* adr_type */
2920 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2921 0,
2922 0,
2923 0,
2924 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2925
2926 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2927 {"gottprel_lo12", 0,
2928 0, /* adr_type */
2929 0,
2930 0,
2931 0,
2932 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2933 0},
2934
2935 /* Get tp offset for a symbol. */
2936 {"tprel", 0,
2937 0, /* adr_type */
2938 0,
2939 0,
2940 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2941 0,
2942 0},
2943
2944 /* Get tp offset for a symbol. */
2945 {"tprel_lo12", 0,
2946 0, /* adr_type */
2947 0,
2948 0,
2949 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2950 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2951 0},
2952
2953 /* Get tp offset for a symbol. */
2954 {"tprel_hi12", 0,
2955 0, /* adr_type */
2956 0,
2957 0,
2958 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2959 0,
2960 0},
2961
2962 /* Get tp offset for a symbol. */
2963 {"tprel_lo12_nc", 0,
2964 0, /* adr_type */
2965 0,
2966 0,
2967 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2968 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2969 0},
2970
2971 /* Most significant bits 32-47 of address/value: MOVZ. */
2972 {"tprel_g2", 0,
2973 0, /* adr_type */
2974 0,
2975 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2976 0,
2977 0,
2978 0},
2979
2980 /* Most significant bits 16-31 of address/value: MOVZ. */
2981 {"tprel_g1", 0,
2982 0, /* adr_type */
2983 0,
2984 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2985 0,
2986 0,
2987 0},
2988
2989 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2990 {"tprel_g1_nc", 0,
2991 0, /* adr_type */
2992 0,
2993 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2994 0,
2995 0,
2996 0},
2997
2998 /* Most significant bits 0-15 of address/value: MOVZ. */
2999 {"tprel_g0", 0,
3000 0, /* adr_type */
3001 0,
3002 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3003 0,
3004 0,
3005 0},
3006
3007 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3008 {"tprel_g0_nc", 0,
3009 0, /* adr_type */
3010 0,
3011 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3012 0,
3013 0,
3014 0},
3015
3016 /* 15-bit offset from GOT entry to base address of GOT table. */
3017 {"gotpage_lo15", 0,
3018 0,
3019 0,
3020 0,
3021 0,
3022 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3023 0},
3024
3025 /* 14-bit offset from GOT entry to base address of GOT table. */
3026 {"gotpage_lo14", 0,
3027 0,
3028 0,
3029 0,
3030 0,
3031 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3032 0},
3033 };
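
/* For example (illustrative; "var" is an arbitrary symbol), the "lo12"
   entry above is what permits

     adrp	x0, var
     add	x0, x0, #:lo12:var
     ldr	w1, [x0, #:lo12:var]

   where the ADD uses the entry's add_type and the load/store uses its
   ldst_type.  */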
3034
3035 /* Given the address of a pointer pointing to the textual name of a
3036 relocation as may appear in assembler source, attempt to find its
3037 details in reloc_table. The pointer will be updated to the character
3038 after the trailing colon. On failure, NULL will be returned;
3039 otherwise return the reloc_table_entry. */
3040
3041 static struct reloc_table_entry *
3042 find_reloc_table_entry (char **str)
3043 {
3044 unsigned int i;
3045 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3046 {
3047 int length = strlen (reloc_table[i].name);
3048
3049 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3050 && (*str)[length] == ':')
3051 {
3052 *str += (length + 1);
3053 return &reloc_table[i];
3054 }
3055 }
3056
3057 return NULL;
3058 }
3059
3060 /* Returns 0 if the relocation should never be forced,
3061 1 if the relocation must be forced, and -1 if either
3062 result is OK. */
3063
3064 static signed int
3065 aarch64_force_reloc (unsigned int type)
3066 {
3067 switch (type)
3068 {
3069 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
3070 /* Perform these "immediate" internal relocations
3071 even if the symbol is extern or weak. */
3072 return 0;
3073
3074 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
3075 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
3076 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
3077 /* Pseudo relocs that need to be fixed up according to
3078 ilp32_p. */
3079 return 0;
3080
3081 case BFD_RELOC_AARCH64_ADD_LO12:
3082 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
3083 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
3084 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
3085 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
3086 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
3087 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
3088 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
3089 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
3090 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
3091 case BFD_RELOC_AARCH64_LDST128_LO12:
3092 case BFD_RELOC_AARCH64_LDST16_LO12:
3093 case BFD_RELOC_AARCH64_LDST32_LO12:
3094 case BFD_RELOC_AARCH64_LDST64_LO12:
3095 case BFD_RELOC_AARCH64_LDST8_LO12:
3096 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
3097 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3098 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3099 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
3100 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
3101 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3102 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
3103 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
3104 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3105 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3106 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3107 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
3108 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
3109 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3110 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
3111 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3112 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3113 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
3114 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
3115 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
3116 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
3117 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
3118 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
3119 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
3120 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
3121 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
3122 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
3123 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
3124 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
3125 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
3126 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
3127 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
3128 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
3129 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
3130 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
3131 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
3132 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
3133 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
3134 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
3135 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
3136 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
3137 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
3138 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
3139 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
3140 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
3141 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
3142 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
3143 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
3144 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3145 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
3146 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3147 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
3148 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3149 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
3150 /* Always leave these relocations for the linker. */
3151 return 1;
3152
3153 default:
3154 return -1;
3155 }
3156 }
3157
3158 int
3159 aarch64_force_relocation (struct fix *fixp)
3160 {
3161 int res = aarch64_force_reloc (fixp->fx_r_type);
3162
3163 if (res == -1)
3164 return generic_force_reloc (fixp);
3165 return res;
3166 }
3167
3168 /* Mode argument to parse_shift and parse_shifter_operand. */
3169 enum parse_shift_mode
3170 {
3171 SHIFTED_NONE, /* no shifter allowed */
3172 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
3173 "#imm{,lsl #n}" */
3174 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
3175 "#imm" */
3176 SHIFTED_LSL, /* bare "lsl #n" */
3177 SHIFTED_MUL, /* bare "mul #n" */
3178 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
3179 SHIFTED_MUL_VL, /* "mul vl" */
3180 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
3181 };
3182
3183 /* Parse a <shift> operator on an AArch64 data processing instruction.
3184 Return TRUE on success; otherwise return FALSE. */
3185 static bool
3186 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3187 {
3188 const struct aarch64_name_value_pair *shift_op;
3189 enum aarch64_modifier_kind kind;
3190 expressionS exp;
3191 int exp_has_prefix;
3192 char *s = *str;
3193 char *p = s;
3194
3195 for (p = *str; ISALPHA (*p); p++)
3196 ;
3197
3198 if (p == *str)
3199 {
3200 set_syntax_error (_("shift expression expected"));
3201 return false;
3202 }
3203
3204 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3205
3206 if (shift_op == NULL)
3207 {
3208 set_syntax_error (_("shift operator expected"));
3209 return false;
3210 }
3211
3212 kind = aarch64_get_operand_modifier (shift_op);
3213
3214 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3215 {
3216 set_syntax_error (_("invalid use of 'MSL'"));
3217 return false;
3218 }
3219
3220 if (kind == AARCH64_MOD_MUL
3221 && mode != SHIFTED_MUL
3222 && mode != SHIFTED_MUL_VL)
3223 {
3224 set_syntax_error (_("invalid use of 'MUL'"));
3225 return false;
3226 }
3227
3228 switch (mode)
3229 {
3230 case SHIFTED_LOGIC_IMM:
3231 if (aarch64_extend_operator_p (kind))
3232 {
3233 set_syntax_error (_("extending shift is not permitted"));
3234 return false;
3235 }
3236 break;
3237
3238 case SHIFTED_ARITH_IMM:
3239 if (kind == AARCH64_MOD_ROR)
3240 {
3241 set_syntax_error (_("'ROR' shift is not permitted"));
3242 return false;
3243 }
3244 break;
3245
3246 case SHIFTED_LSL:
3247 if (kind != AARCH64_MOD_LSL)
3248 {
3249 set_syntax_error (_("only 'LSL' shift is permitted"));
3250 return false;
3251 }
3252 break;
3253
3254 case SHIFTED_MUL:
3255 if (kind != AARCH64_MOD_MUL)
3256 {
3257 set_syntax_error (_("only 'MUL' is permitted"));
3258 return false;
3259 }
3260 break;
3261
3262 case SHIFTED_MUL_VL:
3263 /* "MUL VL" consists of two separate tokens. Require the first
3264 token to be "MUL" and look for a following "VL". */
3265 if (kind == AARCH64_MOD_MUL)
3266 {
3267 skip_whitespace (p);
3268 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3269 {
3270 p += 2;
3271 kind = AARCH64_MOD_MUL_VL;
3272 break;
3273 }
3274 }
3275 set_syntax_error (_("only 'MUL VL' is permitted"));
3276 return false;
3277
3278 case SHIFTED_REG_OFFSET:
3279 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3280 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3281 {
3282 set_fatal_syntax_error
3283 (_("invalid shift for the register offset addressing mode"));
3284 return false;
3285 }
3286 break;
3287
3288 case SHIFTED_LSL_MSL:
3289 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3290 {
3291 set_syntax_error (_("invalid shift operator"));
3292 return false;
3293 }
3294 break;
3295
3296 default:
3297 abort ();
3298 }
3299
3300 /* Whitespace can appear here if the next thing is a bare digit. */
3301 skip_whitespace (p);
3302
3303 /* Parse shift amount. */
3304 exp_has_prefix = 0;
3305 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3306 exp.X_op = O_absent;
3307 else
3308 {
3309 if (is_immediate_prefix (*p))
3310 {
3311 p++;
3312 exp_has_prefix = 1;
3313 }
3314 (void) aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT,
3315 NORMAL_RESOLUTION);
3316 }
3317 if (kind == AARCH64_MOD_MUL_VL)
3318 /* For consistency, give MUL VL the same shift amount as an implicit
3319 MUL #1. */
3320 operand->shifter.amount = 1;
3321 else if (exp.X_op == O_absent)
3322 {
3323 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3324 {
3325 set_syntax_error (_("missing shift amount"));
3326 return false;
3327 }
3328 operand->shifter.amount = 0;
3329 }
3330 else if (exp.X_op != O_constant)
3331 {
3332 set_syntax_error (_("constant shift amount required"));
3333 return false;
3334 }
3335 /* For parsing purposes, MUL #n has no inherent range. The range
3336 depends on the operand and will be checked by operand-specific
3337 routines. */
3338 else if (kind != AARCH64_MOD_MUL
3339 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3340 {
3341 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3342 return false;
3343 }
3344 else
3345 {
3346 operand->shifter.amount = exp.X_add_number;
3347 operand->shifter.amount_present = 1;
3348 }
3349
3350 operand->shifter.operator_present = 1;
3351 operand->shifter.kind = kind;
3352
3353 *str = p;
3354 return true;
3355 }
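
/* Illustrative examples of the shift strings parsed here:
     "lsl #3"	(plain shift)
     "uxtw #2"	(extending shift, e.g. for register-offset addresses)
     "msl #8"	(only with SHIFTED_LSL_MSL)
     "mul #4"	(only with SHIFTED_MUL)
     "mul vl"	(only with SHIFTED_MUL_VL; the amount is implicitly 1)  */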
3356
3357 /* Parse a <shifter_operand> for a data processing instruction:
3358
3359 #<immediate>
3360 #<immediate>, LSL #imm
3361
3362 Validation of immediate operands is deferred to md_apply_fix.
3363
3364 Return TRUE on success; otherwise return FALSE. */
3365
3366 static bool
3367 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3368 enum parse_shift_mode mode)
3369 {
3370 char *p;
3371
3372 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3373 return false;
3374
3375 p = *str;
3376
3377 /* Accept an immediate expression. */
3378 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3379 REJECT_ABSENT, NORMAL_RESOLUTION))
3380 return false;
3381
3382 /* Accept optional LSL for arithmetic immediate values. */
3383 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3384 if (! parse_shift (&p, operand, SHIFTED_LSL))
3385 return false;
3386
3387 /* Do not accept any shifter for logical immediate values. */
3388 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3389 && parse_shift (&p, operand, mode))
3390 {
3391 set_syntax_error (_("unexpected shift operator"));
3392 return false;
3393 }
3394
3395 *str = p;
3396 return true;
3397 }
3398
3399 /* Parse a <shifter_operand> for a data processing instruction:
3400
3401 <Rm>
3402 <Rm>, <shift>
3403 #<immediate>
3404 #<immediate>, LSL #imm
3405
3406 where <shift> is handled by parse_shift above, and the last two
3407 cases are handled by the function above.
3408
3409 Validation of immediate operands is deferred to md_apply_fix.
3410
3411 Return TRUE on success; otherwise return FALSE. */
3412
3413 static bool
3414 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3415 enum parse_shift_mode mode)
3416 {
3417 const reg_entry *reg;
3418 aarch64_opnd_qualifier_t qualifier;
3419 enum aarch64_operand_class opd_class
3420 = aarch64_get_operand_class (operand->type);
3421
3422 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3423 if (reg)
3424 {
3425 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3426 {
3427 set_syntax_error (_("unexpected register in the immediate operand"));
3428 return false;
3429 }
3430
3431 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3432 {
3433 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3434 return false;
3435 }
3436
3437 operand->reg.regno = reg->number;
3438 operand->qualifier = qualifier;
3439
3440 /* Accept optional shift operation on register. */
3441 if (! skip_past_comma (str))
3442 return true;
3443
3444 if (! parse_shift (str, operand, mode))
3445 return false;
3446
3447 return true;
3448 }
3449 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3450 {
3451 set_syntax_error
3452 (_("integer register expected in the extended/shifted operand "
3453 "register"));
3454 return false;
3455 }
3456
3457 /* Otherwise we have a shifted immediate operand. */
3458 return parse_shifter_operand_imm (str, operand, mode);
3459 }
3460
3461 /* Return TRUE on success; return FALSE otherwise. */
3462
3463 static bool
3464 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3465 enum parse_shift_mode mode)
3466 {
3467 char *p = *str;
3468
3469 /* Determine if we have the sequence of characters #: or just :
3470 coming next. If we do, then we check for a :rello: relocation
3471 modifier. If we don't, punt the whole lot to
3472 parse_shifter_operand. */
3473
3474 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3475 {
3476 struct reloc_table_entry *entry;
3477
3478 if (p[0] == '#')
3479 p += 2;
3480 else
3481 p++;
3482 *str = p;
3483
3484 /* Try to parse a relocation. Anything else is an error. */
3485 if (!(entry = find_reloc_table_entry (str)))
3486 {
3487 set_syntax_error (_("unknown relocation modifier"));
3488 return false;
3489 }
3490
3491 if (entry->add_type == 0)
3492 {
3493 set_syntax_error
3494 (_("this relocation modifier is not allowed on this instruction"));
3495 return false;
3496 }
3497
3498 /* Save str before we decompose it. */
3499 p = *str;
3500
3501 /* Next, we parse the expression. */
3502 if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
3503 REJECT_ABSENT,
3504 aarch64_force_reloc (entry->add_type) == 1))
3505 return false;
3506
3507 /* Record the relocation type (use the ADD variant here). */
3508 inst.reloc.type = entry->add_type;
3509 inst.reloc.pc_rel = entry->pc_rel;
3510
3511 /* If str is empty, we've reached the end; stop here. */
3512 if (**str == '\0')
3513 return true;
3514
3515 /* Otherwise, we have a shifted reloc modifier, so rewind to
3516 recover the variable name and continue parsing for the shifter. */
3517 *str = p;
3518 return parse_shifter_operand_imm (str, operand, mode);
3519 }
3520
3521 return parse_shifter_operand (str, operand, mode);
3522 }
3523
3524 /* Parse all forms of an address expression. Information is written
3525 to *OPERAND and/or inst.reloc.
3526
3527 The A64 instruction set has the following addressing modes:
3528
3529 Offset
3530 [base] // in SIMD ld/st structure
3531 [base{,#0}] // in ld/st exclusive
3532 [base{,#imm}]
3533 [base,Xm{,LSL #imm}]
3534 [base,Xm,SXTX {#imm}]
3535 [base,Wm,(S|U)XTW {#imm}]
3536 Pre-indexed
3537 [base]! // in ldraa/ldrab
3538 [base,#imm]!
3539 Post-indexed
3540 [base],#imm
3541 [base],Xm // in SIMD ld/st structure
3542 PC-relative (literal)
3543 label
3544 SVE:
3545 [base,#imm,MUL VL]
3546 [base,Zm.D{,LSL #imm}]
3547 [base,Zm.S,(S|U)XTW {#imm}]
3548 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3549 [Zn.S,#imm]
3550 [Zn.D,#imm]
3551 [Zn.S{, Xm}]
3552 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3553 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3554 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3555
3556 (As a convenience, the notation "=immediate" is permitted in conjunction
3557 with the pc-relative literal load instructions to automatically place an
3558 immediate value or symbolic address in a nearby literal pool and generate
3559 a hidden label which references it.)
3560
3561 Upon a successful parsing, the address structure in *OPERAND will be
3562 filled in the following way:
3563
3564 .base_regno = <base>
3565 .offset.is_reg // 1 if the offset is a register
3566 .offset.imm = <imm>
3567 .offset.regno = <Rm>
3568
3569 For different addressing modes defined in the A64 ISA:
3570
3571 Offset
3572 .pcrel=0; .preind=1; .postind=0; .writeback=0
3573 Pre-indexed
3574 .pcrel=0; .preind=1; .postind=0; .writeback=1
3575 Post-indexed
3576 .pcrel=0; .preind=0; .postind=1; .writeback=1
3577 PC-relative (literal)
3578 .pcrel=1; .preind=1; .postind=0; .writeback=0
3579
3580 The shift/extension information, if any, will be stored in .shifter.
3581 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3582 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3583 corresponding register.
3584
3585 BASE_TYPE says which types of base register should be accepted and
3586 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3587 is the type of shifter that is allowed for immediate offsets,
3588 or SHIFTED_NONE if none.
3589
3590 In all other respects, it is the caller's responsibility to check
3591 for addressing modes not supported by the instruction, and to set
3592 inst.reloc.type. */
3593
3594 static bool
3595 parse_address_main (char **str, aarch64_opnd_info *operand,
3596 aarch64_opnd_qualifier_t *base_qualifier,
3597 aarch64_opnd_qualifier_t *offset_qualifier,
3598 aarch64_reg_type base_type, aarch64_reg_type offset_type,
3599 enum parse_shift_mode imm_shift_mode)
3600 {
3601 char *p = *str;
3602 const reg_entry *reg;
3603 expressionS *exp = &inst.reloc.exp;
3604
3605 *base_qualifier = AARCH64_OPND_QLF_NIL;
3606 *offset_qualifier = AARCH64_OPND_QLF_NIL;
3607 if (! skip_past_char (&p, '['))
3608 {
3609 /* =immediate or label. */
3610 operand->addr.pcrel = 1;
3611 operand->addr.preind = 1;
3612
3613 /* #:<reloc_op>:<symbol> */
3614 skip_past_char (&p, '#');
3615 if (skip_past_char (&p, ':'))
3616 {
3617 bfd_reloc_code_real_type ty;
3618 struct reloc_table_entry *entry;
3619
3620 /* Try to parse a relocation modifier. Anything else is
3621 an error. */
3622 entry = find_reloc_table_entry (&p);
3623 if (! entry)
3624 {
3625 set_syntax_error (_("unknown relocation modifier"));
3626 return false;
3627 }
3628
3629 switch (operand->type)
3630 {
3631 case AARCH64_OPND_ADDR_PCREL21:
3632 /* adr */
3633 ty = entry->adr_type;
3634 break;
3635
3636 default:
3637 ty = entry->ld_literal_type;
3638 break;
3639 }
3640
3641 if (ty == 0)
3642 {
3643 set_syntax_error
3644 (_("this relocation modifier is not allowed on this "
3645 "instruction"));
3646 return false;
3647 }
3648
3649 /* #:<reloc_op>: */
3650 if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3651 aarch64_force_reloc (entry->add_type) == 1))
3652 {
3653 set_syntax_error (_("invalid relocation expression"));
3654 return false;
3655 }
3656 /* #:<reloc_op>:<expr> */
3657 /* Record the relocation type. */
3658 inst.reloc.type = ty;
3659 inst.reloc.pc_rel = entry->pc_rel;
3660 }
3661 else
3662 {
3663 if (skip_past_char (&p, '='))
3664 /* =immediate; need to generate the literal in the literal pool. */
3665 inst.gen_lit_pool = 1;
3666
3667 if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3668 NORMAL_RESOLUTION))
3669 {
3670 set_syntax_error (_("invalid address"));
3671 return false;
3672 }
3673 }
3674
3675 *str = p;
3676 return true;
3677 }
3678
3679 /* [ */
3680
3681 reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3682 if (!reg || !aarch64_check_reg_type (reg, base_type))
3683 {
3684 set_syntax_error (_(get_reg_expected_msg (base_type)));
3685 return false;
3686 }
3687 operand->addr.base_regno = reg->number;
3688
3689 /* [Xn */
3690 if (skip_past_comma (&p))
3691 {
3692 /* [Xn, */
3693 operand->addr.preind = 1;
3694
3695 reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3696 if (reg)
3697 {
3698 if (!aarch64_check_reg_type (reg, offset_type))
3699 {
3700 set_syntax_error (_(get_reg_expected_msg (offset_type)));
3701 return false;
3702 }
3703
3704 /* [Xn,Rm */
3705 operand->addr.offset.regno = reg->number;
3706 operand->addr.offset.is_reg = 1;
3707 /* Shifted index. */
3708 if (skip_past_comma (&p))
3709 {
3710 /* [Xn,Rm, */
3711 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3712 /* Use the diagnostics set in parse_shift, so do not set a new
3713 error message here. */
3714 return false;
3715 }
3716 /* We only accept:
3717 [base,Xm] # For vector plus scalar SVE2 indexing.
3718 [base,Xm{,LSL #imm}]
3719 [base,Xm,SXTX {#imm}]
3720 [base,Wm,(S|U)XTW {#imm}] */
3721 if (operand->shifter.kind == AARCH64_MOD_NONE
3722 || operand->shifter.kind == AARCH64_MOD_LSL
3723 || operand->shifter.kind == AARCH64_MOD_SXTX)
3724 {
3725 if (*offset_qualifier == AARCH64_OPND_QLF_W)
3726 {
3727 set_syntax_error (_("invalid use of 32-bit register offset"));
3728 return false;
3729 }
3730 if (aarch64_get_qualifier_esize (*base_qualifier)
3731 != aarch64_get_qualifier_esize (*offset_qualifier)
3732 && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
3733 || *base_qualifier != AARCH64_OPND_QLF_S_S
3734 || *offset_qualifier != AARCH64_OPND_QLF_X))
3735 {
3736 set_syntax_error (_("offset has different size from base"));
3737 return false;
3738 }
3739 }
3740 else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3741 {
3742 set_syntax_error (_("invalid use of 64-bit register offset"));
3743 return false;
3744 }
3745 }
3746 else
3747 {
3748 /* [Xn,#:<reloc_op>:<symbol> */
3749 skip_past_char (&p, '#');
3750 if (skip_past_char (&p, ':'))
3751 {
3752 struct reloc_table_entry *entry;
3753
3754 /* Try to parse a relocation modifier. Anything else is
3755 an error. */
3756 if (!(entry = find_reloc_table_entry (&p)))
3757 {
3758 set_syntax_error (_("unknown relocation modifier"));
3759 return false;
3760 }
3761
3762 if (entry->ldst_type == 0)
3763 {
3764 set_syntax_error
3765 (_("this relocation modifier is not allowed on this "
3766 "instruction"));
3767 return false;
3768 }
3769
3770 /* [Xn,#:<reloc_op>: */
3771 /* We now have the group relocation table entry corresponding to
3772 the name in the assembler source. Next, we parse the
3773 expression. */
3774 if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3775 aarch64_force_reloc (entry->add_type) == 1))
3776 {
3777 set_syntax_error (_("invalid relocation expression"));
3778 return false;
3779 }
3780
3781 /* [Xn,#:<reloc_op>:<expr> */
3782 /* Record the load/store relocation type. */
3783 inst.reloc.type = entry->ldst_type;
3784 inst.reloc.pc_rel = entry->pc_rel;
3785 }
3786 else
3787 {
3788 if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
3789 NORMAL_RESOLUTION))
3790 {
3791 set_syntax_error (_("invalid expression in the address"));
3792 return false;
3793 }
3794 /* [Xn,<expr> */
3795 if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3796 /* [Xn,<expr>,<shifter> */
3797 if (! parse_shift (&p, operand, imm_shift_mode))
3798 return false;
3799 }
3800 }
3801 }
3802
3803 if (! skip_past_char (&p, ']'))
3804 {
3805 set_syntax_error (_("']' expected"));
3806 return false;
3807 }
3808
3809 if (skip_past_char (&p, '!'))
3810 {
3811 if (operand->addr.preind && operand->addr.offset.is_reg)
3812 {
3813 set_syntax_error (_("register offset not allowed in pre-indexed "
3814 "addressing mode"));
3815 return false;
3816 }
3817 /* [Xn]! */
3818 operand->addr.writeback = 1;
3819 }
3820 else if (skip_past_comma (&p))
3821 {
3822 /* [Xn], */
3823 operand->addr.postind = 1;
3824 operand->addr.writeback = 1;
3825
3826 if (operand->addr.preind)
3827 {
3828 set_syntax_error (_("cannot combine pre- and post-indexing"));
3829 return false;
3830 }
3831
3832 reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3833 if (reg)
3834 {
3835 /* [Xn],Xm */
3836 if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3837 {
3838 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3839 return false;
3840 }
3841
3842 operand->addr.offset.regno = reg->number;
3843 operand->addr.offset.is_reg = 1;
3844 }
3845 else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
3846 NORMAL_RESOLUTION))
3847 {
3848 /* [Xn],#expr */
3849 set_syntax_error (_("invalid expression in the address"));
3850 return false;
3851 }
3852 }
3853
3854 /* If at this point neither .preind nor .postind is set, we have a
3855 bare [Rn]{!}; accept [Rn]! as a shorthand for [Rn,#0]! only for LDRAA and
3856 LDRAB, and accept [Rn] as a shorthand for [Rn,#0].
3857 For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
3858 [Zn.<T>, xzr]. */
3859 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3860 {
3861 if (operand->addr.writeback)
3862 {
3863 if (operand->type == AARCH64_OPND_ADDR_SIMM10)
3864 {
3865 /* Accept [Rn]! as a shorthand for [Rn,#0]! */
3866 operand->addr.offset.is_reg = 0;
3867 operand->addr.offset.imm = 0;
3868 operand->addr.preind = 1;
3869 }
3870 else
3871 {
3872 /* Reject [Rn]! */
3873 set_syntax_error (_("missing offset in the pre-indexed address"));
3874 return false;
3875 }
3876 }
3877 else
3878 {
3879 operand->addr.preind = 1;
3880 if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
3881 {
3882 operand->addr.offset.is_reg = 1;
3883 operand->addr.offset.regno = REG_ZR;
3884 *offset_qualifier = AARCH64_OPND_QLF_X;
3885 }
3886 else
3887 {
3888 inst.reloc.exp.X_op = O_constant;
3889 inst.reloc.exp.X_add_number = 0;
3890 }
3891 }
3892 }
3893
3894 *str = p;
3895 return true;
3896 }
3897
3898 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3899 on success. */
3900 static bool
3901 parse_address (char **str, aarch64_opnd_info *operand)
3902 {
3903 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3904 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3905 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3906 }
3907
3908 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3909 The arguments have the same meaning as for parse_address_main.
3910 Return TRUE on success. */
3911 static bool
3912 parse_sve_address (char **str, aarch64_opnd_info *operand,
3913 aarch64_opnd_qualifier_t *base_qualifier,
3914 aarch64_opnd_qualifier_t *offset_qualifier)
3915 {
3916 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3917 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3918 SHIFTED_MUL_VL);
3919 }
3920
3921 /* Parse a register X0-X30. The register must be 64-bit and register 31
3922 is unallocated. */
3923 static bool
3924 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
3925 {
3926 const reg_entry *reg = parse_reg (str);
3927 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
3928 {
3929 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3930 return false;
3931 }
3932 operand->reg.regno = reg->number;
3933 operand->qualifier = AARCH64_OPND_QLF_X;
3934 return true;
3935 }
3936
3937 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3938 Return TRUE on success; otherwise return FALSE. */
3939 static bool
3940 parse_half (char **str, int *internal_fixup_p)
3941 {
3942 char *p = *str;
3943
3944 skip_past_char (&p, '#');
3945
3946 gas_assert (internal_fixup_p);
3947 *internal_fixup_p = 0;
3948
3949 if (*p == ':')
3950 {
3951 struct reloc_table_entry *entry;
3952
3953 /* Try to parse a relocation. Anything else is an error. */
3954 ++p;
3955
3956 if (!(entry = find_reloc_table_entry (&p)))
3957 {
3958 set_syntax_error (_("unknown relocation modifier"));
3959 return false;
3960 }
3961
3962 if (entry->movw_type == 0)
3963 {
3964 set_syntax_error
3965 (_("this relocation modifier is not allowed on this instruction"));
3966 return false;
3967 }
3968
3969 inst.reloc.type = entry->movw_type;
3970 }
3971 else
3972 *internal_fixup_p = 1;
3973
3974 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
3975 aarch64_force_reloc (inst.reloc.type) == 1))
3976 return false;
3977
3978 *str = p;
3979 return true;
3980 }
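
/* For instance (sketch), "movz x0, #:abs_g1:sym" selects the MOVW
   variant of the "abs_g1" table entry (BFD_RELOC_AARCH64_MOVW_G1),
   whereas a plain immediate such as "movz x0, #1234" sets
   *INTERNAL_FIXUP_P instead of a relocation type.  */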
3981
3982 /* Parse an operand for an ADRP instruction:
3983 ADRP <Xd>, <label>
3984 Return TRUE on success; otherwise return FALSE. */
3985
3986 static bool
3987 parse_adrp (char **str)
3988 {
3989 char *p;
3990
3991 p = *str;
3992 if (*p == ':')
3993 {
3994 struct reloc_table_entry *entry;
3995
3996 /* Try to parse a relocation. Anything else is an error. */
3997 ++p;
3998 if (!(entry = find_reloc_table_entry (&p)))
3999 {
4000 set_syntax_error (_("unknown relocation modifier"));
4001 return false;
4002 }
4003
4004 if (entry->adrp_type == 0)
4005 {
4006 set_syntax_error
4007 (_("this relocation modifier is not allowed on this instruction"));
4008 return false;
4009 }
4010
4011 inst.reloc.type = entry->adrp_type;
4012 }
4013 else
4014 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4015
4016 inst.reloc.pc_rel = 1;
4017 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
4018 aarch64_force_reloc (inst.reloc.type) == 1))
4019 return false;
4020 *str = p;
4021 return true;
4022 }
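
/* Typical uses (illustrative):

     adrp	x0, sym		// defaults to BFD_RELOC_AARCH64_ADR_HI21_PCREL
     adrp	x0, :got:sym	// GOT page via the "got" table entry  */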
4023
4024 /* Miscellaneous. */
4025
4026 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4027 of SIZE tokens in which index I gives the token for field value I,
4028 or is null if field value I is invalid. REG_TYPE says which register
4029 names should be treated as registers rather than as symbolic immediates.
4030
4031 Return true on success, moving *STR past the operand and storing the
4032 field value in *VAL. */
4033
4034 static int
4035 parse_enum_string (char **str, int64_t *val, const char *const *array,
4036 size_t size, aarch64_reg_type reg_type)
4037 {
4038 expressionS exp;
4039 char *p, *q;
4040 size_t i;
4041
4042 /* Match C-like tokens. */
4043 p = q = *str;
4044 while (ISALNUM (*q))
4045 q++;
4046
4047 for (i = 0; i < size; ++i)
4048 if (array[i]
4049 && strncasecmp (array[i], p, q - p) == 0
4050 && array[i][q - p] == 0)
4051 {
4052 *val = i;
4053 *str = q;
4054 return true;
4055 }
4056
4057 if (!parse_immediate_expression (&p, &exp, reg_type))
4058 return false;
4059
4060 if (exp.X_op == O_constant
4061 && (uint64_t) exp.X_add_number < size)
4062 {
4063 *val = exp.X_add_number;
4064 *str = p;
4065 return true;
4066 }
4067
4068 /* Use the default error for this operand. */
4069 return false;
4070 }
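
/* A minimal usage sketch with a hypothetical token array; either the
   symbolic name or a plain constant below SIZE is accepted:

     static const char *const names[] = { "alpha", "beta", "gamma" };
     int64_t val;

     if (parse_enum_string (&str, &val, names, ARRAY_SIZE (names),
                            imm_reg_type))
       ...   "beta" and "#1" would both yield val == 1 ...
   */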
4071
4072 /* Parse an option for a preload instruction. Returns the encoding for the
4073 option, or PARSE_FAIL. */
4074
4075 static int
4076 parse_pldop (char **str)
4077 {
4078 char *p, *q;
4079 const struct aarch64_name_value_pair *o;
4080
4081 p = q = *str;
4082 while (ISALNUM (*q))
4083 q++;
4084
4085 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4086 if (!o)
4087 return PARSE_FAIL;
4088
4089 *str = q;
4090 return o->value;
4091 }
4092
4093 /* Parse an option for a barrier instruction. Returns the encoding for the
4094 option, or PARSE_FAIL. */
4095
4096 static int
4097 parse_barrier (char **str)
4098 {
4099 char *p, *q;
4100 const struct aarch64_name_value_pair *o;
4101
4102 p = q = *str;
4103 while (ISALPHA (*q))
4104 q++;
4105
4106 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4107 if (!o)
4108 return PARSE_FAIL;
4109
4110 *str = q;
4111 return o->value;
4112 }
4113
4114 /* Parse an operand for a PSB/TSB barrier.  Set *HINT_OPT to the hint-option
4115 record and return 0 on success.  Otherwise return PARSE_FAIL.  */
4116
4117 static int
4118 parse_barrier_psb (char **str,
4119 const struct aarch64_name_value_pair ** hint_opt)
4120 {
4121 char *p, *q;
4122 const struct aarch64_name_value_pair *o;
4123
4124 p = q = *str;
4125 while (ISALPHA (*q))
4126 q++;
4127
4128 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4129 if (!o)
4130 {
4131 set_fatal_syntax_error
4132 ( _("unknown or missing option to PSB/TSB"));
4133 return PARSE_FAIL;
4134 }
4135
4136 if (o->value != 0x11)
4137 {
4138 /* PSB only accepts option name 'CSYNC'. */
4139 set_syntax_error
4140 (_("the specified option is not accepted for PSB/TSB"));
4141 return PARSE_FAIL;
4142 }
4143
4144 *str = q;
4145 *hint_opt = o;
4146 return 0;
4147 }
4148
4149 /* Parse an operand for BTI.  Set *HINT_OPT to the hint-option record and
4150 return 0 on success.  Otherwise return PARSE_FAIL.  */
4151
4152 static int
4153 parse_bti_operand (char **str,
4154 const struct aarch64_name_value_pair ** hint_opt)
4155 {
4156 char *p, *q;
4157 const struct aarch64_name_value_pair *o;
4158
4159 p = q = *str;
4160 while (ISALPHA (*q))
4161 q++;
4162
4163 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4164 if (!o)
4165 {
4166 set_fatal_syntax_error
4167 ( _("unknown option to BTI"));
4168 return PARSE_FAIL;
4169 }
4170
4171 switch (o->value)
4172 {
4173 /* Valid BTI operands. */
4174 case HINT_OPD_C:
4175 case HINT_OPD_J:
4176 case HINT_OPD_JC:
4177 break;
4178
4179 default:
4180 set_syntax_error
4181 (_("unknown option to BTI"));
4182 return PARSE_FAIL;
4183 }
4184
4185 *str = q;
4186 *hint_opt = o;
4187 return 0;
4188 }
4189
4190 /* Parse STR for a register of type REG_TYPE followed by '.' and a size
4191 qualifier.  On success the function returns the REG_ENTRY struct and sets
4192 *QUALIFIER to one of [bhsdq]; on failure it returns NULL.  Format:
4193
4194 REG_TYPE.QUALIFIER
4195
4196 Side effect: on success *STR is updated to the current parse position.
4197 */
4198
4199 static const reg_entry *
4200 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4201 aarch64_opnd_qualifier_t *qualifier)
4202 {
4203 char *q;
4204
4205 reg_entry *reg = parse_reg (str);
4206 if (reg != NULL && reg->type == reg_type)
4207 {
4208 if (!skip_past_char (str, '.'))
4209 {
4210 set_syntax_error (_("missing ZA tile element size separator"));
4211 return NULL;
4212 }
4213
4214 q = *str;
4215 switch (TOLOWER (*q))
4216 {
4217 case 'b':
4218 *qualifier = AARCH64_OPND_QLF_S_B;
4219 break;
4220 case 'h':
4221 *qualifier = AARCH64_OPND_QLF_S_H;
4222 break;
4223 case 's':
4224 *qualifier = AARCH64_OPND_QLF_S_S;
4225 break;
4226 case 'd':
4227 *qualifier = AARCH64_OPND_QLF_S_D;
4228 break;
4229 case 'q':
4230 *qualifier = AARCH64_OPND_QLF_S_Q;
4231 break;
4232 default:
4233 return NULL;
4234 }
4235 q++;
4236
4237 *str = q;
4238 return reg;
4239 }
4240
4241 return NULL;
4242 }
4243
4244 /* Parse an SME ZA tile encoded in the <ZAda> assembler symbol and set
4245 *QUALIFIER to the tile element size on success.
4246
4247 Tiles have the format: za[0-9]\.[bhsd]
4248
4249 Function returns the <ZAda> register number or PARSE_FAIL.
4250 */
4251 static int
4252 parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
4253 {
4254 int regno;
4255 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZA, qualifier);
4256
4257 if (reg == NULL)
4258 return PARSE_FAIL;
4259 regno = reg->number;
4260
4261 switch (*qualifier)
4262 {
4263 case AARCH64_OPND_QLF_S_B:
4264 if (regno != 0x00)
4265 {
4266 set_syntax_error (_("invalid ZA tile register number, expected za0"));
4267 return PARSE_FAIL;
4268 }
4269 break;
4270 case AARCH64_OPND_QLF_S_H:
4271 if (regno > 0x01)
4272 {
4273 set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
4274 return PARSE_FAIL;
4275 }
4276 break;
4277 case AARCH64_OPND_QLF_S_S:
4278 if (regno > 0x03)
4279 {
4280 /* For the 32-bit variant the ZA tile name is ZA0-ZA3. */
4281 set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
4282 return PARSE_FAIL;
4283 }
4284 break;
4285 case AARCH64_OPND_QLF_S_D:
4286 if (regno > 0x07)
4287 {
4288 /* For the 64-bit variant the ZA tile name is ZA0-ZA7. */
4289 set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
4290 return PARSE_FAIL;
4291 }
4292 break;
4293 default:
4294 set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
4295 return PARSE_FAIL;
4296 }
4297
4298 return regno;
4299 }
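
/* For instance, "za3.s" yields register number 3 with *QUALIFIER set to
   AARCH64_OPND_QLF_S_S (valid, since .s tiles are za0-za3), whereas
   "za3.h" is rejected because .h tiles are limited to za0-za1.  */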
4300
4301 /* Parse STR for an unsigned immediate (1-2 digits) in one of the formats:
4302
4303 #<imm>
4304 <imm>
4305
4306 Function returns TRUE if an immediate was found, FALSE otherwise.
4307 */
4308 static bool
4309 parse_sme_immediate (char **str, int64_t *imm)
4310 {
4311 int64_t val;
4312 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4313 return false;
4314
4315 *imm = val;
4316 return true;
4317 }
4318
4319 /* Parse an index with a vector select register and an immediate:
4320
4321 [<Wv>, <imm>]
4322 [<Wv>, #<imm>]
4323 where <Wv> is in the W12-W15 range and the '#' before the immediate is
4324 optional.
4325
4326 Both the register and the immediate are mandatory; range checking of the
4327 immediate is left to the callers.
4328
4329 On success the function returns TRUE and populates *VECTOR_SELECT_REGISTER
4330 and *IMM.
4331 */
4331 static bool
4332 parse_sme_za_hv_tiles_operand_index (char **str,
4333 int *vector_select_register,
4334 int64_t *imm)
4335 {
4336 const reg_entry *reg;
4337
4338 if (!skip_past_char (str, '['))
4339 {
4340 set_syntax_error (_("expected '['"));
4341 return false;
4342 }
4343
4344 /* Vector select register W12-W15 encoded in the 2-bit Rv field. */
4345 reg = parse_reg (str);
4346 if (reg == NULL || reg->type != REG_TYPE_R_32
4347 || reg->number < 12 || reg->number > 15)
4348 {
4349 set_syntax_error (_("expected vector select register W12-W15"));
4350 return false;
4351 }
4352 *vector_select_register = reg->number;
4353
4354 if (!skip_past_char (str, ',')) /* Index offset immediate follows. */
4355 {
4356 set_syntax_error (_("expected ','"));
4357 return false;
4358 }
4359
4360 if (!parse_sme_immediate (str, imm))
4361 {
4362 set_syntax_error (_("index offset immediate expected"));
4363 return false;
4364 }
4365
4366 if (!skip_past_char (str, ']'))
4367 {
4368 set_syntax_error (_("expected ']'"));
4369 return false;
4370 }
4371
4372 return true;
4373 }
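
/* For example, parsing "[w13, #7]" stores 13 in *VECTOR_SELECT_REGISTER
   and 7 in *IMM; "[w13, 7]" is equally valid since the '#' is optional.
   Checking the immediate against the per-variant limit is left to the
   callers.  */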
4374
4375 /* Parse an SME ZA horizontal or vertical tile slice access.
4376 On success the function sets *SLICE_INDICATOR to the <HV> orientation,
4377 horizontal (0) or vertical (1), *VECTOR_SELECT_REGISTER to the <Wv>
4378 select register, *IMM to the index offset immediate and *QUALIFIER to
4379 the element size.
4380
4381 Field format examples:
4382
4383 ZA0<HV>.B[<Wv>, #<imm>]
4384 <ZAn><HV>.H[<Wv>, #<imm>]
4385 <ZAn><HV>.S[<Wv>, #<imm>]
4386 <ZAn><HV>.D[<Wv>, #<imm>]
4387 <ZAn><HV>.Q[<Wv>, #<imm>]
4388
4389 Function returns <ZAda> register number or PARSE_FAIL.
4390 */
4391 static int
4392 parse_sme_za_hv_tiles_operand (char **str,
4393 enum sme_hv_slice *slice_indicator,
4394 int *vector_select_register,
4395 int *imm,
4396 aarch64_opnd_qualifier_t *qualifier)
4397 {
4398 char *qh, *qv;
4399 int regno;
4400 int regno_limit;
4401 int64_t imm_limit;
4402 int64_t imm_value;
4403 const reg_entry *reg;
4404
4405 qh = qv = *str;
4406 if ((reg = parse_reg_with_qual (&qh, REG_TYPE_ZAH, qualifier)) != NULL)
4407 {
4408 *slice_indicator = HV_horizontal;
4409 *str = qh;
4410 }
4411 else if ((reg = parse_reg_with_qual (&qv, REG_TYPE_ZAV, qualifier)) != NULL)
4412 {
4413 *slice_indicator = HV_vertical;
4414 *str = qv;
4415 }
4416 else
4417 return PARSE_FAIL;
4418 regno = reg->number;
4419
4420 switch (*qualifier)
4421 {
4422 case AARCH64_OPND_QLF_S_B:
4423 regno_limit = 0;
4424 imm_limit = 15;
4425 break;
4426 case AARCH64_OPND_QLF_S_H:
4427 regno_limit = 1;
4428 imm_limit = 7;
4429 break;
4430 case AARCH64_OPND_QLF_S_S:
4431 regno_limit = 3;
4432 imm_limit = 3;
4433 break;
4434 case AARCH64_OPND_QLF_S_D:
4435 regno_limit = 7;
4436 imm_limit = 1;
4437 break;
4438 case AARCH64_OPND_QLF_S_Q:
4439 regno_limit = 15;
4440 imm_limit = 0;
4441 break;
4442 default:
4443 set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
4444 return PARSE_FAIL;
4445 }
4446
4447 /* Check that the ZA tile register number is in range for the given
4448 instruction variant. */
4449 if (regno < 0 || regno > regno_limit)
4450 {
4451 set_syntax_error (_("ZA tile vector out of range"));
4452 return PARSE_FAIL;
4453 }
4454
4455 if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
4456 &imm_value))
4457 return PARSE_FAIL;
4458
4459 /* Check that the index offset is in range for the instruction
4460 variant. */
4461 if (imm_value < 0 || imm_value > imm_limit)
4462 {
4463 set_syntax_error (_("index offset out of range"));
4464 return PARSE_FAIL;
4465 }
4466
4467 *imm = imm_value;
4468
4469 return regno;
4470 }
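
/* To illustrate the limits above: "za1v.h[w13, #5]" is accepted (a
   vertical slice of tile 1, where .h allows za0-za1 and offsets 0-7),
   while "za2h.h[w13, #5]" and "za1v.h[w13, #9]" are rejected for
   exceeding REGNO_LIMIT and IMM_LIMIT respectively.  */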
4471
4472
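/* Parse a ZA tile slice wrapped in curly braces, as used for example in
   SME load/store operands:

     { <ZAn><HV>.<T>[<Wv>, #<imm>] }

   The body is handled by parse_sme_za_hv_tiles_operand above; this wrapper
   only consumes the surrounding '{' and '}'.  Returns the tile register
   number or PARSE_FAIL.  */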
4473 static int
4474 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4475 enum sme_hv_slice *slice_indicator,
4476 int *vector_select_register,
4477 int *imm,
4478 aarch64_opnd_qualifier_t *qualifier)
4479 {
4480 int regno;
4481
4482 if (!skip_past_char (str, '{'))
4483 {
4484 set_syntax_error (_("expected '{'"));
4485 return PARSE_FAIL;
4486 }
4487
4488 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4489 vector_select_register, imm,
4490 qualifier);
4491
4492 if (regno == PARSE_FAIL)
4493 return PARSE_FAIL;
4494
4495 if (!skip_past_char (str, '}'))
4496 {
4497 set_syntax_error (_("expected '}'"));
4498 return PARSE_FAIL;
4499 }
4500
4501 return regno;
4502 }
4503
4504 /* Parse the list of up to eight tile names separated by commas that forms
4505 the <mask> operand of SME's ZERO instruction:
4506
4507 ZERO { <mask> }
4508
4509 Function returns <mask>:
4510
4511 an 8-bit immediate in which bit n represents the 64-bit element tile ZAn.D.
4512 */
4513 static int
4514 parse_sme_zero_mask(char **str)
4515 {
4516 char *q;
4517 int mask;
4518 aarch64_opnd_qualifier_t qualifier;
4519
4520 mask = 0x00;
4521 q = *str;
4522 do
4523 {
4524 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA, &qualifier);
4525 if (reg)
4526 {
4527 int regno = reg->number;
4528 if (qualifier == AARCH64_OPND_QLF_S_B && regno == 0)
4529 {
4530 /* { ZA0.B } is assembled as all-ones immediate. */
4531 mask = 0xff;
4532 }
4533 else if (qualifier == AARCH64_OPND_QLF_S_H && regno < 2)
4534 mask |= 0x55 << regno;
4535 else if (qualifier == AARCH64_OPND_QLF_S_S && regno < 4)
4536 mask |= 0x11 << regno;
4537 else if (qualifier == AARCH64_OPND_QLF_S_D && regno < 8)
4538 mask |= 0x01 << regno;
4539 else
4540 {
4541 set_syntax_error (_("wrong ZA tile element format"));
4542 return PARSE_FAIL;
4543 }
4544 continue;
4545 }
4546 else if (strncasecmp (q, "za", 2) == 0
4547 && !ISALNUM (q[2]))
4548 {
4549 /* { ZA } is assembled as all-ones immediate. */
4550 mask = 0xff;
4551 q += 2;
4552 continue;
4553 }
4554 else
4555 {
4556 set_syntax_error (_("wrong ZA tile element format"));
4557 return PARSE_FAIL;
4558 }
4559 }
4560 while (skip_past_char (&q, ','));
4561
4562 *str = q;
4563 return mask;
4564 }
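
/* Worked examples of the mask encoding above: "za0.h" gives 0x55 and
   "za1.h" gives 0xaa; "za2.s" gives 0x44; "za3.d" gives 0x08; a list such
   as "za0.h, za1.d" accumulates to 0x57; "za" or "za0.b" forces the
   all-ones mask 0xff.  */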
4565
4566 /* Parse the curly-brace wrapped <mask> operand of the ZERO instruction:
4567
4568 ZERO { <mask> }
4569
4570 Function returns the value of the <mask> bit-field, or PARSE_FAIL.
4571 */
4572 static int
4573 parse_sme_list_of_64bit_tiles (char **str)
4574 {
4575 int regno;
4576
4577 if (!skip_past_char (str, '{'))
4578 {
4579 set_syntax_error (_("expected '{'"));
4580 return PARSE_FAIL;
4581 }
4582
4583 /* Empty <mask> list is an all-zeros immediate. */
4584 if (!skip_past_char (str, '}'))
4585 {
4586 regno = parse_sme_zero_mask (str);
4587 if (regno == PARSE_FAIL)
4588 return PARSE_FAIL;
4589
4590 if (!skip_past_char (str, '}'))
4591 {
4592 set_syntax_error (_("expected '}'"));
4593 return PARSE_FAIL;
4594 }
4595 }
4596 else
4597 regno = 0x00;
4598
4599 return regno;
4600 }
4601
4602 /* Parse the ZA array operand used in e.g. the STR and LDR instructions.
4603 Operand format:
4604
4605 ZA[<Wv>, <imm>]
4606 ZA[<Wv>, #<imm>]
4607
4608 Function returns <Wv> or PARSE_FAIL.
4609 */
4610 static int
4611 parse_sme_za_array (char **str, int *imm)
4612 {
4613 char *p, *q;
4614 int regno;
4615 int64_t imm_value;
4616
4617 p = q = *str;
4618 while (ISALPHA (*q))
4619 q++;
4620
4621 if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
4622 {
4623 set_syntax_error (_("expected ZA array"));
4624 return PARSE_FAIL;
4625 }
4626
4627 if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
4628 return PARSE_FAIL;
4629
4630 if (imm_value < 0 || imm_value > 15)
4631 {
4632 set_syntax_error (_("offset out of range"));
4633 return PARSE_FAIL;
4634 }
4635
4636 *imm = imm_value;
4637 *str = q;
4638 return regno;
4639 }
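
/* For example, "za[w14, #7]" returns 14 (the <Wv> register number) and
   stores 7 in *IMM, while "za[w14, #16]" is rejected because the offset
   must be in the range 0-15.  */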
4640
4641 /* Parse streaming mode operand for SMSTART and SMSTOP.
4642
4643 {SM | ZA}
4644
4645 Function returns 's' if SM or 'z' if ZA is parsed. Otherwise PARSE_FAIL.
4646 */
4647 static int
4648 parse_sme_sm_za (char **str)
4649 {
4650 char *p, *q;
4651
4652 p = q = *str;
4653 while (ISALPHA (*q))
4654 q++;
4655
4656 if ((q - p != 2)
4657 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4658 {
4659 set_syntax_error (_("expected SM or ZA operand"));
4660 return PARSE_FAIL;
4661 }
4662
4663 *str = q;
4664 return TOLOWER (p[0]);
4665 }
4666
4667 /* Parse the name of the source scalable predicate register, the index base
4668 register W12-W15 and the element index. Function performs element index
4669 limit checks as well as qualifier type checks.
4670
4671 <Pn>.<T>[<Wv>, <imm>]
4672 <Pn>.<T>[<Wv>, #<imm>]
4673
4674 On success the function sets *INDEX_BASE_REG to <Wv>, *QUALIFIER to <T>
4675 and *IMM to <imm>.
4676 Function returns <Pn>, or PARSE_FAIL.
4677 */
4678 static int
4679 parse_sme_pred_reg_with_index(char **str,
4680 int *index_base_reg,
4681 int *imm,
4682 aarch64_opnd_qualifier_t *qualifier)
4683 {
4684 int regno;
4685 int64_t imm_limit;
4686 int64_t imm_value;
4687 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_PN, qualifier);
4688
4689 if (reg == NULL)
4690 return PARSE_FAIL;
4691 regno = reg->number;
4692
4693 switch (*qualifier)
4694 {
4695 case AARCH64_OPND_QLF_S_B:
4696 imm_limit = 15;
4697 break;
4698 case AARCH64_OPND_QLF_S_H:
4699 imm_limit = 7;
4700 break;
4701 case AARCH64_OPND_QLF_S_S:
4702 imm_limit = 3;
4703 break;
4704 case AARCH64_OPND_QLF_S_D:
4705 imm_limit = 1;
4706 break;
4707 default:
4708 set_syntax_error (_("wrong predicate register element size, allowed b, h, s and d"));
4709 return PARSE_FAIL;
4710 }
4711
4712 if (! parse_sme_za_hv_tiles_operand_index (str, index_base_reg, &imm_value))
4713 return PARSE_FAIL;
4714
4715 if (imm_value < 0 || imm_value > imm_limit)
4716 {
4717 set_syntax_error (_("element index out of range for given variant"));
4718 return PARSE_FAIL;
4719 }
4720
4721 *imm = imm_value;
4722
4723 return regno;
4724 }
4725
4726 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4727 Returns the encoding for the option, or PARSE_FAIL.
4728
4729 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4730 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4731
4732 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4733 field, otherwise as a system register.
4734 */
4735
4736 static int
4737 parse_sys_reg (char **str, htab_t sys_regs,
4738 int imple_defined_p, int pstatefield_p,
4739 uint32_t* flags)
4740 {
4741 char *p, *q;
4742 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4743 const aarch64_sys_reg *o;
4744 int value;
4745
4746 p = buf;
4747 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4748 if (p < buf + (sizeof (buf) - 1))
4749 *p++ = TOLOWER (*q);
4750 *p = '\0';
4751
4752 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4753 valid system register. This is enforced by construction of the hash
4754 table. */
4755 if (p - buf != q - *str)
4756 return PARSE_FAIL;
4757
4758 o = str_hash_find (sys_regs, buf);
4759 if (!o)
4760 {
4761 if (!imple_defined_p)
4762 return PARSE_FAIL;
4763 else
4764 {
4765 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
4766 unsigned int op0, op1, cn, cm, op2;
4767
4768 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
4769 != 5)
4770 return PARSE_FAIL;
4771 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
4772 return PARSE_FAIL;
4773 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
4774 if (flags)
4775 *flags = 0;
4776 }
4777 }
4778 else
4779 {
4780 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
4781 as_bad (_("selected processor does not support PSTATE field "
4782 "name '%s'"), buf);
4783 if (!pstatefield_p
4784 && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
4785 o->value, o->flags, o->features))
4786 as_bad (_("selected processor does not support system register "
4787 "name '%s'"), buf);
4788 if (aarch64_sys_reg_deprecated_p (o->flags))
4789 as_warn (_("system register name '%s' is deprecated and may be "
4790 "removed in a future release"), buf);
4791 value = o->value;
4792 if (flags)
4793 *flags = o->flags;
4794 }
4795
4796 *str = q;
4797 return value;
4798 }
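
/* As a worked example of the implementation defined form above,
   "s3_0_c2_c0_0" is encoded as (3 << 14) | (0 << 11) | (2 << 7)
   | (0 << 3) | 0 == 0xc100.  Named registers instead take their value
   and flags from the entry found in SYS_REGS.  */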
4799
4800 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4801 for the option, or NULL. */
4802
4803 static const aarch64_sys_ins_reg *
4804 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4805 {
4806 char *p, *q;
4807 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4808 const aarch64_sys_ins_reg *o;
4809
4810 p = buf;
4811 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4812 if (p < buf + (sizeof (buf) - 1))
4813 *p++ = TOLOWER (*q);
4814 *p = '\0';
4815
4816 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4817 valid system register. This is enforced by construction of the hash
4818 table. */
4819 if (p - buf != q - *str)
4820 return NULL;
4821
4822 o = str_hash_find (sys_ins_regs, buf);
4823 if (!o)
4824 return NULL;
4825
4826 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4827 o->name, o->value, o->flags, 0))
4828 as_bad (_("selected processor does not support system register "
4829 "name '%s'"), buf);
4830 if (aarch64_sys_reg_deprecated_p (o->flags))
4831 as_warn (_("system register name '%s' is deprecated and may be "
4832 "removed in a future release"), buf);
4833
4834 *str = q;
4835 return o;
4836 }
4837 \f
4838 #define po_char_or_fail(chr) do { \
4839 if (! skip_past_char (&str, chr)) \
4840 goto failure; \
4841 } while (0)
4842
4843 #define po_reg_or_fail(regtype) do { \
4844 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
4845 if (val == PARSE_FAIL) \
4846 { \
4847 set_default_error (); \
4848 goto failure; \
4849 } \
4850 } while (0)
4851
4852 #define po_int_reg_or_fail(reg_type) do { \
4853 reg = aarch64_reg_parse_32_64 (&str, &qualifier); \
4854 if (!reg || !aarch64_check_reg_type (reg, reg_type)) \
4855 { \
4856 set_default_error (); \
4857 goto failure; \
4858 } \
4859 info->reg.regno = reg->number; \
4860 info->qualifier = qualifier; \
4861 } while (0)
4862
4863 #define po_imm_nc_or_fail() do { \
4864 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4865 goto failure; \
4866 } while (0)
4867
4868 #define po_imm_or_fail(min, max) do { \
4869 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4870 goto failure; \
4871 if (val < min || val > max) \
4872 { \
4873 set_fatal_syntax_error (_("immediate value out of range "\
4874 #min " to "#max)); \
4875 goto failure; \
4876 } \
4877 } while (0)
4878
4879 #define po_enum_or_fail(array) do { \
4880 if (!parse_enum_string (&str, &val, array, \
4881 ARRAY_SIZE (array), imm_reg_type)) \
4882 goto failure; \
4883 } while (0)
4884
4885 #define po_misc_or_fail(expr) do { \
4886 if (!expr) \
4887 goto failure; \
4888 } while (0)
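
/* The po_* macros above are only meant for use inside parse_operands,
   where `str', `val', `reg', `qualifier', `info', `imm_reg_type' and the
   `failure' label are in scope.  An illustrative (not literal) sequence
   for a "<Xn>, #<imm>" operand pair might look like:

     po_int_reg_or_fail (REG_TYPE_R_64);
     po_char_or_fail (',');
     po_imm_or_fail (0, 63);
   */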
4889 \f
4890 /* Encode the 12-bit imm field of Add/sub immediate.  */
4891 static inline uint32_t
4892 encode_addsub_imm (uint32_t imm)
4893 {
4894 return imm << 10;
4895 }
4896
4897 /* Encode the shift amount field of Add/sub immediate.  */
4898 static inline uint32_t
4899 encode_addsub_imm_shift_amount (uint32_t cnt)
4900 {
4901 return cnt << 22;
4902 }
4903
4904
4905 /* Encode the imm field of Adr instruction.  */
4906 static inline uint32_t
4907 encode_adr_imm (uint32_t imm)
4908 {
4909 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
4910 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
4911 }
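
/* For instance, encode_adr_imm (0x5) places immlo = 1 at bits [30:29] and
   immhi = 1 at bits [23:5], i.e. (1 << 29) | (1 << 5) == 0x20000020.  */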
4912
4913 /* Encode the immediate field of Move wide immediate.  */
4914 static inline uint32_t
4915 encode_movw_imm (uint32_t imm)
4916 {
4917 return imm << 5;
4918 }
4919
4920 /* Encode the 26-bit offset of unconditional branch.  */
4921 static inline uint32_t
4922 encode_branch_ofs_26 (uint32_t ofs)
4923 {
4924 return ofs & ((1 << 26) - 1);
4925 }
4926
4927 /* Encode the 19-bit offset of conditional branch and compare & branch.  */
4928 static inline uint32_t
4929 encode_cond_branch_ofs_19 (uint32_t ofs)
4930 {
4931 return (ofs & ((1 << 19) - 1)) << 5;
4932 }
4933
4934 /* Encode the 19-bit offset of ld literal.  */
4935 static inline uint32_t
4936 encode_ld_lit_ofs_19 (uint32_t ofs)
4937 {
4938 return (ofs & ((1 << 19) - 1)) << 5;
4939 }
4940
4941 /* Encode the 14-bit offset of test & branch. */
4942 static inline uint32_t
4943 encode_tst_branch_ofs_14 (uint32_t ofs)
4944 {
4945 return (ofs & ((1 << 14) - 1)) << 5;
4946 }
4947
4948 /* Encode the 16-bit imm field of svc/hvc/smc. */
4949 static inline uint32_t
4950 encode_svc_imm (uint32_t imm)
4951 {
4952 return imm << 5;
4953 }
4954
4955 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
4956 static inline uint32_t
4957 reencode_addsub_switch_add_sub (uint32_t opcode)
4958 {
4959 return opcode ^ (1 << 30);
4960 }
4961
4962 static inline uint32_t
4963 reencode_movzn_to_movz (uint32_t opcode)
4964 {
4965 return opcode | (1 << 30);
4966 }
4967
4968 static inline uint32_t
4969 reencode_movzn_to_movn (uint32_t opcode)
4970 {
4971 return opcode & ~(1 << 30);
4972 }
4973
4974 /* Overall per-instruction processing. */
4975
4976 /* We need to be able to fix up arbitrary expressions in some statements.
4977 This is so that we can handle symbols that are an arbitrary distance from
4978 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4979 which returns part of an address in a form which will be valid for
4980 a data instruction. We do this by pushing the expression into a symbol
4981 in the expr_section, and creating a fix for that. */
4982
4983 static fixS *
4984 fix_new_aarch64 (fragS * frag,
4985 int where,
4986 short int size,
4987 expressionS * exp,
4988 int pc_rel,
4989 int reloc)
4990 {
4991 fixS *new_fix;
4992
4993 switch (exp->X_op)
4994 {
4995 case O_constant:
4996 case O_symbol:
4997 case O_add:
4998 case O_subtract:
4999 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5000 break;
5001
5002 default:
5003 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5004 pc_rel, reloc);
5005 break;
5006 }
5007 return new_fix;
5008 }
5009 \f
5010 /* Diagnostics on operands errors. */
5011
5012 /* By default, output a verbose error message.
5013 Verbose error messages are disabled with -mno-verbose-error. */
5014 static int verbose_error_p = 1;
5015
5016 #ifdef DEBUG_AARCH64
5017 /* N.B. this is only for the purpose of debugging. */
5018 const char* operand_mismatch_kind_names[] =
5019 {
5020 "AARCH64_OPDE_NIL",
5021 "AARCH64_OPDE_RECOVERABLE",
5022 "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
5023 "AARCH64_OPDE_EXPECTED_A_AFTER_B",
5024 "AARCH64_OPDE_SYNTAX_ERROR",
5025 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
5026 "AARCH64_OPDE_INVALID_VARIANT",
5027 "AARCH64_OPDE_OUT_OF_RANGE",
5028 "AARCH64_OPDE_UNALIGNED",
5029 "AARCH64_OPDE_REG_LIST",
5030 "AARCH64_OPDE_OTHER_ERROR",
5031 };
5032 #endif /* DEBUG_AARCH64 */
5033
5034 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
5035
5036 When multiple errors of different kinds are found in the same assembly
5037 line, only the error of the highest severity will be picked up for
5038 issuing the diagnostics. */
5039
5040 static inline bool
5041 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
5042 enum aarch64_operand_error_kind rhs)
5043 {
5044 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
5045 gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
5046 gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
5047 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
5048 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
5049 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
5050 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
5051 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
5052 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
5053 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
5054 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
5055 return lhs > rhs;
5056 }
5057
5058 /* Helper routine to get the mnemonic name from the assembly instruction
5059 line; it should only be called for diagnostic purposes, as a string copy
5060 operation is involved, which may affect runtime performance if used
5061 elsewhere. */
5062
5063 static const char*
5064 get_mnemonic_name (const char *str)
5065 {
5066 static char mnemonic[32];
5067 char *ptr;
5068
5069 /* Get the first 31 bytes and assume that the full name is included. */
5070 strncpy (mnemonic, str, 31);
5071 mnemonic[31] = '\0';
5072
5073 /* Scan up to the end of the mnemonic, which must end in white space,
5074 '.', or end of string. */
5075 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
5076 ;
5077
5078 *ptr = '\0';
5079
5080 /* Append '...' to the truncated long name. */
5081 if (ptr - mnemonic == 31)
5082 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
5083
5084 return mnemonic;
5085 }
5086
5087 static void
5088 reset_aarch64_instruction (aarch64_instruction *instruction)
5089 {
5090 memset (instruction, '\0', sizeof (aarch64_instruction));
5091 instruction->reloc.type = BFD_RELOC_UNUSED;
5092 }
5093
5094 /* Data structures storing one user error in the assembly code related to
5095 operands. */
5096
5097 struct operand_error_record
5098 {
5099 const aarch64_opcode *opcode;
5100 aarch64_operand_error detail;
5101 struct operand_error_record *next;
5102 };
5103
5104 typedef struct operand_error_record operand_error_record;
5105
5106 struct operand_errors
5107 {
5108 operand_error_record *head;
5109 operand_error_record *tail;
5110 };
5111
5112 typedef struct operand_errors operand_errors;
5113
5114 /* Top-level data structure reporting user errors for the current line of
5115 the assembly code.
5116 The way md_assemble works is that all opcodes sharing the same mnemonic
5117 name are iterated over to find a match for the assembly line. In this
5118 data structure, each of those opcodes will have one operand_error_record
5119 allocated and inserted. In other words, excessive errors related to
5120 a single opcode are disregarded. */
5121 operand_errors operand_error_report;
5122
5123 /* Free record nodes. */
5124 static operand_error_record *free_opnd_error_record_nodes = NULL;
5125
5126 /* Initialize the data structure that stores the operand mismatch
5127 information on assembling one line of the assembly code. */
5128 static void
5129 init_operand_error_report (void)
5130 {
5131 if (operand_error_report.head != NULL)
5132 {
5133 gas_assert (operand_error_report.tail != NULL);
5134 operand_error_report.tail->next = free_opnd_error_record_nodes;
5135 free_opnd_error_record_nodes = operand_error_report.head;
5136 operand_error_report.head = NULL;
5137 operand_error_report.tail = NULL;
5138 return;
5139 }
5140 gas_assert (operand_error_report.tail == NULL);
5141 }
5142
5143 /* Return TRUE if some operand error has been recorded during the
5144 parsing of the current assembly line using the opcode *OPCODE;
5145 otherwise return FALSE. */
5146 static inline bool
5147 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5148 {
5149 operand_error_record *record = operand_error_report.head;
5150 return record && record->opcode == opcode;
5151 }
5152
5153 /* Add the error record *NEW_RECORD to operand_error_report. The record's
5154 OPCODE field is initialized from NEW_RECORD->opcode.
5155 N.B. there is only one record for each opcode, i.e. at most one error is
5156 recorded for each instruction template. */
5157
5158 static void
5159 add_operand_error_record (const operand_error_record* new_record)
5160 {
5161 const aarch64_opcode *opcode = new_record->opcode;
5162 operand_error_record* record = operand_error_report.head;
5163
5164 /* The record may have been created for this opcode. If not, we need
5165 to prepare one. */
5166 if (! opcode_has_operand_error_p (opcode))
5167 {
5168 /* Get one empty record. */
5169 if (free_opnd_error_record_nodes == NULL)
5170 {
5171 record = XNEW (operand_error_record);
5172 }
5173 else
5174 {
5175 record = free_opnd_error_record_nodes;
5176 free_opnd_error_record_nodes = record->next;
5177 }
5178 record->opcode = opcode;
5179 /* Insert at the head. */
5180 record->next = operand_error_report.head;
5181 operand_error_report.head = record;
5182 if (operand_error_report.tail == NULL)
5183 operand_error_report.tail = record;
5184 }
5185 else if (record->detail.kind != AARCH64_OPDE_NIL
5186 && record->detail.index <= new_record->detail.index
5187 && operand_error_higher_severity_p (record->detail.kind,
5188 new_record->detail.kind))
5189 {
5190 /* In the case of multiple errors found on operands related to a
5191 single opcode, only record the error of the leftmost operand and
5192 only if the error is of higher severity. */
5193 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
5194 " the existing error %s on operand %d",
5195 operand_mismatch_kind_names[new_record->detail.kind],
5196 new_record->detail.index,
5197 operand_mismatch_kind_names[record->detail.kind],
5198 record->detail.index);
5199 return;
5200 }
5201
5202 record->detail = new_record->detail;
5203 }
5204
5205 static inline void
5206 record_operand_error_info (const aarch64_opcode *opcode,
5207 aarch64_operand_error *error_info)
5208 {
5209 operand_error_record record;
5210 record.opcode = opcode;
5211 record.detail = *error_info;
5212 add_operand_error_record (&record);
5213 }
5214
5215 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5216 error message *ERROR, for operand IDX (count from 0). */
5217
5218 static void
5219 record_operand_error (const aarch64_opcode *opcode, int idx,
5220 enum aarch64_operand_error_kind kind,
5221 const char* error)
5222 {
5223 aarch64_operand_error info;
5224 memset(&info, 0, sizeof (info));
5225 info.index = idx;
5226 info.kind = kind;
5227 info.error = error;
5228 info.non_fatal = false;
5229 record_operand_error_info (opcode, &info);
5230 }
5231
5232 static void
5233 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5234 enum aarch64_operand_error_kind kind,
5235 const char* error, const int *extra_data)
5236 {
5237 aarch64_operand_error info;
5238 info.index = idx;
5239 info.kind = kind;
5240 info.error = error;
5241 info.data[0].i = extra_data[0];
5242 info.data[1].i = extra_data[1];
5243 info.data[2].i = extra_data[2];
5244 info.non_fatal = false;
5245 record_operand_error_info (opcode, &info);
5246 }
5247
5248 static void
5249 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5250 const char* error, int lower_bound,
5251 int upper_bound)
5252 {
5253 int data[3] = {lower_bound, upper_bound, 0};
5254 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5255 error, data);
5256 }
5257
5258 /* Remove the operand error record for *OPCODE. */
5259 static void ATTRIBUTE_UNUSED
5260 remove_operand_error_record (const aarch64_opcode *opcode)
5261 {
5262 if (opcode_has_operand_error_p (opcode))
5263 {
5264 operand_error_record* record = operand_error_report.head;
5265 gas_assert (record != NULL && operand_error_report.tail != NULL);
5266 operand_error_report.head = record->next;
5267 record->next = free_opnd_error_record_nodes;
5268 free_opnd_error_record_nodes = record;
5269 if (operand_error_report.head == NULL)
5270 {
5271 gas_assert (operand_error_report.tail == record);
5272 operand_error_report.tail = NULL;
5273 }
5274 }
5275 }
5276
5277 /* Given the instruction in *INSTR, return the index of the best matched
5278 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5279
5280 Return -1 if there is no qualifier sequence; return the first match
5281 if multiple matches are found. */
5282
5283 static int
5284 find_best_match (const aarch64_inst *instr,
5285 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
5286 {
5287 int i, num_opnds, max_num_matched, idx;
5288
5289 num_opnds = aarch64_num_of_operands (instr->opcode);
5290 if (num_opnds == 0)
5291 {
5292 DEBUG_TRACE ("no operand");
5293 return -1;
5294 }
5295
5296 max_num_matched = 0;
5297 idx = 0;
5298
5299 /* For each pattern. */
5300 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5301 {
5302 int j, num_matched;
5303 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
5304
5305 /* Most opcodes have far fewer patterns in the list. */
5306 if (empty_qualifier_sequence_p (qualifiers))
5307 {
5308 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
5309 break;
5310 }
5311
5312 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
5313 if (*qualifiers == instr->operands[j].qualifier)
5314 ++num_matched;
5315
5316 if (num_matched > max_num_matched)
5317 {
5318 max_num_matched = num_matched;
5319 idx = i;
5320 }
5321 }
5322
5323 DEBUG_TRACE ("return with %d", idx);
5324 return idx;
5325 }
5326
5327 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5328 corresponding operands in *INSTR. */
5329
5330 static inline void
5331 assign_qualifier_sequence (aarch64_inst *instr,
5332 const aarch64_opnd_qualifier_t *qualifiers)
5333 {
5334 int i = 0;
5335 int num_opnds = aarch64_num_of_operands (instr->opcode);
5336 gas_assert (num_opnds);
5337 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5338 instr->operands[i].qualifier = *qualifiers;
5339 }
5340
5341 /* Print operands for diagnostic purposes. */
5342
5343 static void
5344 print_operands (char *buf, const aarch64_opcode *opcode,
5345 const aarch64_opnd_info *opnds)
5346 {
5347 int i;
5348
5349 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
5350 {
5351 char str[128];
5352
5353 /* We rely primarily on the opcode's operand info; however, we also look
5354 into inst->operands to support the printing of an optional
5355 operand.
5356 The two operand codes should be the same in all cases, apart from
5357 when the operand can be optional. */
5358 if (opcode->operands[i] == AARCH64_OPND_NIL
5359 || opnds[i].type == AARCH64_OPND_NIL)
5360 break;
5361
5362 /* Generate the operand string in STR. */
5363 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
5364 NULL, cpu_variant);
5365
5366 /* Delimiter. */
5367 if (str[0] != '\0')
5368 strcat (buf, i == 0 ? " " : ", ");
5369
5370 /* Append the operand string. */
5371 strcat (buf, str);
5372 }
5373 }
5374
5375 /* Send a string to stderr as information. */
5376
5377 static void
5378 output_info (const char *format, ...)
5379 {
5380 const char *file;
5381 unsigned int line;
5382 va_list args;
5383
5384 file = as_where (&line);
5385 if (file)
5386 {
5387 if (line != 0)
5388 fprintf (stderr, "%s:%u: ", file, line);
5389 else
5390 fprintf (stderr, "%s: ", file);
5391 }
5392 fprintf (stderr, _("Info: "));
5393 va_start (args, format);
5394 vfprintf (stderr, format, args);
5395 va_end (args);
5396 (void) putc ('\n', stderr);
5397 }
5398
5399 /* Output one operand error record. */
5400
5401 static void
5402 output_operand_error_record (const operand_error_record *record, char *str)
5403 {
5404 const aarch64_operand_error *detail = &record->detail;
5405 int idx = detail->index;
5406 const aarch64_opcode *opcode = record->opcode;
5407 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
5408 : AARCH64_OPND_NIL);
5409
5410 typedef void (*handler_t)(const char *format, ...);
5411 handler_t handler = detail->non_fatal ? as_warn : as_bad;
5412
5413 switch (detail->kind)
5414 {
5415 case AARCH64_OPDE_NIL:
5416 gas_assert (0);
5417 break;
5418
5419 case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
5420 handler (_("this `%s' should have an immediately preceding `%s'"
5421 " -- `%s'"),
5422 detail->data[0].s, detail->data[1].s, str);
5423 break;
5424
5425 case AARCH64_OPDE_EXPECTED_A_AFTER_B:
5426 handler (_("the preceding `%s' should be followed by `%s` rather"
5427 " than `%s` -- `%s'"),
5428 detail->data[1].s, detail->data[0].s, opcode->name, str);
5429 break;
5430
5431 case AARCH64_OPDE_SYNTAX_ERROR:
5432 case AARCH64_OPDE_RECOVERABLE:
5433 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
5434 case AARCH64_OPDE_OTHER_ERROR:
5435 /* Use the prepared error message if there is one, otherwise use the
5436 operand description string to describe the error. */
5437 if (detail->error != NULL)
5438 {
5439 if (idx < 0)
5440 handler (_("%s -- `%s'"), detail->error, str);
5441 else
5442 handler (_("%s at operand %d -- `%s'"),
5443 detail->error, idx + 1, str);
5444 }
5445 else
5446 {
5447 gas_assert (idx >= 0);
5448 handler (_("operand %d must be %s -- `%s'"), idx + 1,
5449 aarch64_get_operand_desc (opd_code), str);
5450 }
5451 break;
5452
5453 case AARCH64_OPDE_INVALID_VARIANT:
5454 handler (_("operand mismatch -- `%s'"), str);
5455 if (verbose_error_p)
5456 {
5457 /* We will try to correct the erroneous instruction and also provide
5458 more information e.g. all other valid variants.
5459
5460 The string representation of the corrected instruction and other
5461 valid variants are generated by
5462
5463 1) obtaining the intermediate representation of the erroneous
5464 instruction;
5465 2) manipulating the IR, e.g. replacing the operand qualifier;
5466 3) printing out the instruction by calling the printer functions
5467 shared with the disassembler.
5468
5469 The limitation of this method is that the exact input assembly
5470 line cannot be accurately reproduced in some cases, for example an
5471 optional operand present in the actual assembly line will be
5472 omitted in the output; likewise for the optional syntax rules,
5473 e.g. the # before the immediate. Another limitation is that the
5474 assembly symbols and relocation operations in the assembly line
5475 currently cannot be printed out in the error report. Last but not
5476 least, when other errors co-exist with this error, the
5477 'corrected' instruction may still be incorrect, e.g. given
5478 'ldnp h0,h1,[x0,#6]!'
5479 this diagnosis will provide the version:
5480 'ldnp s0,s1,[x0,#6]!'
5481 which is still not right. */
5482 size_t len = strlen (get_mnemonic_name (str));
5483 int i, qlf_idx;
5484 bool result;
5485 char buf[2048];
5486 aarch64_inst *inst_base = &inst.base;
5487 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
5488
5489 /* Init inst. */
5490 reset_aarch64_instruction (&inst);
5491 inst_base->opcode = opcode;
5492
5493 /* Reset the error report so that there is no side effect on the
5494 following operand parsing. */
5495 init_operand_error_report ();
5496
5497 /* Fill inst. */
5498 result = parse_operands (str + len, opcode)
5499 && programmer_friendly_fixup (&inst);
5500 gas_assert (result);
5501 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
5502 NULL, NULL, insn_sequence);
5503 gas_assert (!result);
5504
5505 /* Find the best-matched qualifier sequence. */
5506 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
5507 gas_assert (qlf_idx > -1);
5508
5509 /* Assign the qualifiers. */
5510 assign_qualifier_sequence (inst_base,
5511 opcode->qualifiers_list[qlf_idx]);
5512
5513 /* Print the hint. */
5514 output_info (_(" did you mean this?"));
5515 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5516 print_operands (buf, opcode, inst_base->operands);
5517 output_info (_(" %s"), buf);
5518
5519 /* Print out other variant(s) if there are any. */
5520 if (qlf_idx != 0 ||
5521 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
5522 output_info (_(" other valid variant(s):"));
5523
5524 /* For each pattern. */
5525 qualifiers_list = opcode->qualifiers_list;
5526 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5527 {
5528 /* Most opcodes have far fewer patterns in the list.
5529 The first NIL qualifier indicates the end of the list. */
5530 if (empty_qualifier_sequence_p (*qualifiers_list))
5531 break;
5532
5533 if (i != qlf_idx)
5534 {
5535 /* Mnemonic name. */
5536 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5537
5538 /* Assign the qualifiers. */
5539 assign_qualifier_sequence (inst_base, *qualifiers_list);
5540
5541 /* Print instruction. */
5542 print_operands (buf, opcode, inst_base->operands);
5543
5544 output_info (_(" %s"), buf);
5545 }
5546 }
5547 }
5548 break;
5549
5550 case AARCH64_OPDE_UNTIED_IMMS:
5551 handler (_("operand %d must have the same immediate value "
5552 "as operand 1 -- `%s'"),
5553 detail->index + 1, str);
5554 break;
5555
5556 case AARCH64_OPDE_UNTIED_OPERAND:
5557 handler (_("operand %d must be the same register as operand 1 -- `%s'"),
5558 detail->index + 1, str);
5559 break;
5560
5561 case AARCH64_OPDE_OUT_OF_RANGE:
5562 if (detail->data[0].i != detail->data[1].i)
5563 handler (_("%s out of range %d to %d at operand %d -- `%s'"),
5564 detail->error ? detail->error : _("immediate value"),
5565 detail->data[0].i, detail->data[1].i, idx + 1, str);
5566 else
5567 handler (_("%s must be %d at operand %d -- `%s'"),
5568 detail->error ? detail->error : _("immediate value"),
5569 detail->data[0].i, idx + 1, str);
5570 break;
5571
5572 case AARCH64_OPDE_REG_LIST:
5573 if (detail->data[0].i == 1)
5574 handler (_("invalid number of registers in the list; "
5575 "only 1 register is expected at operand %d -- `%s'"),
5576 idx + 1, str);
5577 else
5578 handler (_("invalid number of registers in the list; "
5579 "%d registers are expected at operand %d -- `%s'"),
5580 detail->data[0].i, idx + 1, str);
5581 break;
5582
5583 case AARCH64_OPDE_UNALIGNED:
5584 handler (_("immediate value must be a multiple of "
5585 "%d at operand %d -- `%s'"),
5586 detail->data[0].i, idx + 1, str);
5587 break;
5588
5589 default:
5590 gas_assert (0);
5591 break;
5592 }
5593 }
5594
5595 /* Process and output the error message about an operand mismatch.
5596
5597 When this function is called, the operand error information has
5598 been collected for an assembly line and there may be multiple
5599 errors when there are multiple instruction templates; output the
5600 error message that most closely describes the problem.
5601
5602 The errors to be printed can be filtered to print either all errors
5603 or only non-fatal errors. This distinction has to be made because
5604 the error buffer may already be filled with fatal errors we don't want to
5605 print due to the different instruction templates. */
5606
5607 static void
5608 output_operand_error_report (char *str, bool non_fatal_only)
5609 {
5610 int largest_error_pos;
5611 const char *msg = NULL;
5612 enum aarch64_operand_error_kind kind;
5613 operand_error_record *curr;
5614 operand_error_record *head = operand_error_report.head;
5615 operand_error_record *record = NULL;
5616
5617 /* No error to report. */
5618 if (head == NULL)
5619 return;
5620
5621 gas_assert (head != NULL && operand_error_report.tail != NULL);
5622
5623 /* Only one error. */
5624 if (head == operand_error_report.tail)
5625 {
5626 /* If the only error is a non-fatal one and we don't want to print it,
5627 just exit. */
5628 if (!non_fatal_only || head->detail.non_fatal)
5629 {
5630 DEBUG_TRACE ("single opcode entry with error kind: %s",
5631 operand_mismatch_kind_names[head->detail.kind]);
5632 output_operand_error_record (head, str);
5633 }
5634 return;
5635 }
5636
5637 /* Find the error kind of the highest severity. */
5638 DEBUG_TRACE ("multiple opcode entries with error kind");
5639 kind = AARCH64_OPDE_NIL;
5640 for (curr = head; curr != NULL; curr = curr->next)
5641 {
5642 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
5643 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
5644 if (operand_error_higher_severity_p (curr->detail.kind, kind)
5645 && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
5646 kind = curr->detail.kind;
5647 }
5648
5649 gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
5650
5651 /* Pick one of the errors of KIND to report. */
5652 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
5653 for (curr = head; curr != NULL; curr = curr->next)
5654 {
5655 /* If we don't want to print non-fatal errors then don't consider them
5656 at all. */
5657 if (curr->detail.kind != kind
5658 || (non_fatal_only && !curr->detail.non_fatal))
5659 continue;
5660 /* If there are multiple errors, pick up the one with the highest
5661 mismatching operand index. In the case of multiple errors with
5662 the same highest operand index, pick the first one, or the
5663 first one with a non-NULL error message. */
5664 if (curr->detail.index > largest_error_pos
5665 || (curr->detail.index == largest_error_pos && msg == NULL
5666 && curr->detail.error != NULL))
5667 {
5668 largest_error_pos = curr->detail.index;
5669 record = curr;
5670 msg = record->detail.error;
5671 }
5672 }
5673
5674 /* The way errors are collected in the back-end is a bit non-intuitive. But
5675 essentially, because each operand template is tried recursively you may
5676 always have errors collected from the previously tried operands. These are
5677 usually skipped if there is one successful match. However, now with the
5678 non-fatal errors we have to ignore those previously collected hard errors
5679 when we're only interested in printing the non-fatal ones. This condition
5680 prevents us from printing errors that are not appropriate, since we did
5681 match a template, but it also has warnings that it wants to print. */
5682 if (non_fatal_only && !record)
5683 return;
5684
5685 gas_assert (largest_error_pos != -2 && record != NULL);
5686 DEBUG_TRACE ("Pick up error kind %s to report",
5687 operand_mismatch_kind_names[record->detail.kind]);
5688
5689 /* Output. */
5690 output_operand_error_record (record, str);
5691 }
5692 \f
5693 /* Write an AARCH64 instruction to buf - always little-endian. */
5694 static void
5695 put_aarch64_insn (char *buf, uint32_t insn)
5696 {
5697 unsigned char *where = (unsigned char *) buf;
5698 where[0] = insn;
5699 where[1] = insn >> 8;
5700 where[2] = insn >> 16;
5701 where[3] = insn >> 24;
5702 }
5703
5704 static uint32_t
5705 get_aarch64_insn (char *buf)
5706 {
5707 unsigned char *where = (unsigned char *) buf;
5708 uint32_t result;
5709 result = ((where[0] | (where[1] << 8) | (where[2] << 16)
5710 | ((uint32_t) where[3] << 24)));
5711 return result;
5712 }
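
/* For example, the A64 NOP encoding 0xd503201f is written by
   put_aarch64_insn as the byte sequence 0x1f 0x20 0x03 0xd5, and
   get_aarch64_insn reassembles the same value from those bytes,
   independently of the host's endianness.  */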
5713
5714 static void
5715 output_inst (struct aarch64_inst *new_inst)
5716 {
5717 char *to = NULL;
5718
5719 to = frag_more (INSN_SIZE);
5720
5721 frag_now->tc_frag_data.recorded = 1;
5722
5723 put_aarch64_insn (to, inst.base.value);
5724
5725 if (inst.reloc.type != BFD_RELOC_UNUSED)
5726 {
5727 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
5728 INSN_SIZE, &inst.reloc.exp,
5729 inst.reloc.pc_rel,
5730 inst.reloc.type);
5731 DEBUG_TRACE ("Prepared relocation fix up");
5732 /* Don't check the addend value against the instruction size,
5733 that's the job of our code in md_apply_fix(). */
5734 fixp->fx_no_overflow = 1;
5735 if (new_inst != NULL)
5736 fixp->tc_fix_data.inst = new_inst;
5737 if (aarch64_gas_internal_fixup_p ())
5738 {
5739 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
5740 fixp->tc_fix_data.opnd = inst.reloc.opnd;
5741 fixp->fx_addnumber = inst.reloc.flags;
5742 }
5743 }
5744
5745 dwarf2_emit_insn (INSN_SIZE);
5746 }
5747
5748 /* Link together opcodes of the same name. */
5749
5750 struct templates
5751 {
5752 const aarch64_opcode *opcode;
5753 struct templates *next;
5754 };
5755
5756 typedef struct templates templates;
5757
5758 static templates *
5759 lookup_mnemonic (const char *start, int len)
5760 {
5761 templates *templ = NULL;
5762
5763 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5764 return templ;
5765 }
5766
5767 /* Subroutine of md_assemble, responsible for looking up the primary
5768 opcode from the mnemonic the user wrote. BASE points to the beginning
5769 of the mnemonic, DOT points to the first '.' within the mnemonic
5770 (if any) and END points to the end of the mnemonic. */
5771
5772 static templates *
5773 opcode_lookup (char *base, char *dot, char *end)
5774 {
5775 const aarch64_cond *cond;
5776 char condname[16];
5777 int len;
5778
5779 if (dot == end)
5780 return 0;
5781
5782 inst.cond = COND_ALWAYS;
5783
5784 /* Handle a possible condition. */
5785 if (dot)
5786 {
5787 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5788 if (!cond)
5789 return 0;
5790 inst.cond = cond->value;
5791 len = dot - base;
5792 }
5793 else
5794 len = end - base;
5795
5796 if (inst.cond == COND_ALWAYS)
5797 {
5798 /* Look for unaffixed mnemonic. */
5799 return lookup_mnemonic (base, len);
5800 }
5801 else if (len <= 13)
5802 {
5803 /* Append ".c" to the mnemonic if conditional. */
5804 memcpy (condname, base, len);
5805 memcpy (condname + len, ".c", 2);
5806 base = condname;
5807 len += 2;
5808 return lookup_mnemonic (base, len);
5809 }
5810
5811 return NULL;
5812 }
5813
5814 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5815 to a corresponding operand qualifier. */
5816
5817 static inline aarch64_opnd_qualifier_t
5818 vectype_to_qualifier (const struct vector_type_el *vectype)
5819 {
5820 /* Element size in bytes indexed by vector_el_type. */
5821 const unsigned char ele_size[5]
5822 = {1, 2, 4, 8, 16};
5823 const unsigned int ele_base [5] =
5824 {
5825 AARCH64_OPND_QLF_V_4B,
5826 AARCH64_OPND_QLF_V_2H,
5827 AARCH64_OPND_QLF_V_2S,
5828 AARCH64_OPND_QLF_V_1D,
5829 AARCH64_OPND_QLF_V_1Q
5830 };
5831
5832 if (!vectype->defined || vectype->type == NT_invtype)
5833 goto vectype_conversion_fail;
5834
5835 if (vectype->type == NT_zero)
5836 return AARCH64_OPND_QLF_P_Z;
5837 if (vectype->type == NT_merge)
5838 return AARCH64_OPND_QLF_P_M;
5839
5840 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
5841
5842 if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
5843 {
5844 /* Special case S_4B. */
5845 if (vectype->type == NT_b && vectype->width == 4)
5846 return AARCH64_OPND_QLF_S_4B;
5847
5848 /* Special case S_2H. */
5849 if (vectype->type == NT_h && vectype->width == 2)
5850 return AARCH64_OPND_QLF_S_2H;
5851
5852 /* Vector element register. */
5853 return AARCH64_OPND_QLF_S_B + vectype->type;
5854 }
5855 else
5856 {
5857 /* Vector register. */
5858 int reg_size = ele_size[vectype->type] * vectype->width;
5859 unsigned offset;
5860 unsigned shift;
5861 if (reg_size != 16 && reg_size != 8 && reg_size != 4)
5862 goto vectype_conversion_fail;
5863
5864 /* The conversion is done by calculating the offset from the base operand
5865 qualifier for the vector type. The operand qualifiers are regular
5866 enough that the offset can be established by shifting the vector width by
5867 a vector-type dependent amount. */
5868 shift = 0;
5869 if (vectype->type == NT_b)
5870 shift = 3;
5871 else if (vectype->type == NT_h || vectype->type == NT_s)
5872 shift = 2;
5873 else if (vectype->type >= NT_d)
5874 shift = 1;
5875 else
5876 gas_assert (0);
5877
5878 offset = ele_base [vectype->type] + (vectype->width >> shift);
5879 gas_assert (AARCH64_OPND_QLF_V_4B <= offset
5880 && offset <= AARCH64_OPND_QLF_V_1Q);
5881 return offset;
5882 }
5883
5884 vectype_conversion_fail:
5885 first_error (_("bad vector arrangement type"));
5886 return AARCH64_OPND_QLF_NIL;
5887 }
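
/* A worked example of the vector-register branch above, relying on the
   ordering of the vector qualifiers in the enum: a ".4s" arrangement has
   type NT_s and width 4, so reg_size = 4 * 4 = 16, shift = 2 and
   offset = AARCH64_OPND_QLF_V_2S + (4 >> 2), i.e. the qualifier
   immediately after 2S, which is the 4S arrangement.  */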
5888
5889 /* Process an optional operand that has been omitted from the assembly line.
5890 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5891 instruction's opcode entry while IDX is the index of this omitted operand.
5892 */
5893
5894 static void
5895 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
5896 int idx, aarch64_opnd_info *operand)
5897 {
5898 aarch64_insn default_value = get_optional_operand_default_value (opcode);
5899 gas_assert (optional_operand_p (opcode, idx));
5900 gas_assert (!operand->present);
5901
5902 switch (type)
5903 {
5904 case AARCH64_OPND_Rd:
5905 case AARCH64_OPND_Rn:
5906 case AARCH64_OPND_Rm:
5907 case AARCH64_OPND_Rt:
5908 case AARCH64_OPND_Rt2:
5909 case AARCH64_OPND_Rt_LS64:
5910 case AARCH64_OPND_Rt_SP:
5911 case AARCH64_OPND_Rs:
5912 case AARCH64_OPND_Ra:
5913 case AARCH64_OPND_Rt_SYS:
5914 case AARCH64_OPND_Rd_SP:
5915 case AARCH64_OPND_Rn_SP:
5916 case AARCH64_OPND_Rm_SP:
5917 case AARCH64_OPND_Fd:
5918 case AARCH64_OPND_Fn:
5919 case AARCH64_OPND_Fm:
5920 case AARCH64_OPND_Fa:
5921 case AARCH64_OPND_Ft:
5922 case AARCH64_OPND_Ft2:
5923 case AARCH64_OPND_Sd:
5924 case AARCH64_OPND_Sn:
5925 case AARCH64_OPND_Sm:
5926 case AARCH64_OPND_Va:
5927 case AARCH64_OPND_Vd:
5928 case AARCH64_OPND_Vn:
5929 case AARCH64_OPND_Vm:
5930 case AARCH64_OPND_VdD1:
5931 case AARCH64_OPND_VnD1:
5932 operand->reg.regno = default_value;
5933 break;
5934
5935 case AARCH64_OPND_Ed:
5936 case AARCH64_OPND_En:
5937 case AARCH64_OPND_Em:
5938 case AARCH64_OPND_Em16:
5939 case AARCH64_OPND_SM3_IMM2:
5940 operand->reglane.regno = default_value;
5941 break;
5942
5943 case AARCH64_OPND_IDX:
5944 case AARCH64_OPND_BIT_NUM:
5945 case AARCH64_OPND_IMMR:
5946 case AARCH64_OPND_IMMS:
5947 case AARCH64_OPND_SHLL_IMM:
5948 case AARCH64_OPND_IMM_VLSL:
5949 case AARCH64_OPND_IMM_VLSR:
5950 case AARCH64_OPND_CCMP_IMM:
5951 case AARCH64_OPND_FBITS:
5952 case AARCH64_OPND_UIMM4:
5953 case AARCH64_OPND_UIMM3_OP1:
5954 case AARCH64_OPND_UIMM3_OP2:
5955 case AARCH64_OPND_IMM:
5956 case AARCH64_OPND_IMM_2:
5957 case AARCH64_OPND_WIDTH:
5958 case AARCH64_OPND_UIMM7:
5959 case AARCH64_OPND_NZCV:
5960 case AARCH64_OPND_SVE_PATTERN:
5961 case AARCH64_OPND_SVE_PRFOP:
5962 operand->imm.value = default_value;
5963 break;
5964
5965 case AARCH64_OPND_SVE_PATTERN_SCALED:
5966 operand->imm.value = default_value;
5967 operand->shifter.kind = AARCH64_MOD_MUL;
5968 operand->shifter.amount = 1;
5969 break;
5970
5971 case AARCH64_OPND_EXCEPTION:
5972 inst.reloc.type = BFD_RELOC_UNUSED;
5973 break;
5974
5975 case AARCH64_OPND_BARRIER_ISB:
5976 operand->barrier = aarch64_barrier_options + default_value;
5977 break;
5978
5979 case AARCH64_OPND_BTI_TARGET:
5980 operand->hint_option = aarch64_hint_options + default_value;
5981 break;
5982
5983 default:
5984 break;
5985 }
5986 }
5987
5988 /* Process the relocation type for move wide instructions.
5989 Return TRUE on success; otherwise return FALSE. */
5990
5991 static bool
5992 process_movw_reloc_info (void)
5993 {
5994 int is32;
5995 unsigned shift;
5996
5997 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
5998
5999 if (inst.base.opcode->op == OP_MOVK)
6000 switch (inst.reloc.type)
6001 {
6002 case BFD_RELOC_AARCH64_MOVW_G0_S:
6003 case BFD_RELOC_AARCH64_MOVW_G1_S:
6004 case BFD_RELOC_AARCH64_MOVW_G2_S:
6005 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
6006 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
6007 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
6008 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
6009 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6010 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6011 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6012 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6013 set_syntax_error
6014 (_("the specified relocation type is not allowed for MOVK"));
6015 return false;
6016 default:
6017 break;
6018 }
6019
6020 switch (inst.reloc.type)
6021 {
6022 case BFD_RELOC_AARCH64_MOVW_G0:
6023 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6024 case BFD_RELOC_AARCH64_MOVW_G0_S:
6025 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
6026 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
6027 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
6028 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
6029 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
6030 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
6031 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
6032 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6033 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6034 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6035 shift = 0;
6036 break;
6037 case BFD_RELOC_AARCH64_MOVW_G1:
6038 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6039 case BFD_RELOC_AARCH64_MOVW_G1_S:
6040 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
6041 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
6042 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
6043 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
6044 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
6045 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
6046 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
6047 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
6048 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6049 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6050 shift = 16;
6051 break;
6052 case BFD_RELOC_AARCH64_MOVW_G2:
6053 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6054 case BFD_RELOC_AARCH64_MOVW_G2_S:
6055 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
6056 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
6057 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
6058 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6059 if (is32)
6060 {
6061 set_fatal_syntax_error
6062 (_("the specified relocation type is not allowed for 32-bit "
6063 "register"));
6064 return false;
6065 }
6066 shift = 32;
6067 break;
6068 case BFD_RELOC_AARCH64_MOVW_G3:
6069 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
6070 if (is32)
6071 {
6072 set_fatal_syntax_error
6073 (_("the specified relocation type is not allowed for 32-bit "
6074 "register"));
6075 return false;
6076 }
6077 shift = 48;
6078 break;
6079 default:
6080 /* More cases should be added when more MOVW-related relocation types
6081 are supported in GAS. */
6082 gas_assert (aarch64_gas_internal_fixup_p ());
6083 /* The shift amount should have already been set by the parser. */
6084 return true;
6085 }
6086 inst.base.operands[1].shifter.amount = shift;
6087 return true;
6088 }
6089
6090 /* A primitive base-2 log calculator for operand sizes of 1 to 16 bytes. */
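/* For example: get_logsz (1) == 0, get_logsz (2) == 1, get_logsz (4) == 2,
   get_logsz (8) == 3 and get_logsz (16) == 4; any other size asserts.  */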
6091
6092 static inline unsigned int
6093 get_logsz (unsigned int size)
6094 {
6095 const unsigned char ls[16] =
6096 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
6097 if (size > 16)
6098 {
6099 gas_assert (0);
6100 return -1;
6101 }
6102 gas_assert (ls[size - 1] != (unsigned char)-1);
6103 return ls[size - 1];
6104 }
6105
6106 /* Determine and return the real reloc type code for an instruction
6107 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
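/* For example (illustrative): for "ldr x0, [x1, #:lo12:sym]" the 64-bit
   qualifier gives logsz == 3, so BFD_RELOC_AARCH64_LDST_LO12 is narrowed
   to BFD_RELOC_AARCH64_LDST64_LO12.  */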
6108
6109 static inline bfd_reloc_code_real_type
6110 ldst_lo12_determine_real_reloc_type (void)
6111 {
6112 unsigned logsz, max_logsz;
6113 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
6114 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
6115
6116 const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
6117 {
6118 BFD_RELOC_AARCH64_LDST8_LO12,
6119 BFD_RELOC_AARCH64_LDST16_LO12,
6120 BFD_RELOC_AARCH64_LDST32_LO12,
6121 BFD_RELOC_AARCH64_LDST64_LO12,
6122 BFD_RELOC_AARCH64_LDST128_LO12
6123 },
6124 {
6125 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
6126 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
6127 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
6128 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
6129 BFD_RELOC_AARCH64_NONE
6130 },
6131 {
6132 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
6133 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
6134 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
6135 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
6136 BFD_RELOC_AARCH64_NONE
6137 },
6138 {
6139 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
6140 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
6141 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
6142 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
6143 BFD_RELOC_AARCH64_NONE
6144 },
6145 {
6146 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
6147 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
6148 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
6149 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
6150 BFD_RELOC_AARCH64_NONE
6151 }
6152 };
6153
6154 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6155 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
6156 || (inst.reloc.type
6157 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6158 || (inst.reloc.type
6159 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6160 || (inst.reloc.type
6161 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
6162 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
6163
6164 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
6165 opd1_qlf =
6166 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
6167 1, opd0_qlf, 0);
6168 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
6169
6170 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
6171
6172 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
6173 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
6174 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
6175 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
6176 max_logsz = 3;
6177 else
6178 max_logsz = 4;
6179
6180 if (logsz > max_logsz)
6181 {
6182 /* See PR 27904 for an example of this.  */
6183 set_fatal_syntax_error
6184 (_("relocation qualifier does not match instruction size"));
6185 return BFD_RELOC_AARCH64_NONE;
6186 }
6187
6188 /* In reloc.c, these pseudo relocation types should be defined in the
6189    same order as the reloc_ldst_lo12 array above, because the array index
6190    calculation below relies on this.  */
6191 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
6192 }
6193
6194 /* Check whether a register list REGINFO is valid.  The registers must be
6195    numbered in increasing order (modulo 32), in increments of one.
6196
6197    If ACCEPT_ALTERNATE is non-zero, the register numbers must instead be
6198    in increments of two.
6199
6200    Return FALSE if such a register list is invalid, otherwise return TRUE.  */
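/* For illustration: a list with register numbers 2,3,4,5 is valid when
   ACCEPT_ALTERNATE is zero, while numbers 2,4,6,8 are only valid when
   ACCEPT_ALTERNATE is non-zero.  REGINFO packs the register count minus
   one in bits [1:0] and the register numbers in successive 5-bit fields
   above that.  */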
6201
6202 static bool
6203 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
6204 {
6205 uint32_t i, nb_regs, prev_regno, incr;
6206
6207 nb_regs = 1 + (reginfo & 0x3);
6208 reginfo >>= 2;
6209 prev_regno = reginfo & 0x1f;
6210 incr = accept_alternate ? 2 : 1;
6211
6212 for (i = 1; i < nb_regs; ++i)
6213 {
6214 uint32_t curr_regno;
6215 reginfo >>= 5;
6216 curr_regno = reginfo & 0x1f;
6217 if (curr_regno != ((prev_regno + incr) & 0x1f))
6218 return false;
6219 prev_regno = curr_regno;
6220 }
6221
6222 return true;
6223 }
6224
6225 /* Generic instruction operand parser. This does no encoding and no
6226 semantic validation; it merely squirrels values away in the inst
6227 structure. Returns TRUE or FALSE depending on whether the
6228 specified grammar matched. */
6229
6230 static bool
6231 parse_operands (char *str, const aarch64_opcode *opcode)
6232 {
6233 int i;
6234 char *backtrack_pos = 0;
6235 const enum aarch64_opnd *operands = opcode->operands;
6236 aarch64_reg_type imm_reg_type;
6237
6238 clear_error ();
6239 skip_whitespace (str);
6240
6241 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
6242 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6243 else
6244 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6245
6246 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6247 {
6248 int64_t val;
6249 const reg_entry *reg;
6250 int comma_skipped_p = 0;
6251 aarch64_reg_type rtype;
6252 struct vector_type_el vectype;
6253 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6254 aarch64_opnd_info *info = &inst.base.operands[i];
6255 aarch64_reg_type reg_type;
6256
6257 DEBUG_TRACE ("parse operand %d", i);
6258
6259 /* Assign the operand code. */
6260 info->type = operands[i];
6261
6262 if (optional_operand_p (opcode, i))
6263 {
6264 /* Remember where we are in case we need to backtrack. */
6265 gas_assert (!backtrack_pos);
6266 backtrack_pos = str;
6267 }
6268
6269 /* Expect a comma between operands; the backtrack mechanism will take
6270    care of cases of an omitted optional operand. */
6271 if (i > 0 && ! skip_past_char (&str, ','))
6272 {
6273 set_syntax_error (_("comma expected between operands"));
6274 goto failure;
6275 }
6276 else
6277 comma_skipped_p = 1;
6278
6279 switch (operands[i])
6280 {
6281 case AARCH64_OPND_Rd:
6282 case AARCH64_OPND_Rn:
6283 case AARCH64_OPND_Rm:
6284 case AARCH64_OPND_Rt:
6285 case AARCH64_OPND_Rt2:
6286 case AARCH64_OPND_Rs:
6287 case AARCH64_OPND_Ra:
6288 case AARCH64_OPND_Rt_LS64:
6289 case AARCH64_OPND_Rt_SYS:
6290 case AARCH64_OPND_PAIRREG:
6291 case AARCH64_OPND_SVE_Rm:
6292 po_int_reg_or_fail (REG_TYPE_R_Z);
6293
6294 /* In LS64 load/store instructions the Rt register number must be even
6295    and <= 22. */
6296 if (operands[i] == AARCH64_OPND_Rt_LS64)
6297 {
6298 /* We've already checked that this is a valid register.
6299    Now check that the register number (Rt) is not undefined for LS64
6300    instructions:
6301    if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
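    /* Illustrative examples: x0, x2, ... x22 are accepted here, while odd
       registers such as x1 and high registers such as x24 or x26 are
       rejected.  */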
6302 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6303 {
6304 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6305 goto failure;
6306 }
6307 }
6308 break;
6309
6310 case AARCH64_OPND_Rd_SP:
6311 case AARCH64_OPND_Rn_SP:
6312 case AARCH64_OPND_Rt_SP:
6313 case AARCH64_OPND_SVE_Rn_SP:
6314 case AARCH64_OPND_Rm_SP:
6315 po_int_reg_or_fail (REG_TYPE_R_SP);
6316 break;
6317
6318 case AARCH64_OPND_Rm_EXT:
6319 case AARCH64_OPND_Rm_SFT:
6320 po_misc_or_fail (parse_shifter_operand
6321 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6322 ? SHIFTED_ARITH_IMM
6323 : SHIFTED_LOGIC_IMM)));
6324 if (!info->shifter.operator_present)
6325 {
6326 /* Default to LSL if not present. Libopcodes prefers shifter
6327 kind to be explicit. */
6328 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6329 info->shifter.kind = AARCH64_MOD_LSL;
6330 /* For Rm_EXT, libopcodes will carry out a further check on whether
6331    or not the stack pointer is used in the instruction (recall that
6332    "the extend operator is not optional unless at least one of
6333    "Rd" or "Rn" is '11111' (i.e. WSP)").  */
6334 }
6335 break;
6336
6337 case AARCH64_OPND_Fd:
6338 case AARCH64_OPND_Fn:
6339 case AARCH64_OPND_Fm:
6340 case AARCH64_OPND_Fa:
6341 case AARCH64_OPND_Ft:
6342 case AARCH64_OPND_Ft2:
6343 case AARCH64_OPND_Sd:
6344 case AARCH64_OPND_Sn:
6345 case AARCH64_OPND_Sm:
6346 case AARCH64_OPND_SVE_VZn:
6347 case AARCH64_OPND_SVE_Vd:
6348 case AARCH64_OPND_SVE_Vm:
6349 case AARCH64_OPND_SVE_Vn:
6350 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
6351 if (val == PARSE_FAIL)
6352 {
6353 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6354 goto failure;
6355 }
6356 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
6357
6358 info->reg.regno = val;
6359 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
6360 break;
6361
6362 case AARCH64_OPND_SVE_Pd:
6363 case AARCH64_OPND_SVE_Pg3:
6364 case AARCH64_OPND_SVE_Pg4_5:
6365 case AARCH64_OPND_SVE_Pg4_10:
6366 case AARCH64_OPND_SVE_Pg4_16:
6367 case AARCH64_OPND_SVE_Pm:
6368 case AARCH64_OPND_SVE_Pn:
6369 case AARCH64_OPND_SVE_Pt:
6370 case AARCH64_OPND_SME_Pm:
6371 reg_type = REG_TYPE_PN;
6372 goto vector_reg;
6373
6374 case AARCH64_OPND_SVE_Za_5:
6375 case AARCH64_OPND_SVE_Za_16:
6376 case AARCH64_OPND_SVE_Zd:
6377 case AARCH64_OPND_SVE_Zm_5:
6378 case AARCH64_OPND_SVE_Zm_16:
6379 case AARCH64_OPND_SVE_Zn:
6380 case AARCH64_OPND_SVE_Zt:
6381 reg_type = REG_TYPE_ZN;
6382 goto vector_reg;
6383
6384 case AARCH64_OPND_Va:
6385 case AARCH64_OPND_Vd:
6386 case AARCH64_OPND_Vn:
6387 case AARCH64_OPND_Vm:
6388 reg_type = REG_TYPE_VN;
6389 vector_reg:
6390 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6391 if (val == PARSE_FAIL)
6392 {
6393 first_error (_(get_reg_expected_msg (reg_type)));
6394 goto failure;
6395 }
6396 if (vectype.defined & NTA_HASINDEX)
6397 goto failure;
6398
6399 info->reg.regno = val;
6400 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6401 && vectype.type == NT_invtype)
6402 /* Unqualified Pn and Zn registers are allowed in certain
6403 contexts. Rely on F_STRICT qualifier checking to catch
6404 invalid uses. */
6405 info->qualifier = AARCH64_OPND_QLF_NIL;
6406 else
6407 {
6408 info->qualifier = vectype_to_qualifier (&vectype);
6409 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6410 goto failure;
6411 }
6412 break;
6413
6414 case AARCH64_OPND_VdD1:
6415 case AARCH64_OPND_VnD1:
6416 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
6417 if (val == PARSE_FAIL)
6418 {
6419 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6420 goto failure;
6421 }
6422 if (vectype.type != NT_d || vectype.index != 1)
6423 {
6424 set_fatal_syntax_error
6425 (_("the top half of a 128-bit FP/SIMD register is expected"));
6426 goto failure;
6427 }
6428 info->reg.regno = val;
6429 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
6430    here; this is correct for the purpose of encoding/decoding since
6431    only the register number is explicitly encoded in the related
6432    instructions, although it appears a bit hacky. */
6433 info->qualifier = AARCH64_OPND_QLF_S_D;
6434 break;
6435
6436 case AARCH64_OPND_SVE_Zm3_INDEX:
6437 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6438 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6439 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6440 case AARCH64_OPND_SVE_Zm4_INDEX:
6441 case AARCH64_OPND_SVE_Zn_INDEX:
6442 reg_type = REG_TYPE_ZN;
6443 goto vector_reg_index;
6444
6445 case AARCH64_OPND_Ed:
6446 case AARCH64_OPND_En:
6447 case AARCH64_OPND_Em:
6448 case AARCH64_OPND_Em16:
6449 case AARCH64_OPND_SM3_IMM2:
6450 reg_type = REG_TYPE_VN;
6451 vector_reg_index:
6452 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6453 if (val == PARSE_FAIL)
6454 {
6455 first_error (_(get_reg_expected_msg (reg_type)));
6456 goto failure;
6457 }
6458 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6459 goto failure;
6460
6461 info->reglane.regno = val;
6462 info->reglane.index = vectype.index;
6463 info->qualifier = vectype_to_qualifier (&vectype);
6464 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6465 goto failure;
6466 break;
6467
6468 case AARCH64_OPND_SVE_ZnxN:
6469 case AARCH64_OPND_SVE_ZtxN:
6470 reg_type = REG_TYPE_ZN;
6471 goto vector_reg_list;
6472
6473 case AARCH64_OPND_LVn:
6474 case AARCH64_OPND_LVt:
6475 case AARCH64_OPND_LVt_AL:
6476 case AARCH64_OPND_LEt:
6477 reg_type = REG_TYPE_VN;
6478 vector_reg_list:
6479 if (reg_type == REG_TYPE_ZN
6480 && get_opcode_dependent_value (opcode) == 1
6481 && *str != '{')
6482 {
6483 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6484 if (val == PARSE_FAIL)
6485 {
6486 first_error (_(get_reg_expected_msg (reg_type)));
6487 goto failure;
6488 }
6489 info->reglist.first_regno = val;
6490 info->reglist.num_regs = 1;
6491 }
6492 else
6493 {
6494 val = parse_vector_reg_list (&str, reg_type, &vectype);
6495 if (val == PARSE_FAIL)
6496 goto failure;
6497
6498 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6499 {
6500 set_fatal_syntax_error (_("invalid register list"));
6501 goto failure;
6502 }
6503
6504 if (vectype.width != 0 && *str != ',')
6505 {
6506 set_fatal_syntax_error
6507 (_("expected element type rather than vector type"));
6508 goto failure;
6509 }
6510
6511 info->reglist.first_regno = (val >> 2) & 0x1f;
6512 info->reglist.num_regs = (val & 0x3) + 1;
6513 }
6514 if (operands[i] == AARCH64_OPND_LEt)
6515 {
6516 if (!(vectype.defined & NTA_HASINDEX))
6517 goto failure;
6518 info->reglist.has_index = 1;
6519 info->reglist.index = vectype.index;
6520 }
6521 else
6522 {
6523 if (vectype.defined & NTA_HASINDEX)
6524 goto failure;
6525 if (!(vectype.defined & NTA_HASTYPE))
6526 {
6527 if (reg_type == REG_TYPE_ZN)
6528 set_fatal_syntax_error (_("missing type suffix"));
6529 goto failure;
6530 }
6531 }
6532 info->qualifier = vectype_to_qualifier (&vectype);
6533 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6534 goto failure;
6535 break;
6536
6537 case AARCH64_OPND_CRn:
6538 case AARCH64_OPND_CRm:
6539 {
6540 char prefix = *(str++);
6541 if (prefix != 'c' && prefix != 'C')
6542 goto failure;
6543
6544 po_imm_nc_or_fail ();
6545 if (val > 15)
6546 {
6547 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6548 goto failure;
6549 }
6550 info->qualifier = AARCH64_OPND_QLF_CR;
6551 info->imm.value = val;
6552 break;
6553 }
6554
6555 case AARCH64_OPND_SHLL_IMM:
6556 case AARCH64_OPND_IMM_VLSR:
6557 po_imm_or_fail (1, 64);
6558 info->imm.value = val;
6559 break;
6560
6561 case AARCH64_OPND_CCMP_IMM:
6562 case AARCH64_OPND_SIMM5:
6563 case AARCH64_OPND_FBITS:
6564 case AARCH64_OPND_TME_UIMM16:
6565 case AARCH64_OPND_UIMM4:
6566 case AARCH64_OPND_UIMM4_ADDG:
6567 case AARCH64_OPND_UIMM10:
6568 case AARCH64_OPND_UIMM3_OP1:
6569 case AARCH64_OPND_UIMM3_OP2:
6570 case AARCH64_OPND_IMM_VLSL:
6571 case AARCH64_OPND_IMM:
6572 case AARCH64_OPND_IMM_2:
6573 case AARCH64_OPND_WIDTH:
6574 case AARCH64_OPND_SVE_INV_LIMM:
6575 case AARCH64_OPND_SVE_LIMM:
6576 case AARCH64_OPND_SVE_LIMM_MOV:
6577 case AARCH64_OPND_SVE_SHLIMM_PRED:
6578 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6579 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6580 case AARCH64_OPND_SVE_SHRIMM_PRED:
6581 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6582 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6583 case AARCH64_OPND_SVE_SIMM5:
6584 case AARCH64_OPND_SVE_SIMM5B:
6585 case AARCH64_OPND_SVE_SIMM6:
6586 case AARCH64_OPND_SVE_SIMM8:
6587 case AARCH64_OPND_SVE_UIMM3:
6588 case AARCH64_OPND_SVE_UIMM7:
6589 case AARCH64_OPND_SVE_UIMM8:
6590 case AARCH64_OPND_SVE_UIMM8_53:
6591 case AARCH64_OPND_IMM_ROT1:
6592 case AARCH64_OPND_IMM_ROT2:
6593 case AARCH64_OPND_IMM_ROT3:
6594 case AARCH64_OPND_SVE_IMM_ROT1:
6595 case AARCH64_OPND_SVE_IMM_ROT2:
6596 case AARCH64_OPND_SVE_IMM_ROT3:
6597 po_imm_nc_or_fail ();
6598 info->imm.value = val;
6599 break;
6600
6601 case AARCH64_OPND_SVE_AIMM:
6602 case AARCH64_OPND_SVE_ASIMM:
6603 po_imm_nc_or_fail ();
6604 info->imm.value = val;
6605 skip_whitespace (str);
6606 if (skip_past_comma (&str))
6607 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6608 else
6609 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6610 break;
6611
6612 case AARCH64_OPND_SVE_PATTERN:
6613 po_enum_or_fail (aarch64_sve_pattern_array);
6614 info->imm.value = val;
6615 break;
6616
6617 case AARCH64_OPND_SVE_PATTERN_SCALED:
6618 po_enum_or_fail (aarch64_sve_pattern_array);
6619 info->imm.value = val;
6620 if (skip_past_comma (&str)
6621 && !parse_shift (&str, info, SHIFTED_MUL))
6622 goto failure;
6623 if (!info->shifter.operator_present)
6624 {
6625 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6626 info->shifter.kind = AARCH64_MOD_MUL;
6627 info->shifter.amount = 1;
6628 }
6629 break;
6630
6631 case AARCH64_OPND_SVE_PRFOP:
6632 po_enum_or_fail (aarch64_sve_prfop_array);
6633 info->imm.value = val;
6634 break;
6635
6636 case AARCH64_OPND_UIMM7:
6637 po_imm_or_fail (0, 127);
6638 info->imm.value = val;
6639 break;
6640
6641 case AARCH64_OPND_IDX:
6642 case AARCH64_OPND_MASK:
6643 case AARCH64_OPND_BIT_NUM:
6644 case AARCH64_OPND_IMMR:
6645 case AARCH64_OPND_IMMS:
6646 po_imm_or_fail (0, 63);
6647 info->imm.value = val;
6648 break;
6649
6650 case AARCH64_OPND_IMM0:
6651 po_imm_nc_or_fail ();
6652 if (val != 0)
6653 {
6654 set_fatal_syntax_error (_("immediate zero expected"));
6655 goto failure;
6656 }
6657 info->imm.value = 0;
6658 break;
6659
6660 case AARCH64_OPND_FPIMM0:
6661 {
6662 int qfloat;
6663 bool res1 = false, res2 = false;
6664 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6665 it is probably not worth the effort to support it. */
6666 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6667 imm_reg_type))
6668 && (error_p ()
6669 || !(res2 = parse_constant_immediate (&str, &val,
6670 imm_reg_type))))
6671 goto failure;
6672 if ((res1 && qfloat == 0) || (res2 && val == 0))
6673 {
6674 info->imm.value = 0;
6675 info->imm.is_fp = 1;
6676 break;
6677 }
6678 set_fatal_syntax_error (_("immediate zero expected"));
6679 goto failure;
6680 }
6681
6682 case AARCH64_OPND_IMM_MOV:
6683 {
6684 char *saved = str;
6685 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6686 reg_name_p (str, REG_TYPE_VN))
6687 goto failure;
6688 str = saved;
6689 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6690 GE_OPT_PREFIX, REJECT_ABSENT,
6691 NORMAL_RESOLUTION));
6692 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6693 later. fix_mov_imm_insn will try to determine a machine
6694 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6695 message if the immediate cannot be moved by a single
6696 instruction. */
6697 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6698 inst.base.operands[i].skip = 1;
6699 }
6700 break;
6701
6702 case AARCH64_OPND_SIMD_IMM:
6703 case AARCH64_OPND_SIMD_IMM_SFT:
6704 if (! parse_big_immediate (&str, &val, imm_reg_type))
6705 goto failure;
6706 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6707 /* addr_off_p */ 0,
6708 /* need_libopcodes_p */ 1,
6709 /* skip_p */ 1);
6710 /* Parse the shift.
6711    N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6712    shift, we don't check it here; we leave the checking to
6713    libopcodes (operand_general_constraint_met_p).  By
6714    doing this, we achieve better diagnostics. */
6715 if (skip_past_comma (&str)
6716 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6717 goto failure;
6718 if (!info->shifter.operator_present
6719 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6720 {
6721 /* Default to LSL if not present. Libopcodes prefers shifter
6722 kind to be explicit. */
6723 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6724 info->shifter.kind = AARCH64_MOD_LSL;
6725 }
6726 break;
6727
6728 case AARCH64_OPND_FPIMM:
6729 case AARCH64_OPND_SIMD_FPIMM:
6730 case AARCH64_OPND_SVE_FPIMM8:
6731 {
6732 int qfloat;
6733 bool dp_p;
6734
6735 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6736 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6737 || !aarch64_imm_float_p (qfloat))
6738 {
6739 if (!error_p ())
6740 set_fatal_syntax_error (_("invalid floating-point"
6741 " constant"));
6742 goto failure;
6743 }
6744 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6745 inst.base.operands[i].imm.is_fp = 1;
6746 }
6747 break;
6748
6749 case AARCH64_OPND_SVE_I1_HALF_ONE:
6750 case AARCH64_OPND_SVE_I1_HALF_TWO:
6751 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6752 {
6753 int qfloat;
6754 bool dp_p;
6755
6756 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6757 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6758 {
6759 if (!error_p ())
6760 set_fatal_syntax_error (_("invalid floating-point"
6761 " constant"));
6762 goto failure;
6763 }
6764 inst.base.operands[i].imm.value = qfloat;
6765 inst.base.operands[i].imm.is_fp = 1;
6766 }
6767 break;
6768
6769 case AARCH64_OPND_LIMM:
6770 po_misc_or_fail (parse_shifter_operand (&str, info,
6771 SHIFTED_LOGIC_IMM));
6772 if (info->shifter.operator_present)
6773 {
6774 set_fatal_syntax_error
6775 (_("shift not allowed for bitmask immediate"));
6776 goto failure;
6777 }
6778 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6779 /* addr_off_p */ 0,
6780 /* need_libopcodes_p */ 1,
6781 /* skip_p */ 1);
6782 break;
6783
6784 case AARCH64_OPND_AIMM:
6785 if (opcode->op == OP_ADD)
6786 /* ADD may have relocation types. */
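     /* E.g. (illustrative): "add x0, x1, #:lo12:sym".  */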
6787 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6788 SHIFTED_ARITH_IMM));
6789 else
6790 po_misc_or_fail (parse_shifter_operand (&str, info,
6791 SHIFTED_ARITH_IMM));
6792 switch (inst.reloc.type)
6793 {
6794 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6795 info->shifter.amount = 12;
6796 break;
6797 case BFD_RELOC_UNUSED:
6798 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6799 if (info->shifter.kind != AARCH64_MOD_NONE)
6800 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6801 inst.reloc.pc_rel = 0;
6802 break;
6803 default:
6804 break;
6805 }
6806 info->imm.value = 0;
6807 if (!info->shifter.operator_present)
6808 {
6809 /* Default to LSL if not present. Libopcodes prefers shifter
6810 kind to be explicit. */
6811 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6812 info->shifter.kind = AARCH64_MOD_LSL;
6813 }
6814 break;
6815
6816 case AARCH64_OPND_HALF:
6817 {
6818 /* #<imm16> or relocation. */
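     /* For example (illustrative): "movz x0, #0x1234, lsl #16" takes the
        explicit-shift path below, while "movz x0, #:abs_g1:sym" records a
        relocation and must not also specify a shift.  */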
6819 int internal_fixup_p;
6820 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6821 if (internal_fixup_p)
6822 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6823 skip_whitespace (str);
6824 if (skip_past_comma (&str))
6825 {
6826 /* {, LSL #<shift>} */
6827 if (! aarch64_gas_internal_fixup_p ())
6828 {
6829 set_fatal_syntax_error (_("can't mix relocation modifier "
6830 "with explicit shift"));
6831 goto failure;
6832 }
6833 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6834 }
6835 else
6836 inst.base.operands[i].shifter.amount = 0;
6837 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6838 inst.base.operands[i].imm.value = 0;
6839 if (! process_movw_reloc_info ())
6840 goto failure;
6841 }
6842 break;
6843
6844 case AARCH64_OPND_EXCEPTION:
6845 case AARCH64_OPND_UNDEFINED:
6846 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6847 imm_reg_type));
6848 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6849 /* addr_off_p */ 0,
6850 /* need_libopcodes_p */ 0,
6851 /* skip_p */ 1);
6852 break;
6853
6854 case AARCH64_OPND_NZCV:
6855 {
6856 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6857 if (nzcv != NULL)
6858 {
6859 str += 4;
6860 info->imm.value = nzcv->value;
6861 break;
6862 }
6863 po_imm_or_fail (0, 15);
6864 info->imm.value = val;
6865 }
6866 break;
6867
6868 case AARCH64_OPND_COND:
6869 case AARCH64_OPND_COND1:
6870 {
6871 char *start = str;
6872 do
6873 str++;
6874 while (ISALPHA (*str));
6875 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6876 if (info->cond == NULL)
6877 {
6878 set_syntax_error (_("invalid condition"));
6879 goto failure;
6880 }
6881 else if (operands[i] == AARCH64_OPND_COND1
6882 && (info->cond->value & 0xe) == 0xe)
6883 {
6884 /* Do not allow AL or NV. */
6885 set_default_error ();
6886 goto failure;
6887 }
6888 }
6889 break;
6890
6891 case AARCH64_OPND_ADDR_ADRP:
6892 po_misc_or_fail (parse_adrp (&str));
6893 /* Clear the value, as the operand needs to be relocated. */
6894 info->imm.value = 0;
6895 break;
6896
6897 case AARCH64_OPND_ADDR_PCREL14:
6898 case AARCH64_OPND_ADDR_PCREL19:
6899 case AARCH64_OPND_ADDR_PCREL21:
6900 case AARCH64_OPND_ADDR_PCREL26:
6901 po_misc_or_fail (parse_address (&str, info));
6902 if (!info->addr.pcrel)
6903 {
6904 set_syntax_error (_("invalid pc-relative address"));
6905 goto failure;
6906 }
6907 if (inst.gen_lit_pool
6908 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6909 {
6910 /* Only permit "=value" in the literal load instructions.
6911 The literal will be generated by programmer_friendly_fixup. */
6912 set_syntax_error (_("invalid use of \"=immediate\""));
6913 goto failure;
6914 }
6915 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6916 {
6917 set_syntax_error (_("unrecognized relocation suffix"));
6918 goto failure;
6919 }
6920 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6921 {
6922 info->imm.value = inst.reloc.exp.X_add_number;
6923 inst.reloc.type = BFD_RELOC_UNUSED;
6924 }
6925 else
6926 {
6927 info->imm.value = 0;
6928 if (inst.reloc.type == BFD_RELOC_UNUSED)
6929 switch (opcode->iclass)
6930 {
6931 case compbranch:
6932 case condbranch:
6933 /* e.g. CBZ or B.COND */
6934 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6935 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6936 break;
6937 case testbranch:
6938 /* e.g. TBZ */
6939 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6940 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6941 break;
6942 case branch_imm:
6943 /* e.g. B or BL */
6944 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6945 inst.reloc.type =
6946 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6947 : BFD_RELOC_AARCH64_JUMP26;
6948 break;
6949 case loadlit:
6950 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6951 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6952 break;
6953 case pcreladdr:
6954 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6955 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6956 break;
6957 default:
6958 gas_assert (0);
6959 abort ();
6960 }
6961 inst.reloc.pc_rel = 1;
6962 }
6963 break;
6964
6965 case AARCH64_OPND_ADDR_SIMPLE:
6966 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6967 {
6968 /* [<Xn|SP>{, #<simm>}] */
6969 char *start = str;
6970 /* First use the normal address-parsing routines, to get
6971 the usual syntax errors. */
6972 po_misc_or_fail (parse_address (&str, info));
6973 if (info->addr.pcrel || info->addr.offset.is_reg
6974 || !info->addr.preind || info->addr.postind
6975 || info->addr.writeback)
6976 {
6977 set_syntax_error (_("invalid addressing mode"));
6978 goto failure;
6979 }
6980
6981 /* Then retry, matching the specific syntax of these addresses. */
6982 str = start;
6983 po_char_or_fail ('[');
6984 po_reg_or_fail (REG_TYPE_R64_SP);
6985 /* Accept optional ", #0". */
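     /* E.g. (illustrative): both "[x1]" and "[x1, #0]" are accepted when
        the operand is AARCH64_OPND_ADDR_SIMPLE.  */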
6986 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6987 && skip_past_char (&str, ','))
6988 {
6989 skip_past_char (&str, '#');
6990 if (! skip_past_char (&str, '0'))
6991 {
6992 set_fatal_syntax_error
6993 (_("the optional immediate offset can only be 0"));
6994 goto failure;
6995 }
6996 }
6997 po_char_or_fail (']');
6998 break;
6999 }
7000
7001 case AARCH64_OPND_ADDR_REGOFF:
7002 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7003 po_misc_or_fail (parse_address (&str, info));
7004 regoff_addr:
7005 if (info->addr.pcrel || !info->addr.offset.is_reg
7006 || !info->addr.preind || info->addr.postind
7007 || info->addr.writeback)
7008 {
7009 set_syntax_error (_("invalid addressing mode"));
7010 goto failure;
7011 }
7012 if (!info->shifter.operator_present)
7013 {
7014 /* Default to LSL if not present. Libopcodes prefers shifter
7015 kind to be explicit. */
7016 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7017 info->shifter.kind = AARCH64_MOD_LSL;
7018 }
7019 /* Qualifier to be deduced by libopcodes. */
7020 break;
7021
7022 case AARCH64_OPND_ADDR_SIMM7:
7023 po_misc_or_fail (parse_address (&str, info));
7024 if (info->addr.pcrel || info->addr.offset.is_reg
7025 || (!info->addr.preind && !info->addr.postind))
7026 {
7027 set_syntax_error (_("invalid addressing mode"));
7028 goto failure;
7029 }
7030 if (inst.reloc.type != BFD_RELOC_UNUSED)
7031 {
7032 set_syntax_error (_("relocation not allowed"));
7033 goto failure;
7034 }
7035 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7036 /* addr_off_p */ 1,
7037 /* need_libopcodes_p */ 1,
7038 /* skip_p */ 0);
7039 break;
7040
7041 case AARCH64_OPND_ADDR_SIMM9:
7042 case AARCH64_OPND_ADDR_SIMM9_2:
7043 case AARCH64_OPND_ADDR_SIMM11:
7044 case AARCH64_OPND_ADDR_SIMM13:
7045 po_misc_or_fail (parse_address (&str, info));
7046 if (info->addr.pcrel || info->addr.offset.is_reg
7047 || (!info->addr.preind && !info->addr.postind)
7048 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7049 && info->addr.writeback))
7050 {
7051 set_syntax_error (_("invalid addressing mode"));
7052 goto failure;
7053 }
7054 if (inst.reloc.type != BFD_RELOC_UNUSED)
7055 {
7056 set_syntax_error (_("relocation not allowed"));
7057 goto failure;
7058 }
7059 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7060 /* addr_off_p */ 1,
7061 /* need_libopcodes_p */ 1,
7062 /* skip_p */ 0);
7063 break;
7064
7065 case AARCH64_OPND_ADDR_SIMM10:
7066 case AARCH64_OPND_ADDR_OFFSET:
7067 po_misc_or_fail (parse_address (&str, info));
7068 if (info->addr.pcrel || info->addr.offset.is_reg
7069 || !info->addr.preind || info->addr.postind)
7070 {
7071 set_syntax_error (_("invalid addressing mode"));
7072 goto failure;
7073 }
7074 if (inst.reloc.type != BFD_RELOC_UNUSED)
7075 {
7076 set_syntax_error (_("relocation not allowed"));
7077 goto failure;
7078 }
7079 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7080 /* addr_off_p */ 1,
7081 /* need_libopcodes_p */ 1,
7082 /* skip_p */ 0);
7083 break;
7084
7085 case AARCH64_OPND_ADDR_UIMM12:
7086 po_misc_or_fail (parse_address (&str, info));
7087 if (info->addr.pcrel || info->addr.offset.is_reg
7088 || !info->addr.preind || info->addr.writeback)
7089 {
7090 set_syntax_error (_("invalid addressing mode"));
7091 goto failure;
7092 }
7093 if (inst.reloc.type == BFD_RELOC_UNUSED)
7094 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7095 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7096 || (inst.reloc.type
7097 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7098 || (inst.reloc.type
7099 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7100 || (inst.reloc.type
7101 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7102 || (inst.reloc.type
7103 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7104 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7105 /* Leave qualifier to be determined by libopcodes. */
7106 break;
7107
7108 case AARCH64_OPND_SIMD_ADDR_POST:
7109 /* [<Xn|SP>], <Xm|#<amount>> */
7110 po_misc_or_fail (parse_address (&str, info));
7111 if (!info->addr.postind || !info->addr.writeback)
7112 {
7113 set_syntax_error (_("invalid addressing mode"));
7114 goto failure;
7115 }
7116 if (!info->addr.offset.is_reg)
7117 {
7118 if (inst.reloc.exp.X_op == O_constant)
7119 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7120 else
7121 {
7122 set_fatal_syntax_error
7123 (_("writeback value must be an immediate constant"));
7124 goto failure;
7125 }
7126 }
7127 /* No qualifier. */
7128 break;
7129
7130 case AARCH64_OPND_SME_SM_ZA:
7131 /* { SM | ZA } */
7132 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7133 {
7134 set_syntax_error (_("unknown or missing PSTATE field name"));
7135 goto failure;
7136 }
7137 info->reg.regno = val;
7138 break;
7139
7140 case AARCH64_OPND_SME_PnT_Wm_imm:
7141 /* <Pn>.<T>[<Wm>, #<imm>] */
7142 {
7143 int index_base_reg;
7144 int imm;
7145 val = parse_sme_pred_reg_with_index (&str,
7146 &index_base_reg,
7147 &imm,
7148 &qualifier);
7149 if (val == PARSE_FAIL)
7150 goto failure;
7151
7152 info->za_tile_vector.regno = val;
7153 info->za_tile_vector.index.regno = index_base_reg;
7154 info->za_tile_vector.index.imm = imm;
7155 info->qualifier = qualifier;
7156 break;
7157 }
7158
7159 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7160 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7161 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7162 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7163 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7164 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7165 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7166 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7167 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7168 case AARCH64_OPND_SVE_ADDR_RI_U6:
7169 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7170 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7171 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7172 /* [X<n>{, #imm, MUL VL}]
7173 [X<n>{, #imm}]
7174 but recognizing SVE registers. */
7175 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7176 &offset_qualifier));
7177 if (base_qualifier != AARCH64_OPND_QLF_X)
7178 {
7179 set_syntax_error (_("invalid addressing mode"));
7180 goto failure;
7181 }
7182 sve_regimm:
7183 if (info->addr.pcrel || info->addr.offset.is_reg
7184 || !info->addr.preind || info->addr.writeback)
7185 {
7186 set_syntax_error (_("invalid addressing mode"));
7187 goto failure;
7188 }
7189 if (inst.reloc.type != BFD_RELOC_UNUSED
7190 || inst.reloc.exp.X_op != O_constant)
7191 {
7192 /* Make sure this has priority over
7193 "invalid addressing mode". */
7194 set_fatal_syntax_error (_("constant offset required"));
7195 goto failure;
7196 }
7197 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7198 break;
7199
7200 case AARCH64_OPND_SVE_ADDR_R:
7201 /* [<Xn|SP>{, <R><m>}]
7202 but recognizing SVE registers. */
7203 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7204 &offset_qualifier));
7205 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7206 {
7207 offset_qualifier = AARCH64_OPND_QLF_X;
7208 info->addr.offset.is_reg = 1;
7209 info->addr.offset.regno = 31;
7210 }
7211 else if (base_qualifier != AARCH64_OPND_QLF_X
7212 || offset_qualifier != AARCH64_OPND_QLF_X)
7213 {
7214 set_syntax_error (_("invalid addressing mode"));
7215 goto failure;
7216 }
7217 goto regoff_addr;
7218
7219 case AARCH64_OPND_SVE_ADDR_RR:
7220 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7221 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7222 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7223 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7224 case AARCH64_OPND_SVE_ADDR_RX:
7225 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7226 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7227 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7228 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7229 but recognizing SVE registers. */
7230 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7231 &offset_qualifier));
7232 if (base_qualifier != AARCH64_OPND_QLF_X
7233 || offset_qualifier != AARCH64_OPND_QLF_X)
7234 {
7235 set_syntax_error (_("invalid addressing mode"));
7236 goto failure;
7237 }
7238 goto regoff_addr;
7239
7240 case AARCH64_OPND_SVE_ADDR_RZ:
7241 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7242 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7243 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7244 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7245 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7246 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7247 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7248 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7249 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7250 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7251 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7252 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7253 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7254 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7255 &offset_qualifier));
7256 if (base_qualifier != AARCH64_OPND_QLF_X
7257 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7258 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7259 {
7260 set_syntax_error (_("invalid addressing mode"));
7261 goto failure;
7262 }
7263 info->qualifier = offset_qualifier;
7264 goto regoff_addr;
7265
7266 case AARCH64_OPND_SVE_ADDR_ZX:
7267 /* [Zn.<T>{, <Xm>}]. */
7268 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7269 &offset_qualifier));
7270 /* Things to check:
7271    - base_qualifier must be either S_S or S_D;
7272    - offset_qualifier must be X.
7273 */
7274 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7275 && base_qualifier != AARCH64_OPND_QLF_S_D)
7276 || offset_qualifier != AARCH64_OPND_QLF_X)
7277 {
7278 set_syntax_error (_("invalid addressing mode"));
7279 goto failure;
7280 }
7281 info->qualifier = base_qualifier;
7282 if (!info->addr.offset.is_reg || info->addr.pcrel
7283 || !info->addr.preind || info->addr.writeback
7284 || info->shifter.operator_present != 0)
7285 {
7286 set_syntax_error (_("invalid addressing mode"));
7287 goto failure;
7288 }
7289 info->shifter.kind = AARCH64_MOD_LSL;
7290 break;
7291
7292
7293 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7294 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7295 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7296 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7297 /* [Z<n>.<T>{, #imm}] */
7298 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7299 &offset_qualifier));
7300 if (base_qualifier != AARCH64_OPND_QLF_S_S
7301 && base_qualifier != AARCH64_OPND_QLF_S_D)
7302 {
7303 set_syntax_error (_("invalid addressing mode"));
7304 goto failure;
7305 }
7306 info->qualifier = base_qualifier;
7307 goto sve_regimm;
7308
7309 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7310 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7311 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7312 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7313 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7314
7315 We don't reject:
7316
7317 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7318
7319 here since we get better error messages by leaving it to
7320 the qualifier checking routines. */
7321 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7322 &offset_qualifier));
7323 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7324 && base_qualifier != AARCH64_OPND_QLF_S_D)
7325 || offset_qualifier != base_qualifier)
7326 {
7327 set_syntax_error (_("invalid addressing mode"));
7328 goto failure;
7329 }
7330 info->qualifier = base_qualifier;
7331 goto regoff_addr;
7332
7333 case AARCH64_OPND_SYSREG:
7334 {
7335 uint32_t sysreg_flags;
7336 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7337 &sysreg_flags)) == PARSE_FAIL)
7338 {
7339 set_syntax_error (_("unknown or missing system register name"));
7340 goto failure;
7341 }
7342 inst.base.operands[i].sysreg.value = val;
7343 inst.base.operands[i].sysreg.flags = sysreg_flags;
7344 break;
7345 }
7346
7347 case AARCH64_OPND_PSTATEFIELD:
7348 {
7349 uint32_t sysreg_flags;
7350 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7351 &sysreg_flags)) == PARSE_FAIL)
7352 {
7353 set_syntax_error (_("unknown or missing PSTATE field name"));
7354 goto failure;
7355 }
7356 inst.base.operands[i].pstatefield = val;
7357 inst.base.operands[i].sysreg.flags = sysreg_flags;
7358 break;
7359 }
7360
7361 case AARCH64_OPND_SYSREG_IC:
7362 inst.base.operands[i].sysins_op =
7363 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7364 goto sys_reg_ins;
7365
7366 case AARCH64_OPND_SYSREG_DC:
7367 inst.base.operands[i].sysins_op =
7368 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7369 goto sys_reg_ins;
7370
7371 case AARCH64_OPND_SYSREG_AT:
7372 inst.base.operands[i].sysins_op =
7373 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7374 goto sys_reg_ins;
7375
7376 case AARCH64_OPND_SYSREG_SR:
7377 inst.base.operands[i].sysins_op =
7378 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7379 goto sys_reg_ins;
7380
7381 case AARCH64_OPND_SYSREG_TLBI:
7382 inst.base.operands[i].sysins_op =
7383 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7384 sys_reg_ins:
7385 if (inst.base.operands[i].sysins_op == NULL)
7386 {
7387 set_fatal_syntax_error ( _("unknown or missing operation name"));
7388 goto failure;
7389 }
7390 break;
7391
7392 case AARCH64_OPND_BARRIER:
7393 case AARCH64_OPND_BARRIER_ISB:
7394 val = parse_barrier (&str);
7395 if (val != PARSE_FAIL
7396 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7397 {
7398 /* ISB only accepts the option name 'sy'. */
7399 set_syntax_error
7400 (_("the specified option is not accepted in ISB"));
7401 /* Turn off backtrack as this optional operand is present. */
7402 backtrack_pos = 0;
7403 goto failure;
7404 }
7405 if (val != PARSE_FAIL
7406 && operands[i] == AARCH64_OPND_BARRIER)
7407 {
7408 /* Regular barriers accept only CRm option values 0-15.
7409    The DSB nXS barrier variant accepts values > 15. */
7410 if (val < 0 || val > 15)
7411 {
7412 set_syntax_error (_("the specified option is not accepted in DSB"));
7413 goto failure;
7414 }
7415 }
7416 /* This is an extension to accept a 0..15 immediate. */
7417 if (val == PARSE_FAIL)
7418 po_imm_or_fail (0, 15);
7419 info->barrier = aarch64_barrier_options + val;
7420 break;
7421
7422 case AARCH64_OPND_BARRIER_DSB_NXS:
7423 val = parse_barrier (&str);
7424 if (val != PARSE_FAIL)
7425 {
7426 /* The DSB nXS barrier variant accepts only <option>nXS qualifiers. */
7427 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7428 {
7429 set_syntax_error (_("the specified option is not accepted in DSB"));
7430 /* Turn off backtrack as this optional operand is present. */
7431 backtrack_pos = 0;
7432 goto failure;
7433 }
7434 }
7435 else
7436 {
7437 /* The DSB nXS barrier variant accepts a 5-bit unsigned immediate, with
7438    possible values 16, 20, 24 or 28, encoded in val<3:2>. */
7439 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7440 goto failure;
7441 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7442 {
7443 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7444 goto failure;
7445 }
7446 }
7447 /* The option index is encoded as a 2-bit value in val<3:2>. */
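     /* E.g.: 16 -> 0, 20 -> 1, 24 -> 2 and 28 -> 3.  */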
7448 val = (val >> 2) - 4;
7449 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7450 break;
7451
7452 case AARCH64_OPND_PRFOP:
7453 val = parse_pldop (&str);
7454 /* This is an extension to accept a 0..31 immediate. */
7455 if (val == PARSE_FAIL)
7456 po_imm_or_fail (0, 31);
7457 inst.base.operands[i].prfop = aarch64_prfops + val;
7458 break;
7459
7460 case AARCH64_OPND_BARRIER_PSB:
7461 val = parse_barrier_psb (&str, &(info->hint_option));
7462 if (val == PARSE_FAIL)
7463 goto failure;
7464 break;
7465
7466 case AARCH64_OPND_BTI_TARGET:
7467 val = parse_bti_operand (&str, &(info->hint_option));
7468 if (val == PARSE_FAIL)
7469 goto failure;
7470 break;
7471
7472 case AARCH64_OPND_SME_ZAda_2b:
7473 case AARCH64_OPND_SME_ZAda_3b:
7474 val = parse_sme_zada_operand (&str, &qualifier);
7475 if (val == PARSE_FAIL)
7476 goto failure;
7477 info->reg.regno = val;
7478 info->qualifier = qualifier;
7479 break;
7480
7481 case AARCH64_OPND_SME_ZA_HV_idx_src:
7482 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7483 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7484 {
7485 enum sme_hv_slice slice_indicator;
7486 int vector_select_register;
7487 int imm;
7488
7489 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7490 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7491 &slice_indicator,
7492 &vector_select_register,
7493 &imm,
7494 &qualifier);
7495 else
7496 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7497 &vector_select_register,
7498 &imm,
7499 &qualifier);
7500 if (val == PARSE_FAIL)
7501 goto failure;
7502 info->za_tile_vector.regno = val;
7503 info->za_tile_vector.index.regno = vector_select_register;
7504 info->za_tile_vector.index.imm = imm;
7505 info->za_tile_vector.v = slice_indicator;
7506 info->qualifier = qualifier;
7507 break;
7508 }
7509
7510 case AARCH64_OPND_SME_list_of_64bit_tiles:
7511 val = parse_sme_list_of_64bit_tiles (&str);
7512 if (val == PARSE_FAIL)
7513 goto failure;
7514 info->imm.value = val;
7515 break;
7516
7517 case AARCH64_OPND_SME_ZA_array:
7518 {
7519 int imm;
7520 val = parse_sme_za_array (&str, &imm);
7521 if (val == PARSE_FAIL)
7522 goto failure;
7523 info->za_tile_vector.index.regno = val;
7524 info->za_tile_vector.index.imm = imm;
7525 break;
7526 }
7527
7528 case AARCH64_OPND_MOPS_ADDR_Rd:
7529 case AARCH64_OPND_MOPS_ADDR_Rs:
7530 po_char_or_fail ('[');
7531 if (!parse_x0_to_x30 (&str, info))
7532 goto failure;
7533 po_char_or_fail (']');
7534 po_char_or_fail ('!');
7535 break;
7536
7537 case AARCH64_OPND_MOPS_WB_Rn:
7538 if (!parse_x0_to_x30 (&str, info))
7539 goto failure;
7540 po_char_or_fail ('!');
7541 break;
7542
7543 default:
7544 as_fatal (_("unhandled operand code %d"), operands[i]);
7545 }
7546
7547 /* If we get here, this operand was successfully parsed. */
7548 inst.base.operands[i].present = 1;
7549 continue;
7550
7551 failure:
7552 /* The parse routine should already have set the error, but in case
7553 not, set a default one here. */
7554 if (! error_p ())
7555 set_default_error ();
7556
7557 if (! backtrack_pos)
7558 goto parse_operands_return;
7559
7560 {
7561 /* We reach here because this operand is marked as optional, and
7562 either no operand was supplied or the operand was supplied but it
7563 was syntactically incorrect. In the latter case we report an
7564 error. In the former case we perform a few more checks before
7565 dropping through to the code to insert the default operand. */
7566
7567 char *tmp = backtrack_pos;
7568 char endchar = END_OF_INSN;
7569
7570 if (i != (aarch64_num_of_operands (opcode) - 1))
7571 endchar = ',';
7572 skip_past_char (&tmp, ',');
7573
7574 if (*tmp != endchar)
7575 /* The user has supplied an operand in the wrong format. */
7576 goto parse_operands_return;
7577
7578 /* Make sure there is not a comma before the optional operand.
7579 For example the fifth operand of 'sys' is optional:
7580
7581 sys #0,c0,c0,#0, <--- wrong
7582 sys #0,c0,c0,#0 <--- correct. */
7583 if (comma_skipped_p && i && endchar == END_OF_INSN)
7584 {
7585 set_fatal_syntax_error
7586 (_("unexpected comma before the omitted optional operand"));
7587 goto parse_operands_return;
7588 }
7589 }
7590
7591 /* Reaching here means we are dealing with an optional operand that is
7592 omitted from the assembly line. */
7593 gas_assert (optional_operand_p (opcode, i));
7594 info->present = 0;
7595 process_omitted_operand (operands[i], opcode, i, info);
7596
7597 /* Try again, skipping the optional operand at backtrack_pos. */
7598 str = backtrack_pos;
7599 backtrack_pos = 0;
7600
7601 /* Clear any error record after the omitted optional operand has been
7602 successfully handled. */
7603 clear_error ();
7604 }
7605
7606 /* Check if we have parsed all the operands. */
7607 if (*str != '\0' && ! error_p ())
7608 {
7609 /* Set I to the index of the last present operand; this is
7610 for the purpose of diagnostics. */
7611 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7612 ;
7613 set_fatal_syntax_error
7614 (_("unexpected characters following instruction"));
7615 }
7616
7617 parse_operands_return:
7618
7619 if (error_p ())
7620 {
7621 DEBUG_TRACE ("parsing FAIL: %s - %s",
7622 operand_mismatch_kind_names[get_error_kind ()],
7623 get_error_message ());
7624 /* Record the operand error properly; this is useful when there
7625 are multiple instruction templates for a mnemonic name, so that
7626 later on, we can select the error that most closely describes
7627 the problem. */
7628 record_operand_error (opcode, i, get_error_kind (),
7629 get_error_message ());
7630 return false;
7631 }
7632 else
7633 {
7634 DEBUG_TRACE ("parsing SUCCESS");
7635 return true;
7636 }
7637 }
7638
7639 /* Apply some fix-ups to provide programmer-friendly features while
7640    keeping libopcodes happy, i.e. libopcodes only accepts
7641    the preferred architectural syntax.
7642    Return FALSE if there is any failure; otherwise return TRUE. */
7643
7644 static bool
7645 programmer_friendly_fixup (aarch64_instruction *instr)
7646 {
7647 aarch64_inst *base = &instr->base;
7648 const aarch64_opcode *opcode = base->opcode;
7649 enum aarch64_op op = opcode->op;
7650 aarch64_opnd_info *operands = base->operands;
7651
7652 DEBUG_TRACE ("enter");
7653
7654 switch (opcode->iclass)
7655 {
7656 case testbranch:
7657 /* TBNZ Xn|Wn, #uimm6, label
7658 Test and Branch Not Zero: conditionally jumps to label if bit number
7659 uimm6 in register Xn is not zero. The bit number implies the width of
7660 the register, which may be written and should be disassembled as Wn if
7661 uimm is less than 32. */
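      /* Illustrative examples: "tbnz w0, #3, lab" is accepted and
         requalified below to the X form, whereas "tbnz w0, #35, lab" is
         rejected because bit 35 cannot be named via a W register.  */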
7662 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7663 {
7664 if (operands[1].imm.value >= 32)
7665 {
7666 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7667 0, 31);
7668 return false;
7669 }
7670 operands[0].qualifier = AARCH64_OPND_QLF_X;
7671 }
7672 break;
7673 case loadlit:
7674 /* LDR Wt, label | =value
7675 As a convenience, assemblers will typically permit the notation
7676 "=value" in conjunction with the pc-relative literal load instructions
7677 to automatically place an immediate value or symbolic address in a
7678 nearby literal pool and generate a hidden label which references it.
7679 ISREG has been set to 0 in the case of =value. */
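      /* For example (illustrative): "ldr x0, =0x1234" places the constant
         in a nearby literal pool and rewrites the operand to a pc-relative
         load of the pool entry.  */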
7680 if (instr->gen_lit_pool
7681 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
7682 {
7683 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7684 if (op == OP_LDRSW_LIT)
7685 size = 4;
7686 if (instr->reloc.exp.X_op != O_constant
7687 && instr->reloc.exp.X_op != O_big
7688 && instr->reloc.exp.X_op != O_symbol)
7689 {
7690 record_operand_error (opcode, 1,
7691 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7692 _("constant expression expected"));
7693 return false;
7694 }
7695 if (! add_to_lit_pool (&instr->reloc.exp, size))
7696 {
7697 record_operand_error (opcode, 1,
7698 AARCH64_OPDE_OTHER_ERROR,
7699 _("literal pool insertion failed"));
7700 return false;
7701 }
7702 }
7703 break;
7704 case log_shift:
7705 case bitfield:
7706 /* UXT[BHW] Wd, Wn
7707 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
7708 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
7709 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7710 A programmer-friendly assembler should accept a destination Xd in
7711 place of Wd; however, that is not the preferred form for disassembly.
7712 */
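      /* For example (illustrative): "uxtb x1, w2" is accepted here and
         treated as "uxtb w1, w2".  */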
7713 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7714 && operands[1].qualifier == AARCH64_OPND_QLF_W
7715 && operands[0].qualifier == AARCH64_OPND_QLF_X)
7716 operands[0].qualifier = AARCH64_OPND_QLF_W;
7717 break;
7718
7719 case addsub_ext:
7720 {
7721 /* In the 64-bit form, the final register operand is written as Wm
7722 for all but the (possibly omitted) UXTX/LSL and SXTX
7723 operators.
7724 As a programmer-friendly assembler, we accept e.g.
7725 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7726 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
7727 int idx = aarch64_operand_index (opcode->operands,
7728 AARCH64_OPND_Rm_EXT);
7729 gas_assert (idx == 1 || idx == 2);
7730 if (operands[0].qualifier == AARCH64_OPND_QLF_X
7731 && operands[idx].qualifier == AARCH64_OPND_QLF_X
7732 && operands[idx].shifter.kind != AARCH64_MOD_LSL
7733 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7734 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7735 operands[idx].qualifier = AARCH64_OPND_QLF_W;
7736 }
7737 break;
7738
7739 default:
7740 break;
7741 }
7742
7743 DEBUG_TRACE ("exit with SUCCESS");
7744 return true;
7745 }
7746
7747 /* Check for loads and stores that will cause unpredictable behavior. */
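/* Illustrative examples of the cases warned about below:
     "ldr x0, [x0, #8]!"   -- writeback to the transfer register;
     "ldp x0, x0, [x1]"    -- load pair into the same register;
     "stxr w0, x0, [x1]"   -- identical status and transfer registers.  */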
7748
7749 static void
7750 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
7751 {
7752 aarch64_inst *base = &instr->base;
7753 const aarch64_opcode *opcode = base->opcode;
7754 const aarch64_opnd_info *opnds = base->operands;
7755 switch (opcode->iclass)
7756 {
7757 case ldst_pos:
7758 case ldst_imm9:
7759 case ldst_imm10:
7760 case ldst_unscaled:
7761 case ldst_unpriv:
7762 /* Loading/storing the base register is unpredictable if writeback. */
7763 if ((aarch64_get_operand_class (opnds[0].type)
7764 == AARCH64_OPND_CLASS_INT_REG)
7765 && opnds[0].reg.regno == opnds[1].addr.base_regno
7766 && opnds[1].addr.base_regno != REG_SP
7767 /* Exempt STG/STZG/ST2G/STZ2G. */
7768 && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
7769 && opnds[1].addr.writeback)
7770 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7771 break;
7772
7773 case ldstpair_off:
7774 case ldstnapair_offs:
7775 case ldstpair_indexed:
7776 /* Loading/storing the base register is unpredictable if writeback. */
7777 if ((aarch64_get_operand_class (opnds[0].type)
7778 == AARCH64_OPND_CLASS_INT_REG)
7779 && (opnds[0].reg.regno == opnds[2].addr.base_regno
7780 || opnds[1].reg.regno == opnds[2].addr.base_regno)
7781 && opnds[2].addr.base_regno != REG_SP
7782 /* Exempt STGP. */
7783 && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
7784 && opnds[2].addr.writeback)
7785 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7786 /* Load operations must load different registers. */
7787 if ((opcode->opcode & (1 << 22))
7788 && opnds[0].reg.regno == opnds[1].reg.regno)
7789 as_warn (_("unpredictable load of register pair -- `%s'"), str);
7790 break;
7791
7792 case ldstexcl:
7793 if ((aarch64_get_operand_class (opnds[0].type)
7794 == AARCH64_OPND_CLASS_INT_REG)
7795 && (aarch64_get_operand_class (opnds[1].type)
7796 == AARCH64_OPND_CLASS_INT_REG))
7797 {
7798 if ((opcode->opcode & (1 << 22)))
7799 {
7800 /* It is unpredictable if a load-exclusive pair has Rt == Rt2. */
7801 if ((opcode->opcode & (1 << 21))
7802 && opnds[0].reg.regno == opnds[1].reg.regno)
7803 as_warn (_("unpredictable load of register pair -- `%s'"), str);
7804 }
7805 else
7806 {
7807 /* Store-Exclusive is unpredictable if Rt == Rs. */
7808 if (opnds[0].reg.regno == opnds[1].reg.regno)
7809 as_warn
7810 (_("unpredictable: identical transfer and status registers"
7811 " --`%s'"),str);
7812
7813 if (opnds[0].reg.regno == opnds[2].reg.regno)
7814 {
7815 if (!(opcode->opcode & (1 << 21)))
7816 /* Store-Exclusive is unpredictable if Rn == Rs. */
7817 as_warn
7818 (_("unpredictable: identical base and status registers"
7819 " --`%s'"),str);
7820 else
7821 /* Store-Exclusive pair is unpredictable if Rt2 == Rs. */
7822 as_warn
7823 (_("unpredictable: "
7824 "identical transfer and status registers"
7825 " --`%s'"),str);
7826 }
7827
7828 /* Store-Exclusive pair is unpredictable if Rn == Rs. */
7829 if ((opcode->opcode & (1 << 21))
7830 && opnds[0].reg.regno == opnds[3].reg.regno
7831 && opnds[3].reg.regno != REG_SP)
7832 as_warn (_("unpredictable: identical base and status registers"
7833 " --`%s'"),str);
7834 }
7835 }
7836 break;
7837
7838 default:
7839 break;
7840 }
7841 }
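/* Illustrative examples of inputs that trigger the warnings above (a sketch,
   not part of the original source):

     ldr  x0, [x0], #8     -> base register is also the transfer register,
                              with writeback
     ldp  x1, x1, [x2]     -> load pair into identical registers
     stxr w0, x0, [x1]     -> store-exclusive with identical status and
                              transfer registers  */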
7842
7843 static void
7844 force_automatic_sequence_close (void)
7845 {
7846 struct aarch64_segment_info_type *tc_seg_info;
7847
7848 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7849 if (tc_seg_info->insn_sequence.instr)
7850 {
7851 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7852 _("previous `%s' sequence has not been closed"),
7853 tc_seg_info->insn_sequence.instr->opcode->name);
7854 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7855 }
7856 }
7857
7858 /* A wrapper function to interface with libopcodes on encoding and
7859 record the error message if there is any.
7860
7861 Return TRUE on success; otherwise return FALSE. */
7862
7863 static bool
7864 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7865 aarch64_insn *code)
7866 {
7867 aarch64_operand_error error_info;
7868 memset (&error_info, '\0', sizeof (error_info));
7869 error_info.kind = AARCH64_OPDE_NIL;
7870 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7871 && !error_info.non_fatal)
7872 return true;
7873
7874 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7875 record_operand_error_info (opcode, &error_info);
7876 return error_info.non_fatal;
7877 }
7878
7879 #ifdef DEBUG_AARCH64
7880 static inline void
7881 dump_opcode_operands (const aarch64_opcode *opcode)
7882 {
7883 int i = 0;
7884 while (opcode->operands[i] != AARCH64_OPND_NIL)
7885 {
7886 aarch64_verbose ("\t\t opnd%d: %s", i,
7887 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7888 ? aarch64_get_operand_name (opcode->operands[i])
7889 : aarch64_get_operand_desc (opcode->operands[i]));
7890 ++i;
7891 }
7892 }
7893 #endif /* DEBUG_AARCH64 */
7894
7895 /* This is the guts of the machine-dependent assembler. STR points to a
7896 machine dependent instruction. This function is supposed to emit
7897 the frags/bytes it assembles to. */
7898
7899 void
7900 md_assemble (char *str)
7901 {
7902 templates *template;
7903 const aarch64_opcode *opcode;
7904 struct aarch64_segment_info_type *tc_seg_info;
7905 aarch64_inst *inst_base;
7906 unsigned saved_cond;
7907
7908 /* Align the previous label if needed. */
7909 if (last_label_seen != NULL)
7910 {
7911 symbol_set_frag (last_label_seen, frag_now);
7912 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
7913 S_SET_SEGMENT (last_label_seen, now_seg);
7914 }
7915
7916 /* Update the current insn_sequence from the segment. */
7917 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7918 insn_sequence = &tc_seg_info->insn_sequence;
7919 tc_seg_info->last_file = as_where (&tc_seg_info->last_line);
7920
7921 inst.reloc.type = BFD_RELOC_UNUSED;
7922
7923 DEBUG_TRACE ("\n\n");
7924 DEBUG_TRACE ("==============================");
7925 DEBUG_TRACE ("Enter md_assemble with %s", str);
7926
7927 /* Scan up to the end of the mnemonic, which must end in whitespace,
7928 '.', or end of string. */
7929 char *p = str;
7930 char *dot = 0;
7931 for (; is_part_of_name (*p); p++)
7932 if (*p == '.' && !dot)
7933 dot = p;
7934
7935 if (p == str)
7936 {
7937 as_bad (_("unknown mnemonic -- `%s'"), str);
7938 return;
7939 }
7940
7941 if (!dot && create_register_alias (str, p))
7942 return;
7943
7944 template = opcode_lookup (str, dot, p);
7945 if (!template)
7946 {
7947 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
7948 str);
7949 return;
7950 }
7951
7952 skip_whitespace (p);
7953 if (*p == ',')
7954 {
7955 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
7956 get_mnemonic_name (str), str);
7957 return;
7958 }
7959
7960 init_operand_error_report ();
7961
7962 /* Sections are assumed to start aligned. In an executable section, there is no
7963 MAP_DATA symbol pending. So we only align the address during
7964 MAP_DATA --> MAP_INSN transition.
7965 For other sections, this is not guaranteed. */
7966 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
7967 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
7968 frag_align_code (2, 0);
7969
7970 saved_cond = inst.cond;
7971 reset_aarch64_instruction (&inst);
7972 inst.cond = saved_cond;
7973
7974 /* Iterate through all opcode entries with the same mnemonic name. */
7975 do
7976 {
7977 opcode = template->opcode;
7978
7979 DEBUG_TRACE ("opcode %s found", opcode->name);
7980 #ifdef DEBUG_AARCH64
7981 if (debug_dump)
7982 dump_opcode_operands (opcode);
7983 #endif /* DEBUG_AARCH64 */
7984
7985 mapping_state (MAP_INSN);
7986
7987 inst_base = &inst.base;
7988 inst_base->opcode = opcode;
7989
7990 /* Truly conditionally executed instructions, e.g. b.cond. */
7991 if (opcode->flags & F_COND)
7992 {
7993 gas_assert (inst.cond != COND_ALWAYS);
7994 inst_base->cond = get_cond_from_value (inst.cond);
7995 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
7996 }
7997 else if (inst.cond != COND_ALWAYS)
7998 {
7999 /* We should not get here: the assembly looks like a
8000 conditional instruction but the opcode found is unconditional. */
8001 gas_assert (0);
8002 continue;
8003 }
8004
8005 if (parse_operands (p, opcode)
8006 && programmer_friendly_fixup (&inst)
8007 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
8008 {
8009 /* Check that this instruction is supported for this CPU. */
8010 if (!opcode->avariant
8011 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
8012 {
8013 as_bad (_("selected processor does not support `%s'"), str);
8014 return;
8015 }
8016
8017 warn_unpredictable_ldst (&inst, str);
8018
8019 if (inst.reloc.type == BFD_RELOC_UNUSED
8020 || !inst.reloc.need_libopcodes_p)
8021 output_inst (NULL);
8022 else
8023 {
8024 /* If there is relocation generated for the instruction,
8025 store the instruction information for the future fix-up. */
8026 struct aarch64_inst *copy;
8027 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
8028 copy = XNEW (struct aarch64_inst);
8029 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
8030 output_inst (copy);
8031 }
8032
8033 /* Issue non-fatal messages if any. */
8034 output_operand_error_report (str, true);
8035 return;
8036 }
8037
8038 template = template->next;
8039 if (template != NULL)
8040 {
8041 reset_aarch64_instruction (&inst);
8042 inst.cond = saved_cond;
8043 }
8044 }
8045 while (template != NULL);
8046
8047 /* Issue the error messages if any. */
8048 output_operand_error_report (str, false);
8049 }
8050
8051 /* Various frobbings of labels and their addresses. */
8052
8053 void
8054 aarch64_start_line_hook (void)
8055 {
8056 last_label_seen = NULL;
8057 }
8058
8059 void
8060 aarch64_frob_label (symbolS * sym)
8061 {
8062 last_label_seen = sym;
8063
8064 dwarf2_emit_label (sym);
8065 }
8066
8067 void
8068 aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
8069 {
8070 /* Check to see if we have a block to close. */
8071 force_automatic_sequence_close ();
8072 }
8073
8074 int
8075 aarch64_data_in_code (void)
8076 {
8077 if (startswith (input_line_pointer + 1, "data:"))
8078 {
8079 *input_line_pointer = '/';
8080 input_line_pointer += 5;
8081 *input_line_pointer = 0;
8082 return 1;
8083 }
8084
8085 return 0;
8086 }
8087
8088 char *
8089 aarch64_canonicalize_symbol_name (char *name)
8090 {
8091 int len;
8092
8093 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
8094 *(name + len - 5) = 0;
8095
8096 return name;
8097 }
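/* For instance (illustrative, not from the original source), a symbol
   recorded internally as "foo/data" is restored to plain "foo" here.  */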
8098 \f
8099 /* Table of all register names defined by default. The user can
8100 define additional names with .req. Note that all register names
8101 should appear in both upper and lowercase variants. Some registers
8102 also have mixed-case names. */
8103
8104 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8105 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8106 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8107 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8108 #define REGSET16(p,t) \
8109 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8110 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8111 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8112 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8113 #define REGSET16S(p,s,t) \
8114 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8115 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8116 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8117 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8118 #define REGSET31(p,t) \
8119 REGSET16(p, t), \
8120 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8121 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8122 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8123 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8124 #define REGSET(p,t) \
8125 REGSET31(p,t), REGNUM(p,31,t)
8126
8127 /* These go into aarch64_reg_hsh hash-table. */
8128 static const reg_entry reg_names[] = {
8129 /* Integer registers. */
8130 REGSET31 (x, R_64), REGSET31 (X, R_64),
8131 REGSET31 (w, R_32), REGSET31 (W, R_32),
8132
8133 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8134 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8135 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8136 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8137 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8138 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8139
8140 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
8141 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
8142
8143 /* Floating-point single precision registers. */
8144 REGSET (s, FP_S), REGSET (S, FP_S),
8145
8146 /* Floating-point double precision registers. */
8147 REGSET (d, FP_D), REGSET (D, FP_D),
8148
8149 /* Floating-point half precision registers. */
8150 REGSET (h, FP_H), REGSET (H, FP_H),
8151
8152 /* Floating-point byte precision registers. */
8153 REGSET (b, FP_B), REGSET (B, FP_B),
8154
8155 /* Floating-point quad precision registers. */
8156 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8157
8158 /* FP/SIMD registers. */
8159 REGSET (v, VN), REGSET (V, VN),
8160
8161 /* SVE vector registers. */
8162 REGSET (z, ZN), REGSET (Z, ZN),
8163
8164 /* SVE predicate registers. */
8165 REGSET16 (p, PN), REGSET16 (P, PN),
8166
8167 /* SME ZA tile registers. */
8168 REGSET16 (za, ZA), REGSET16 (ZA, ZA),
8169
8170 /* SME ZA tile registers (horizontal slice). */
8171 REGSET16S (za, h, ZAH), REGSET16S (ZA, H, ZAH),
8172
8173 /* SME ZA tile registers (vertical slice). */
8174 REGSET16S (za, v, ZAV), REGSET16S (ZA, V, ZAV)
8175 };
8176
8177 #undef REGDEF
8178 #undef REGDEF_ALIAS
8179 #undef REGNUM
8180 #undef REGSET16
8181 #undef REGSET31
8182 #undef REGSET
8183
8184 #define N 1
8185 #define n 0
8186 #define Z 1
8187 #define z 0
8188 #define C 1
8189 #define c 0
8190 #define V 1
8191 #define v 0
8192 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
8193 static const asm_nzcv nzcv_names[] = {
8194 {"nzcv", B (n, z, c, v)},
8195 {"nzcV", B (n, z, c, V)},
8196 {"nzCv", B (n, z, C, v)},
8197 {"nzCV", B (n, z, C, V)},
8198 {"nZcv", B (n, Z, c, v)},
8199 {"nZcV", B (n, Z, c, V)},
8200 {"nZCv", B (n, Z, C, v)},
8201 {"nZCV", B (n, Z, C, V)},
8202 {"Nzcv", B (N, z, c, v)},
8203 {"NzcV", B (N, z, c, V)},
8204 {"NzCv", B (N, z, C, v)},
8205 {"NzCV", B (N, z, C, V)},
8206 {"NZcv", B (N, Z, c, v)},
8207 {"NZcV", B (N, Z, c, V)},
8208 {"NZCv", B (N, Z, C, v)},
8209 {"NZCV", B (N, Z, C, V)}
8210 };
8211
8212 #undef N
8213 #undef n
8214 #undef Z
8215 #undef z
8216 #undef C
8217 #undef c
8218 #undef V
8219 #undef v
8220 #undef B
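/* Illustrative use of the table above (an assumption about surface syntax
   rather than a definitive example): the flags name "nZCv" maps to
   B (0, 1, 1, 0) == 6, i.e. the Z and C bits, so a conditional-compare
   NZCV operand written with that name stands for the literal value 6.  */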
8221 \f
8222 /* MD interface: bits in the object file. */
8223
8224 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8225 for use in the a.out file, and store them in the array pointed to by buf.
8226 This knows about the endian-ness of the target machine and does
8227 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
8228 2 (short) and 4 (long). Floating numbers are put out as a series of
8229 LITTLENUMS (shorts, here at least). */
8230
8231 void
8232 md_number_to_chars (char *buf, valueT val, int n)
8233 {
8234 if (target_big_endian)
8235 number_to_chars_bigendian (buf, val, n);
8236 else
8237 number_to_chars_littleendian (buf, val, n);
8238 }
8239
8240 /* MD interface: Sections. */
8241
8242 /* Estimate the size of a frag before relaxing. Assume everything fits in
8243 4 bytes. */
8244
8245 int
8246 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8247 {
8248 fragp->fr_var = 4;
8249 return 4;
8250 }
8251
8252 /* Round up a section size to the appropriate boundary. */
8253
8254 valueT
8255 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8256 {
8257 return size;
8258 }
8259
8260 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8261 of an rs_align_code fragment.
8262
8263 Here we fill the frag with the appropriate info for padding the
8264 output stream. The resulting frag will consist of a fixed (fr_fix)
8265 and of a repeating (fr_var) part.
8266
8267 The fixed content is always emitted before the repeating content and
8268 these two parts are used as follows in constructing the output:
8269 - the fixed part will be used to align to a valid instruction word
8270 boundary, in case that we start at a misaligned address; as no
8271 executable instruction can live at the misaligned location, we
8272 simply fill with zeros;
8273 - the variable part will be used to cover the remaining padding and
8274 we fill using the AArch64 NOP instruction.
8275
8276 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
8277 enough storage space for up to 3 bytes for padding back to a valid
8278 instruction alignment and exactly 4 bytes to store the NOP pattern. */
8279
8280 void
8281 aarch64_handle_align (fragS * fragP)
8282 {
8283 /* NOP = d503201f */
8284 /* AArch64 instructions are always little-endian. */
8285 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
8286
8287 int bytes, fix, noop_size;
8288 char *p;
8289
8290 if (fragP->fr_type != rs_align_code)
8291 return;
8292
8293 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
8294 p = fragP->fr_literal + fragP->fr_fix;
8295
8296 #ifdef OBJ_ELF
8297 gas_assert (fragP->tc_frag_data.recorded);
8298 #endif
8299
8300 noop_size = sizeof (aarch64_noop);
8301
8302 fix = bytes & (noop_size - 1);
8303 if (fix)
8304 {
8305 #ifdef OBJ_ELF
8306 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
8307 #endif
8308 memset (p, 0, fix);
8309 p += fix;
8310 fragP->fr_fix += fix;
8311 }
8312
8313 if (noop_size)
8314 memcpy (p, aarch64_noop, noop_size);
8315 fragP->fr_var = noop_size;
8316 }
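/* Worked example (illustrative): if the gap to the next frag is 14 bytes,
   then fix == 2, so two zero bytes re-align the stream to a word boundary
   and the remaining 12 bytes are emitted as three copies of the 4-byte NOP
   pattern (1f 20 03 d5).  */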
8317
8318 /* Perform target specific initialisation of a frag.
8319 Note - despite the name this initialisation is not done when the frag
8320 is created, but only when its type is assigned. A frag can be created
8321 and used a long time before its type is set, so beware of assuming that
8322 this initialisation is performed first. */
8323
8324 #ifndef OBJ_ELF
8325 void
8326 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
8327 int max_chars ATTRIBUTE_UNUSED)
8328 {
8329 }
8330
8331 #else /* OBJ_ELF is defined. */
8332 void
8333 aarch64_init_frag (fragS * fragP, int max_chars)
8334 {
8335 /* Record a mapping symbol for alignment frags. We will delete this
8336 later if the alignment ends up empty. */
8337 if (!fragP->tc_frag_data.recorded)
8338 fragP->tc_frag_data.recorded = 1;
8339
8340 /* PR 21809: Do not set a mapping state for debug sections
8341 - it just confuses other tools. */
8342 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
8343 return;
8344
8345 switch (fragP->fr_type)
8346 {
8347 case rs_align_test:
8348 case rs_fill:
8349 mapping_state_2 (MAP_DATA, max_chars);
8350 break;
8351 case rs_align:
8352 /* PR 20364: We can get alignment frags in code sections,
8353 so do not just assume that we should use the MAP_DATA state. */
8354 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
8355 break;
8356 case rs_align_code:
8357 mapping_state_2 (MAP_INSN, max_chars);
8358 break;
8359 default:
8360 break;
8361 }
8362 }
8363 \f
8364 /* Initialize the DWARF-2 unwind information for this procedure. */
8365
8366 void
8367 tc_aarch64_frame_initial_instructions (void)
8368 {
8369 cfi_add_CFA_def_cfa (REG_SP, 0);
8370 }
8371 #endif /* OBJ_ELF */
8372
8373 /* Convert REGNAME to a DWARF-2 register number. */
8374
8375 int
8376 tc_aarch64_regname_to_dw2regnum (char *regname)
8377 {
8378 const reg_entry *reg = parse_reg (&regname);
8379 if (reg == NULL)
8380 return -1;
8381
8382 switch (reg->type)
8383 {
8384 case REG_TYPE_SP_32:
8385 case REG_TYPE_SP_64:
8386 case REG_TYPE_R_32:
8387 case REG_TYPE_R_64:
8388 return reg->number;
8389
8390 case REG_TYPE_FP_B:
8391 case REG_TYPE_FP_H:
8392 case REG_TYPE_FP_S:
8393 case REG_TYPE_FP_D:
8394 case REG_TYPE_FP_Q:
8395 return reg->number + 64;
8396
8397 default:
8398 break;
8399 }
8400 return -1;
8401 }
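/* Illustrative mappings produced by the function above: "x5" -> 5,
   "sp" -> 31, "d0" -> 64, "q7" -> 71; names that are neither core nor
   scalar FP registers (e.g. "v0") yield -1.  */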
8402
8403 /* Implement DWARF2_ADDR_SIZE. */
8404
8405 int
8406 aarch64_dwarf2_addr_size (void)
8407 {
8408 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8409 if (ilp32_p)
8410 return 4;
8411 #endif
8412 return bfd_arch_bits_per_address (stdoutput) / 8;
8413 }
8414
8415 /* MD interface: Symbol and relocation handling. */
8416
8417 /* Return the address within the segment that a PC-relative fixup is
8418 relative to. For AArch64 PC-relative fixups applied to instructions
8419 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8420
8421 long
8422 md_pcrel_from_section (fixS * fixP, segT seg)
8423 {
8424 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8425
8426 /* If this is pc-relative and we are going to emit a relocation
8427 then we just want to put out any pipeline compensation that the linker
8428 will need. Otherwise we want to use the calculated base. */
8429 if (fixP->fx_pcrel
8430 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8431 || aarch64_force_relocation (fixP)))
8432 base = 0;
8433
8434 /* AArch64 should be consistent for all pc-relative relocations. */
8435 return base + AARCH64_PCREL_OFFSET;
8436 }
8437
8438 /* Under ELF we need to provide a default for _GLOBAL_OFFSET_TABLE_.
8439 Otherwise there is no need to provide default values for symbols. */
8440
8441 symbolS *
8442 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8443 {
8444 #ifdef OBJ_ELF
8445 if (name[0] == '_' && name[1] == 'G'
8446 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8447 {
8448 if (!GOT_symbol)
8449 {
8450 if (symbol_find (name))
8451 as_bad (_("GOT already in the symbol table"));
8452
8453 GOT_symbol = symbol_new (name, undefined_section,
8454 &zero_address_frag, 0);
8455 }
8456
8457 return GOT_symbol;
8458 }
8459 #endif
8460
8461 return 0;
8462 }
8463
8464 /* Return non-zero if the indicated VALUE has overflowed the maximum
8465 range expressible by an unsigned number with the indicated number of
8466 BITS. */
8467
8468 static bool
8469 unsigned_overflow (valueT value, unsigned bits)
8470 {
8471 valueT lim;
8472 if (bits >= sizeof (valueT) * 8)
8473 return false;
8474 lim = (valueT) 1 << bits;
8475 return (value >= lim);
8476 }
8477
8478
8479 /* Return non-zero if the indicated VALUE has overflowed the maximum
8480 range expressible by a signed number with the indicated number of
8481 BITS. */
8482
8483 static bool
8484 signed_overflow (offsetT value, unsigned bits)
8485 {
8486 offsetT lim;
8487 if (bits >= sizeof (offsetT) * 8)
8488 return false;
8489 lim = (offsetT) 1 << (bits - 1);
8490 return (value < -lim || value >= lim);
8491 }
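/* Worked examples for the two helpers above (illustrative):
   unsigned_overflow (0xffff, 16) == false, unsigned_overflow (0x10000, 16)
   == true; signed_overflow (32767, 16) == false, signed_overflow (32768, 16)
   == true, signed_overflow (-32768, 16) == false.  */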
8492
8493 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
8494 unsigned immediate offset load/store instruction, try to encode it as
8495 an unscaled, 9-bit, signed immediate offset load/store instruction.
8496 Return TRUE if it is successful; otherwise return FALSE.
8497
8498 As a programmer-friendly assembler, we generate LDUR/STUR instructions
8499 in response to the standard LDR/STR mnemonics when the immediate offset is
8500 unambiguous, i.e. when it is negative or unaligned. */
8501
8502 static bool
8503 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
8504 {
8505 int idx;
8506 enum aarch64_op new_op;
8507 const aarch64_opcode *new_opcode;
8508
8509 gas_assert (instr->opcode->iclass == ldst_pos);
8510
8511 switch (instr->opcode->op)
8512 {
8513 case OP_LDRB_POS:new_op = OP_LDURB; break;
8514 case OP_STRB_POS: new_op = OP_STURB; break;
8515 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
8516 case OP_LDRH_POS: new_op = OP_LDURH; break;
8517 case OP_STRH_POS: new_op = OP_STURH; break;
8518 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
8519 case OP_LDR_POS: new_op = OP_LDUR; break;
8520 case OP_STR_POS: new_op = OP_STUR; break;
8521 case OP_LDRF_POS: new_op = OP_LDURV; break;
8522 case OP_STRF_POS: new_op = OP_STURV; break;
8523 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
8524 case OP_PRFM_POS: new_op = OP_PRFUM; break;
8525 default: new_op = OP_NIL; break;
8526 }
8527
8528 if (new_op == OP_NIL)
8529 return false;
8530
8531 new_opcode = aarch64_get_opcode (new_op);
8532 gas_assert (new_opcode != NULL);
8533
8534 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
8535 instr->opcode->op, new_opcode->op);
8536
8537 aarch64_replace_opcode (instr, new_opcode);
8538
8539 /* Clear the ADDR_SIMM9 operand's qualifier; otherwise the
8540 qualifier matching may fail because the out-of-date qualifier will
8541 prevent the operand from being updated with a new and correct qualifier. */
8542 idx = aarch64_operand_index (instr->opcode->operands,
8543 AARCH64_OPND_ADDR_SIMM9);
8544 gas_assert (idx == 1);
8545 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
8546
8547 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
8548
8549 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
8550 insn_sequence))
8551 return false;
8552
8553 return true;
8554 }
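/* For instance (illustrative): if the offset of `ldr x0, [x1, #off]'
   resolves to -8, the scaled, unsigned LDR form cannot encode it, and the
   function above re-encodes the instruction as `ldur x0, [x1, #-8]'.  */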
8555
8556 /* Called by fix_insn to fix a MOV immediate alias instruction.
8557
8558 The operand is for a generic move immediate instruction, which is an alias
8559 instruction that generates a single MOVZ, MOVN or ORR instruction to load
8560 a 32-bit/64-bit immediate value into a general register. An assembler error
8561 shall result if the immediate cannot be created by a single one of these
8562 instructions. If there is a choice, then to ensure reversibility an
8563 assembler must prefer MOVZ to MOVN, and MOVZ or MOVN to ORR. */
8564
8565 static void
8566 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
8567 {
8568 const aarch64_opcode *opcode;
8569
8570 /* Need to check if the destination is SP/ZR. The check has to be done
8571 before any aarch64_replace_opcode. */
8572 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
8573 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
8574
8575 instr->operands[1].imm.value = value;
8576 instr->operands[1].skip = 0;
8577
8578 if (try_mov_wide_p)
8579 {
8580 /* Try the MOVZ alias. */
8581 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
8582 aarch64_replace_opcode (instr, opcode);
8583 if (aarch64_opcode_encode (instr->opcode, instr,
8584 &instr->value, NULL, NULL, insn_sequence))
8585 {
8586 put_aarch64_insn (buf, instr->value);
8587 return;
8588 }
8589 /* Try the MOVN alias. */
8590 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
8591 aarch64_replace_opcode (instr, opcode);
8592 if (aarch64_opcode_encode (instr->opcode, instr,
8593 &instr->value, NULL, NULL, insn_sequence))
8594 {
8595 put_aarch64_insn (buf, instr->value);
8596 return;
8597 }
8598 }
8599
8600 if (try_mov_bitmask_p)
8601 {
8602 /* Try the ORR alias. */
8603 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
8604 aarch64_replace_opcode (instr, opcode);
8605 if (aarch64_opcode_encode (instr->opcode, instr,
8606 &instr->value, NULL, NULL, insn_sequence))
8607 {
8608 put_aarch64_insn (buf, instr->value);
8609 return;
8610 }
8611 }
8612
8613 as_bad_where (fixP->fx_file, fixP->fx_line,
8614 _("immediate cannot be moved by a single instruction"));
8615 }
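/* Illustrative outcomes of the fix-up above, assuming the symbolic
   immediate resolves to the given value (a sketch, not exhaustive):
     0x12340000          -> MOVZ Xd, #0x1234, lsl #16
     0xfffffffffffffffe  -> MOVN Xd, #1
     0x5555555555555555  -> ORR  Xd, xzr, #0x5555555555555555
     0x12345             -> "immediate cannot be moved by a single
                             instruction"  */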
8616
8617 /* An instruction operand which is immediate-related may refer to a symbol
8618 in the assembly, e.g.
8619
8620 mov w0, u32
8621 .set u32, 0x00ffff00
8622
8623 At the time when the assembly instruction is parsed, a referenced symbol,
8624 like 'u32' in the above example, may not have been seen; a fixS is created
8625 in such a case and is handled here after symbols have been resolved.
8626 Instruction is fixed up with VALUE using the information in *FIXP plus
8627 extra information in FLAGS.
8628
8629 This function is called by md_apply_fix to fix up instructions that need
8630 a fix-up described above but does not involve any linker-time relocation. */
8631
8632 static void
8633 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
8634 {
8635 int idx;
8636 uint32_t insn;
8637 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8638 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
8639 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
8640
8641 if (new_inst)
8642 {
8643 /* Now the instruction is about to be fixed-up, so the operand that
8644 was previously marked as 'ignored' needs to be unmarked in order
8645 to get the encoding done properly. */
8646 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
8647 new_inst->operands[idx].skip = 0;
8648 }
8649
8650 gas_assert (opnd != AARCH64_OPND_NIL);
8651
8652 switch (opnd)
8653 {
8654 case AARCH64_OPND_EXCEPTION:
8655 case AARCH64_OPND_UNDEFINED:
8656 if (unsigned_overflow (value, 16))
8657 as_bad_where (fixP->fx_file, fixP->fx_line,
8658 _("immediate out of range"));
8659 insn = get_aarch64_insn (buf);
8660 insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
8661 put_aarch64_insn (buf, insn);
8662 break;
8663
8664 case AARCH64_OPND_AIMM:
8665 /* ADD or SUB with immediate.
8666 NOTE this assumes we come here with a add/sub shifted reg encoding
8667 3 322|2222|2 2 2 21111 111111
8668 1 098|7654|3 2 1 09876 543210 98765 43210
8669 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
8670 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
8671 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
8672 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
8673 ->
8674 3 322|2222|2 2 221111111111
8675 1 098|7654|3 2 109876543210 98765 43210
8676 11000000 sf 001|0001|shift imm12 Rn Rd ADD
8677 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
8678 51000000 sf 101|0001|shift imm12 Rn Rd SUB
8679 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
8680 Fields sf Rn Rd are already set. */
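/* For example (illustrative), if the immediate resolves to 0x1000, it does
   not fit in the 12-bit field, but 0x1000 == (1 << 12), so it is re-encoded
   below as imm12 == 1 with the shift amount set to 12.  */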
8681 insn = get_aarch64_insn (buf);
8682 if (value < 0)
8683 {
8684 /* Add <-> sub. */
8685 insn = reencode_addsub_switch_add_sub (insn);
8686 value = -value;
8687 }
8688
8689 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
8690 && unsigned_overflow (value, 12))
8691 {
8692 /* Try to shift the value by 12 to make it fit. */
8693 if (((value >> 12) << 12) == value
8694 && ! unsigned_overflow (value, 12 + 12))
8695 {
8696 value >>= 12;
8697 insn |= encode_addsub_imm_shift_amount (1);
8698 }
8699 }
8700
8701 if (unsigned_overflow (value, 12))
8702 as_bad_where (fixP->fx_file, fixP->fx_line,
8703 _("immediate out of range"));
8704
8705 insn |= encode_addsub_imm (value);
8706
8707 put_aarch64_insn (buf, insn);
8708 break;
8709
8710 case AARCH64_OPND_SIMD_IMM:
8711 case AARCH64_OPND_SIMD_IMM_SFT:
8712 case AARCH64_OPND_LIMM:
8713 /* Bit mask immediate. */
8714 gas_assert (new_inst != NULL);
8715 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
8716 new_inst->operands[idx].imm.value = value;
8717 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
8718 &new_inst->value, NULL, NULL, insn_sequence))
8719 put_aarch64_insn (buf, new_inst->value);
8720 else
8721 as_bad_where (fixP->fx_file, fixP->fx_line,
8722 _("invalid immediate"));
8723 break;
8724
8725 case AARCH64_OPND_HALF:
8726 /* 16-bit unsigned immediate. */
8727 if (unsigned_overflow (value, 16))
8728 as_bad_where (fixP->fx_file, fixP->fx_line,
8729 _("immediate out of range"));
8730 insn = get_aarch64_insn (buf);
8731 insn |= encode_movw_imm (value & 0xffff);
8732 put_aarch64_insn (buf, insn);
8733 break;
8734
8735 case AARCH64_OPND_IMM_MOV:
8736 /* Operand for a generic move immediate instruction, which is
8737 an alias instruction that generates a single MOVZ, MOVN or ORR
8738 instruction to load a 32-bit/64-bit immediate value into a general
8739 register. An assembler error shall result if the immediate cannot be
8740 created by a single one of these instructions. If there is a choice,
8741 then to ensure reversibility an assembler must prefer MOVZ to MOVN,
8742 and MOVZ or MOVN to ORR. */
8743 gas_assert (new_inst != NULL);
8744 fix_mov_imm_insn (fixP, buf, new_inst, value);
8745 break;
8746
8747 case AARCH64_OPND_ADDR_SIMM7:
8748 case AARCH64_OPND_ADDR_SIMM9:
8749 case AARCH64_OPND_ADDR_SIMM9_2:
8750 case AARCH64_OPND_ADDR_SIMM10:
8751 case AARCH64_OPND_ADDR_UIMM12:
8752 case AARCH64_OPND_ADDR_SIMM11:
8753 case AARCH64_OPND_ADDR_SIMM13:
8754 /* Immediate offset in an address. */
8755 insn = get_aarch64_insn (buf);
8756
8757 gas_assert (new_inst != NULL && new_inst->value == insn);
8758 gas_assert (new_inst->opcode->operands[1] == opnd
8759 || new_inst->opcode->operands[2] == opnd);
8760
8761 /* Get the index of the address operand. */
8762 if (new_inst->opcode->operands[1] == opnd)
8763 /* e.g. STR <Xt>, [<Xn|SP>{, #<imm>}]. */
8764 idx = 1;
8765 else
8766 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
8767 idx = 2;
8768
8769 /* Update the resolved offset value. */
8770 new_inst->operands[idx].addr.offset.imm = value;
8771
8772 /* Encode/fix-up. */
8773 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
8774 &new_inst->value, NULL, NULL, insn_sequence))
8775 {
8776 put_aarch64_insn (buf, new_inst->value);
8777 break;
8778 }
8779 else if (new_inst->opcode->iclass == ldst_pos
8780 && try_to_encode_as_unscaled_ldst (new_inst))
8781 {
8782 put_aarch64_insn (buf, new_inst->value);
8783 break;
8784 }
8785
8786 as_bad_where (fixP->fx_file, fixP->fx_line,
8787 _("immediate offset out of range"));
8788 break;
8789
8790 default:
8791 gas_assert (0);
8792 as_fatal (_("unhandled operand code %d"), opnd);
8793 }
8794 }
8795
8796 /* Apply a fixup (fixP) to segment data, once it has been determined
8797 by our caller that we have all the info we need to fix it up.
8798
8799 Parameter valP is the pointer to the value of the bits. */
8800
8801 void
8802 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
8803 {
8804 offsetT value = *valP;
8805 uint32_t insn;
8806 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8807 int scale;
8808 unsigned flags = fixP->fx_addnumber;
8809
8810 DEBUG_TRACE ("\n\n");
8811 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
8812 DEBUG_TRACE ("Enter md_apply_fix");
8813
8814 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
8815
8816 /* Note whether this will delete the relocation. */
8817
8818 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
8819 fixP->fx_done = 1;
8820
8821 /* Process the relocations. */
8822 switch (fixP->fx_r_type)
8823 {
8824 case BFD_RELOC_NONE:
8825 /* This will need to go in the object file. */
8826 fixP->fx_done = 0;
8827 break;
8828
8829 case BFD_RELOC_8:
8830 case BFD_RELOC_8_PCREL:
8831 if (fixP->fx_done || !seg->use_rela_p)
8832 md_number_to_chars (buf, value, 1);
8833 break;
8834
8835 case BFD_RELOC_16:
8836 case BFD_RELOC_16_PCREL:
8837 if (fixP->fx_done || !seg->use_rela_p)
8838 md_number_to_chars (buf, value, 2);
8839 break;
8840
8841 case BFD_RELOC_32:
8842 case BFD_RELOC_32_PCREL:
8843 if (fixP->fx_done || !seg->use_rela_p)
8844 md_number_to_chars (buf, value, 4);
8845 break;
8846
8847 case BFD_RELOC_64:
8848 case BFD_RELOC_64_PCREL:
8849 if (fixP->fx_done || !seg->use_rela_p)
8850 md_number_to_chars (buf, value, 8);
8851 break;
8852
8853 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
8854 /* We claim that these fixups have been processed here, even if
8855 in fact we generate an error because we do not have a reloc
8856 for them, so tc_gen_reloc() will reject them. */
8857 fixP->fx_done = 1;
8858 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
8859 {
8860 as_bad_where (fixP->fx_file, fixP->fx_line,
8861 _("undefined symbol %s used as an immediate value"),
8862 S_GET_NAME (fixP->fx_addsy));
8863 goto apply_fix_return;
8864 }
8865 fix_insn (fixP, flags, value);
8866 break;
8867
8868 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
8869 if (fixP->fx_done || !seg->use_rela_p)
8870 {
8871 if (value & 3)
8872 as_bad_where (fixP->fx_file, fixP->fx_line,
8873 _("pc-relative load offset not word aligned"));
8874 if (signed_overflow (value, 21))
8875 as_bad_where (fixP->fx_file, fixP->fx_line,
8876 _("pc-relative load offset out of range"));
8877 insn = get_aarch64_insn (buf);
8878 insn |= encode_ld_lit_ofs_19 (value >> 2);
8879 put_aarch64_insn (buf, insn);
8880 }
8881 break;
8882
8883 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
8884 if (fixP->fx_done || !seg->use_rela_p)
8885 {
8886 if (signed_overflow (value, 21))
8887 as_bad_where (fixP->fx_file, fixP->fx_line,
8888 _("pc-relative address offset out of range"));
8889 insn = get_aarch64_insn (buf);
8890 insn |= encode_adr_imm (value);
8891 put_aarch64_insn (buf, insn);
8892 }
8893 break;
8894
8895 case BFD_RELOC_AARCH64_BRANCH19:
8896 if (fixP->fx_done || !seg->use_rela_p)
8897 {
8898 if (value & 3)
8899 as_bad_where (fixP->fx_file, fixP->fx_line,
8900 _("conditional branch target not word aligned"));
8901 if (signed_overflow (value, 21))
8902 as_bad_where (fixP->fx_file, fixP->fx_line,
8903 _("conditional branch out of range"));
8904 insn = get_aarch64_insn (buf);
8905 insn |= encode_cond_branch_ofs_19 (value >> 2);
8906 put_aarch64_insn (buf, insn);
8907 }
8908 break;
8909
8910 case BFD_RELOC_AARCH64_TSTBR14:
8911 if (fixP->fx_done || !seg->use_rela_p)
8912 {
8913 if (value & 3)
8914 as_bad_where (fixP->fx_file, fixP->fx_line,
8915 _("conditional branch target not word aligned"));
8916 if (signed_overflow (value, 16))
8917 as_bad_where (fixP->fx_file, fixP->fx_line,
8918 _("conditional branch out of range"));
8919 insn = get_aarch64_insn (buf);
8920 insn |= encode_tst_branch_ofs_14 (value >> 2);
8921 put_aarch64_insn (buf, insn);
8922 }
8923 break;
8924
8925 case BFD_RELOC_AARCH64_CALL26:
8926 case BFD_RELOC_AARCH64_JUMP26:
8927 if (fixP->fx_done || !seg->use_rela_p)
8928 {
8929 if (value & 3)
8930 as_bad_where (fixP->fx_file, fixP->fx_line,
8931 _("branch target not word aligned"));
8932 if (signed_overflow (value, 28))
8933 as_bad_where (fixP->fx_file, fixP->fx_line,
8934 _("branch out of range"));
8935 insn = get_aarch64_insn (buf);
8936 insn |= encode_branch_ofs_26 (value >> 2);
8937 put_aarch64_insn (buf, insn);
8938 }
8939 break;
8940
8941 case BFD_RELOC_AARCH64_MOVW_G0:
8942 case BFD_RELOC_AARCH64_MOVW_G0_NC:
8943 case BFD_RELOC_AARCH64_MOVW_G0_S:
8944 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
8945 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8946 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
8947 scale = 0;
8948 goto movw_common;
8949 case BFD_RELOC_AARCH64_MOVW_G1:
8950 case BFD_RELOC_AARCH64_MOVW_G1_NC:
8951 case BFD_RELOC_AARCH64_MOVW_G1_S:
8952 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8953 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8954 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
8955 scale = 16;
8956 goto movw_common;
8957 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8958 scale = 0;
8959 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8960 /* Should always be exported to object file, see
8961 aarch64_force_relocation(). */
8962 gas_assert (!fixP->fx_done);
8963 gas_assert (seg->use_rela_p);
8964 goto movw_common;
8965 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8966 scale = 16;
8967 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8968 /* Should always be exported to object file, see
8969 aarch64_force_relocation(). */
8970 gas_assert (!fixP->fx_done);
8971 gas_assert (seg->use_rela_p);
8972 goto movw_common;
8973 case BFD_RELOC_AARCH64_MOVW_G2:
8974 case BFD_RELOC_AARCH64_MOVW_G2_NC:
8975 case BFD_RELOC_AARCH64_MOVW_G2_S:
8976 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8977 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
8978 scale = 32;
8979 goto movw_common;
8980 case BFD_RELOC_AARCH64_MOVW_G3:
8981 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
8982 scale = 48;
8983 movw_common:
8984 if (fixP->fx_done || !seg->use_rela_p)
8985 {
8986 insn = get_aarch64_insn (buf);
8987
8988 if (!fixP->fx_done)
8989 {
8990 /* REL signed addend must fit in 16 bits */
8991 if (signed_overflow (value, 16))
8992 as_bad_where (fixP->fx_file, fixP->fx_line,
8993 _("offset out of range"));
8994 }
8995 else
8996 {
8997 /* Check for overflow and scale. */
8998 switch (fixP->fx_r_type)
8999 {
9000 case BFD_RELOC_AARCH64_MOVW_G0:
9001 case BFD_RELOC_AARCH64_MOVW_G1:
9002 case BFD_RELOC_AARCH64_MOVW_G2:
9003 case BFD_RELOC_AARCH64_MOVW_G3:
9004 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9005 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9006 if (unsigned_overflow (value, scale + 16))
9007 as_bad_where (fixP->fx_file, fixP->fx_line,
9008 _("unsigned value out of range"));
9009 break;
9010 case BFD_RELOC_AARCH64_MOVW_G0_S:
9011 case BFD_RELOC_AARCH64_MOVW_G1_S:
9012 case BFD_RELOC_AARCH64_MOVW_G2_S:
9013 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
9014 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
9015 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
9016 /* NOTE: We can only come here with movz or movn. */
9017 if (signed_overflow (value, scale + 16))
9018 as_bad_where (fixP->fx_file, fixP->fx_line,
9019 _("signed value out of range"));
9020 if (value < 0)
9021 {
9022 /* Force use of MOVN. */
9023 value = ~value;
9024 insn = reencode_movzn_to_movn (insn);
9025 }
9026 else
9027 {
9028 /* Force use of MOVZ. */
9029 insn = reencode_movzn_to_movz (insn);
9030 }
9031 break;
9032 default:
9033 /* Unchecked relocations. */
9034 break;
9035 }
9036 value >>= scale;
9037 }
9038
9039 /* Insert value into MOVN/MOVZ/MOVK instruction. */
9040 insn |= encode_movw_imm (value & 0xffff);
9041
9042 put_aarch64_insn (buf, insn);
9043 }
9044 break;
9045
9046 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
9047 fixP->fx_r_type = (ilp32_p
9048 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
9049 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
9050 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9051 /* Should always be exported to object file, see
9052 aarch64_force_relocation(). */
9053 gas_assert (!fixP->fx_done);
9054 gas_assert (seg->use_rela_p);
9055 break;
9056
9057 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
9058 fixP->fx_r_type = (ilp32_p
9059 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
9060 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
9061 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9062 /* Should always be exported to object file, see
9063 aarch64_force_relocation(). */
9064 gas_assert (!fixP->fx_done);
9065 gas_assert (seg->use_rela_p);
9066 break;
9067
9068 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
9069 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
9070 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
9071 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
9072 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
9073 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
9074 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
9075 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
9076 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
9077 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
9078 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
9079 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
9080 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
9081 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
9082 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
9083 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
9084 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
9085 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
9086 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
9087 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
9088 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
9089 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
9090 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
9091 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
9092 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
9093 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
9094 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
9095 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
9096 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
9097 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
9098 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
9099 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
9100 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
9101 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
9102 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
9103 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
9104 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
9105 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
9106 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
9107 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
9108 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
9109 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
9110 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
9111 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
9112 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
9113 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
9114 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
9115 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
9116 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
9117 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
9118 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
9119 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
9120 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9121 /* Should always be exported to object file, see
9122 aarch64_force_relocation(). */
9123 gas_assert (!fixP->fx_done);
9124 gas_assert (seg->use_rela_p);
9125 break;
9126
9127 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
9128 /* Should always be exported to object file, see
9129 aarch64_force_relocation(). */
9130 fixP->fx_r_type = (ilp32_p
9131 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
9132 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
9133 gas_assert (!fixP->fx_done);
9134 gas_assert (seg->use_rela_p);
9135 break;
9136
9137 case BFD_RELOC_AARCH64_ADD_LO12:
9138 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
9139 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
9140 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
9141 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
9142 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
9143 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
9144 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
9145 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
9146 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
9147 case BFD_RELOC_AARCH64_LDST128_LO12:
9148 case BFD_RELOC_AARCH64_LDST16_LO12:
9149 case BFD_RELOC_AARCH64_LDST32_LO12:
9150 case BFD_RELOC_AARCH64_LDST64_LO12:
9151 case BFD_RELOC_AARCH64_LDST8_LO12:
9152 /* Should always be exported to object file, see
9153 aarch64_force_relocation(). */
9154 gas_assert (!fixP->fx_done);
9155 gas_assert (seg->use_rela_p);
9156 break;
9157
9158 case BFD_RELOC_AARCH64_TLSDESC_ADD:
9159 case BFD_RELOC_AARCH64_TLSDESC_CALL:
9160 case BFD_RELOC_AARCH64_TLSDESC_LDR:
9161 break;
9162
9163 case BFD_RELOC_UNUSED:
9164 /* An error will already have been reported. */
9165 break;
9166
9167 default:
9168 as_bad_where (fixP->fx_file, fixP->fx_line,
9169 _("unexpected %s fixup"),
9170 bfd_get_reloc_code_name (fixP->fx_r_type));
9171 break;
9172 }
9173
9174 apply_fix_return:
9175 /* Free the allocated struct aarch64_inst.
9176 N.B. currently only a very limited number of fix-up types actually use
9177 this field, so the impact on performance should be minimal. */
9178 free (fixP->tc_fix_data.inst);
9179
9180 return;
9181 }
9182
9183 /* Translate internal representation of relocation info to BFD target
9184 format. */
9185
9186 arelent *
9187 tc_gen_reloc (asection * section, fixS * fixp)
9188 {
9189 arelent *reloc;
9190 bfd_reloc_code_real_type code;
9191
9192 reloc = XNEW (arelent);
9193
9194 reloc->sym_ptr_ptr = XNEW (asymbol *);
9195 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9196 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9197
9198 if (fixp->fx_pcrel)
9199 {
9200 if (section->use_rela_p)
9201 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9202 else
9203 fixp->fx_offset = reloc->address;
9204 }
9205 reloc->addend = fixp->fx_offset;
9206
9207 code = fixp->fx_r_type;
9208 switch (code)
9209 {
9210 case BFD_RELOC_16:
9211 if (fixp->fx_pcrel)
9212 code = BFD_RELOC_16_PCREL;
9213 break;
9214
9215 case BFD_RELOC_32:
9216 if (fixp->fx_pcrel)
9217 code = BFD_RELOC_32_PCREL;
9218 break;
9219
9220 case BFD_RELOC_64:
9221 if (fixp->fx_pcrel)
9222 code = BFD_RELOC_64_PCREL;
9223 break;
9224
9225 default:
9226 break;
9227 }
9228
9229 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9230 if (reloc->howto == NULL)
9231 {
9232 as_bad_where (fixp->fx_file, fixp->fx_line,
9233 _
9234 ("cannot represent %s relocation in this object file format"),
9235 bfd_get_reloc_code_name (code));
9236 return NULL;
9237 }
9238
9239 return reloc;
9240 }
9241
9242 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9243
9244 void
9245 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9246 {
9247 bfd_reloc_code_real_type type;
9248 int pcrel = 0;
9249
9250 /* Pick a reloc.
9251 FIXME: @@ Should look at CPU word size. */
9252 switch (size)
9253 {
9254 case 1:
9255 type = BFD_RELOC_8;
9256 break;
9257 case 2:
9258 type = BFD_RELOC_16;
9259 break;
9260 case 4:
9261 type = BFD_RELOC_32;
9262 break;
9263 case 8:
9264 type = BFD_RELOC_64;
9265 break;
9266 default:
9267 as_bad (_("cannot do %u-byte relocation"), size);
9268 type = BFD_RELOC_UNUSED;
9269 break;
9270 }
9271
9272 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9273 }
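/* For example (illustrative), a `.word sym' directive reaches here with
   SIZE == 4 and is given a BFD_RELOC_32 fixup, while an 8-byte directive
   such as `.xword sym' gets BFD_RELOC_64.  */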
9274
9275 #ifdef OBJ_ELF
9276
9277 /* Implement md_after_parse_args. This is the earliest time we need to decide
9278 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9279
9280 void
9281 aarch64_after_parse_args (void)
9282 {
9283 if (aarch64_abi != AARCH64_ABI_NONE)
9284 return;
9285
9286 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9287 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9288 aarch64_abi = AARCH64_ABI_ILP32;
9289 else
9290 aarch64_abi = AARCH64_ABI_LP64;
9291 }
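/* Note (illustrative): strlen ("aarch64") == 7, so the check above matches
   a DEFAULT_ARCH such as "aarch64:32" (an ILP32 configuration), while the
   plain "aarch64" default selects LP64.  */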
9292
9293 const char *
9294 elf64_aarch64_target_format (void)
9295 {
9296 #ifdef TE_CLOUDABI
9297 /* FIXME: What to do for ilp32_p ? */
9298 if (target_big_endian)
9299 return "elf64-bigaarch64-cloudabi";
9300 else
9301 return "elf64-littleaarch64-cloudabi";
9302 #else
9303 if (target_big_endian)
9304 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9305 else
9306 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9307 #endif
9308 }
9309
9310 void
9311 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
9312 {
9313 elf_frob_symbol (symp, puntp);
9314 }
9315 #endif
9316
9317 /* MD interface: Finalization. */
9318
9319 /* A good place to do this, although this was probably not intended
9320 for this kind of use. We need to dump the literal pool before
9321 references are made to a null symbol pointer. */
9322
9323 void
9324 aarch64_cleanup (void)
9325 {
9326 literal_pool *pool;
9327
9328 for (pool = list_of_pools; pool; pool = pool->next)
9329 {
9330 /* Put it at the end of the relevant section. */
9331 subseg_set (pool->section, pool->sub_section);
9332 s_ltorg (0);
9333 }
9334 }
9335
9336 #ifdef OBJ_ELF
9337 /* Remove any excess mapping symbols generated for alignment frags in
9338 SEC. We may have created a mapping symbol before a zero byte
9339 alignment; remove it if there's a mapping symbol after the
9340 alignment. */
9341 static void
9342 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
9343 void *dummy ATTRIBUTE_UNUSED)
9344 {
9345 segment_info_type *seginfo = seg_info (sec);
9346 fragS *fragp;
9347
9348 if (seginfo == NULL || seginfo->frchainP == NULL)
9349 return;
9350
9351 for (fragp = seginfo->frchainP->frch_root;
9352 fragp != NULL; fragp = fragp->fr_next)
9353 {
9354 symbolS *sym = fragp->tc_frag_data.last_map;
9355 fragS *next = fragp->fr_next;
9356
9357 /* Variable-sized frags have been converted to fixed size by
9358 this point. But if this was variable-sized to start with,
9359 there will be a fixed-size frag after it. So don't handle
9360 next == NULL. */
9361 if (sym == NULL || next == NULL)
9362 continue;
9363
9364 if (S_GET_VALUE (sym) < next->fr_address)
9365 /* Not at the end of this frag. */
9366 continue;
9367 know (S_GET_VALUE (sym) == next->fr_address);
9368
9369 do
9370 {
9371 if (next->tc_frag_data.first_map != NULL)
9372 {
9373 /* Next frag starts with a mapping symbol. Discard this
9374 one. */
9375 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
9376 break;
9377 }
9378
9379 if (next->fr_next == NULL)
9380 {
9381 /* This mapping symbol is at the end of the section. Discard
9382 it. */
9383 know (next->fr_fix == 0 && next->fr_var == 0);
9384 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
9385 break;
9386 }
9387
9388 /* As long as we have empty frags without any mapping symbols,
9389 keep looking. */
9390 /* If the next frag is non-empty and does not start with a
9391 mapping symbol, then this mapping symbol is required. */
9392 if (next->fr_address != next->fr_next->fr_address)
9393 break;
9394
9395 next = next->fr_next;
9396 }
9397 while (next != NULL);
9398 }
9399 }
9400 #endif
9401
9402 /* Adjust the symbol table. */
9403
9404 void
9405 aarch64_adjust_symtab (void)
9406 {
9407 #ifdef OBJ_ELF
9408 /* Remove any overlapping mapping symbols generated by alignment frags. */
9409 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
9410 /* Now do generic ELF adjustments. */
9411 elf_adjust_symtab ();
9412 #endif
9413 }
9414
9415 static void
9416 checked_hash_insert (htab_t table, const char *key, void *value)
9417 {
9418 str_hash_insert (table, key, value, 0);
9419 }
9420
9421 static void
9422 sysreg_hash_insert (htab_t table, const char *key, void *value)
9423 {
9424 gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
9425 checked_hash_insert (table, key, value);
9426 }
9427
9428 static void
9429 fill_instruction_hash_table (void)
9430 {
9431 const aarch64_opcode *opcode = aarch64_opcode_table;
9432
9433 while (opcode->name != NULL)
9434 {
9435 templates *templ, *new_templ;
9436 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9437
9438 new_templ = XNEW (templates);
9439 new_templ->opcode = opcode;
9440 new_templ->next = NULL;
9441
9442 if (!templ)
9443 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9444 else
9445 {
9446 new_templ->next = templ->next;
9447 templ->next = new_templ;
9448 }
9449 ++opcode;
9450 }
9451 }
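/* The table built above chains all entries that share a mnemonic, so a
   lookup of e.g. "add" yields a templates list covering the immediate,
   shifted-register and extended-register forms (illustrative; the exact
   set of entries comes from aarch64_opcode_table).  */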
9452
9453 static inline void
9454 convert_to_upper (char *dst, const char *src, size_t num)
9455 {
9456 unsigned int i;
9457 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9458 *dst = TOUPPER (*src);
9459 *dst = '\0';
9460 }
9461
9462 /* Assume STR points to a lower-case string; allocate, convert and return
9463 the corresponding upper-case string. */
9464 static inline const char*
9465 get_upper_str (const char *str)
9466 {
9467 char *ret;
9468 size_t len = strlen (str);
9469 ret = XNEWVEC (char, len + 1);
9470 convert_to_upper (ret, str, len);
9471 return ret;
9472 }
9473
9474 /* MD interface: Initialization. */
9475
9476 void
9477 md_begin (void)
9478 {
9479 unsigned mach;
9480 unsigned int i;
9481
9482 aarch64_ops_hsh = str_htab_create ();
9483 aarch64_cond_hsh = str_htab_create ();
9484 aarch64_shift_hsh = str_htab_create ();
9485 aarch64_sys_regs_hsh = str_htab_create ();
9486 aarch64_pstatefield_hsh = str_htab_create ();
9487 aarch64_sys_regs_ic_hsh = str_htab_create ();
9488 aarch64_sys_regs_dc_hsh = str_htab_create ();
9489 aarch64_sys_regs_at_hsh = str_htab_create ();
9490 aarch64_sys_regs_tlbi_hsh = str_htab_create ();
9491 aarch64_sys_regs_sr_hsh = str_htab_create ();
9492 aarch64_reg_hsh = str_htab_create ();
9493 aarch64_barrier_opt_hsh = str_htab_create ();
9494 aarch64_nzcv_hsh = str_htab_create ();
9495 aarch64_pldop_hsh = str_htab_create ();
9496 aarch64_hint_opt_hsh = str_htab_create ();
9497
9498 fill_instruction_hash_table ();
9499
9500 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
9501 sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
9502 (void *) (aarch64_sys_regs + i));
9503
9504 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
9505 sysreg_hash_insert (aarch64_pstatefield_hsh,
9506 aarch64_pstatefields[i].name,
9507 (void *) (aarch64_pstatefields + i));
9508
9509 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
9510 sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
9511 aarch64_sys_regs_ic[i].name,
9512 (void *) (aarch64_sys_regs_ic + i));
9513
9514 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
9515 sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
9516 aarch64_sys_regs_dc[i].name,
9517 (void *) (aarch64_sys_regs_dc + i));
9518
9519 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
9520 sysreg_hash_insert (aarch64_sys_regs_at_hsh,
9521 aarch64_sys_regs_at[i].name,
9522 (void *) (aarch64_sys_regs_at + i));
9523
9524 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
9525 sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
9526 aarch64_sys_regs_tlbi[i].name,
9527 (void *) (aarch64_sys_regs_tlbi + i));
9528
9529 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
9530 sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
9531 aarch64_sys_regs_sr[i].name,
9532 (void *) (aarch64_sys_regs_sr + i));
9533
9534 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
9535 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
9536 (void *) (reg_names + i));
9537
9538 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
9539 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
9540 (void *) (nzcv_names + i));
9541
9542 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
9543 {
9544 const char *name = aarch64_operand_modifiers[i].name;
9545 checked_hash_insert (aarch64_shift_hsh, name,
9546 (void *) (aarch64_operand_modifiers + i));
9547 /* Also hash the name in the upper case. */
9548 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
9549 (void *) (aarch64_operand_modifiers + i));
9550 }
9551
9552 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
9553 {
9554 unsigned int j;
9555 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
9556 the same condition code. */
9557 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
9558 {
9559 const char *name = aarch64_conds[i].names[j];
9560 if (name == NULL)
9561 break;
9562 checked_hash_insert (aarch64_cond_hsh, name,
9563 (void *) (aarch64_conds + i));
9564 /* Also hash the name in the upper case. */
9565 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
9566 (void *) (aarch64_conds + i));
9567 }
9568 }
9569
9570 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
9571 {
9572 const char *name = aarch64_barrier_options[i].name;
9573 /* Skip xx00 - the unallocated option values. */
9574 if ((i & 0x3) == 0)
9575 continue;
9576 checked_hash_insert (aarch64_barrier_opt_hsh, name,
9577 (void *) (aarch64_barrier_options + i));
9578 /* Also hash the name in the upper case. */
9579 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
9580 (void *) (aarch64_barrier_options + i));
9581 }
9582
9583 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
9584 {
9585 const char *name = aarch64_barrier_dsb_nxs_options[i].name;
9586 checked_hash_insert (aarch64_barrier_opt_hsh, name,
9587 (void *) (aarch64_barrier_dsb_nxs_options + i));
9588 /* Also hash the name in the upper case. */
9589 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
9590 (void *) (aarch64_barrier_dsb_nxs_options + i));
9591 }
9592
9593 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
9594 {
9595 const char* name = aarch64_prfops[i].name;
9596 /* Skip the unallocated hint encodings. */
9597 if (name == NULL)
9598 continue;
9599 checked_hash_insert (aarch64_pldop_hsh, name,
9600 (void *) (aarch64_prfops + i));
9601 /* Also hash the name in the upper case. */
9602 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
9603 (void *) (aarch64_prfops + i));
9604 }
9605
9606 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
9607 {
9608 const char* name = aarch64_hint_options[i].name;
9609 const char* upper_name = get_upper_str(name);
9610
9611 checked_hash_insert (aarch64_hint_opt_hsh, name,
9612 (void *) (aarch64_hint_options + i));
9613
9614 /* Also hash the name in upper case if it differs from the original. */
9615 if (strcmp (name, upper_name) != 0)
9616 checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
9617 (void *) (aarch64_hint_options + i));
9618 }
9619
9620 /* Set the cpu variant based on the command-line options. */
9621 if (!mcpu_cpu_opt)
9622 mcpu_cpu_opt = march_cpu_opt;
9623
9624 if (!mcpu_cpu_opt)
9625 mcpu_cpu_opt = &cpu_default;
9626
9627 cpu_variant = *mcpu_cpu_opt;
9628
9629 /* Record the CPU type. */
9630 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
9631
9632 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
9633 }
9634
9635 /* Command line processing. */
9636
9637 const char *md_shortopts = "m:";
9638
9639 #ifdef AARCH64_BI_ENDIAN
9640 #define OPTION_EB (OPTION_MD_BASE + 0)
9641 #define OPTION_EL (OPTION_MD_BASE + 1)
9642 #else
9643 #if TARGET_BYTES_BIG_ENDIAN
9644 #define OPTION_EB (OPTION_MD_BASE + 0)
9645 #else
9646 #define OPTION_EL (OPTION_MD_BASE + 1)
9647 #endif
9648 #endif
9649
9650 struct option md_longopts[] = {
9651 #ifdef OPTION_EB
9652 {"EB", no_argument, NULL, OPTION_EB},
9653 #endif
9654 #ifdef OPTION_EL
9655 {"EL", no_argument, NULL, OPTION_EL},
9656 #endif
9657 {NULL, no_argument, NULL, 0}
9658 };
9659
9660 size_t md_longopts_size = sizeof (md_longopts);
9661
9662 struct aarch64_option_table
9663 {
9664 const char *option; /* Option name to match. */
9665 const char *help; /* Help information. */
9666 int *var; /* Variable to change. */
9667 int value; /* What to change it to. */
9668 char *deprecated; /* If non-null, print this message. */
9669 };
9670
9671 static struct aarch64_option_table aarch64_opts[] = {
9672 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
9673 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
9674 NULL},
9675 #ifdef DEBUG_AARCH64
9676 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
9677 #endif /* DEBUG_AARCH64 */
9678 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
9679 NULL},
9680 {"mno-verbose-error", N_("do not output verbose error messages"),
9681 &verbose_error_p, 0, NULL},
9682 {NULL, NULL, NULL, 0, NULL}
9683 };
9684
9685 struct aarch64_cpu_option_table
9686 {
9687 const char *name;
9688 const aarch64_feature_set value;
9689 /* The canonical name of the CPU, or NULL to use NAME converted to upper
9690 case. */
9691 const char *canonical_name;
9692 };
9693
9694 /* This list should, at a minimum, contain all the cpu names
9695 recognized by GCC. */
9696 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
9697 {"all", AARCH64_ANY, NULL},
9698 {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
9699 AARCH64_FEATURE_CRC), "Cortex-A34"},
9700 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
9701 AARCH64_FEATURE_CRC), "Cortex-A35"},
9702 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
9703 AARCH64_FEATURE_CRC), "Cortex-A53"},
9704 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
9705 AARCH64_FEATURE_CRC), "Cortex-A57"},
9706 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
9707 AARCH64_FEATURE_CRC), "Cortex-A72"},
9708 {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
9709 AARCH64_FEATURE_CRC), "Cortex-A73"},
9710 {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9711 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9712 "Cortex-A55"},
9713 {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9714 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9715 "Cortex-A75"},
9716 {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9717 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9718 "Cortex-A76"},
9719 {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9720 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9721 | AARCH64_FEATURE_DOTPROD
9722 | AARCH64_FEATURE_SSBS),
9723 "Cortex-A76AE"},
9724 {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9725 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9726 | AARCH64_FEATURE_DOTPROD
9727 | AARCH64_FEATURE_SSBS),
9728 "Cortex-A77"},
9729 {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9730 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9731 | AARCH64_FEATURE_DOTPROD
9732 | AARCH64_FEATURE_SSBS),
9733 "Cortex-A65"},
9734 {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9735 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9736 | AARCH64_FEATURE_DOTPROD
9737 | AARCH64_FEATURE_SSBS),
9738 "Cortex-A65AE"},
9739 {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9740 AARCH64_FEATURE_F16
9741 | AARCH64_FEATURE_RCPC
9742 | AARCH64_FEATURE_DOTPROD
9743 | AARCH64_FEATURE_SSBS
9744 | AARCH64_FEATURE_PROFILE),
9745 "Cortex-A78"},
9746 {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9747 AARCH64_FEATURE_F16
9748 | AARCH64_FEATURE_RCPC
9749 | AARCH64_FEATURE_DOTPROD
9750 | AARCH64_FEATURE_SSBS
9751 | AARCH64_FEATURE_PROFILE),
9752 "Cortex-A78AE"},
9753 {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9754 AARCH64_FEATURE_DOTPROD
9755 | AARCH64_FEATURE_F16
9756 | AARCH64_FEATURE_FLAGM
9757 | AARCH64_FEATURE_PAC
9758 | AARCH64_FEATURE_PROFILE
9759 | AARCH64_FEATURE_RCPC
9760 | AARCH64_FEATURE_SSBS),
9761 "Cortex-A78C"},
9762 {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
9763 AARCH64_FEATURE_BFLOAT16
9764 | AARCH64_FEATURE_I8MM
9765 | AARCH64_FEATURE_MEMTAG
9766 | AARCH64_FEATURE_SVE2_BITPERM),
9767 "Cortex-A510"},
9768 {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
9769 AARCH64_FEATURE_BFLOAT16
9770 | AARCH64_FEATURE_I8MM
9771 | AARCH64_FEATURE_MEMTAG
9772 | AARCH64_FEATURE_SVE2_BITPERM),
9773 "Cortex-A710"},
9774 {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9775 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9776 | AARCH64_FEATURE_DOTPROD
9777 | AARCH64_FEATURE_PROFILE),
9778 "Ares"},
9779 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
9780 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
9781 "Samsung Exynos M1"},
9782 {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
9783 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
9784 | AARCH64_FEATURE_RDMA),
9785 "Qualcomm Falkor"},
9786 {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9787 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9788 | AARCH64_FEATURE_DOTPROD
9789 | AARCH64_FEATURE_SSBS),
9790 "Neoverse E1"},
9791 {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9792 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9793 | AARCH64_FEATURE_DOTPROD
9794 | AARCH64_FEATURE_PROFILE),
9795 "Neoverse N1"},
9796 {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
9797 AARCH64_FEATURE_BFLOAT16
9798 | AARCH64_FEATURE_I8MM
9799 | AARCH64_FEATURE_F16
9800 | AARCH64_FEATURE_SVE
9801 | AARCH64_FEATURE_SVE2
9802 | AARCH64_FEATURE_SVE2_BITPERM
9803 | AARCH64_FEATURE_MEMTAG
9804 | AARCH64_FEATURE_RNG),
9805 "Neoverse N2"},
9806 {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
9807 AARCH64_FEATURE_PROFILE
9808 | AARCH64_FEATURE_CVADP
9809 | AARCH64_FEATURE_SVE
9810 | AARCH64_FEATURE_SSBS
9811 | AARCH64_FEATURE_RNG
9812 | AARCH64_FEATURE_F16
9813 | AARCH64_FEATURE_BFLOAT16
9814 | AARCH64_FEATURE_I8MM), "Neoverse V1"},
9815 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
9816 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
9817 | AARCH64_FEATURE_RDMA),
9818 "Qualcomm QDF24XX"},
9819 {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
9820 AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
9821 "Qualcomm Saphira"},
9822 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
9823 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
9824 "Cavium ThunderX"},
9825 {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
9826 AARCH64_FEATURE_CRYPTO),
9827 "Broadcom Vulcan"},
9828 /* The 'xgene-1' name is an older spelling of 'xgene1'; it was used
9829 in earlier releases and is superseded by 'xgene1' in all
9830 tools. */
9831 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9832 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9833 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
9834 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
9835 {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
9836 {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9837 AARCH64_FEATURE_F16
9838 | AARCH64_FEATURE_RCPC
9839 | AARCH64_FEATURE_DOTPROD
9840 | AARCH64_FEATURE_SSBS
9841 | AARCH64_FEATURE_PROFILE),
9842 "Cortex-X1"},
9843 {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
9844 AARCH64_FEATURE_BFLOAT16
9845 | AARCH64_FEATURE_I8MM
9846 | AARCH64_FEATURE_MEMTAG
9847 | AARCH64_FEATURE_SVE2_BITPERM),
9848 "Cortex-X2"},
9849 {"generic", AARCH64_ARCH_V8, NULL},
9850
9851 {NULL, AARCH64_ARCH_NONE, NULL}
9852 };
9853
9854 struct aarch64_arch_option_table
9855 {
9856 const char *name;
9857 const aarch64_feature_set value;
9858 };
9859
9860 /* This list should, at a minimum, contain all the architecture names
9861 recognized by GCC. */
9862 static const struct aarch64_arch_option_table aarch64_archs[] = {
9863 {"all", AARCH64_ANY},
9864 {"armv8-a", AARCH64_ARCH_V8},
9865 {"armv8.1-a", AARCH64_ARCH_V8_1},
9866 {"armv8.2-a", AARCH64_ARCH_V8_2},
9867 {"armv8.3-a", AARCH64_ARCH_V8_3},
9868 {"armv8.4-a", AARCH64_ARCH_V8_4},
9869 {"armv8.5-a", AARCH64_ARCH_V8_5},
9870 {"armv8.6-a", AARCH64_ARCH_V8_6},
9871 {"armv8.7-a", AARCH64_ARCH_V8_7},
9872 {"armv8.8-a", AARCH64_ARCH_V8_8},
9873 {"armv8-r", AARCH64_ARCH_V8_R},
9874 {"armv9-a", AARCH64_ARCH_V9},
9875 {NULL, AARCH64_ARCH_NONE}
9876 };
9877
9878 /* ISA extensions. */
9879 struct aarch64_option_cpu_value_table
9880 {
9881 const char *name;
9882 const aarch64_feature_set value;
9883 const aarch64_feature_set require; /* Feature dependencies. */
9884 };
9885
9886 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
9887 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
9888 AARCH64_ARCH_NONE},
9889 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
9890 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
9891 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
9892 AARCH64_ARCH_NONE},
9893 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
9894 AARCH64_ARCH_NONE},
9895 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
9896 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
9897 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
9898 AARCH64_ARCH_NONE},
9899 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
9900 AARCH64_ARCH_NONE},
9901 {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
9902 AARCH64_ARCH_NONE},
9903 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
9904 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
9905 {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
9906 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
9907 {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
9908 AARCH64_FEATURE (AARCH64_FEATURE_FP
9909 | AARCH64_FEATURE_F16, 0)},
9910 {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
9911 AARCH64_ARCH_NONE},
9912 {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
9913 AARCH64_FEATURE (AARCH64_FEATURE_F16
9914 | AARCH64_FEATURE_SIMD
9915 | AARCH64_FEATURE_COMPNUM, 0)},
9916 {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
9917 AARCH64_ARCH_NONE},
9918 {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
9919 AARCH64_FEATURE (AARCH64_FEATURE_F16
9920 | AARCH64_FEATURE_SIMD, 0)},
9921 {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
9922 AARCH64_ARCH_NONE},
9923 {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
9924 AARCH64_ARCH_NONE},
9925 {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
9926 AARCH64_ARCH_NONE},
9927 {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
9928 AARCH64_ARCH_NONE},
9929 {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
9930 AARCH64_ARCH_NONE},
9931 {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
9932 AARCH64_ARCH_NONE},
9933 {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
9934 AARCH64_ARCH_NONE},
9935 {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
9936 AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
9937 {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
9938 AARCH64_ARCH_NONE},
9939 {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
9940 AARCH64_ARCH_NONE},
9941 {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
9942 AARCH64_ARCH_NONE},
9943 {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
9944 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9945 {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
9946 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9947 | AARCH64_FEATURE_SM4, 0)},
9948 {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
9949 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9950 | AARCH64_FEATURE_AES, 0)},
9951 {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
9952 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9953 | AARCH64_FEATURE_SHA3, 0)},
9954 {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
9955 AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
9956 {"sme", AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
9957 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9958 | AARCH64_FEATURE_BFLOAT16, 0)},
9959 {"sme-f64", AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
9960 AARCH64_FEATURE (AARCH64_FEATURE_SME
9961 | AARCH64_FEATURE_SVE2
9962 | AARCH64_FEATURE_BFLOAT16, 0)},
9963 {"sme-i64", AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
9964 AARCH64_FEATURE (AARCH64_FEATURE_SME
9965 | AARCH64_FEATURE_SVE2
9966 | AARCH64_FEATURE_BFLOAT16, 0)},
9967 {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
9968 AARCH64_ARCH_NONE},
9969 {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
9970 AARCH64_ARCH_NONE},
9971 {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
9972 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9973 {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
9974 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9975 {"ls64", AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
9976 AARCH64_ARCH_NONE},
9977 {"flagm", AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
9978 AARCH64_ARCH_NONE},
9979 {"pauth", AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
9980 AARCH64_ARCH_NONE},
9981 {"mops", AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
9982 AARCH64_ARCH_NONE},
9983 {"hbc", AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
9984 AARCH64_ARCH_NONE},
9985 {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
9986 };
9987
9988 struct aarch64_long_option_table
9989 {
9990 const char *option; /* Substring to match. */
9991 const char *help; /* Help information. */
9992 int (*func) (const char *subopt); /* Function to decode sub-option. */
9993 char *deprecated; /* If non-null, print this message. */
9994 };
9995
9996 /* Transitive closure of the features that depend on SET; used when disabling features. */
9997 static aarch64_feature_set
9998 aarch64_feature_disable_set (aarch64_feature_set set)
9999 {
10000 const struct aarch64_option_cpu_value_table *opt;
10001 aarch64_feature_set prev = 0;
10002
10003 while (prev != set) {
10004 prev = set;
10005 for (opt = aarch64_features; opt->name != NULL; opt++)
10006 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10007 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10008 }
10009 return set;
10010 }
10011
10012 /* Transitive closure of the dependencies of SET; used when enabling features. */
10013 static aarch64_feature_set
10014 aarch64_feature_enable_set (aarch64_feature_set set)
10015 {
10016 const struct aarch64_option_cpu_value_table *opt;
10017 aarch64_feature_set prev = 0;
10018
10019 while (prev != set) {
10020 prev = set;
10021 for (opt = aarch64_features; opt->name != NULL; opt++)
10022 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10023 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10024 }
10025 return set;
10026 }
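/* Illustrative walk-through of the two closures above, using entries from
   the aarch64_features table: enabling "crypto" pulls in its "simd"
   requirement, which in turn pulls in "fp"; conversely, disabling "fp"
   also disables "simd" and everything layered on top of it (crypto,
   rdma, ...).  Each loop simply iterates until the feature set reaches
   a fixed point.  */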
10027
10028 static int
10029 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10030 bool ext_only)
10031 {
10032 /* We insist on extensions being added before being removed. We achieve
10033 this by using the ADDING_VALUE variable to indicate whether we are
10034 adding an extension (1) or removing it (0) and only allowing it to
10035 change in the order -1 -> 1 -> 0. */
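/* An illustrative example of this ordering rule, with extension names
   taken from the aarch64_features table: "+lse+crc+nofp" first enables
   LSE and CRC and then removes FP together with everything that
   transitively requires it, whereas "+nofp+lse" is rejected because an
   addition follows a removal.  */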
10036 int adding_value = -1;
10037 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10038
10039 /* Copy the feature set, so that we can modify it. */
10040 *ext_set = **opt_p;
10041 *opt_p = ext_set;
10042
10043 while (str != NULL && *str != 0)
10044 {
10045 const struct aarch64_option_cpu_value_table *opt;
10046 const char *ext = NULL;
10047 int optlen;
10048
10049 if (!ext_only)
10050 {
10051 if (*str != '+')
10052 {
10053 as_bad (_("invalid architectural extension"));
10054 return 0;
10055 }
10056
10057 ext = strchr (++str, '+');
10058 }
10059
10060 if (ext != NULL)
10061 optlen = ext - str;
10062 else
10063 optlen = strlen (str);
10064
10065 if (optlen >= 2 && startswith (str, "no"))
10066 {
10067 if (adding_value != 0)
10068 adding_value = 0;
10069 optlen -= 2;
10070 str += 2;
10071 }
10072 else if (optlen > 0)
10073 {
10074 if (adding_value == -1)
10075 adding_value = 1;
10076 else if (adding_value != 1)
10077 {
10078 as_bad (_("must specify extensions to add before specifying "
10079 "those to remove"));
10080 return 0;
10081 }
10082 }
10083
10084 if (optlen == 0)
10085 {
10086 as_bad (_("missing architectural extension"));
10087 return 0;
10088 }
10089
10090 gas_assert (adding_value != -1);
10091
10092 for (opt = aarch64_features; opt->name != NULL; opt++)
10093 if (strncmp (opt->name, str, optlen) == 0)
10094 {
10095 aarch64_feature_set set;
10096
10097 /* Add or remove the extension. */
10098 if (adding_value)
10099 {
10100 set = aarch64_feature_enable_set (opt->value);
10101 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10102 }
10103 else
10104 {
10105 set = aarch64_feature_disable_set (opt->value);
10106 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10107 }
10108 break;
10109 }
10110
10111 if (opt->name == NULL)
10112 {
10113 as_bad (_("unknown architectural extension `%s'"), str);
10114 return 0;
10115 }
10116
10117 str = ext;
10118 }
10119
10120 return 1;
10121 }
10122
10123 static int
10124 aarch64_parse_cpu (const char *str)
10125 {
10126 const struct aarch64_cpu_option_table *opt;
10127 const char *ext = strchr (str, '+');
10128 size_t optlen;
10129
10130 if (ext != NULL)
10131 optlen = ext - str;
10132 else
10133 optlen = strlen (str);
10134
10135 if (optlen == 0)
10136 {
10137 as_bad (_("missing cpu name `%s'"), str);
10138 return 0;
10139 }
10140
10141 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10142 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10143 {
10144 mcpu_cpu_opt = &opt->value;
10145 if (ext != NULL)
10146 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10147
10148 return 1;
10149 }
10150
10151 as_bad (_("unknown cpu `%s'"), str);
10152 return 0;
10153 }
10154
10155 static int
10156 aarch64_parse_arch (const char *str)
10157 {
10158 const struct aarch64_arch_option_table *opt;
10159 const char *ext = strchr (str, '+');
10160 size_t optlen;
10161
10162 if (ext != NULL)
10163 optlen = ext - str;
10164 else
10165 optlen = strlen (str);
10166
10167 if (optlen == 0)
10168 {
10169 as_bad (_("missing architecture name `%s'"), str);
10170 return 0;
10171 }
10172
10173 for (opt = aarch64_archs; opt->name != NULL; opt++)
10174 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10175 {
10176 march_cpu_opt = &opt->value;
10177 if (ext != NULL)
10178 return aarch64_parse_features (ext, &march_cpu_opt, false);
10179
10180 return 1;
10181 }
10182
10183 as_bad (_("unknown architecture `%s'"), str);
10184 return 0;
10185 }
10186
10187 /* ABIs. */
10188 struct aarch64_option_abi_value_table
10189 {
10190 const char *name;
10191 enum aarch64_abi_type value;
10192 };
10193
10194 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
10195 {"ilp32", AARCH64_ABI_ILP32},
10196 {"lp64", AARCH64_ABI_LP64},
10197 };
10198
10199 static int
10200 aarch64_parse_abi (const char *str)
10201 {
10202 unsigned int i;
10203
10204 if (str[0] == '\0')
10205 {
10206 as_bad (_("missing abi name `%s'"), str);
10207 return 0;
10208 }
10209
10210 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10211 if (strcmp (str, aarch64_abis[i].name) == 0)
10212 {
10213 aarch64_abi = aarch64_abis[i].value;
10214 return 1;
10215 }
10216
10217 as_bad (_("unknown abi `%s'"), str);
10218 return 0;
10219 }
10220
10221 static struct aarch64_long_option_table aarch64_long_opts[] = {
10222 #ifdef OBJ_ELF
10223 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
10224 aarch64_parse_abi, NULL},
10225 #endif /* OBJ_ELF */
10226 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
10227 aarch64_parse_cpu, NULL},
10228 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
10229 aarch64_parse_arch, NULL},
10230 {NULL, NULL, 0, NULL}
10231 };
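/* Illustrative command lines accepted via the tables above (file names
   are placeholders; the CPU, architecture and extension names are the
   ones defined earlier in this file, and -mabi= is only available for
   ELF targets):

     as -mabi=lp64 -mcpu=cortex-a72+crypto -o foo.o foo.s
     as -march=armv8.5-a+memtag -mlittle-endian -o foo.o foo.s  */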
10232
10233 int
10234 md_parse_option (int c, const char *arg)
10235 {
10236 struct aarch64_option_table *opt;
10237 struct aarch64_long_option_table *lopt;
10238
10239 switch (c)
10240 {
10241 #ifdef OPTION_EB
10242 case OPTION_EB:
10243 target_big_endian = 1;
10244 break;
10245 #endif
10246
10247 #ifdef OPTION_EL
10248 case OPTION_EL:
10249 target_big_endian = 0;
10250 break;
10251 #endif
10252
10253 case 'a':
10254 /* Listing option. Just ignore these; we don't support additional
10255 ones. */
10256 return 0;
10257
10258 default:
10259 for (opt = aarch64_opts; opt->option != NULL; opt++)
10260 {
10261 if (c == opt->option[0]
10262 && ((arg == NULL && opt->option[1] == 0)
10263 || streq (arg, opt->option + 1)))
10264 {
10265 /* If the option is deprecated, tell the user. */
10266 if (opt->deprecated != NULL)
10267 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
10268 arg ? arg : "", _(opt->deprecated));
10269
10270 if (opt->var != NULL)
10271 *opt->var = opt->value;
10272
10273 return 1;
10274 }
10275 }
10276
10277 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10278 {
10279 /* These options are expected to have an argument. */
10280 if (c == lopt->option[0]
10281 && arg != NULL
10282 && startswith (arg, lopt->option + 1))
10283 {
10284 /* If the option is deprecated, tell the user. */
10285 if (lopt->deprecated != NULL)
10286 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
10287 _(lopt->deprecated));
10288
10289 /* Call the sub-option parser. */
10290 return lopt->func (arg + strlen (lopt->option) - 1);
10291 }
10292 }
10293
10294 return 0;
10295 }
10296
10297 return 1;
10298 }
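/* A worked example of the long-option matching above: for
   "-mcpu=cortex-a57", getopt hands back c == 'm' and
   arg == "cpu=cortex-a57".  The "mcpu=" entry matches because
   startswith (arg, "cpu="), and arg + strlen ("mcpu=") - 1 points at
   "cortex-a57", which is what aarch64_parse_cpu receives.  */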
10299
10300 void
10301 md_show_usage (FILE * fp)
10302 {
10303 struct aarch64_option_table *opt;
10304 struct aarch64_long_option_table *lopt;
10305
10306 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10307
10308 for (opt = aarch64_opts; opt->option != NULL; opt++)
10309 if (opt->help != NULL)
10310 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10311
10312 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10313 if (lopt->help != NULL)
10314 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10315
10316 #ifdef OPTION_EB
10317 fprintf (fp, _("\
10318 -EB assemble code for a big-endian cpu\n"));
10319 #endif
10320
10321 #ifdef OPTION_EL
10322 fprintf (fp, _("\
10323 -EL assemble code for a little-endian cpu\n"));
10324 #endif
10325 }
10326
10327 /* Parse a .cpu directive. */
10328
10329 static void
10330 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10331 {
10332 const struct aarch64_cpu_option_table *opt;
10333 char saved_char;
10334 char *name;
10335 char *ext;
10336 size_t optlen;
10337
10338 name = input_line_pointer;
10339 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10340 input_line_pointer++;
10341 saved_char = *input_line_pointer;
10342 *input_line_pointer = 0;
10343
10344 ext = strchr (name, '+');
10345
10346 if (ext != NULL)
10347 optlen = ext - name;
10348 else
10349 optlen = strlen (name);
10350
10351 /* Skip the first "all" entry. */
10352 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10353 if (strlen (opt->name) == optlen
10354 && strncmp (name, opt->name, optlen) == 0)
10355 {
10356 mcpu_cpu_opt = &opt->value;
10357 if (ext != NULL)
10358 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10359 return;
10360
10361 cpu_variant = *mcpu_cpu_opt;
10362
10363 *input_line_pointer = saved_char;
10364 demand_empty_rest_of_line ();
10365 return;
10366 }
10367 as_bad (_("unknown cpu `%s'"), name);
10368 *input_line_pointer = saved_char;
10369 ignore_rest_of_line ();
10370 }
10371
10372
10373 /* Parse a .arch directive. */
10374
10375 static void
10376 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10377 {
10378 const struct aarch64_arch_option_table *opt;
10379 char saved_char;
10380 char *name;
10381 char *ext;
10382 size_t optlen;
10383
10384 name = input_line_pointer;
10385 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10386 input_line_pointer++;
10387 saved_char = *input_line_pointer;
10388 *input_line_pointer = 0;
10389
10390 ext = strchr (name, '+');
10391
10392 if (ext != NULL)
10393 optlen = ext - name;
10394 else
10395 optlen = strlen (name);
10396
10397 /* Skip the first "all" entry. */
10398 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10399 if (strlen (opt->name) == optlen
10400 && strncmp (name, opt->name, optlen) == 0)
10401 {
10402 mcpu_cpu_opt = &opt->value;
10403 if (ext != NULL)
10404 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10405 return;
10406
10407 cpu_variant = *mcpu_cpu_opt;
10408
10409 *input_line_pointer = saved_char;
10410 demand_empty_rest_of_line ();
10411 return;
10412 }
10413
10414 as_bad (_("unknown architecture `%s'"), name);
10415 *input_line_pointer = saved_char;
10416 ignore_rest_of_line ();
10417 }
10418
10419 /* Parse a .arch_extension directive. */
10420
10421 static void
10422 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10423 {
10424 char saved_char;
10425 char *ext = input_line_pointer;
10426
10427 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
10428 input_line_pointer++;
10429 saved_char = *input_line_pointer;
10430 *input_line_pointer = 0;
10431
10432 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10433 return;
10434
10435 cpu_variant = *mcpu_cpu_opt;
10436
10437 *input_line_pointer = saved_char;
10438 demand_empty_rest_of_line ();
10439 }
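/* Illustrative uses of the directives handled above, with names taken
   from the tables earlier in this file:

     .cpu cortex-a53+crc
     .arch armv8.2-a+sve
     .arch_extension memtag
     .arch_extension nomemtag  */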
10440
10441 /* Copy symbol information. */
10442
10443 void
10444 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
10445 {
10446 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
10447 }
10448
10449 #ifdef OBJ_ELF
10450 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10451 This is needed so AArch64 specific st_other values can be independently
10452 specified for an IFUNC resolver (that is called by the dynamic linker)
10453 and the symbol it resolves (aliased to the resolver). In particular,
10454 if a function symbol has special st_other value set via directives,
10455 then attaching an IFUNC resolver to that symbol should not override
10456 the st_other setting. Requiring the directive on the IFUNC resolver
10457 symbol would be unexpected and problematic in C code, where the two
10458 symbols appear as two independent function declarations. */
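/* A sketch (hypothetical symbol names) of the kind of aliasing the
   comment above describes; .variant_pcs is one directive that sets
   special st_other bits:

     .variant_pcs foo
     .type foo, %gnu_indirect_function
     .set  foo, foo_resolver

   Copying st_other from the resolver here would silently drop the
   marking on foo, so only the size information is propagated.  */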
10459
10460 void
10461 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10462 {
10463 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10464 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10465 if (srcelf->size)
10466 {
10467 if (destelf->size == NULL)
10468 destelf->size = XNEW (expressionS);
10469 *destelf->size = *srcelf->size;
10470 }
10471 else
10472 {
10473 free (destelf->size);
10474 destelf->size = NULL;
10475 }
10476 S_SET_SIZE (dest, S_GET_SIZE (src));
10477 }
10478 #endif