[AArch64][SVE 26/32] Add SVE MUL VL addressing modes
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2016 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
79 enum vector_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q,
87 NT_zero,
88 NT_merge
89 };
90
/* Bits for DEFINED field in vector_type_el.  */
#define NTA_HASTYPE     1
#define NTA_HASINDEX    2
#define NTA_HASVARWIDTH 4

/* Parsed qualifier of a vector register or vector element: which of
   the fields are valid is recorded in DEFINED (a mask of NTA_* bits).  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type, e.g. NT_s.  */
  unsigned char defined;	/* Mask of NTA_HAS* bits set for this entry.  */
  unsigned width;		/* Number of elements; 0 when unspecified.  */
  int64_t index;		/* Element index; valid iff NTA_HASINDEX.  */
};
103
/* NOTE(review): presumably set when the operand carried an explicit
   shift with the relocation operator — confirm against users.  */
#define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001

/* Relocation information for the instruction being assembled,
   including what is needed to build the GAS-internal fixup.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression the reloc applies to.  */
  int pc_rel;				/* Non-zero if PC-relative.  */
  enum aarch64_opnd opnd;		/* Operand the reloc is attached to.  */
  uint32_t flags;			/* FIXUP_F_* flags.  */
  unsigned need_libopcodes_p : 1;	/* Needs libopcodes when applied?  */
};
115
/* Everything the assembler accumulates while processing one assembly
   line: the decoded instruction, any parse error, the condition code,
   relocation info and literal-pool bookkeeping.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
  {
    enum aarch64_operand_error_kind kind;	/* AARCH64_OPDE_* code.  */
    const char *error;				/* Message text; may be NULL.  */
  } parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};

typedef struct aarch64_instruction aarch64_instruction;
135
136 static aarch64_instruction inst;
137
138 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
139 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
140
/* Diagnostics inline function utilities.

   These are lightweight utilities which should only be called by parse_operands
   and other parsers.  GAS processes each assembly line by parsing it against
   instruction template(s), in the case of multiple templates (for the same
   mnemonic name), those templates are tried one by one until one succeeds or
   all fail.  An assembly line may fail a few templates before being
   successfully parsed; an error saved here in most cases is not a user error
   but an error indicating the current template is not the right template.
   Therefore it is very important that errors can be saved at a low cost during
   the parsing; we don't want to slow down the whole parsing by recording
   non-user errors in detail.

   Remember that the objective is to help GAS pick up the most appropriate
   error message in the case of multiple templates, e.g. FMOV which has 8
   templates.  */
157
/* Reset the recorded parsing error to "no error".  */
static inline void
clear_error (void)
{
  inst.parsing_error.kind = AARCH64_OPDE_NIL;
  inst.parsing_error.error = NULL;
}
164
/* Return TRUE if a parsing error is currently recorded.  */
static inline bfd_boolean
error_p (void)
{
  return inst.parsing_error.kind != AARCH64_OPDE_NIL;
}
170
/* Return the message of the recorded parsing error; NULL when the
   error carries no custom text.  */
static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}
176
/* Return the kind of the recorded parsing error.  */
static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}
182
/* Record a parsing error of kind KIND with message ERROR (which may be
   NULL), overwriting any previously recorded error.  */
static inline void
set_error (enum aarch64_operand_error_kind kind, const char *error)
{
  inst.parsing_error.kind = kind;
  inst.parsing_error.error = error;
}
189
/* Record ERROR with kind AARCH64_OPDE_RECOVERABLE.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
195
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message; i.e. record a syntax error with no custom text.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
203
/* Record ERROR as a syntax error, overwriting any earlier error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
209
/* Record ERROR as a syntax error, but only if no error has been
   recorded yet; earlier errors are usually the more meaningful ones.  */
static inline void
set_first_syntax_error (const char *error)
{
  if (! error_p ())
    set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
216
/* Record ERROR with kind AARCH64_OPDE_FATAL_SYNTAX_ERROR,
   unconditionally overwriting any earlier error.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
222 \f
223 /* Number of littlenums required to hold an extended precision number. */
224 #define MAX_LITTLENUMS 6
225
226 /* Return value for certain parsers when the parsing fails; those parsers
227 return the information of the parsed result, e.g. register number, on
228 success. */
229 #define PARSE_FAIL -1
230
231 /* This is an invalid condition code that means no conditional field is
232 present. */
233 #define COND_ALWAYS 0x10
234
/* Hash table entry for a barrier option: its spelling and encoding.  */
typedef struct
{
  const char *template;
  unsigned long value;
} asm_barrier_opt;

/* Hash table entry for an NZCV flags combination: its spelling and
   encoding.  */
typedef struct
{
  const char *template;
  uint32_t value;
} asm_nzcv;

/* Maps a relocation operator name to its BFD relocation code.  */
struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};
252
253 /* Macros to define the register types and masks for the purpose
254 of parsing. */
255
256 #undef AARCH64_REG_TYPES
257 #define AARCH64_REG_TYPES \
258 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
259 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
260 BASIC_REG_TYPE(SP_32) /* wsp */ \
261 BASIC_REG_TYPE(SP_64) /* sp */ \
262 BASIC_REG_TYPE(Z_32) /* wzr */ \
263 BASIC_REG_TYPE(Z_64) /* xzr */ \
264 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
265 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
266 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
267 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
268 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
269 BASIC_REG_TYPE(CN) /* c[0-7] */ \
270 BASIC_REG_TYPE(VN) /* v[0-31] */ \
271 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
272 BASIC_REG_TYPE(PN) /* p[0-15] */ \
273 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
274 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
275 /* Typecheck: same, plus SVE registers. */ \
276 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
277 | REG_TYPE(ZN)) \
278 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
279 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
280 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
281 /* Typecheck: same, plus SVE registers. */ \
282 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
283 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
284 | REG_TYPE(ZN)) \
285 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
286 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
287 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
288 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
289 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
290 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
291 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
292 /* Typecheck: any [BHSDQ]P FP. */ \
293 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
294 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
295 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
296 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
298 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
299 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
300 /* Any integer register; used for error messages only. */ \
301 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
303 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
304 /* Pseudo type to mark the end of the enumerator sequence. */ \
305 BASIC_REG_TYPE(MAX)
306
307 #undef BASIC_REG_TYPE
308 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
309 #undef MULTI_REG_TYPE
310 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
311
312 /* Register type enumerators. */
313 typedef enum aarch64_reg_type_
314 {
315 /* A list of REG_TYPE_*. */
316 AARCH64_REG_TYPES
317 } aarch64_reg_type;
318
319 #undef BASIC_REG_TYPE
320 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
321 #undef REG_TYPE
322 #define REG_TYPE(T) (1 << REG_TYPE_##T)
323 #undef MULTI_REG_TYPE
324 #define MULTI_REG_TYPE(T,V) V,
325
/* Structure for a hash table entry for a register.  */
typedef struct
{
  const char *name;		/* Register name as written in assembly.  */
  unsigned char number;		/* Register number, e.g. 0 for x0.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;	/* REG_TYPE_* classification.  */
  unsigned char builtin;	/* NOTE(review): presumably non-zero for
				   predefined registers — confirm.  */
} reg_entry;
334
335 /* Values indexed by aarch64_reg_type to assist the type checking. */
336 static const unsigned reg_type_masks[] =
337 {
338 AARCH64_REG_TYPES
339 };
340
341 #undef BASIC_REG_TYPE
342 #undef REG_TYPE
343 #undef MULTI_REG_TYPE
344 #undef AARCH64_REG_TYPES
345
346 /* Diagnostics used when we don't get a register of the expected type.
347 Note: this has to synchronized with aarch64_reg_type definitions
348 above. */
349 static const char *
350 get_reg_expected_msg (aarch64_reg_type reg_type)
351 {
352 const char *msg;
353
354 switch (reg_type)
355 {
356 case REG_TYPE_R_32:
357 msg = N_("integer 32-bit register expected");
358 break;
359 case REG_TYPE_R_64:
360 msg = N_("integer 64-bit register expected");
361 break;
362 case REG_TYPE_R_N:
363 msg = N_("integer register expected");
364 break;
365 case REG_TYPE_R64_SP:
366 msg = N_("64-bit integer or SP register expected");
367 break;
368 case REG_TYPE_SVE_BASE:
369 msg = N_("base register expected");
370 break;
371 case REG_TYPE_R_Z:
372 msg = N_("integer or zero register expected");
373 break;
374 case REG_TYPE_SVE_OFFSET:
375 msg = N_("offset register expected");
376 break;
377 case REG_TYPE_R_SP:
378 msg = N_("integer or SP register expected");
379 break;
380 case REG_TYPE_R_Z_SP:
381 msg = N_("integer, zero or SP register expected");
382 break;
383 case REG_TYPE_FP_B:
384 msg = N_("8-bit SIMD scalar register expected");
385 break;
386 case REG_TYPE_FP_H:
387 msg = N_("16-bit SIMD scalar or floating-point half precision "
388 "register expected");
389 break;
390 case REG_TYPE_FP_S:
391 msg = N_("32-bit SIMD scalar or floating-point single precision "
392 "register expected");
393 break;
394 case REG_TYPE_FP_D:
395 msg = N_("64-bit SIMD scalar or floating-point double precision "
396 "register expected");
397 break;
398 case REG_TYPE_FP_Q:
399 msg = N_("128-bit SIMD scalar or floating-point quad precision "
400 "register expected");
401 break;
402 case REG_TYPE_CN:
403 msg = N_("C0 - C15 expected");
404 break;
405 case REG_TYPE_R_Z_BHSDQ_V:
406 msg = N_("register expected");
407 break;
408 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
409 msg = N_("SIMD scalar or floating-point register expected");
410 break;
411 case REG_TYPE_VN: /* any V reg */
412 msg = N_("vector register expected");
413 break;
414 case REG_TYPE_ZN:
415 msg = N_("SVE vector register expected");
416 break;
417 case REG_TYPE_PN:
418 msg = N_("SVE predicate register expected");
419 break;
420 default:
421 as_fatal (_("invalid register type %d"), reg_type);
422 }
423 return msg;
424 }
425
426 /* Some well known registers that we refer to directly elsewhere. */
427 #define REG_SP 31
428
429 /* Instructions take 4 bytes in the object file. */
430 #define INSN_SIZE 4
431
432 static struct hash_control *aarch64_ops_hsh;
433 static struct hash_control *aarch64_cond_hsh;
434 static struct hash_control *aarch64_shift_hsh;
435 static struct hash_control *aarch64_sys_regs_hsh;
436 static struct hash_control *aarch64_pstatefield_hsh;
437 static struct hash_control *aarch64_sys_regs_ic_hsh;
438 static struct hash_control *aarch64_sys_regs_dc_hsh;
439 static struct hash_control *aarch64_sys_regs_at_hsh;
440 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
441 static struct hash_control *aarch64_reg_hsh;
442 static struct hash_control *aarch64_barrier_opt_hsh;
443 static struct hash_control *aarch64_nzcv_hsh;
444 static struct hash_control *aarch64_pldop_hsh;
445 static struct hash_control *aarch64_hint_opt_hsh;
446
447 /* Stuff needed to resolve the label ambiguity
448 As:
449 ...
450 label: <insn>
451 may differ from:
452 ...
453 label:
454 <insn> */
455
456 static symbolS *last_label_seen;
457
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;

typedef struct literal_pool
{
  literal_expression literals[MAX_LITERAL_POOL_SIZE];	/* Pool entries.  */
  unsigned int next_free_entry;	/* Index of the first unused slot.  */
  unsigned int id;		/* Identifier used to label the pool.  */
  symbolS *symbol;		/* Symbol at the start of the pool.  */
  segT section;			/* Section the pool belongs to.  */
  subsegT sub_section;		/* Sub-section the pool belongs to.  */
  int size;			/* Entry size in bytes.  */
  struct literal_pool *next;	/* Next pool in the list_of_pools chain.  */
} literal_pool;
480
481 /* Pointer to a linked list of literal pools. */
482 static literal_pool *list_of_pools = NULL;
483 \f
484 /* Pure syntax. */
485
486 /* This array holds the chars that always start a comment. If the
487 pre-processor is disabled, these aren't very useful. */
488 const char comment_chars[] = "";
489
490 /* This array holds the chars that only start a comment at the beginning of
491 a line. If the line seems to have the form '# 123 filename'
492 .line and .file directives will appear in the pre-processed output. */
493 /* Note that input_file.c hand checks for '#' at the beginning of the
494 first line of the input file. This is because the compiler outputs
495 #NO_APP at the beginning of its output. */
496 /* Also note that comments like this one will always work. */
497 const char line_comment_chars[] = "#";
498
499 const char line_separator_chars[] = ";";
500
501 /* Chars that can be used to separate mant
502 from exp in floating point numbers. */
503 const char EXP_CHARS[] = "eE";
504
505 /* Chars that mean this number is a floating point constant. */
506 /* As in 0f12.456 */
507 /* or 0d1.2345e12 */
508
509 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
510
511 /* Prefix character that indicates the start of an immediate value. */
512 #define is_immediate_prefix(C) ((C) == '#')
513
514 /* Separator character handling. */
515
516 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
517
518 static inline bfd_boolean
519 skip_past_char (char **str, char c)
520 {
521 if (**str == c)
522 {
523 (*str)++;
524 return TRUE;
525 }
526 else
527 return FALSE;
528 }
529
530 #define skip_past_comma(str) skip_past_char (str, ',')
531
532 /* Arithmetic expressions (possibly involving symbols). */
533
534 static bfd_boolean in_my_get_expression_p = FALSE;
535
536 /* Third argument to my_get_expression. */
537 #define GE_NO_PREFIX 0
538 #define GE_OPT_PREFIX 1
539
/* Return TRUE if the string pointed to by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   PREFIX_MODE is GE_NO_PREFIX or GE_OPT_PREFIX and controls whether a
   leading '#' immediate prefix is accepted (and skipped).  If
   REJECT_ABSENT is non-zero, an absent expression (O_absent) is also
   treated as a failure.  */

static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () works on input_line_pointer, so temporarily point it
     at *STR; in_my_get_expression_p makes md_operand mark bad operands
     with O_illegal instead of reporting them itself.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand ().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
606
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  Delegates to the generic IEEE helper.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
617
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  Called back from the generic
   expression parser; only acts while my_get_expression is in
   progress.  */
void
md_operand (expressionS * exp)
{
  if (in_my_get_expression_p)
    exp->X_op = O_illegal;
}
626
627 /* Immediate values. */
628
629 /* Errors may be set multiple times during parsing or bit encoding
630 (particularly in the Neon bits), but usually the earliest error which is set
631 will be the most meaningful. Avoid overwriting it with later (cascading)
632 errors by calling this function. */
633
/* Record ERROR as a syntax error unless an earlier (usually more
   meaningful) error has already been recorded.  */
static void
first_error (const char *error)
{
  if (! error_p ())
    set_syntax_error (error);
}
640
/* Similar to first_error, but this function accepts a formatted error
   message.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message must fit in the buffer.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
665
/* Register parsing.  */

/* Generic register parser which is called by other specialized
   register parsers.
   CCP points to what should be the beginning of a register name.
   If it is indeed a valid register name, advance CCP over it and
   return the reg_entry structure; otherwise return NULL.
   It does not issue diagnostics.  */

static reg_entry *
parse_reg (char **ccp)
{
  char *start = *ccp;
  char *p;
  reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif

  /* A register name starts with a letter that is also a valid symbol
     starter.  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  /* Scan the rest of the name: letters, digits and underscores.  */
  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}
704
705 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
706 return FALSE. */
707 static bfd_boolean
708 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
709 {
710 return (reg_type_masks[type] & (1 << reg->type)) != 0;
711 }
712
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit GPRs (including WSP and WZR) get the W qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit GPRs (including SP and XZR) get the X qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE Z register is only accepted when REG_TYPE allows it and
	 the name is followed by a ".s" or ".d" element-size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
770
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   This is aarch64_addr_reg_parse restricted to integer registers
   (REG_TYPE_R_Z_SP), i.e. without the SVE variants.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
782
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector and predicate registers never take an element count;
     Advanced SIMD registers without a leading digit get width 0 too
     (the caller then expects an element index).  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid as the 1q (128-bit, one element) form.  */
      if (width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* A non-zero width must describe a 64- or 128-bit vector, except for
     the special 2h (32-bit) form.  */
  if (width != 0 && width * element_size != 64 && width * element_size != 128
      && !(width == 2 && element_size == 16))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
866
867 /* *STR contains an SVE zero/merge predication suffix. Parse it into
868 *PARSED_TYPE and point *STR at the end of the suffix. */
869
870 static bfd_boolean
871 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
872 {
873 char *ptr = *str;
874
875 /* Skip '/'. */
876 gas_assert (*ptr == '/');
877 ptr++;
878 switch (TOLOWER (*ptr))
879 {
880 case 'z':
881 parsed_type->type = NT_zero;
882 break;
883 case 'm':
884 parsed_type->type = NT_merge;
885 break;
886 default:
887 if (*ptr != '\0' && *ptr != ',')
888 first_error_fmt (_("unexpected character `%c' in predication type"),
889 *ptr);
890 else
891 first_error (_("missing predication type"));
892 return FALSE;
893 }
894 parsed_type->width = 0;
895 *str = ptr + 1;
896 return TRUE;
897 }
898
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start with an empty qualifier: no type, no index.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE down to the register's actual type.  */
  type = reg->type;

  /* V/Z registers may carry a ".<T>" suffix; P registers may carry
     either ".<T>" or a "/z" or "/m" predication suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list == TRUE)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1034
/* Parse register.

   Return the register number on success; return PARSE_FAIL otherwise.

   If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
   the register (e.g. NEON double or quad reg when either has been requested).

   If this is a NEON vector register with additional type information, fill
   in the struct pointed to by VECTYPE (if non-NULL).

   This parser does not handle register list.  */

static int
aarch64_reg_parse (char **ccp, aarch64_reg_type type,
		   aarch64_reg_type *rtype, struct vector_type_el *vectype)
{
  struct vector_type_el atype;
  char *str = *ccp;
  int reg = parse_typed_reg (&str, type, rtype, &atype,
			     /*in_reg_list= */ FALSE);

  if (reg == PARSE_FAIL)
    return PARSE_FAIL;

  if (vectype)
    *vectype = atype;

  /* Only consume the input on success.  */
  *ccp = str;

  return reg;
}
1066
1067 static inline bfd_boolean
1068 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1069 {
1070 return
1071 e1.type == e2.type
1072 && e1.defined == e2.defined
1073 && e1.width == e2.width && e1.index == e2.index;
1074 }
1075
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  /* VAL is the most recently parsed register number; VAL_RANGE is the
     low bound when a "Vn-Vm" range is being processed.  */
  int val, val_range;
  /* Non-zero while parsing the second register of a "Vn-Vm" range.  */
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  /* Set when any element in the list carries a [index]; the shared
     index is then parsed after the closing '}'.  */
  bfd_boolean expect_index = FALSE;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  /* Bump the low bound so the accumulation loop below covers
	     VAL_RANGE..VAL without re-adding the range's first register,
	     which was already accumulated when it was parsed.  */
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      /* Pack each register number into its 5-bit slot (see the format
	 description above).  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue on ',' or on '-' (a range); the comma operator sets
     IN_RANGE before testing for '-'.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1236
1237 /* Directives: register aliases. */
1238
1239 static reg_entry *
1240 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1241 {
1242 reg_entry *new;
1243 const char *name;
1244
1245 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1246 {
1247 if (new->builtin)
1248 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1249 str);
1250
1251 /* Only warn about a redefinition if it's not defined as the
1252 same register. */
1253 else if (new->number != number || new->type != type)
1254 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1255
1256 return NULL;
1257 }
1258
1259 name = xstrdup (str);
1260 new = XNEW (reg_entry);
1261
1262 new->name = name;
1263 new->number = number;
1264 new->type = type;
1265 new->builtin = FALSE;
1266
1267 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1268 abort ();
1269
1270 return new;
1271 }
1272
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Return TRUE: we have consumed (and diagnosed) the directive.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* NUL-terminated working copy; freed before every return below.  */
  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only bother with the case variants when they actually differ
	 from the name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1352
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  A ".req" at the
   start of a line therefore has no alias to define and is an error.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1360
1361 /* The .unreq directive deletes an alias which was previously defined
1362 by .req. For example:
1363
1364 my_alias .req r11
1365 .unreq my_alias */
1366
1367 static void
1368 s_unreq (int a ATTRIBUTE_UNUSED)
1369 {
1370 char *name;
1371 char saved_char;
1372
1373 name = input_line_pointer;
1374
1375 while (*input_line_pointer != 0
1376 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1377 ++input_line_pointer;
1378
1379 saved_char = *input_line_pointer;
1380 *input_line_pointer = 0;
1381
1382 if (!*name)
1383 as_bad (_("invalid syntax for .unreq directive"));
1384 else
1385 {
1386 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1387
1388 if (!reg)
1389 as_bad (_("unknown register alias '%s'"), name);
1390 else if (reg->builtin)
1391 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1392 name);
1393 else
1394 {
1395 char *p;
1396 char *nbuf;
1397
1398 hash_delete (aarch64_reg_hsh, name, FALSE);
1399 free ((char *) reg->name);
1400 free (reg);
1401
1402 /* Also locate the all upper case and all lower case versions.
1403 Do not complain if we cannot find one or the other as it
1404 was probably deleted above. */
1405
1406 nbuf = strdup (name);
1407 for (p = nbuf; *p; p++)
1408 *p = TOUPPER (*p);
1409 reg = hash_find (aarch64_reg_hsh, nbuf);
1410 if (reg)
1411 {
1412 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1413 free ((char *) reg->name);
1414 free (reg);
1415 }
1416
1417 for (p = nbuf; *p; p++)
1418 *p = TOLOWER (*p);
1419 reg = hash_find (aarch64_reg_hsh, nbuf);
1420 if (reg)
1421 {
1422 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1423 free ((char *) reg->name);
1424 free (reg);
1425 }
1426
1427 free (nbuf);
1428 }
1429 }
1430
1431 *input_line_pointer = saved_char;
1432 demand_empty_rest_of_line ();
1433 }
1434
1435 /* Directives: Instruction set selection. */
1436
1437 #ifdef OBJ_ELF
1438 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1439 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1440 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1441 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1442
/* Create a new mapping symbol for the transition to STATE, at offset
   VALUE within FRAG.  STATE selects "$d" (data) or "$x" (code); any
   other state aborts.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols are emitted in address order within a frag.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1498
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Insert a "$d" symbol at offset VALUE within FRAG (the start of the
   padding) and re-insert a symbol for STATE at VALUE + BYTES (the
   aligned address where code resumes).  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1526
1527 static void mapping_state_2 (enum mstate state, int max_chars);
1528
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	/* Mark everything emitted so far in this section as data.  */
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1570
/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back.  */

static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only emit mapping symbols in normal (loaded) sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
1590 #else
1591 #define mapping_state(x) /* nothing */
1592 #define mapping_state_2(x, y) /* nothing */
1593 #endif
1594
1595 /* Directives: sectioning and alignment. */
1596
/* Implement the ".bss" directive: switch to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1606
/* Implement the ".even" directive: align to a 2-byte boundary
   (frag_align's first argument is a power of two).  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1618
1619 /* Directives: Literal pools. */
1620
1621 static literal_pool *
1622 find_literal_pool (int size)
1623 {
1624 literal_pool *pool;
1625
1626 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1627 {
1628 if (pool->section == now_seg
1629 && pool->sub_section == now_subseg && pool->size == size)
1630 break;
1631 }
1632
1633 return pool;
1634 }
1635
1636 static literal_pool *
1637 find_or_make_literal_pool (int size)
1638 {
1639 /* Next literal pool ID number. */
1640 static unsigned int latest_pool_num = 1;
1641 literal_pool *pool;
1642
1643 pool = find_literal_pool (size);
1644
1645 if (pool == NULL)
1646 {
1647 /* Create a new pool. */
1648 pool = XNEW (literal_pool);
1649 if (!pool)
1650 return NULL;
1651
1652 /* Currently we always put the literal pool in the current text
1653 section. If we were generating "small" model code where we
1654 knew that all code and initialised data was within 1MB then
1655 we could output literals to mergeable, read-only data
1656 sections. */
1657
1658 pool->next_free_entry = 0;
1659 pool->section = now_seg;
1660 pool->sub_section = now_subseg;
1661 pool->size = size;
1662 pool->next = list_of_pools;
1663 pool->symbol = NULL;
1664
1665 /* Add it to the list. */
1666 list_of_pools = pool;
1667 }
1668
1669 /* New pools, and emptied pools, will have a NULL symbol. */
1670 if (pool->symbol == NULL)
1671 {
1672 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1673 (valueT) 0, &zero_address_frag);
1674 pool->id = latest_pool_num++;
1675 }
1676
1677 /* Done. */
1678 return pool;
1679 }
1680
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  On success *EXP is
   rewritten to reference the pool symbol plus the entry's offset.  */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  Constants and
     symbolic expressions are deduplicated separately.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP as "pool symbol + byte offset of this entry"; the
     final address is fixed up when the pool is dumped by s_ltorg.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1740
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its name.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1791
1792
/* Implement the ".ltorg"/".pool" directive: dump every non-empty
   literal pool (4- and 8-byte) into the current section and mark the
   pools as empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* ALIGN is a power of two: handle the 16-byte (2^2=4... i.e. 4- and
     8-byte entry, alignments 4 and 8 and 16) pools in turn.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* \002 keeps the generated pool label out of the user namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1851
1852 #ifdef OBJ_ELF
1853 /* Forward declarations for functions below, in the MD interface
1854 section. */
1855 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1856 static struct reloc_table_entry * find_reloc_table_entry (char **);
1857
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Implement ".word"/".long" (NBYTES == 4) and ".xword"/".dword"
   (NBYTES == 8): emit a comma-separated list of NBYTES-wide values.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* The emitted values are data, not instructions.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional ":reloc_suffix:" after the symbol;
	     currently only diagnosed, never applied (see N.B. above).  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1913
1914 #endif /* OBJ_ELF */
1915
/* Output a 32-bit word, but mark as an instruction.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* AArch64 instructions are always little-endian, so byte-swap
	 the value when assembling for a big-endian target.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1968
1969 #ifdef OBJ_ELF
/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.  */

static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the instruction that follows
     immediately after this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
1984
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2004
/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.  */

static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the instruction that follows
     immediately after this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2019 #endif /* OBJ_ELF */
2020
2021 static void s_aarch64_arch (int);
2022 static void s_aarch64_cpu (int);
2023 static void s_aarch64_arch_extension (int);
2024
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is an alias for ".ltorg".  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument is the value width in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};
2054 \f
2055
2056 /* Check whether STR points to a register name followed by a comma or the
2057 end of line; REG_TYPE indicates which register types are checked
2058 against. Return TRUE if STR is such a register name; otherwise return
2059 FALSE. The function does not intend to produce any diagnostics, but since
2060 the register parser aarch64_reg_parse, which is called by this function,
2061 does produce diagnostics, we call clear_error to clear any diagnostics
2062 that may be generated by aarch64_reg_parse.
2063 Also, the function returns FALSE directly if there is any user error
2064 present at the function entry. This prevents the existing diagnostics
2065 state from being spoiled.
2066 The function currently serves parse_constant_immediate and
2067 parse_big_immediate only. */
2068 static bfd_boolean
2069 reg_name_p (char *str, aarch64_reg_type reg_type)
2070 {
2071 int reg;
2072
2073 /* Prevent the diagnostics state from being spoiled. */
2074 if (error_p ())
2075 return FALSE;
2076
2077 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2078
2079 /* Clear the parsing error that may be set by the reg parser. */
2080 clear_error ();
2081
2082 if (reg == PARSE_FAIL)
2083 return FALSE;
2084
2085 skip_whitespace (str);
2086 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2087 return TRUE;
2088
2089 return FALSE;
2090 }
2091
2092 /* Parser functions used exclusively in instruction operands. */
2093
2094 /* Parse an immediate expression which may not be constant.
2095
2096 To prevent the expression parser from pushing a register name
2097 into the symbol table as an undefined symbol, firstly a check is
2098 done to find out whether STR is a register of type REG_TYPE followed
2099 by a comma or the end of line. Return FALSE if STR is such a string. */
2100
2101 static bfd_boolean
2102 parse_immediate_expression (char **str, expressionS *exp,
2103 aarch64_reg_type reg_type)
2104 {
2105 if (reg_name_p (*str, reg_type))
2106 {
2107 set_recoverable_error (_("immediate operand required"));
2108 return FALSE;
2109 }
2110
2111 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2112
2113 if (exp->X_op == O_absent)
2114 {
2115 set_fatal_syntax_error (_("missing immediate expression"));
2116 return FALSE;
2117 }
2118
2119 return TRUE;
2120 }
2121
2122 /* Constant immediate-value read function for use in insn parsing.
2123 STR points to the beginning of the immediate (with the optional
2124 leading #); *VAL receives the value. REG_TYPE says which register
2125 names should be treated as registers rather than as symbolic immediates.
2126
2127 Return TRUE on success; otherwise return FALSE. */
2128
2129 static bfd_boolean
2130 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2131 {
2132 expressionS exp;
2133
2134 if (! parse_immediate_expression (str, &exp, reg_type))
2135 return FALSE;
2136
2137 if (exp.X_op != O_constant)
2138 {
2139 set_syntax_error (_("constant expression required"));
2140 return FALSE;
2141 }
2142
2143 *val = exp.X_add_number;
2144 return TRUE;
2145 }
2146
/* Compress the IEEE single-precision word IMM into the 8-bit AArch64
   floating-point immediate encoding: bits [25:19] become bits [6:0]
   and the sign bit [31] becomes bit [7].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0]  */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31]    -> b[7]    */

  return sign | low7;
}
2153
2154 /* Return TRUE if the single-precision floating-point value encoded in IMM
2155 can be expressed in the AArch64 8-bit signed floating-point format with
2156 3-bit exponent and normalized 4 bits of precision; in other words, the
2157 floating-point value must be expressable as
2158 (+/-) n / 16 * power (2, r)
2159 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2160
2161 static bfd_boolean
2162 aarch64_imm_float_p (uint32_t imm)
2163 {
2164 /* If a single-precision floating-point value has the following bit
2165 pattern, it can be expressed in the AArch64 8-bit floating-point
2166 format:
2167
2168 3 32222222 2221111111111
2169 1 09876543 21098765432109876543210
2170 n Eeeeeexx xxxx0000000000000000000
2171
2172 where n, e and each x are either 0 or 1 independently, with
2173 E == ~ e. */
2174
2175 uint32_t pattern;
2176
2177 /* Prepare the pattern for 'Eeeeee'. */
2178 if (((imm >> 30) & 0x1) == 0)
2179 pattern = 0x3e000000;
2180 else
2181 pattern = 0x40000000;
2182
2183 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2184 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2185 }
2186
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  */

static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~: the three bits after E must be its complement.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111: that exponent has no single-precision
     equivalent (it would be Inf/NaN).  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  /* Repack sign + exponent + significand into a single-precision word.  */
  *fpword = ((high32 & 0xc0000000)	/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));	/* 3 S bits.  */
  return TRUE;
}
2234
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* The 64-bit pattern must be losslessly convertible to a
	     32-bit single-precision pattern.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision pattern must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else
    {
      if (reg_name_p (str, reg_type))
	{
	  /* A recoverable (rather than fatal) error lets the caller try
	     other operand forms.  */
	  set_recoverable_error (_("immediate operand required"));
	  return FALSE;
	}

      /* We must not accidentally parse an integer as a floating-point number.
	 Make sure that the value we parse is not an integer by checking for
	 special characters '.' or 'e'.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Let the generic IEEE parser convert the text form; 's' selects
	 single precision.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2327
2328 /* Less-generic immediate-value read function with the possibility of loading
2329 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2330 instructions.
2331
2332 To prevent the expression parser from pushing a register name into the
2333 symbol table as an undefined symbol, a check is firstly done to find
2334 out whether STR is a register of type REG_TYPE followed by a comma or
2335 the end of line. Return FALSE if STR is such a register. */
2336
2337 static bfd_boolean
2338 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2339 {
2340 char *ptr = *str;
2341
2342 if (reg_name_p (ptr, reg_type))
2343 {
2344 set_syntax_error (_("immediate operand required"));
2345 return FALSE;
2346 }
2347
2348 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2349
2350 if (inst.reloc.exp.X_op == O_constant)
2351 *imm = inst.reloc.exp.X_add_number;
2352
2353 *str = ptr;
2354
2355 return TRUE;
2356 }
2357
2358 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2359 if NEED_LIBOPCODES is non-zero, the fixup will need
2360 assistance from the libopcodes. */
2361
2362 static inline void
2363 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2364 const aarch64_opnd_info *operand,
2365 int need_libopcodes_p)
2366 {
2367 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2368 reloc->opnd = operand->type;
2369 if (need_libopcodes_p)
2370 reloc->need_libopcodes_p = 1;
2371 };
2372
2373 /* Return TRUE if the instruction needs to be fixed up later internally by
2374 the GAS; otherwise return FALSE. */
2375
2376 static inline bfd_boolean
2377 aarch64_gas_internal_fixup_p (void)
2378 {
2379 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2380 }
2381
/* Assign the immediate value to the relevant field in *OPERAND if
   RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
   needs an internal fixup in a later stage.
   ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
   IMM.VALUE that may get assigned with the constant.  */
static inline void
assign_imm_if_const_or_fixup_later (struct reloc *reloc,
				    aarch64_opnd_info *operand,
				    int addr_off_p,
				    int need_libopcodes_p,
				    int skip_p)
{
  if (reloc->exp.X_op == O_constant)
    {
      if (addr_off_p)
	operand->addr.offset.imm = reloc->exp.X_add_number;
      else
	operand->imm.value = reloc->exp.X_add_number;
      /* The value is known now, so no relocation needs to be emitted.  */
      reloc->type = BFD_RELOC_UNUSED;
    }
  else
    {
      aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
      /* Tell libopcodes to ignore this operand or not.  This is helpful
	 when one of the operands needs to be fixed up later but we need
	 libopcodes to check the other operands.  */
      operand->skip = skip_p;
    }
}
2411
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;		/* Modifier text, without the colons.  */
  int pc_rel;			/* Non-zero if the relocation is PC-relative.  */
  /* The BFD relocation code to use for each class of instruction the
     modifier may appear on; 0 means the modifier is not allowed on
     that class of instruction.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2431
/* Each entry lists, in struct reloc_table_entry order: name, pc_rel,
   then the ADR, ADRP, MOVW, ADD, LD/ST and LD-literal relocation codes
   (0 = modifier not allowed on that instruction class).  */
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVK, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVK.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   0,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
2896
2897 /* Given the address of a pointer pointing to the textual name of a
2898 relocation as may appear in assembler source, attempt to find its
2899 details in reloc_table. The pointer will be updated to the character
2900 after the trailing colon. On failure, NULL will be returned;
2901 otherwise return the reloc_table_entry. */
2902
2903 static struct reloc_table_entry *
2904 find_reloc_table_entry (char **str)
2905 {
2906 unsigned int i;
2907 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2908 {
2909 int length = strlen (reloc_table[i].name);
2910
2911 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2912 && (*str)[length] == ':')
2913 {
2914 *str += (length + 1);
2915 return &reloc_table[i];
2916 }
2917 }
2918
2919 return NULL;
2920 }
2921
/* Mode argument to parse_shift and parse_shifter_operand: selects which
   shift/extend operators are acceptable in the current context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
2936
2937 /* Parse a <shift> operator on an AArch64 data processing instruction.
2938 Return TRUE on success; otherwise return FALSE. */
2939 static bfd_boolean
2940 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2941 {
2942 const struct aarch64_name_value_pair *shift_op;
2943 enum aarch64_modifier_kind kind;
2944 expressionS exp;
2945 int exp_has_prefix;
2946 char *s = *str;
2947 char *p = s;
2948
2949 for (p = *str; ISALPHA (*p); p++)
2950 ;
2951
2952 if (p == *str)
2953 {
2954 set_syntax_error (_("shift expression expected"));
2955 return FALSE;
2956 }
2957
2958 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2959
2960 if (shift_op == NULL)
2961 {
2962 set_syntax_error (_("shift operator expected"));
2963 return FALSE;
2964 }
2965
2966 kind = aarch64_get_operand_modifier (shift_op);
2967
2968 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2969 {
2970 set_syntax_error (_("invalid use of 'MSL'"));
2971 return FALSE;
2972 }
2973
2974 if (kind == AARCH64_MOD_MUL
2975 && mode != SHIFTED_MUL
2976 && mode != SHIFTED_MUL_VL)
2977 {
2978 set_syntax_error (_("invalid use of 'MUL'"));
2979 return FALSE;
2980 }
2981
2982 switch (mode)
2983 {
2984 case SHIFTED_LOGIC_IMM:
2985 if (aarch64_extend_operator_p (kind) == TRUE)
2986 {
2987 set_syntax_error (_("extending shift is not permitted"));
2988 return FALSE;
2989 }
2990 break;
2991
2992 case SHIFTED_ARITH_IMM:
2993 if (kind == AARCH64_MOD_ROR)
2994 {
2995 set_syntax_error (_("'ROR' shift is not permitted"));
2996 return FALSE;
2997 }
2998 break;
2999
3000 case SHIFTED_LSL:
3001 if (kind != AARCH64_MOD_LSL)
3002 {
3003 set_syntax_error (_("only 'LSL' shift is permitted"));
3004 return FALSE;
3005 }
3006 break;
3007
3008 case SHIFTED_MUL:
3009 if (kind != AARCH64_MOD_MUL)
3010 {
3011 set_syntax_error (_("only 'MUL' is permitted"));
3012 return FALSE;
3013 }
3014 break;
3015
3016 case SHIFTED_MUL_VL:
3017 /* "MUL VL" consists of two separate tokens. Require the first
3018 token to be "MUL" and look for a following "VL". */
3019 if (kind == AARCH64_MOD_MUL)
3020 {
3021 skip_whitespace (p);
3022 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3023 {
3024 p += 2;
3025 kind = AARCH64_MOD_MUL_VL;
3026 break;
3027 }
3028 }
3029 set_syntax_error (_("only 'MUL VL' is permitted"));
3030 return FALSE;
3031
3032 case SHIFTED_REG_OFFSET:
3033 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3034 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3035 {
3036 set_fatal_syntax_error
3037 (_("invalid shift for the register offset addressing mode"));
3038 return FALSE;
3039 }
3040 break;
3041
3042 case SHIFTED_LSL_MSL:
3043 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3044 {
3045 set_syntax_error (_("invalid shift operator"));
3046 return FALSE;
3047 }
3048 break;
3049
3050 default:
3051 abort ();
3052 }
3053
3054 /* Whitespace can appear here if the next thing is a bare digit. */
3055 skip_whitespace (p);
3056
3057 /* Parse shift amount. */
3058 exp_has_prefix = 0;
3059 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3060 exp.X_op = O_absent;
3061 else
3062 {
3063 if (is_immediate_prefix (*p))
3064 {
3065 p++;
3066 exp_has_prefix = 1;
3067 }
3068 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3069 }
3070 if (kind == AARCH64_MOD_MUL_VL)
3071 /* For consistency, give MUL VL the same shift amount as an implicit
3072 MUL #1. */
3073 operand->shifter.amount = 1;
3074 else if (exp.X_op == O_absent)
3075 {
3076 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
3077 {
3078 set_syntax_error (_("missing shift amount"));
3079 return FALSE;
3080 }
3081 operand->shifter.amount = 0;
3082 }
3083 else if (exp.X_op != O_constant)
3084 {
3085 set_syntax_error (_("constant shift amount required"));
3086 return FALSE;
3087 }
3088 /* For parsing purposes, MUL #n has no inherent range. The range
3089 depends on the operand and will be checked by operand-specific
3090 routines. */
3091 else if (kind != AARCH64_MOD_MUL
3092 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3093 {
3094 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3095 return FALSE;
3096 }
3097 else
3098 {
3099 operand->shifter.amount = exp.X_add_number;
3100 operand->shifter.amount_present = 1;
3101 }
3102
3103 operand->shifter.operator_present = 1;
3104 operand->shifter.kind = kind;
3105
3106 *str = p;
3107 return TRUE;
3108 }
3109
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the arithmetic and logical immediate forms take this shape.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return FALSE;

  p = *str;

  /* Accept an immediate expression.  */
  if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
    return FALSE;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return FALSE;

  /* Don't accept any shifter for logical immediate values.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return FALSE;
    }

  *str = p;
  return TRUE;
}
3150
/* Parse a <shifter_operand> for a data processing instruction:

      <Rm>
      <Rm>, <shift>
      #<immediate>
      #<immediate>, LSL #imm

   where <shift> is handled by parse_shift above, and the last two
   cases are handled by the function above.

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_shifter_operand (char **str, aarch64_opnd_info *operand,
		       enum parse_shift_mode mode)
{
  const reg_entry *reg;
  aarch64_opnd_qualifier_t qualifier;
  enum aarch64_operand_class opd_class
    = aarch64_get_operand_class (operand->type);

  /* Try the register form first.  */
  reg = aarch64_reg_parse_32_64 (str, &qualifier);
  if (reg)
    {
      if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
	{
	  set_syntax_error (_("unexpected register in the immediate operand"));
	  return FALSE;
	}

      if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
	{
	  set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
	  return FALSE;
	}

      operand->reg.regno = reg->number;
      operand->qualifier = qualifier;

      /* Accept optional shift operation on register.  */
      if (! skip_past_comma (str))
	return TRUE;

      if (! parse_shift (str, operand, mode))
	return FALSE;

      return TRUE;
    }
  else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
    {
      set_syntax_error
	(_("integer register expected in the extended/shifted operand "
	   "register"));
      return FALSE;
    }

  /* We have a shifted immediate variable.  */
  return parse_shifter_operand_imm (str, operand, mode);
}
3212
/* Parse a <shifter_operand> that may be preceded by a relocation
   modifier of the form "#:rello:" or ":rello:".  When no modifier is
   present, delegate entirely to parse_shifter_operand.
   Return TRUE on success; return FALSE otherwise.  */

static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3273
3274 /* Parse all forms of an address expression. Information is written
3275 to *OPERAND and/or inst.reloc.
3276
3277 The A64 instruction set has the following addressing modes:
3278
3279 Offset
3280 [base] // in SIMD ld/st structure
3281 [base{,#0}] // in ld/st exclusive
3282 [base{,#imm}]
3283 [base,Xm{,LSL #imm}]
3284 [base,Xm,SXTX {#imm}]
3285 [base,Wm,(S|U)XTW {#imm}]
3286 Pre-indexed
3287 [base,#imm]!
3288 Post-indexed
3289 [base],#imm
3290 [base],Xm // in SIMD ld/st structure
3291 PC-relative (literal)
3292 label
3293 SVE:
3294 [base,#imm,MUL VL]
3295 [base,Zm.D{,LSL #imm}]
3296 [base,Zm.S,(S|U)XTW {#imm}]
3297 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3298 [Zn.S,#imm]
3299 [Zn.D,#imm]
3300 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3301 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3302 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3303
3304 (As a convenience, the notation "=immediate" is permitted in conjunction
3305 with the pc-relative literal load instructions to automatically place an
3306 immediate value or symbolic address in a nearby literal pool and generate
3307 a hidden label which references it.)
3308
3309 Upon a successful parsing, the address structure in *OPERAND will be
3310 filled in the following way:
3311
3312 .base_regno = <base>
3313 .offset.is_reg // 1 if the offset is a register
3314 .offset.imm = <imm>
3315 .offset.regno = <Rm>
3316
3317 For different addressing modes defined in the A64 ISA:
3318
3319 Offset
3320 .pcrel=0; .preind=1; .postind=0; .writeback=0
3321 Pre-indexed
3322 .pcrel=0; .preind=1; .postind=0; .writeback=1
3323 Post-indexed
3324 .pcrel=0; .preind=0; .postind=1; .writeback=1
3325 PC-relative (literal)
3326 .pcrel=1; .preind=1; .postind=0; .writeback=0
3327
3328 The shift/extension information, if any, will be stored in .shifter.
3329 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3330 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3331 corresponding register.
3332
3333 BASE_TYPE says which types of base register should be accepted and
3334 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3335 is the type of shifter that is allowed for immediate offsets,
3336 or SHIFTED_NONE if none.
3337
3338 In all other respects, it is the caller's responsibility to check
3339 for addressing modes not supported by the instruction, and to set
3340 inst.reloc.type. */
3341
3342 static bfd_boolean
3343 parse_address_main (char **str, aarch64_opnd_info *operand,
3344 aarch64_opnd_qualifier_t *base_qualifier,
3345 aarch64_opnd_qualifier_t *offset_qualifier,
3346 aarch64_reg_type base_type, aarch64_reg_type offset_type,
3347 enum parse_shift_mode imm_shift_mode)
3348 {
3349 char *p = *str;
3350 const reg_entry *reg;
3351 expressionS *exp = &inst.reloc.exp;
3352
3353 *base_qualifier = AARCH64_OPND_QLF_NIL;
3354 *offset_qualifier = AARCH64_OPND_QLF_NIL;
3355 if (! skip_past_char (&p, '['))
3356 {
3357 /* =immediate or label. */
3358 operand->addr.pcrel = 1;
3359 operand->addr.preind = 1;
3360
3361 /* #:<reloc_op>:<symbol> */
3362 skip_past_char (&p, '#');
3363 if (skip_past_char (&p, ':'))
3364 {
3365 bfd_reloc_code_real_type ty;
3366 struct reloc_table_entry *entry;
3367
3368 /* Try to parse a relocation modifier. Anything else is
3369 an error. */
3370 entry = find_reloc_table_entry (&p);
3371 if (! entry)
3372 {
3373 set_syntax_error (_("unknown relocation modifier"));
3374 return FALSE;
3375 }
3376
3377 switch (operand->type)
3378 {
3379 case AARCH64_OPND_ADDR_PCREL21:
3380 /* adr */
3381 ty = entry->adr_type;
3382 break;
3383
3384 default:
3385 ty = entry->ld_literal_type;
3386 break;
3387 }
3388
3389 if (ty == 0)
3390 {
3391 set_syntax_error
3392 (_("this relocation modifier is not allowed on this "
3393 "instruction"));
3394 return FALSE;
3395 }
3396
3397 /* #:<reloc_op>: */
3398 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3399 {
3400 set_syntax_error (_("invalid relocation expression"));
3401 return FALSE;
3402 }
3403
3404 /* #:<reloc_op>:<expr> */
3405 /* Record the relocation type. */
3406 inst.reloc.type = ty;
3407 inst.reloc.pc_rel = entry->pc_rel;
3408 }
3409 else
3410 {
3411
3412 if (skip_past_char (&p, '='))
3413 /* =immediate; need to generate the literal in the literal pool. */
3414 inst.gen_lit_pool = 1;
3415
3416 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3417 {
3418 set_syntax_error (_("invalid address"));
3419 return FALSE;
3420 }
3421 }
3422
3423 *str = p;
3424 return TRUE;
3425 }
3426
3427 /* [ */
3428
3429 reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3430 if (!reg || !aarch64_check_reg_type (reg, base_type))
3431 {
3432 set_syntax_error (_(get_reg_expected_msg (base_type)));
3433 return FALSE;
3434 }
3435 operand->addr.base_regno = reg->number;
3436
3437 /* [Xn */
3438 if (skip_past_comma (&p))
3439 {
3440 /* [Xn, */
3441 operand->addr.preind = 1;
3442
3443 reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3444 if (reg)
3445 {
3446 if (!aarch64_check_reg_type (reg, offset_type))
3447 {
3448 set_syntax_error (_(get_reg_expected_msg (offset_type)));
3449 return FALSE;
3450 }
3451
3452 /* [Xn,Rm */
3453 operand->addr.offset.regno = reg->number;
3454 operand->addr.offset.is_reg = 1;
3455 /* Shifted index. */
3456 if (skip_past_comma (&p))
3457 {
3458 /* [Xn,Rm, */
3459 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3460 /* Use the diagnostics set in parse_shift, so not set new
3461 error message here. */
3462 return FALSE;
3463 }
3464 /* We only accept:
3465 [base,Xm{,LSL #imm}]
3466 [base,Xm,SXTX {#imm}]
3467 [base,Wm,(S|U)XTW {#imm}] */
3468 if (operand->shifter.kind == AARCH64_MOD_NONE
3469 || operand->shifter.kind == AARCH64_MOD_LSL
3470 || operand->shifter.kind == AARCH64_MOD_SXTX)
3471 {
3472 if (*offset_qualifier == AARCH64_OPND_QLF_W)
3473 {
3474 set_syntax_error (_("invalid use of 32-bit register offset"));
3475 return FALSE;
3476 }
3477 if (aarch64_get_qualifier_esize (*base_qualifier)
3478 != aarch64_get_qualifier_esize (*offset_qualifier))
3479 {
3480 set_syntax_error (_("offset has different size from base"));
3481 return FALSE;
3482 }
3483 }
3484 else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3485 {
3486 set_syntax_error (_("invalid use of 64-bit register offset"));
3487 return FALSE;
3488 }
3489 }
3490 else
3491 {
3492 /* [Xn,#:<reloc_op>:<symbol> */
3493 skip_past_char (&p, '#');
3494 if (skip_past_char (&p, ':'))
3495 {
3496 struct reloc_table_entry *entry;
3497
3498 /* Try to parse a relocation modifier. Anything else is
3499 an error. */
3500 if (!(entry = find_reloc_table_entry (&p)))
3501 {
3502 set_syntax_error (_("unknown relocation modifier"));
3503 return FALSE;
3504 }
3505
3506 if (entry->ldst_type == 0)
3507 {
3508 set_syntax_error
3509 (_("this relocation modifier is not allowed on this "
3510 "instruction"));
3511 return FALSE;
3512 }
3513
3514 /* [Xn,#:<reloc_op>: */
3515 /* We now have the group relocation table entry corresponding to
3516 the name in the assembler source. Next, we parse the
3517 expression. */
3518 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3519 {
3520 set_syntax_error (_("invalid relocation expression"));
3521 return FALSE;
3522 }
3523
3524 /* [Xn,#:<reloc_op>:<expr> */
3525 /* Record the load/store relocation type. */
3526 inst.reloc.type = entry->ldst_type;
3527 inst.reloc.pc_rel = entry->pc_rel;
3528 }
3529 else
3530 {
3531 if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3532 {
3533 set_syntax_error (_("invalid expression in the address"));
3534 return FALSE;
3535 }
3536 /* [Xn,<expr> */
3537 if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3538 /* [Xn,<expr>,<shifter> */
3539 if (! parse_shift (&p, operand, imm_shift_mode))
3540 return FALSE;
3541 }
3542 }
3543 }
3544
3545 if (! skip_past_char (&p, ']'))
3546 {
3547 set_syntax_error (_("']' expected"));
3548 return FALSE;
3549 }
3550
3551 if (skip_past_char (&p, '!'))
3552 {
3553 if (operand->addr.preind && operand->addr.offset.is_reg)
3554 {
3555 set_syntax_error (_("register offset not allowed in pre-indexed "
3556 "addressing mode"));
3557 return FALSE;
3558 }
3559 /* [Xn]! */
3560 operand->addr.writeback = 1;
3561 }
3562 else if (skip_past_comma (&p))
3563 {
3564 /* [Xn], */
3565 operand->addr.postind = 1;
3566 operand->addr.writeback = 1;
3567
3568 if (operand->addr.preind)
3569 {
3570 set_syntax_error (_("cannot combine pre- and post-indexing"));
3571 return FALSE;
3572 }
3573
3574 reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3575 if (reg)
3576 {
3577 /* [Xn],Xm */
3578 if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3579 {
3580 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3581 return FALSE;
3582 }
3583
3584 operand->addr.offset.regno = reg->number;
3585 operand->addr.offset.is_reg = 1;
3586 }
3587 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3588 {
3589 /* [Xn],#expr */
3590 set_syntax_error (_("invalid expression in the address"));
3591 return FALSE;
3592 }
3593 }
3594
3595 /* If at this point neither .preind nor .postind is set, we have a
3596 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3597 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3598 {
3599 if (operand->addr.writeback)
3600 {
3601 /* Reject [Rn]! */
3602 set_syntax_error (_("missing offset in the pre-indexed address"));
3603 return FALSE;
3604 }
3605 operand->addr.preind = 1;
3606 inst.reloc.exp.X_op = O_constant;
3607 inst.reloc.exp.X_add_number = 0;
3608 }
3609
3610 *str = p;
3611 return TRUE;
3612 }
3613
3614 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3615 on success. */
3616 static bfd_boolean
3617 parse_address (char **str, aarch64_opnd_info *operand)
3618 {
3619 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3620 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3621 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3622 }
3623
3624 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3625 The arguments have the same meaning as for parse_address_main.
3626 Return TRUE on success. */
3627 static bfd_boolean
3628 parse_sve_address (char **str, aarch64_opnd_info *operand,
3629 aarch64_opnd_qualifier_t *base_qualifier,
3630 aarch64_opnd_qualifier_t *offset_qualifier)
3631 {
3632 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3633 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3634 SHIFTED_MUL_VL);
3635 }
3636
3637 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3638 Return TRUE on success; otherwise return FALSE. */
3639 static bfd_boolean
3640 parse_half (char **str, int *internal_fixup_p)
3641 {
3642 char *p = *str;
3643
3644 skip_past_char (&p, '#');
3645
3646 gas_assert (internal_fixup_p);
3647 *internal_fixup_p = 0;
3648
3649 if (*p == ':')
3650 {
3651 struct reloc_table_entry *entry;
3652
3653 /* Try to parse a relocation. Anything else is an error. */
3654 ++p;
3655 if (!(entry = find_reloc_table_entry (&p)))
3656 {
3657 set_syntax_error (_("unknown relocation modifier"));
3658 return FALSE;
3659 }
3660
3661 if (entry->movw_type == 0)
3662 {
3663 set_syntax_error
3664 (_("this relocation modifier is not allowed on this instruction"));
3665 return FALSE;
3666 }
3667
3668 inst.reloc.type = entry->movw_type;
3669 }
3670 else
3671 *internal_fixup_p = 1;
3672
3673 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3674 return FALSE;
3675
3676 *str = p;
3677 return TRUE;
3678 }
3679
3680 /* Parse an operand for an ADRP instruction:
3681 ADRP <Xd>, <label>
3682 Return TRUE on success; otherwise return FALSE. */
3683
3684 static bfd_boolean
3685 parse_adrp (char **str)
3686 {
3687 char *p;
3688
3689 p = *str;
3690 if (*p == ':')
3691 {
3692 struct reloc_table_entry *entry;
3693
3694 /* Try to parse a relocation. Anything else is an error. */
3695 ++p;
3696 if (!(entry = find_reloc_table_entry (&p)))
3697 {
3698 set_syntax_error (_("unknown relocation modifier"));
3699 return FALSE;
3700 }
3701
3702 if (entry->adrp_type == 0)
3703 {
3704 set_syntax_error
3705 (_("this relocation modifier is not allowed on this instruction"));
3706 return FALSE;
3707 }
3708
3709 inst.reloc.type = entry->adrp_type;
3710 }
3711 else
3712 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3713
3714 inst.reloc.pc_rel = 1;
3715
3716 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3717 return FALSE;
3718
3719 *str = p;
3720 return TRUE;
3721 }
3722
3723 /* Miscellaneous. */
3724
3725 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3726 of SIZE tokens in which index I gives the token for field value I,
3727 or is null if field value I is invalid. REG_TYPE says which register
3728 names should be treated as registers rather than as symbolic immediates.
3729
3730 Return true on success, moving *STR past the operand and storing the
3731 field value in *VAL. */
3732
3733 static int
3734 parse_enum_string (char **str, int64_t *val, const char *const *array,
3735 size_t size, aarch64_reg_type reg_type)
3736 {
3737 expressionS exp;
3738 char *p, *q;
3739 size_t i;
3740
3741 /* Match C-like tokens. */
3742 p = q = *str;
3743 while (ISALNUM (*q))
3744 q++;
3745
3746 for (i = 0; i < size; ++i)
3747 if (array[i]
3748 && strncasecmp (array[i], p, q - p) == 0
3749 && array[i][q - p] == 0)
3750 {
3751 *val = i;
3752 *str = q;
3753 return TRUE;
3754 }
3755
3756 if (!parse_immediate_expression (&p, &exp, reg_type))
3757 return FALSE;
3758
3759 if (exp.X_op == O_constant
3760 && (uint64_t) exp.X_add_number < size)
3761 {
3762 *val = exp.X_add_number;
3763 *str = p;
3764 return TRUE;
3765 }
3766
3767 /* Use the default error for this operand. */
3768 return FALSE;
3769 }
3770
3771 /* Parse an option for a preload instruction. Returns the encoding for the
3772 option, or PARSE_FAIL. */
3773
3774 static int
3775 parse_pldop (char **str)
3776 {
3777 char *p, *q;
3778 const struct aarch64_name_value_pair *o;
3779
3780 p = q = *str;
3781 while (ISALNUM (*q))
3782 q++;
3783
3784 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3785 if (!o)
3786 return PARSE_FAIL;
3787
3788 *str = q;
3789 return o->value;
3790 }
3791
3792 /* Parse an option for a barrier instruction. Returns the encoding for the
3793 option, or PARSE_FAIL. */
3794
3795 static int
3796 parse_barrier (char **str)
3797 {
3798 char *p, *q;
3799 const asm_barrier_opt *o;
3800
3801 p = q = *str;
3802 while (ISALPHA (*q))
3803 q++;
3804
3805 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3806 if (!o)
3807 return PARSE_FAIL;
3808
3809 *str = q;
3810 return o->value;
3811 }
3812
3813 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3814 return 0 if successful. Otherwise return PARSE_FAIL. */
3815
3816 static int
3817 parse_barrier_psb (char **str,
3818 const struct aarch64_name_value_pair ** hint_opt)
3819 {
3820 char *p, *q;
3821 const struct aarch64_name_value_pair *o;
3822
3823 p = q = *str;
3824 while (ISALPHA (*q))
3825 q++;
3826
3827 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3828 if (!o)
3829 {
3830 set_fatal_syntax_error
3831 ( _("unknown or missing option to PSB"));
3832 return PARSE_FAIL;
3833 }
3834
3835 if (o->value != 0x11)
3836 {
3837 /* PSB only accepts option name 'CSYNC'. */
3838 set_syntax_error
3839 (_("the specified option is not accepted for PSB"));
3840 return PARSE_FAIL;
3841 }
3842
3843 *str = q;
3844 *hint_opt = o;
3845 return 0;
3846 }
3847
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.
*/

static int
parse_sys_reg (char **str, struct hash_control *sys_regs,
	       int imple_defined_p, int pstatefield_p)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF; Q ends up pointing
     at the first input character that cannot be part of a name.  Copying
     stops at 31 characters so BUF stays NUL-terminated.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF be large enough.  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Range-check each field before packing the encoding:
	     op0 -> bits [15:14], op1 -> [13:11], cn -> [10:7],
	     cm -> [6:3], op2 -> [2:0].  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	}
    }
  else
    {
      /* Known name: diagnose names not supported by the selected
	 processor and deprecated names, but still return the value.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
    }

  *str = q;
  return value;
}
3910
3911 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3912 for the option, or NULL. */
3913
3914 static const aarch64_sys_ins_reg *
3915 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3916 {
3917 char *p, *q;
3918 char buf[32];
3919 const aarch64_sys_ins_reg *o;
3920
3921 p = buf;
3922 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3923 if (p < buf + 31)
3924 *p++ = TOLOWER (*q);
3925 *p = '\0';
3926
3927 o = hash_find (sys_ins_regs, buf);
3928 if (!o)
3929 return NULL;
3930
3931 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
3932 as_bad (_("selected processor does not support system register "
3933 "name '%s'"), buf);
3934
3935 *str = q;
3936 return o;
3937 }
3938 \f
/* Operand-parsing helper macros.  They refer to variables of the
   expanding function (str, val, reg, rtype, qualifier, info,
   imm_reg_type) and branch to its `failure' label on error.  */

/* Consume the character CHR from the input, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into VAL, or fail.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE and record its
   number and qualifier into INFO, or fail.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and fail unless it lies in
   [MIN, MAX].  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
#min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic operand drawn from ARRAY into VAL, or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR and fail if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
3990 \f
/* Encode the 12-bit imm field of Add/sub immediate; the value occupies
   bits [21:10] of the instruction word.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  uint32_t encoded = imm << 10;
  return encoded;
}
3997
/* Encode the shift amount field of Add/sub immediate; the value occupies
   bits starting at bit 22 of the instruction word.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  uint32_t encoded = cnt << 22;
  return encoded;
}
4004
4005
/* Encode the imm field of Adr instruction: the low two bits of IMM go to
   bits [30:29] and bits [20:2] go to bits [23:5].  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t lo = (imm & 0x3) << 29;		/* [1:0]  -> [30:29] */
  uint32_t hi = (imm & (0x7ffff << 2)) << 3;	/* [20:2] -> [23:5]  */
  return lo | hi;
}
4013
/* Encode the immediate field of Move wide immediate; the value occupies
   bits starting at bit 5 of the instruction word.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  uint32_t encoded = imm << 5;
  return encoded;
}
4020
/* Encode the 26-bit offset of unconditional branch; only the low 26 bits
   of OFS are kept.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t mask = (1u << 26) - 1;
  return ofs & mask;
}
4027
/* Encode the 19-bit offset of conditional branch and compare & branch;
   the low 19 bits of OFS are placed at bit 5.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t mask = (1u << 19) - 1;
  return (ofs & mask) << 5;
}
4034
/* Encode the 19-bit offset of ld literal; the low 19 bits of OFS are
   placed at bit 5.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t mask = (1u << 19) - 1;
  return (ofs & mask) << 5;
}
4041
/* Encode the 14-bit offset of test & branch; the low 14 bits of OFS are
   placed at bit 5.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t mask = (1u << 14) - 1;
  return (ofs & mask) << 5;
}
4048
/* Encode the 16-bit imm field of svc/hvc/smc; the value occupies bits
   starting at bit 5 of the instruction word.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  uint32_t encoded = imm << 5;
  return encoded;
}
4055
/* Reencode add(s) to sub(s), or sub(s) to add(s), by flipping bit 30 of
   the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t bit_30 = 1u << 30;
  return opcode ^ bit_30;
}
4062
/* Reencode MOVN/MOVZ to MOVZ by setting bit 30 of the opcode.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t bit_30 = 1u << 30;
  return opcode | bit_30;
}
4068
/* Reencode MOVN/MOVZ to MOVN by clearing bit 30 of the opcode.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t bit_30 = 1u << 30;
  return opcode & ~bit_30;
}
4074
4075 /* Overall per-instruction processing. */
4076
4077 /* We need to be able to fix up arbitrary expressions in some statements.
4078 This is so that we can handle symbols that are an arbitrary distance from
4079 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4080 which returns part of an address in a form which will be valid for
4081 a data instruction. We do this by pushing the expression into a symbol
4082 in the expr_section, and creating a fix for that. */
4083
4084 static fixS *
4085 fix_new_aarch64 (fragS * frag,
4086 int where,
4087 short int size, expressionS * exp, int pc_rel, int reloc)
4088 {
4089 fixS *new_fix;
4090
4091 switch (exp->X_op)
4092 {
4093 case O_constant:
4094 case O_symbol:
4095 case O_add:
4096 case O_subtract:
4097 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4098 break;
4099
4100 default:
4101 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4102 pc_rel, reloc);
4103 break;
4104 }
4105 return new_fix;
4106 }
4107 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The entries must be
   kept in sync with enum aarch64_operand_error_kind, in the order the
   enumerators are declared.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4129
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The numeric comparison below relies on the AARCH64_OPDE_* enumerators
     being declared in increasing order of severity; the asserts verify
     that assumed ordering.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4150
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  The returned pointer refers to a
   static buffer, so the result is only valid until the next call.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4179
4180 static void
4181 reset_aarch64_instruction (aarch64_instruction *instruction)
4182 {
4183 memset (instruction, '\0', sizeof (aarch64_instruction));
4184 instruction->reloc.type = BFD_RELOC_UNUSED;
4185 }
4186
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error: the opcode template against which it was
   found, the error details, and the link to the next record.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;
  aarch64_operand_error detail;
  struct operand_error_record *next;
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of operand error records, with both head and tail
   pointers so the whole list can be spliced cheaply.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled to avoid repeated allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4218
4219 /* Initialize the data structure that stores the operand mismatch
4220 information on assembling one line of the assembly code. */
4221 static void
4222 init_operand_error_report (void)
4223 {
4224 if (operand_error_report.head != NULL)
4225 {
4226 gas_assert (operand_error_report.tail != NULL);
4227 operand_error_report.tail->next = free_opnd_error_record_nodes;
4228 free_opnd_error_record_nodes = operand_error_report.head;
4229 operand_error_report.head = NULL;
4230 operand_error_report.tail = NULL;
4231 return;
4232 }
4233 gas_assert (operand_error_report.tail == NULL);
4234 }
4235
4236 /* Return TRUE if some operand error has been recorded during the
4237 parsing of the current assembly line using the opcode *OPCODE;
4238 otherwise return FALSE. */
4239 static inline bfd_boolean
4240 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4241 {
4242 operand_error_record *record = operand_error_report.head;
4243 return record && record->opcode == opcode;
4244 }
4245
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* RECORD initially aliases the head of the list; if a record for this
     opcode already exists it IS that head record.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Reuse a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Overwrite the detail of the (new or existing) record for OPCODE.  */
  record->detail = new_record->detail;
}
4297
4298 static inline void
4299 record_operand_error_info (const aarch64_opcode *opcode,
4300 aarch64_operand_error *error_info)
4301 {
4302 operand_error_record record;
4303 record.opcode = opcode;
4304 record.detail = *error_info;
4305 add_operand_error_record (&record);
4306 }
4307
4308 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4309 error message *ERROR, for operand IDX (count from 0). */
4310
4311 static void
4312 record_operand_error (const aarch64_opcode *opcode, int idx,
4313 enum aarch64_operand_error_kind kind,
4314 const char* error)
4315 {
4316 aarch64_operand_error info;
4317 memset(&info, 0, sizeof (info));
4318 info.index = idx;
4319 info.kind = kind;
4320 info.error = error;
4321 record_operand_error_info (opcode, &info);
4322 }
4323
4324 static void
4325 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4326 enum aarch64_operand_error_kind kind,
4327 const char* error, const int *extra_data)
4328 {
4329 aarch64_operand_error info;
4330 info.index = idx;
4331 info.kind = kind;
4332 info.error = error;
4333 info.data[0] = extra_data[0];
4334 info.data[1] = extra_data[1];
4335 info.data[2] = extra_data[2];
4336 record_operand_error_info (opcode, &info);
4337 }
4338
4339 static void
4340 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4341 const char* error, int lower_bound,
4342 int upper_bound)
4343 {
4344 int data[3] = {lower_bound, upper_bound, 0};
4345 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4346 error, data);
4347 }
4348
4349 /* Remove the operand error record for *OPCODE. */
4350 static void ATTRIBUTE_UNUSED
4351 remove_operand_error_record (const aarch64_opcode *opcode)
4352 {
4353 if (opcode_has_operand_error_p (opcode))
4354 {
4355 operand_error_record* record = operand_error_report.head;
4356 gas_assert (record != NULL && operand_error_report.tail != NULL);
4357 operand_error_report.head = record->next;
4358 record->next = free_opnd_error_record_nodes;
4359 free_opnd_error_record_nodes = record;
4360 if (operand_error_report.head == NULL)
4361 {
4362 gas_assert (operand_error_report.tail == record);
4363 operand_error_report.tail = NULL;
4364 }
4365 }
4366 }
4367
4368 /* Given the instruction in *INSTR, return the index of the best matched
4369 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4370
4371 Return -1 if there is no qualifier sequence; return the first match
4372 if there is multiple matches found. */
4373
4374 static int
4375 find_best_match (const aarch64_inst *instr,
4376 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4377 {
4378 int i, num_opnds, max_num_matched, idx;
4379
4380 num_opnds = aarch64_num_of_operands (instr->opcode);
4381 if (num_opnds == 0)
4382 {
4383 DEBUG_TRACE ("no operand");
4384 return -1;
4385 }
4386
4387 max_num_matched = 0;
4388 idx = 0;
4389
4390 /* For each pattern. */
4391 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4392 {
4393 int j, num_matched;
4394 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4395
4396 /* Most opcodes has much fewer patterns in the list. */
4397 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
4398 {
4399 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4400 break;
4401 }
4402
4403 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4404 if (*qualifiers == instr->operands[j].qualifier)
4405 ++num_matched;
4406
4407 if (num_matched > max_num_matched)
4408 {
4409 max_num_matched = num_matched;
4410 idx = i;
4411 }
4412 }
4413
4414 DEBUG_TRACE ("return with %d", idx);
4415 return idx;
4416 }
4417
4418 /* Assign qualifiers in the qualifier seqence (headed by QUALIFIERS) to the
4419 corresponding operands in *INSTR. */
4420
4421 static inline void
4422 assign_qualifier_sequence (aarch64_inst *instr,
4423 const aarch64_opnd_qualifier_t *qualifiers)
4424 {
4425 int i = 0;
4426 int num_opnds = aarch64_num_of_operands (instr->opcode);
4427 gas_assert (num_opnds);
4428 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4429 instr->operands[i].qualifier = *qualifiers;
4430 }
4431
4432 /* Print operands for the diagnosis purpose. */
4433
4434 static void
4435 print_operands (char *buf, const aarch64_opcode *opcode,
4436 const aarch64_opnd_info *opnds)
4437 {
4438 int i;
4439
4440 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4441 {
4442 char str[128];
4443
4444 /* We regard the opcode operand info more, however we also look into
4445 the inst->operands to support the disassembling of the optional
4446 operand.
4447 The two operand code should be the same in all cases, apart from
4448 when the operand can be optional. */
4449 if (opcode->operands[i] == AARCH64_OPND_NIL
4450 || opnds[i].type == AARCH64_OPND_NIL)
4451 break;
4452
4453 /* Generate the operand string in STR. */
4454 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL);
4455
4456 /* Delimiter. */
4457 if (str[0] != '\0')
4458 strcat (buf, i == 0 ? " " : ",");
4459
4460 /* Append the operand string. */
4461 strcat (buf, str);
4462 }
4463 }
4464
/* Send to stderr a string as information, prefixed with the current
   file name and line number when known.  */

static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file;
  va_list args;

  file = as_where (&line);
  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4488
4489 /* Output one operand error record. */
4490
4491 static void
4492 output_operand_error_record (const operand_error_record *record, char *str)
4493 {
4494 const aarch64_operand_error *detail = &record->detail;
4495 int idx = detail->index;
4496 const aarch64_opcode *opcode = record->opcode;
4497 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4498 : AARCH64_OPND_NIL);
4499
4500 switch (detail->kind)
4501 {
4502 case AARCH64_OPDE_NIL:
4503 gas_assert (0);
4504 break;
4505
4506 case AARCH64_OPDE_SYNTAX_ERROR:
4507 case AARCH64_OPDE_RECOVERABLE:
4508 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4509 case AARCH64_OPDE_OTHER_ERROR:
4510 /* Use the prepared error message if there is, otherwise use the
4511 operand description string to describe the error. */
4512 if (detail->error != NULL)
4513 {
4514 if (idx < 0)
4515 as_bad (_("%s -- `%s'"), detail->error, str);
4516 else
4517 as_bad (_("%s at operand %d -- `%s'"),
4518 detail->error, idx + 1, str);
4519 }
4520 else
4521 {
4522 gas_assert (idx >= 0);
4523 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4524 aarch64_get_operand_desc (opd_code), str);
4525 }
4526 break;
4527
4528 case AARCH64_OPDE_INVALID_VARIANT:
4529 as_bad (_("operand mismatch -- `%s'"), str);
4530 if (verbose_error_p)
4531 {
4532 /* We will try to correct the erroneous instruction and also provide
4533 more information e.g. all other valid variants.
4534
4535 The string representation of the corrected instruction and other
4536 valid variants are generated by
4537
4538 1) obtaining the intermediate representation of the erroneous
4539 instruction;
4540 2) manipulating the IR, e.g. replacing the operand qualifier;
4541 3) printing out the instruction by calling the printer functions
4542 shared with the disassembler.
4543
4544 The limitation of this method is that the exact input assembly
4545 line cannot be accurately reproduced in some cases, for example an
4546 optional operand present in the actual assembly line will be
4547 omitted in the output; likewise for the optional syntax rules,
4548 e.g. the # before the immediate. Another limitation is that the
4549 assembly symbols and relocation operations in the assembly line
4550 currently cannot be printed out in the error report. Last but not
4551 least, when there is other error(s) co-exist with this error, the
4552 'corrected' instruction may be still incorrect, e.g. given
4553 'ldnp h0,h1,[x0,#6]!'
4554 this diagnosis will provide the version:
4555 'ldnp s0,s1,[x0,#6]!'
4556 which is still not right. */
4557 size_t len = strlen (get_mnemonic_name (str));
4558 int i, qlf_idx;
4559 bfd_boolean result;
4560 char buf[2048];
4561 aarch64_inst *inst_base = &inst.base;
4562 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4563
4564 /* Init inst. */
4565 reset_aarch64_instruction (&inst);
4566 inst_base->opcode = opcode;
4567
4568 /* Reset the error report so that there is no side effect on the
4569 following operand parsing. */
4570 init_operand_error_report ();
4571
4572 /* Fill inst. */
4573 result = parse_operands (str + len, opcode)
4574 && programmer_friendly_fixup (&inst);
4575 gas_assert (result);
4576 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4577 NULL, NULL);
4578 gas_assert (!result);
4579
4580 /* Find the most matched qualifier sequence. */
4581 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4582 gas_assert (qlf_idx > -1);
4583
4584 /* Assign the qualifiers. */
4585 assign_qualifier_sequence (inst_base,
4586 opcode->qualifiers_list[qlf_idx]);
4587
4588 /* Print the hint. */
4589 output_info (_(" did you mean this?"));
4590 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4591 print_operands (buf, opcode, inst_base->operands);
4592 output_info (_(" %s"), buf);
4593
4594 /* Print out other variant(s) if there is any. */
4595 if (qlf_idx != 0 ||
4596 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4597 output_info (_(" other valid variant(s):"));
4598
4599 /* For each pattern. */
4600 qualifiers_list = opcode->qualifiers_list;
4601 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4602 {
4603 /* Most opcodes has much fewer patterns in the list.
4604 First NIL qualifier indicates the end in the list. */
4605 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4606 break;
4607
4608 if (i != qlf_idx)
4609 {
4610 /* Mnemonics name. */
4611 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4612
4613 /* Assign the qualifiers. */
4614 assign_qualifier_sequence (inst_base, *qualifiers_list);
4615
4616 /* Print instruction. */
4617 print_operands (buf, opcode, inst_base->operands);
4618
4619 output_info (_(" %s"), buf);
4620 }
4621 }
4622 }
4623 break;
4624
4625 case AARCH64_OPDE_UNTIED_OPERAND:
4626 as_bad (_("operand %d must be the same register as operand 1 -- `%s'"),
4627 detail->index + 1, str);
4628 break;
4629
4630 case AARCH64_OPDE_OUT_OF_RANGE:
4631 if (detail->data[0] != detail->data[1])
4632 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4633 detail->error ? detail->error : _("immediate value"),
4634 detail->data[0], detail->data[1], idx + 1, str);
4635 else
4636 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4637 detail->error ? detail->error : _("immediate value"),
4638 detail->data[0], idx + 1, str);
4639 break;
4640
4641 case AARCH64_OPDE_REG_LIST:
4642 if (detail->data[0] == 1)
4643 as_bad (_("invalid number of registers in the list; "
4644 "only 1 register is expected at operand %d -- `%s'"),
4645 idx + 1, str);
4646 else
4647 as_bad (_("invalid number of registers in the list; "
4648 "%d registers are expected at operand %d -- `%s'"),
4649 detail->data[0], idx + 1, str);
4650 break;
4651
4652 case AARCH64_OPDE_UNALIGNED:
4653 as_bad (_("immediate value should be a multiple of "
4654 "%d at operand %d -- `%s'"),
4655 detail->data[0], idx + 1, str);
4656 break;
4657
4658 default:
4659 gas_assert (0);
4660 break;
4661 }
4662 }
4663
4664 /* Process and output the error message about the operand mismatching.
4665
4666 When this function is called, the operand error information had
4667 been collected for an assembly line and there will be multiple
4668 errors in the case of mulitple instruction templates; output the
4669 error message that most closely describes the problem. */
4670
4671 static void
4672 output_operand_error_report (char *str)
4673 {
4674 int largest_error_pos;
4675 const char *msg = NULL;
4676 enum aarch64_operand_error_kind kind;
4677 operand_error_record *curr;
4678 operand_error_record *head = operand_error_report.head;
4679 operand_error_record *record = NULL;
4680
4681 /* No error to report. */
4682 if (head == NULL)
4683 return;
4684
4685 gas_assert (head != NULL && operand_error_report.tail != NULL);
4686
4687 /* Only one error. */
4688 if (head == operand_error_report.tail)
4689 {
4690 DEBUG_TRACE ("single opcode entry with error kind: %s",
4691 operand_mismatch_kind_names[head->detail.kind]);
4692 output_operand_error_record (head, str);
4693 return;
4694 }
4695
4696 /* Find the error kind of the highest severity. */
4697 DEBUG_TRACE ("multiple opcode entres with error kind");
4698 kind = AARCH64_OPDE_NIL;
4699 for (curr = head; curr != NULL; curr = curr->next)
4700 {
4701 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4702 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4703 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4704 kind = curr->detail.kind;
4705 }
4706 gas_assert (kind != AARCH64_OPDE_NIL);
4707
4708 /* Pick up one of errors of KIND to report. */
4709 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4710 for (curr = head; curr != NULL; curr = curr->next)
4711 {
4712 if (curr->detail.kind != kind)
4713 continue;
4714 /* If there are multiple errors, pick up the one with the highest
4715 mismatching operand index. In the case of multiple errors with
4716 the equally highest operand index, pick up the first one or the
4717 first one with non-NULL error message. */
4718 if (curr->detail.index > largest_error_pos
4719 || (curr->detail.index == largest_error_pos && msg == NULL
4720 && curr->detail.error != NULL))
4721 {
4722 largest_error_pos = curr->detail.index;
4723 record = curr;
4724 msg = record->detail.error;
4725 }
4726 }
4727
4728 gas_assert (largest_error_pos != -2 && record != NULL);
4729 DEBUG_TRACE ("Pick up error kind %s to report",
4730 operand_mismatch_kind_names[record->detail.kind]);
4731
4732 /* Output. */
4733 output_operand_error_record (record, str);
4734 }
4735 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Emit the four bytes least-significant first (little-endian).  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (i * 8)) & 0xff;
}
4746
/* Read a little-endian AARCH64 instruction word back from BUF.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Fold the bytes back in, most-significant first.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
4755
/* Emit the instruction currently held in the global INST into the output
   frag, together with any relocation fix-up it requires.  NEW_INST, when
   non-NULL, is attached to the fix-up so the instruction can be
   re-encoded later in md_apply_fix once the final value is known.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve INSN_SIZE bytes in the current frag.  */
  to = frag_more (INSN_SIZE);

  /* Mark this frag as containing emitted code.
     NOTE(review): presumably used later for per-frag bookkeeping such as
     mapping symbols -- confirm against the tc_frag_data definition.  */
  frag_now->tc_frag_data.recorded = 1;

  /* Write the encoded instruction (always little-endian).  */
  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      /* The instruction needs a relocation; create the fix-up now.  */
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* GAS-internal fix-ups carry the operand code and flags so that
	     md_apply_fix can resolve them without a BFD reloc.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Record the instruction for DWARF line-number information.  */
  dwarf2_emit_insn (INSN_SIZE);
}
4789
/* Link together opcodes of the same name.  Each mnemonic hash-table
   bucket is a singly-linked list of all opcode entries sharing that
   mnemonic.  */

struct templates
{
  aarch64_opcode *opcode;	/* One opcode entry for this mnemonic.  */
  struct templates *next;	/* Next entry with the same name, or NULL.  */
};

typedef struct templates templates;
4799
4800 static templates *
4801 lookup_mnemonic (const char *start, int len)
4802 {
4803 templates *templ = NULL;
4804
4805 templ = hash_find_n (aarch64_ops_hsh, start, len);
4806 return templ;
4807 }
4808
4809 /* Subroutine of md_assemble, responsible for looking up the primary
4810 opcode from the mnemonic the user wrote. STR points to the
4811 beginning of the mnemonic. */
4812
4813 static templates *
4814 opcode_lookup (char **str)
4815 {
4816 char *end, *base;
4817 const aarch64_cond *cond;
4818 char condname[16];
4819 int len;
4820
4821 /* Scan up to the end of the mnemonic, which must end in white space,
4822 '.', or end of string. */
4823 for (base = end = *str; is_part_of_name(*end); end++)
4824 if (*end == '.')
4825 break;
4826
4827 if (end == base)
4828 return 0;
4829
4830 inst.cond = COND_ALWAYS;
4831
4832 /* Handle a possible condition. */
4833 if (end[0] == '.')
4834 {
4835 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4836 if (cond)
4837 {
4838 inst.cond = cond->value;
4839 *str = end + 3;
4840 }
4841 else
4842 {
4843 *str = end;
4844 return 0;
4845 }
4846 }
4847 else
4848 *str = end;
4849
4850 len = end - base;
4851
4852 if (inst.cond == COND_ALWAYS)
4853 {
4854 /* Look for unaffixed mnemonic. */
4855 return lookup_mnemonic (base, len);
4856 }
4857 else if (len <= 13)
4858 {
4859 /* append ".c" to mnemonic if conditional */
4860 memcpy (condname, base, len);
4861 memcpy (condname + len, ".c", 2);
4862 base = condname;
4863 len += 2;
4864 return lookup_mnemonic (base, len);
4865 }
4866
4867 return NULL;
4868 }
4869
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   (after recording a syntax error) on failure.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier (the narrowest arrangement) for each element type;
     wider arrangements of the same element type follow it in the
     qualifier enumeration.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_8B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate qualifiers ("/z" and "/m").  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    /* Vector element register.  Relies on AARCH64_OPND_QLF_S_B..S_Q
       being contiguous and in the same order as NT_b..NT_q.  */
    return AARCH64_OPND_QLF_S_B + vectype->type;
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 128-bit, 64-bit and 32-bit total register sizes exist.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 4;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_8B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
4934
4935 /* Process an optional operand that is found omitted from the assembly line.
4936 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4937 instruction's opcode entry while IDX is the index of this omitted operand.
4938 */
4939
4940 static void
4941 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4942 int idx, aarch64_opnd_info *operand)
4943 {
4944 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4945 gas_assert (optional_operand_p (opcode, idx));
4946 gas_assert (!operand->present);
4947
4948 switch (type)
4949 {
4950 case AARCH64_OPND_Rd:
4951 case AARCH64_OPND_Rn:
4952 case AARCH64_OPND_Rm:
4953 case AARCH64_OPND_Rt:
4954 case AARCH64_OPND_Rt2:
4955 case AARCH64_OPND_Rs:
4956 case AARCH64_OPND_Ra:
4957 case AARCH64_OPND_Rt_SYS:
4958 case AARCH64_OPND_Rd_SP:
4959 case AARCH64_OPND_Rn_SP:
4960 case AARCH64_OPND_Fd:
4961 case AARCH64_OPND_Fn:
4962 case AARCH64_OPND_Fm:
4963 case AARCH64_OPND_Fa:
4964 case AARCH64_OPND_Ft:
4965 case AARCH64_OPND_Ft2:
4966 case AARCH64_OPND_Sd:
4967 case AARCH64_OPND_Sn:
4968 case AARCH64_OPND_Sm:
4969 case AARCH64_OPND_Vd:
4970 case AARCH64_OPND_Vn:
4971 case AARCH64_OPND_Vm:
4972 case AARCH64_OPND_VdD1:
4973 case AARCH64_OPND_VnD1:
4974 operand->reg.regno = default_value;
4975 break;
4976
4977 case AARCH64_OPND_Ed:
4978 case AARCH64_OPND_En:
4979 case AARCH64_OPND_Em:
4980 operand->reglane.regno = default_value;
4981 break;
4982
4983 case AARCH64_OPND_IDX:
4984 case AARCH64_OPND_BIT_NUM:
4985 case AARCH64_OPND_IMMR:
4986 case AARCH64_OPND_IMMS:
4987 case AARCH64_OPND_SHLL_IMM:
4988 case AARCH64_OPND_IMM_VLSL:
4989 case AARCH64_OPND_IMM_VLSR:
4990 case AARCH64_OPND_CCMP_IMM:
4991 case AARCH64_OPND_FBITS:
4992 case AARCH64_OPND_UIMM4:
4993 case AARCH64_OPND_UIMM3_OP1:
4994 case AARCH64_OPND_UIMM3_OP2:
4995 case AARCH64_OPND_IMM:
4996 case AARCH64_OPND_WIDTH:
4997 case AARCH64_OPND_UIMM7:
4998 case AARCH64_OPND_NZCV:
4999 case AARCH64_OPND_SVE_PATTERN:
5000 case AARCH64_OPND_SVE_PRFOP:
5001 operand->imm.value = default_value;
5002 break;
5003
5004 case AARCH64_OPND_SVE_PATTERN_SCALED:
5005 operand->imm.value = default_value;
5006 operand->shifter.kind = AARCH64_MOD_MUL;
5007 operand->shifter.amount = 1;
5008 break;
5009
5010 case AARCH64_OPND_EXCEPTION:
5011 inst.reloc.type = BFD_RELOC_UNUSED;
5012 break;
5013
5014 case AARCH64_OPND_BARRIER_ISB:
5015 operand->barrier = aarch64_barrier_options + default_value;
5016
5017 default:
5018 break;
5019 }
5020 }
5021
/* Process the relocation type for move wide instructions (MOVZ/MOVN/MOVK):
   reject relocation types that are invalid for the instruction and set the
   implied shift amount on operand 1 from the relocation's group number.
   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* Is the destination a 32-bit (W) register?  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* The "signed" G*_S and the TLS MOVW relocations below only make sense
     for instructions that can write the whole register, so they are not
     allowed for MOVK.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Derive the 16-bit-group shift amount from the relocation type:
     G0 -> 0, G1 -> 16, G2 -> 32, G3 -> 48.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Shifts of 32 or more cannot apply to a 32-bit destination.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5112
/* A primitive log calculator.  Return log2 of SIZE, where SIZE must be
   a power of two between 1 and 16; assert and return -1 otherwise.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* log2 of 1..16; (unsigned char) -1 marks sizes that are not a power
     of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject out-of-range sizes before indexing LS: SIZE == 0 would
     otherwise read ls[-1], which is undefined behaviour.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5128
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12 (or its
   TLS DTPREL variants), based on the access size implied by the
   operand qualifiers.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows are indexed by pseudo reloc type (LDST_LO12, DTPREL_LO12,
     DTPREL_LO12_NC); columns by log2 of the access size in bytes.
     The DTPREL forms have no 128-bit variant, hence the NONE entries.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand has no qualifier yet, infer it from the
     qualifier of operand 0.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index
     calculation below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5187
5188 /* Check whether a register list REGINFO is valid. The registers must be
5189 numbered in increasing order (modulo 32), in increments of one or two.
5190
5191 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5192 increments of two.
5193
5194 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5195
5196 static bfd_boolean
5197 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5198 {
5199 uint32_t i, nb_regs, prev_regno, incr;
5200
5201 nb_regs = 1 + (reginfo & 0x3);
5202 reginfo >>= 2;
5203 prev_regno = reginfo & 0x1f;
5204 incr = accept_alternate ? 2 : 1;
5205
5206 for (i = 1; i < nb_regs; ++i)
5207 {
5208 uint32_t curr_regno;
5209 reginfo >>= 5;
5210 curr_regno = reginfo & 0x1f;
5211 if (curr_regno != ((prev_regno + incr) & 0x1f))
5212 return FALSE;
5213 prev_regno = curr_regno;
5214 }
5215
5216 return TRUE;
5217 }
5218
5219 /* Generic instruction operand parser. This does no encoding and no
5220 semantic validation; it merely squirrels values away in the inst
5221 structure. Returns TRUE or FALSE depending on whether the
5222 specified grammar matched. */
5223
5224 static bfd_boolean
5225 parse_operands (char *str, const aarch64_opcode *opcode)
5226 {
5227 int i;
5228 char *backtrack_pos = 0;
5229 const enum aarch64_opnd *operands = opcode->operands;
5230 aarch64_reg_type imm_reg_type;
5231
5232 clear_error ();
5233 skip_whitespace (str);
5234
5235 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5236
5237 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5238 {
5239 int64_t val;
5240 const reg_entry *reg;
5241 int comma_skipped_p = 0;
5242 aarch64_reg_type rtype;
5243 struct vector_type_el vectype;
5244 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5245 aarch64_opnd_info *info = &inst.base.operands[i];
5246 aarch64_reg_type reg_type;
5247
5248 DEBUG_TRACE ("parse operand %d", i);
5249
5250 /* Assign the operand code. */
5251 info->type = operands[i];
5252
5253 if (optional_operand_p (opcode, i))
5254 {
5255 /* Remember where we are in case we need to backtrack. */
5256 gas_assert (!backtrack_pos);
5257 backtrack_pos = str;
5258 }
5259
5260 /* Expect comma between operands; the backtrack mechanizm will take
5261 care of cases of omitted optional operand. */
5262 if (i > 0 && ! skip_past_char (&str, ','))
5263 {
5264 set_syntax_error (_("comma expected between operands"));
5265 goto failure;
5266 }
5267 else
5268 comma_skipped_p = 1;
5269
5270 switch (operands[i])
5271 {
5272 case AARCH64_OPND_Rd:
5273 case AARCH64_OPND_Rn:
5274 case AARCH64_OPND_Rm:
5275 case AARCH64_OPND_Rt:
5276 case AARCH64_OPND_Rt2:
5277 case AARCH64_OPND_Rs:
5278 case AARCH64_OPND_Ra:
5279 case AARCH64_OPND_Rt_SYS:
5280 case AARCH64_OPND_PAIRREG:
5281 po_int_reg_or_fail (REG_TYPE_R_Z);
5282 break;
5283
5284 case AARCH64_OPND_Rd_SP:
5285 case AARCH64_OPND_Rn_SP:
5286 po_int_reg_or_fail (REG_TYPE_R_SP);
5287 break;
5288
5289 case AARCH64_OPND_Rm_EXT:
5290 case AARCH64_OPND_Rm_SFT:
5291 po_misc_or_fail (parse_shifter_operand
5292 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5293 ? SHIFTED_ARITH_IMM
5294 : SHIFTED_LOGIC_IMM)));
5295 if (!info->shifter.operator_present)
5296 {
5297 /* Default to LSL if not present. Libopcodes prefers shifter
5298 kind to be explicit. */
5299 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5300 info->shifter.kind = AARCH64_MOD_LSL;
5301 /* For Rm_EXT, libopcodes will carry out further check on whether
5302 or not stack pointer is used in the instruction (Recall that
5303 "the extend operator is not optional unless at least one of
5304 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5305 }
5306 break;
5307
5308 case AARCH64_OPND_Fd:
5309 case AARCH64_OPND_Fn:
5310 case AARCH64_OPND_Fm:
5311 case AARCH64_OPND_Fa:
5312 case AARCH64_OPND_Ft:
5313 case AARCH64_OPND_Ft2:
5314 case AARCH64_OPND_Sd:
5315 case AARCH64_OPND_Sn:
5316 case AARCH64_OPND_Sm:
5317 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5318 if (val == PARSE_FAIL)
5319 {
5320 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5321 goto failure;
5322 }
5323 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5324
5325 info->reg.regno = val;
5326 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5327 break;
5328
5329 case AARCH64_OPND_SVE_Pd:
5330 case AARCH64_OPND_SVE_Pg3:
5331 case AARCH64_OPND_SVE_Pg4_5:
5332 case AARCH64_OPND_SVE_Pg4_10:
5333 case AARCH64_OPND_SVE_Pg4_16:
5334 case AARCH64_OPND_SVE_Pm:
5335 case AARCH64_OPND_SVE_Pn:
5336 case AARCH64_OPND_SVE_Pt:
5337 reg_type = REG_TYPE_PN;
5338 goto vector_reg;
5339
5340 case AARCH64_OPND_SVE_Za_5:
5341 case AARCH64_OPND_SVE_Za_16:
5342 case AARCH64_OPND_SVE_Zd:
5343 case AARCH64_OPND_SVE_Zm_5:
5344 case AARCH64_OPND_SVE_Zm_16:
5345 case AARCH64_OPND_SVE_Zn:
5346 case AARCH64_OPND_SVE_Zt:
5347 reg_type = REG_TYPE_ZN;
5348 goto vector_reg;
5349
5350 case AARCH64_OPND_Vd:
5351 case AARCH64_OPND_Vn:
5352 case AARCH64_OPND_Vm:
5353 reg_type = REG_TYPE_VN;
5354 vector_reg:
5355 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5356 if (val == PARSE_FAIL)
5357 {
5358 first_error (_(get_reg_expected_msg (reg_type)));
5359 goto failure;
5360 }
5361 if (vectype.defined & NTA_HASINDEX)
5362 goto failure;
5363
5364 info->reg.regno = val;
5365 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5366 && vectype.type == NT_invtype)
5367 /* Unqualified Pn and Zn registers are allowed in certain
5368 contexts. Rely on F_STRICT qualifier checking to catch
5369 invalid uses. */
5370 info->qualifier = AARCH64_OPND_QLF_NIL;
5371 else
5372 {
5373 info->qualifier = vectype_to_qualifier (&vectype);
5374 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5375 goto failure;
5376 }
5377 break;
5378
5379 case AARCH64_OPND_VdD1:
5380 case AARCH64_OPND_VnD1:
5381 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5382 if (val == PARSE_FAIL)
5383 {
5384 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5385 goto failure;
5386 }
5387 if (vectype.type != NT_d || vectype.index != 1)
5388 {
5389 set_fatal_syntax_error
5390 (_("the top half of a 128-bit FP/SIMD register is expected"));
5391 goto failure;
5392 }
5393 info->reg.regno = val;
5394 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5395 here; it is correct for the purpose of encoding/decoding since
5396 only the register number is explicitly encoded in the related
5397 instructions, although this appears a bit hacky. */
5398 info->qualifier = AARCH64_OPND_QLF_S_D;
5399 break;
5400
5401 case AARCH64_OPND_SVE_Zn_INDEX:
5402 reg_type = REG_TYPE_ZN;
5403 goto vector_reg_index;
5404
5405 case AARCH64_OPND_Ed:
5406 case AARCH64_OPND_En:
5407 case AARCH64_OPND_Em:
5408 reg_type = REG_TYPE_VN;
5409 vector_reg_index:
5410 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5411 if (val == PARSE_FAIL)
5412 {
5413 first_error (_(get_reg_expected_msg (reg_type)));
5414 goto failure;
5415 }
5416 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5417 goto failure;
5418
5419 info->reglane.regno = val;
5420 info->reglane.index = vectype.index;
5421 info->qualifier = vectype_to_qualifier (&vectype);
5422 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5423 goto failure;
5424 break;
5425
5426 case AARCH64_OPND_SVE_ZnxN:
5427 case AARCH64_OPND_SVE_ZtxN:
5428 reg_type = REG_TYPE_ZN;
5429 goto vector_reg_list;
5430
5431 case AARCH64_OPND_LVn:
5432 case AARCH64_OPND_LVt:
5433 case AARCH64_OPND_LVt_AL:
5434 case AARCH64_OPND_LEt:
5435 reg_type = REG_TYPE_VN;
5436 vector_reg_list:
5437 if (reg_type == REG_TYPE_ZN
5438 && get_opcode_dependent_value (opcode) == 1
5439 && *str != '{')
5440 {
5441 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5442 if (val == PARSE_FAIL)
5443 {
5444 first_error (_(get_reg_expected_msg (reg_type)));
5445 goto failure;
5446 }
5447 info->reglist.first_regno = val;
5448 info->reglist.num_regs = 1;
5449 }
5450 else
5451 {
5452 val = parse_vector_reg_list (&str, reg_type, &vectype);
5453 if (val == PARSE_FAIL)
5454 goto failure;
5455 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5456 {
5457 set_fatal_syntax_error (_("invalid register list"));
5458 goto failure;
5459 }
5460 info->reglist.first_regno = (val >> 2) & 0x1f;
5461 info->reglist.num_regs = (val & 0x3) + 1;
5462 }
5463 if (operands[i] == AARCH64_OPND_LEt)
5464 {
5465 if (!(vectype.defined & NTA_HASINDEX))
5466 goto failure;
5467 info->reglist.has_index = 1;
5468 info->reglist.index = vectype.index;
5469 }
5470 else
5471 {
5472 if (vectype.defined & NTA_HASINDEX)
5473 goto failure;
5474 if (!(vectype.defined & NTA_HASTYPE))
5475 {
5476 if (reg_type == REG_TYPE_ZN)
5477 set_fatal_syntax_error (_("missing type suffix"));
5478 goto failure;
5479 }
5480 }
5481 info->qualifier = vectype_to_qualifier (&vectype);
5482 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5483 goto failure;
5484 break;
5485
5486 case AARCH64_OPND_Cn:
5487 case AARCH64_OPND_Cm:
5488 po_reg_or_fail (REG_TYPE_CN);
5489 if (val > 15)
5490 {
5491 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
5492 goto failure;
5493 }
5494 inst.base.operands[i].reg.regno = val;
5495 break;
5496
5497 case AARCH64_OPND_SHLL_IMM:
5498 case AARCH64_OPND_IMM_VLSR:
5499 po_imm_or_fail (1, 64);
5500 info->imm.value = val;
5501 break;
5502
5503 case AARCH64_OPND_CCMP_IMM:
5504 case AARCH64_OPND_FBITS:
5505 case AARCH64_OPND_UIMM4:
5506 case AARCH64_OPND_UIMM3_OP1:
5507 case AARCH64_OPND_UIMM3_OP2:
5508 case AARCH64_OPND_IMM_VLSL:
5509 case AARCH64_OPND_IMM:
5510 case AARCH64_OPND_WIDTH:
5511 po_imm_nc_or_fail ();
5512 info->imm.value = val;
5513 break;
5514
5515 case AARCH64_OPND_SVE_PATTERN:
5516 po_enum_or_fail (aarch64_sve_pattern_array);
5517 info->imm.value = val;
5518 break;
5519
5520 case AARCH64_OPND_SVE_PATTERN_SCALED:
5521 po_enum_or_fail (aarch64_sve_pattern_array);
5522 info->imm.value = val;
5523 if (skip_past_comma (&str)
5524 && !parse_shift (&str, info, SHIFTED_MUL))
5525 goto failure;
5526 if (!info->shifter.operator_present)
5527 {
5528 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5529 info->shifter.kind = AARCH64_MOD_MUL;
5530 info->shifter.amount = 1;
5531 }
5532 break;
5533
5534 case AARCH64_OPND_SVE_PRFOP:
5535 po_enum_or_fail (aarch64_sve_prfop_array);
5536 info->imm.value = val;
5537 break;
5538
5539 case AARCH64_OPND_UIMM7:
5540 po_imm_or_fail (0, 127);
5541 info->imm.value = val;
5542 break;
5543
5544 case AARCH64_OPND_IDX:
5545 case AARCH64_OPND_BIT_NUM:
5546 case AARCH64_OPND_IMMR:
5547 case AARCH64_OPND_IMMS:
5548 po_imm_or_fail (0, 63);
5549 info->imm.value = val;
5550 break;
5551
5552 case AARCH64_OPND_IMM0:
5553 po_imm_nc_or_fail ();
5554 if (val != 0)
5555 {
5556 set_fatal_syntax_error (_("immediate zero expected"));
5557 goto failure;
5558 }
5559 info->imm.value = 0;
5560 break;
5561
5562 case AARCH64_OPND_FPIMM0:
5563 {
5564 int qfloat;
5565 bfd_boolean res1 = FALSE, res2 = FALSE;
5566 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5567 it is probably not worth the effort to support it. */
5568 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5569 imm_reg_type))
5570 && (error_p ()
5571 || !(res2 = parse_constant_immediate (&str, &val,
5572 imm_reg_type))))
5573 goto failure;
5574 if ((res1 && qfloat == 0) || (res2 && val == 0))
5575 {
5576 info->imm.value = 0;
5577 info->imm.is_fp = 1;
5578 break;
5579 }
5580 set_fatal_syntax_error (_("immediate zero expected"));
5581 goto failure;
5582 }
5583
5584 case AARCH64_OPND_IMM_MOV:
5585 {
5586 char *saved = str;
5587 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5588 reg_name_p (str, REG_TYPE_VN))
5589 goto failure;
5590 str = saved;
5591 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5592 GE_OPT_PREFIX, 1));
5593 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5594 later. fix_mov_imm_insn will try to determine a machine
5595 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5596 message if the immediate cannot be moved by a single
5597 instruction. */
5598 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5599 inst.base.operands[i].skip = 1;
5600 }
5601 break;
5602
5603 case AARCH64_OPND_SIMD_IMM:
5604 case AARCH64_OPND_SIMD_IMM_SFT:
5605 if (! parse_big_immediate (&str, &val, imm_reg_type))
5606 goto failure;
5607 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5608 /* addr_off_p */ 0,
5609 /* need_libopcodes_p */ 1,
5610 /* skip_p */ 1);
5611 /* Parse shift.
5612 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5613 shift, we don't check it here; we leave the checking to
5614 the libopcodes (operand_general_constraint_met_p). By
5615 doing this, we achieve better diagnostics. */
5616 if (skip_past_comma (&str)
5617 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5618 goto failure;
5619 if (!info->shifter.operator_present
5620 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5621 {
5622 /* Default to LSL if not present. Libopcodes prefers shifter
5623 kind to be explicit. */
5624 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5625 info->shifter.kind = AARCH64_MOD_LSL;
5626 }
5627 break;
5628
5629 case AARCH64_OPND_FPIMM:
5630 case AARCH64_OPND_SIMD_FPIMM:
5631 {
5632 int qfloat;
5633 bfd_boolean dp_p
5634 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
5635 == 8);
5636 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5637 || !aarch64_imm_float_p (qfloat))
5638 {
5639 if (!error_p ())
5640 set_fatal_syntax_error (_("invalid floating-point"
5641 " constant"));
5642 goto failure;
5643 }
5644 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5645 inst.base.operands[i].imm.is_fp = 1;
5646 }
5647 break;
5648
5649 case AARCH64_OPND_LIMM:
5650 po_misc_or_fail (parse_shifter_operand (&str, info,
5651 SHIFTED_LOGIC_IMM));
5652 if (info->shifter.operator_present)
5653 {
5654 set_fatal_syntax_error
5655 (_("shift not allowed for bitmask immediate"));
5656 goto failure;
5657 }
5658 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5659 /* addr_off_p */ 0,
5660 /* need_libopcodes_p */ 1,
5661 /* skip_p */ 1);
5662 break;
5663
5664 case AARCH64_OPND_AIMM:
5665 if (opcode->op == OP_ADD)
5666 /* ADD may have relocation types. */
5667 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5668 SHIFTED_ARITH_IMM));
5669 else
5670 po_misc_or_fail (parse_shifter_operand (&str, info,
5671 SHIFTED_ARITH_IMM));
5672 switch (inst.reloc.type)
5673 {
5674 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5675 info->shifter.amount = 12;
5676 break;
5677 case BFD_RELOC_UNUSED:
5678 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5679 if (info->shifter.kind != AARCH64_MOD_NONE)
5680 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5681 inst.reloc.pc_rel = 0;
5682 break;
5683 default:
5684 break;
5685 }
5686 info->imm.value = 0;
5687 if (!info->shifter.operator_present)
5688 {
5689 /* Default to LSL if not present. Libopcodes prefers shifter
5690 kind to be explicit. */
5691 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5692 info->shifter.kind = AARCH64_MOD_LSL;
5693 }
5694 break;
5695
5696 case AARCH64_OPND_HALF:
5697 {
5698 /* #<imm16> or relocation. */
5699 int internal_fixup_p;
5700 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5701 if (internal_fixup_p)
5702 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5703 skip_whitespace (str);
5704 if (skip_past_comma (&str))
5705 {
5706 /* {, LSL #<shift>} */
5707 if (! aarch64_gas_internal_fixup_p ())
5708 {
5709 set_fatal_syntax_error (_("can't mix relocation modifier "
5710 "with explicit shift"));
5711 goto failure;
5712 }
5713 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5714 }
5715 else
5716 inst.base.operands[i].shifter.amount = 0;
5717 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5718 inst.base.operands[i].imm.value = 0;
5719 if (! process_movw_reloc_info ())
5720 goto failure;
5721 }
5722 break;
5723
5724 case AARCH64_OPND_EXCEPTION:
5725 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
5726 imm_reg_type));
5727 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5728 /* addr_off_p */ 0,
5729 /* need_libopcodes_p */ 0,
5730 /* skip_p */ 1);
5731 break;
5732
5733 case AARCH64_OPND_NZCV:
5734 {
5735 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5736 if (nzcv != NULL)
5737 {
5738 str += 4;
5739 info->imm.value = nzcv->value;
5740 break;
5741 }
5742 po_imm_or_fail (0, 15);
5743 info->imm.value = val;
5744 }
5745 break;
5746
5747 case AARCH64_OPND_COND:
5748 case AARCH64_OPND_COND1:
5749 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5750 str += 2;
5751 if (info->cond == NULL)
5752 {
5753 set_syntax_error (_("invalid condition"));
5754 goto failure;
5755 }
5756 else if (operands[i] == AARCH64_OPND_COND1
5757 && (info->cond->value & 0xe) == 0xe)
5758 {
5759 /* Not allow AL or NV. */
5760 set_default_error ();
5761 goto failure;
5762 }
5763 break;
5764
5765 case AARCH64_OPND_ADDR_ADRP:
5766 po_misc_or_fail (parse_adrp (&str));
5767 /* Clear the value as operand needs to be relocated. */
5768 info->imm.value = 0;
5769 break;
5770
5771 case AARCH64_OPND_ADDR_PCREL14:
5772 case AARCH64_OPND_ADDR_PCREL19:
5773 case AARCH64_OPND_ADDR_PCREL21:
5774 case AARCH64_OPND_ADDR_PCREL26:
5775 po_misc_or_fail (parse_address (&str, info));
5776 if (!info->addr.pcrel)
5777 {
5778 set_syntax_error (_("invalid pc-relative address"));
5779 goto failure;
5780 }
5781 if (inst.gen_lit_pool
5782 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5783 {
5784 /* Only permit "=value" in the literal load instructions.
5785 The literal will be generated by programmer_friendly_fixup. */
5786 set_syntax_error (_("invalid use of \"=immediate\""));
5787 goto failure;
5788 }
5789 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5790 {
5791 set_syntax_error (_("unrecognized relocation suffix"));
5792 goto failure;
5793 }
5794 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5795 {
5796 info->imm.value = inst.reloc.exp.X_add_number;
5797 inst.reloc.type = BFD_RELOC_UNUSED;
5798 }
5799 else
5800 {
5801 info->imm.value = 0;
5802 if (inst.reloc.type == BFD_RELOC_UNUSED)
5803 switch (opcode->iclass)
5804 {
5805 case compbranch:
5806 case condbranch:
5807 /* e.g. CBZ or B.COND */
5808 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5809 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5810 break;
5811 case testbranch:
5812 /* e.g. TBZ */
5813 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5814 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5815 break;
5816 case branch_imm:
5817 /* e.g. B or BL */
5818 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5819 inst.reloc.type =
5820 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5821 : BFD_RELOC_AARCH64_JUMP26;
5822 break;
5823 case loadlit:
5824 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5825 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5826 break;
5827 case pcreladdr:
5828 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5829 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5830 break;
5831 default:
5832 gas_assert (0);
5833 abort ();
5834 }
5835 inst.reloc.pc_rel = 1;
5836 }
5837 break;
5838
5839 case AARCH64_OPND_ADDR_SIMPLE:
5840 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5841 {
5842 /* [<Xn|SP>{, #<simm>}] */
5843 char *start = str;
5844 /* First use the normal address-parsing routines, to get
5845 the usual syntax errors. */
5846 po_misc_or_fail (parse_address (&str, info));
5847 if (info->addr.pcrel || info->addr.offset.is_reg
5848 || !info->addr.preind || info->addr.postind
5849 || info->addr.writeback)
5850 {
5851 set_syntax_error (_("invalid addressing mode"));
5852 goto failure;
5853 }
5854
5855 /* Then retry, matching the specific syntax of these addresses. */
5856 str = start;
5857 po_char_or_fail ('[');
5858 po_reg_or_fail (REG_TYPE_R64_SP);
5859 /* Accept optional ", #0". */
5860 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5861 && skip_past_char (&str, ','))
5862 {
5863 skip_past_char (&str, '#');
5864 if (! skip_past_char (&str, '0'))
5865 {
5866 set_fatal_syntax_error
5867 (_("the optional immediate offset can only be 0"));
5868 goto failure;
5869 }
5870 }
5871 po_char_or_fail (']');
5872 break;
5873 }
5874
5875 case AARCH64_OPND_ADDR_REGOFF:
5876 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5877 po_misc_or_fail (parse_address (&str, info));
5878 regoff_addr:
5879 if (info->addr.pcrel || !info->addr.offset.is_reg
5880 || !info->addr.preind || info->addr.postind
5881 || info->addr.writeback)
5882 {
5883 set_syntax_error (_("invalid addressing mode"));
5884 goto failure;
5885 }
5886 if (!info->shifter.operator_present)
5887 {
5888 /* Default to LSL if not present. Libopcodes prefers shifter
5889 kind to be explicit. */
5890 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5891 info->shifter.kind = AARCH64_MOD_LSL;
5892 }
5893 /* Qualifier to be deduced by libopcodes. */
5894 break;
5895
5896 case AARCH64_OPND_ADDR_SIMM7:
5897 po_misc_or_fail (parse_address (&str, info));
5898 if (info->addr.pcrel || info->addr.offset.is_reg
5899 || (!info->addr.preind && !info->addr.postind))
5900 {
5901 set_syntax_error (_("invalid addressing mode"));
5902 goto failure;
5903 }
5904 if (inst.reloc.type != BFD_RELOC_UNUSED)
5905 {
5906 set_syntax_error (_("relocation not allowed"));
5907 goto failure;
5908 }
5909 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5910 /* addr_off_p */ 1,
5911 /* need_libopcodes_p */ 1,
5912 /* skip_p */ 0);
5913 break;
5914
5915 case AARCH64_OPND_ADDR_SIMM9:
5916 case AARCH64_OPND_ADDR_SIMM9_2:
5917 po_misc_or_fail (parse_address (&str, info));
5918 if (info->addr.pcrel || info->addr.offset.is_reg
5919 || (!info->addr.preind && !info->addr.postind)
5920 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5921 && info->addr.writeback))
5922 {
5923 set_syntax_error (_("invalid addressing mode"));
5924 goto failure;
5925 }
5926 if (inst.reloc.type != BFD_RELOC_UNUSED)
5927 {
5928 set_syntax_error (_("relocation not allowed"));
5929 goto failure;
5930 }
5931 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5932 /* addr_off_p */ 1,
5933 /* need_libopcodes_p */ 1,
5934 /* skip_p */ 0);
5935 break;
5936
5937 case AARCH64_OPND_ADDR_UIMM12:
5938 po_misc_or_fail (parse_address (&str, info));
5939 if (info->addr.pcrel || info->addr.offset.is_reg
5940 || !info->addr.preind || info->addr.writeback)
5941 {
5942 set_syntax_error (_("invalid addressing mode"));
5943 goto failure;
5944 }
5945 if (inst.reloc.type == BFD_RELOC_UNUSED)
5946 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5947 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5948 || (inst.reloc.type
5949 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
5950 || (inst.reloc.type
5951 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
5952 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5953 /* Leave qualifier to be determined by libopcodes. */
5954 break;
5955
5956 case AARCH64_OPND_SIMD_ADDR_POST:
5957 /* [<Xn|SP>], <Xm|#<amount>> */
5958 po_misc_or_fail (parse_address (&str, info));
5959 if (!info->addr.postind || !info->addr.writeback)
5960 {
5961 set_syntax_error (_("invalid addressing mode"));
5962 goto failure;
5963 }
5964 if (!info->addr.offset.is_reg)
5965 {
5966 if (inst.reloc.exp.X_op == O_constant)
5967 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5968 else
5969 {
5970 set_fatal_syntax_error
5971 (_("writeback value should be an immediate constant"));
5972 goto failure;
5973 }
5974 }
5975 /* No qualifier. */
5976 break;
5977
5978 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
5979 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
5980 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
5981 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
5982 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
5983 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
5984 case AARCH64_OPND_SVE_ADDR_RI_U6:
5985 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
5986 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
5987 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
5988 /* [X<n>{, #imm, MUL VL}]
5989 [X<n>{, #imm}]
5990 but recognizing SVE registers. */
5991 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
5992 &offset_qualifier));
5993 if (base_qualifier != AARCH64_OPND_QLF_X)
5994 {
5995 set_syntax_error (_("invalid addressing mode"));
5996 goto failure;
5997 }
5998 sve_regimm:
5999 if (info->addr.pcrel || info->addr.offset.is_reg
6000 || !info->addr.preind || info->addr.writeback)
6001 {
6002 set_syntax_error (_("invalid addressing mode"));
6003 goto failure;
6004 }
6005 if (inst.reloc.type != BFD_RELOC_UNUSED
6006 || inst.reloc.exp.X_op != O_constant)
6007 {
6008 /* Make sure this has priority over
6009 "invalid addressing mode". */
6010 set_fatal_syntax_error (_("constant offset required"));
6011 goto failure;
6012 }
6013 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6014 break;
6015
6016 case AARCH64_OPND_SVE_ADDR_RR:
6017 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6018 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6019 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6020 case AARCH64_OPND_SVE_ADDR_RX:
6021 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6022 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6023 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6024 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6025 but recognizing SVE registers. */
6026 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6027 &offset_qualifier));
6028 if (base_qualifier != AARCH64_OPND_QLF_X
6029 || offset_qualifier != AARCH64_OPND_QLF_X)
6030 {
6031 set_syntax_error (_("invalid addressing mode"));
6032 goto failure;
6033 }
6034 goto regoff_addr;
6035
6036 case AARCH64_OPND_SVE_ADDR_RZ:
6037 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6038 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6039 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6040 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6041 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6042 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6043 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6044 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6045 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6046 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6047 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6048 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6049 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6050 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6051 &offset_qualifier));
6052 if (base_qualifier != AARCH64_OPND_QLF_X
6053 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6054 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6055 {
6056 set_syntax_error (_("invalid addressing mode"));
6057 goto failure;
6058 }
6059 info->qualifier = offset_qualifier;
6060 goto regoff_addr;
6061
6062 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6063 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6064 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6065 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6066 /* [Z<n>.<T>{, #imm}] */
6067 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6068 &offset_qualifier));
6069 if (base_qualifier != AARCH64_OPND_QLF_S_S
6070 && base_qualifier != AARCH64_OPND_QLF_S_D)
6071 {
6072 set_syntax_error (_("invalid addressing mode"));
6073 goto failure;
6074 }
6075 info->qualifier = base_qualifier;
6076 goto sve_regimm;
6077
6078 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6079 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6080 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6081 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6082 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6083
6084 We don't reject:
6085
6086 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6087
6088 here since we get better error messages by leaving it to
6089 the qualifier checking routines. */
6090 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6091 &offset_qualifier));
6092 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6093 && base_qualifier != AARCH64_OPND_QLF_S_D)
6094 || offset_qualifier != base_qualifier)
6095 {
6096 set_syntax_error (_("invalid addressing mode"));
6097 goto failure;
6098 }
6099 info->qualifier = base_qualifier;
6100 goto regoff_addr;
6101
6102 case AARCH64_OPND_SYSREG:
6103 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
6104 == PARSE_FAIL)
6105 {
6106 set_syntax_error (_("unknown or missing system register name"));
6107 goto failure;
6108 }
6109 inst.base.operands[i].sysreg = val;
6110 break;
6111
6112 case AARCH64_OPND_PSTATEFIELD:
6113 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
6114 == PARSE_FAIL)
6115 {
6116 set_syntax_error (_("unknown or missing PSTATE field name"));
6117 goto failure;
6118 }
6119 inst.base.operands[i].pstatefield = val;
6120 break;
6121
6122 case AARCH64_OPND_SYSREG_IC:
6123 inst.base.operands[i].sysins_op =
6124 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6125 goto sys_reg_ins;
6126 case AARCH64_OPND_SYSREG_DC:
6127 inst.base.operands[i].sysins_op =
6128 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6129 goto sys_reg_ins;
6130 case AARCH64_OPND_SYSREG_AT:
6131 inst.base.operands[i].sysins_op =
6132 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6133 goto sys_reg_ins;
6134 case AARCH64_OPND_SYSREG_TLBI:
6135 inst.base.operands[i].sysins_op =
6136 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6137 sys_reg_ins:
6138 if (inst.base.operands[i].sysins_op == NULL)
6139 {
6140 set_fatal_syntax_error ( _("unknown or missing operation name"));
6141 goto failure;
6142 }
6143 break;
6144
6145 case AARCH64_OPND_BARRIER:
6146 case AARCH64_OPND_BARRIER_ISB:
6147 val = parse_barrier (&str);
6148 if (val != PARSE_FAIL
6149 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6150 {
6151 /* ISB only accepts options name 'sy'. */
6152 set_syntax_error
6153 (_("the specified option is not accepted in ISB"));
6154 /* Turn off backtrack as this optional operand is present. */
6155 backtrack_pos = 0;
6156 goto failure;
6157 }
6158 /* This is an extension to accept a 0..15 immediate. */
6159 if (val == PARSE_FAIL)
6160 po_imm_or_fail (0, 15);
6161 info->barrier = aarch64_barrier_options + val;
6162 break;
6163
6164 case AARCH64_OPND_PRFOP:
6165 val = parse_pldop (&str);
6166 /* This is an extension to accept a 0..31 immediate. */
6167 if (val == PARSE_FAIL)
6168 po_imm_or_fail (0, 31);
6169 inst.base.operands[i].prfop = aarch64_prfops + val;
6170 break;
6171
6172 case AARCH64_OPND_BARRIER_PSB:
6173 val = parse_barrier_psb (&str, &(info->hint_option));
6174 if (val == PARSE_FAIL)
6175 goto failure;
6176 break;
6177
6178 default:
6179 as_fatal (_("unhandled operand code %d"), operands[i]);
6180 }
6181
6182 /* If we get here, this operand was successfully parsed. */
6183 inst.base.operands[i].present = 1;
6184 continue;
6185
6186 failure:
6187 /* The parse routine should already have set the error, but in case
6188 not, set a default one here. */
6189 if (! error_p ())
6190 set_default_error ();
6191
6192 if (! backtrack_pos)
6193 goto parse_operands_return;
6194
6195 {
6196 /* We reach here because this operand is marked as optional, and
6197 either no operand was supplied or the operand was supplied but it
6198 was syntactically incorrect. In the latter case we report an
6199 error. In the former case we perform a few more checks before
6200 dropping through to the code to insert the default operand. */
6201
6202 char *tmp = backtrack_pos;
6203 char endchar = END_OF_INSN;
6204
6205 if (i != (aarch64_num_of_operands (opcode) - 1))
6206 endchar = ',';
6207 skip_past_char (&tmp, ',');
6208
6209 if (*tmp != endchar)
6210 /* The user has supplied an operand in the wrong format. */
6211 goto parse_operands_return;
6212
6213 /* Make sure there is not a comma before the optional operand.
6214 For example the fifth operand of 'sys' is optional:
6215
6216 sys #0,c0,c0,#0, <--- wrong
6217 sys #0,c0,c0,#0 <--- correct. */
6218 if (comma_skipped_p && i && endchar == END_OF_INSN)
6219 {
6220 set_fatal_syntax_error
6221 (_("unexpected comma before the omitted optional operand"));
6222 goto parse_operands_return;
6223 }
6224 }
6225
6226 /* Reaching here means we are dealing with an optional operand that is
6227 omitted from the assembly line. */
6228 gas_assert (optional_operand_p (opcode, i));
6229 info->present = 0;
6230 process_omitted_operand (operands[i], opcode, i, info);
6231
6232 /* Try again, skipping the optional operand at backtrack_pos. */
6233 str = backtrack_pos;
6234 backtrack_pos = 0;
6235
6236 /* Clear any error record after the omitted optional operand has been
6237 successfully handled. */
6238 clear_error ();
6239 }
6240
6241 /* Check if we have parsed all the operands. */
6242 if (*str != '\0' && ! error_p ())
6243 {
6244 /* Set I to the index of the last present operand; this is
6245 for the purpose of diagnostics. */
6246 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6247 ;
6248 set_fatal_syntax_error
6249 (_("unexpected characters following instruction"));
6250 }
6251
6252 parse_operands_return:
6253
6254 if (error_p ())
6255 {
6256 DEBUG_TRACE ("parsing FAIL: %s - %s",
6257 operand_mismatch_kind_names[get_error_kind ()],
6258 get_error_message ());
6259 /* Record the operand error properly; this is useful when there
6260 are multiple instruction templates for a mnemonic name, so that
6261 later on, we can select the error that most closely describes
6262 the problem. */
6263 record_operand_error (opcode, i, get_error_kind (),
6264 get_error_message ());
6265 return FALSE;
6266 }
6267 else
6268 {
6269 DEBUG_TRACE ("parsing SUCCESS");
6270 return TRUE;
6271 }
6272 }
6273
6274 /* It does some fix-up to provide some programmer friendly feature while
6275 keeping the libopcodes happy, i.e. libopcodes only accepts
6276 the preferred architectural syntax.
6277 Return FALSE if there is any failure; otherwise return TRUE. */
6278
static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero. The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32. */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A bit number of 32 or more can only be a mistake when the
	     register was written as Wn; diagnose it against the 0..31
	     range rather than silently encoding the X form.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* Libopcodes expects the architectural X qualifier; the W vs X
	     distinction is carried entirely by the bit number.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* The literal itself is always 32 bits for LDRSW, regardless of
	     the 64-bit destination register.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  /* Only constants, big numbers and plain symbols can be placed in
	     the literal pool.  */
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	/* Quietly rewrite Xd as Wd; the zero-extension semantics are
	   identical.  */
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6381
6382 /* Check for loads and stores that will cause unpredictable behavior. */
6383
6384 static void
6385 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6386 {
6387 aarch64_inst *base = &instr->base;
6388 const aarch64_opcode *opcode = base->opcode;
6389 const aarch64_opnd_info *opnds = base->operands;
6390 switch (opcode->iclass)
6391 {
6392 case ldst_pos:
6393 case ldst_imm9:
6394 case ldst_unscaled:
6395 case ldst_unpriv:
6396 /* Loading/storing the base register is unpredictable if writeback. */
6397 if ((aarch64_get_operand_class (opnds[0].type)
6398 == AARCH64_OPND_CLASS_INT_REG)
6399 && opnds[0].reg.regno == opnds[1].addr.base_regno
6400 && opnds[1].addr.base_regno != REG_SP
6401 && opnds[1].addr.writeback)
6402 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6403 break;
6404 case ldstpair_off:
6405 case ldstnapair_offs:
6406 case ldstpair_indexed:
6407 /* Loading/storing the base register is unpredictable if writeback. */
6408 if ((aarch64_get_operand_class (opnds[0].type)
6409 == AARCH64_OPND_CLASS_INT_REG)
6410 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6411 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6412 && opnds[2].addr.base_regno != REG_SP
6413 && opnds[2].addr.writeback)
6414 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6415 /* Load operations must load different registers. */
6416 if ((opcode->opcode & (1 << 22))
6417 && opnds[0].reg.regno == opnds[1].reg.regno)
6418 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6419 break;
6420 default:
6421 break;
6422 }
6423 }
6424
6425 /* A wrapper function to interface with libopcodes on encoding and
6426 record the error message if there is any.
6427
6428 Return TRUE on success; otherwise return FALSE. */
6429
6430 static bfd_boolean
6431 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6432 aarch64_insn *code)
6433 {
6434 aarch64_operand_error error_info;
6435 error_info.kind = AARCH64_OPDE_NIL;
6436 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
6437 return TRUE;
6438 else
6439 {
6440 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6441 record_operand_error_info (opcode, &error_info);
6442 return FALSE;
6443 }
6444 }
6445
6446 #ifdef DEBUG_AARCH64
6447 static inline void
6448 dump_opcode_operands (const aarch64_opcode *opcode)
6449 {
6450 int i = 0;
6451 while (opcode->operands[i] != AARCH64_OPND_NIL)
6452 {
6453 aarch64_verbose ("\t\t opnd%d: %s", i,
6454 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
6455 ? aarch64_get_operand_name (opcode->operands[i])
6456 : aarch64_get_operand_desc (opcode->operands[i]));
6457 ++i;
6458 }
6459 }
6460 #endif /* DEBUG_AARCH64 */
6461
6462 /* This is the guts of the machine-dependent assembler. STR points to a
6463 machine dependent instruction. This function is supposed to emit
6464 the frags/bytes it assembles to. */
6465
void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Save inst.cond around reset_aarch64_instruction so that each template
     attempt below starts from the same condition.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; all three must succeed before the
	 instruction can be emitted.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }
	  return;
	}

      /* This template did not match; try the next candidate with the same
	 mnemonic, restarting from a fresh instruction state.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str);
}
6595
6596 /* Various frobbings of labels and their addresses. */
6597
void
aarch64_start_line_hook (void)
{
  /* Forget the label from the previous line; md_assemble only re-anchors
     a label when last_label_seen is non-NULL.  */
  last_label_seen = NULL;
}
6603
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the most recent label so md_assemble can align it and bind
     it to the upcoming instruction's address.  */
  last_label_seen = sym;

  /* Emit DWARF line-number information for the label.  */
  dwarf2_emit_label (sym);
}
6611
6612 int
6613 aarch64_data_in_code (void)
6614 {
6615 if (!strncmp (input_line_pointer + 1, "data:", 5))
6616 {
6617 *input_line_pointer = '/';
6618 input_line_pointer += 5;
6619 *input_line_pointer = 0;
6620 return 1;
6621 }
6622
6623 return 0;
6624 }
6625
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  /* Strip a trailing "/data" marker (added by aarch64_data_in_code) by
     truncating the string in place.  */
  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
6636 \f
6637 /* Table of all register names defined by default. The user can
6638 define additional names with .req. Note that all register names
6639 should appear in both upper and lowercase variants. Some registers
6640 also have mixed-case names. */
6641
/* REGDEF defines a single register-name entry; REGNUM forms the name
   by pasting the register number N onto prefix P.  REGSET16 expands to
   registers 0-15, REGSET31 to 0-30 and REGSET to the full 0-31 run.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  REGSET31 deliberately stops at 30: number 31
     is either the stack pointer or the zero register depending on
     context, so those names are defined explicitly below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Coprocessor register numbers.  */
  REGSET (c, CN), REGSET (C, CN),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  Only sixteen of these exist, hence
     REGSET16 rather than REGSET.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
6703
/* B(a,b,c,d) packs four flag values into the 4-bit NZCV immediate:
   N in bit 3, Z in bit 2, C in bit 1 and V in bit 0.  The one-letter
   macros give the set (upper-case) and clear (lower-case) value of
   each flag, so the table below can spell out all sixteen
   combinations by name.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
6741 \f
6742 /* MD interface: bits in the object file. */
6743
6744 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6745 for use in the a.out file, and stores them in the array pointed to by buf.
6746 This knows about the endian-ness of the target machine and does
6747 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
6748 2 (short) and 4 (long) Floating numbers are put out as a series of
6749 LITTLENUMS (shorts, here at least). */
6750
6751 void
6752 md_number_to_chars (char *buf, valueT val, int n)
6753 {
6754 if (target_big_endian)
6755 number_to_chars_bigendian (buf, val, n);
6756 else
6757 number_to_chars_littleendian (buf, val, n);
6758 }
6759
6760 /* MD interface: Sections. */
6761
6762 /* Estimate the size of a frag before relaxing. Assume everything fits in
6763 4 bytes. */
6764
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Every AArch64 instruction is 4 bytes, so the pre-relaxation size
     estimate is always exactly that.  */
  fragp->fr_var = 4;
  return 4;
}
6771
6772 /* Round up a section size to the appropriate boundary. */
6773
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra padding is imposed; sections keep their natural size.  */
  return size;
}
6779
6780 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6781 of an rs_align_code fragment.
6782
6783 Here we fill the frag with the appropriate info for padding the
6784 output stream. The resulting frag will consist of a fixed (fr_fix)
6785 and of a repeating (fr_var) part.
6786
6787 The fixed content is always emitted before the repeating content and
6788 these two parts are used as follows in constructing the output:
6789 - the fixed part will be used to align to a valid instruction word
6790 boundary, in case that we start at a misaligned address; as no
6791 executable instruction can live at the misaligned location, we
6792 simply fill with zeros;
6793 - the variable part will be used to cover the remaining padding and
6794 we fill using the AArch64 NOP instruction.
6795
   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */
6799
6800 void
6801 aarch64_handle_align (fragS * fragP)
6802 {
6803 /* NOP = d503201f */
6804 /* AArch64 instructions are always little-endian. */
6805 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
6806
6807 int bytes, fix, noop_size;
6808 char *p;
6809
6810 if (fragP->fr_type != rs_align_code)
6811 return;
6812
6813 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6814 p = fragP->fr_literal + fragP->fr_fix;
6815
6816 #ifdef OBJ_ELF
6817 gas_assert (fragP->tc_frag_data.recorded);
6818 #endif
6819
6820 noop_size = sizeof (aarch64_noop);
6821
6822 fix = bytes & (noop_size - 1);
6823 if (fix)
6824 {
6825 #ifdef OBJ_ELF
6826 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6827 #endif
6828 memset (p, 0, fix);
6829 p += fix;
6830 fragP->fr_fix += fix;
6831 }
6832
6833 if (noop_size)
6834 memcpy (p, aarch64_noop, noop_size);
6835 fragP->fr_var = noop_size;
6836 }
6837
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */
6843
#ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets.  */
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* Pick the mapping state ($d data vs $x instructions) implied by the
     kind of frag being started.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
6878 \f
6879 /* Initialize the DWARF-2 unwind information for this procedure. */
6880
void
tc_aarch64_frame_initial_instructions (void)
{
  /* On function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
6886 #endif /* OBJ_ELF */
6887
6888 /* Convert REGNAME to a DWARF-2 register number. */
6889
6890 int
6891 tc_aarch64_regname_to_dw2regnum (char *regname)
6892 {
6893 const reg_entry *reg = parse_reg (&regname);
6894 if (reg == NULL)
6895 return -1;
6896
6897 switch (reg->type)
6898 {
6899 case REG_TYPE_SP_32:
6900 case REG_TYPE_SP_64:
6901 case REG_TYPE_R_32:
6902 case REG_TYPE_R_64:
6903 return reg->number;
6904
6905 case REG_TYPE_FP_B:
6906 case REG_TYPE_FP_H:
6907 case REG_TYPE_FP_S:
6908 case REG_TYPE_FP_D:
6909 case REG_TYPE_FP_Q:
6910 return reg->number + 64;
6911
6912 default:
6913 break;
6914 }
6915 return -1;
6916 }
6917
6918 /* Implement DWARF2_ADDR_SIZE. */
6919
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses even though the architecture is
     64-bit.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
6929
6930 /* MD interface: Symbol and relocation handling. */
6931
6932 /* Return the address within the segment that a PC-relative fixup is
6933 relative to. For AArch64 PC-relative fixups applied to instructions
6934 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
6935
6936 long
6937 md_pcrel_from_section (fixS * fixP, segT seg)
6938 {
6939 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6940
6941 /* If this is pc-relative and we are going to emit a relocation
6942 then we just want to put out any pipeline compensation that the linker
6943 will need. Otherwise we want to use the calculated base. */
6944 if (fixP->fx_pcrel
6945 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6946 || aarch64_force_relocation (fixP)))
6947 base = 0;
6948
6949 /* AArch64 should be consistent for all pc-relative relocations. */
6950 return base + AARCH64_PCREL_OFFSET;
6951 }
6952
6953 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
6954 Otherwise we have no need to default values of symbols. */
6955
6956 symbolS *
6957 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6958 {
6959 #ifdef OBJ_ELF
6960 if (name[0] == '_' && name[1] == 'G'
6961 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6962 {
6963 if (!GOT_symbol)
6964 {
6965 if (symbol_find (name))
6966 as_bad (_("GOT already in the symbol table"));
6967
6968 GOT_symbol = symbol_new (name, undefined_section,
6969 (valueT) 0, &zero_address_frag);
6970 }
6971
6972 return GOT_symbol;
6973 }
6974 #endif
6975
6976 return 0;
6977 }
6978
/* Return non-zero if the indicated VALUE has overflowed the maximum
   range expressible by an unsigned number with the indicated number of
   BITS.  */
6982
6983 static bfd_boolean
6984 unsigned_overflow (valueT value, unsigned bits)
6985 {
6986 valueT lim;
6987 if (bits >= sizeof (valueT) * 8)
6988 return FALSE;
6989 lim = (valueT) 1 << bits;
6990 return (value >= lim);
6991 }
6992
6993
/* Return non-zero if the indicated VALUE has overflowed the maximum
   range expressible by a signed number with the indicated number of
   BITS.  */
6997
6998 static bfd_boolean
6999 signed_overflow (offsetT value, unsigned bits)
7000 {
7001 offsetT lim;
7002 if (bits >= sizeof (offsetT) * 8)
7003 return FALSE;
7004 lim = (offsetT) 1 << (bits - 1);
7005 return (value < -lim || value >= lim);
7006 }
7007
7008 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7009 unsigned immediate offset load/store instruction, try to encode it as
7010 an unscaled, 9-bit, signed immediate offset load/store instruction.
7011 Return TRUE if it is successful; otherwise return FALSE.
7012
7013 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7014 in response to the standard LDR/STR mnemonics when the immediate offset is
7015 unambiguous, i.e. when it is negative or unaligned. */
7016
static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled opcode to its unscaled counterpart; OP_NIL marks
     opcodes that have no unscaled form.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  /* Switch INSTR over to the unscaled opcode before re-encoding.  */
  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* On failure simply return FALSE; the caller reports the error.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
    return FALSE;

  return TRUE;
}
7069
7070 /* Called by fix_insn to fix a MOV immediate alias instruction.
7071
7072 Operand for a generic move immediate instruction, which is an alias
7073 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
7074 a 32-bit/64-bit immediate value into general register. An assembler error
7075 shall result if the immediate cannot be created by a single one of these
7076 instructions. If there is a choice, then to ensure reversability an
7077 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
7078
7079 static void
7080 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7081 {
7082 const aarch64_opcode *opcode;
7083
7084 /* Need to check if the destination is SP/ZR. The check has to be done
7085 before any aarch64_replace_opcode. */
7086 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7087 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
7088
7089 instr->operands[1].imm.value = value;
7090 instr->operands[1].skip = 0;
7091
7092 if (try_mov_wide_p)
7093 {
7094 /* Try the MOVZ alias. */
7095 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7096 aarch64_replace_opcode (instr, opcode);
7097 if (aarch64_opcode_encode (instr->opcode, instr,
7098 &instr->value, NULL, NULL))
7099 {
7100 put_aarch64_insn (buf, instr->value);
7101 return;
7102 }
7103 /* Try the MOVK alias. */
7104 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7105 aarch64_replace_opcode (instr, opcode);
7106 if (aarch64_opcode_encode (instr->opcode, instr,
7107 &instr->value, NULL, NULL))
7108 {
7109 put_aarch64_insn (buf, instr->value);
7110 return;
7111 }
7112 }
7113
7114 if (try_mov_bitmask_p)
7115 {
7116 /* Try the ORR alias. */
7117 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7118 aarch64_replace_opcode (instr, opcode);
7119 if (aarch64_opcode_encode (instr->opcode, instr,
7120 &instr->value, NULL, NULL))
7121 {
7122 put_aarch64_insn (buf, instr->value);
7123 return;
7124 }
7125 }
7126
7127 as_bad_where (fixP->fx_file, fixP->fx_line,
7128 _("immediate cannot be moved by a single instruction"));
7129 }
7130
7131 /* An instruction operand which is immediate related may have symbol used
7132 in the assembly, e.g.
7133
7134 mov w0, u32
7135 .set u32, 0x00ffff00
7136
7137 At the time when the assembly instruction is parsed, a referenced symbol,
7138 like 'u32' in the above example may not have been seen; a fixS is created
7139 in such a case and is handled here after symbols have been resolved.
7140 Instruction is fixed up with VALUE using the information in *FIXP plus
7141 extra information in FLAGS.
7142
7143 This function is called by md_apply_fix to fix up instructions that need
7144 a fix-up described above but does not involve any linker-time relocation. */
7145
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  /* Dispatch on the kind of operand the fixup resolves; each case
     patches the immediate field directly or re-encodes via
     libopcodes.  */
  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* 16-bit immediate of SVC-style exception-generating insns.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2  2  2 21111 111111
		  1  098|7654|3  2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0  Rm    imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0  Rm    imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0  Rm    imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0  Rm    imm6   Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2   221111111111
		  1  098|7654|3 2   109876543210 98765 43210
	 11000000 sf 001|0001|shift    imm12      Rn    Rd    ADD
	 31000000 sf 011|0001|shift    imm12      Rn    Rd    ADDS
	 51000000 sf 101|0001|shift    imm12      Rn    Rd    SUB
	 71000000 sf 111|0001|shift    imm12      Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode via libopcodes since the value
	 must be representable as a logical-immediate bit pattern.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_UIMM12:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  If the scaled form cannot hold the offset, a
	 ldst_pos instruction gets one more chance as its unscaled
	 (LDUR/STUR) counterpart.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7305
7306 /* Apply a fixup (fixP) to segment data, once it has been determined
7307 by our caller that we have all the info we need to fix it up.
7308
7309 Parameter valP is the pointer to the value of the bits. */
7310
7311 void
7312 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7313 {
7314 offsetT value = *valP;
7315 uint32_t insn;
7316 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7317 int scale;
7318 unsigned flags = fixP->fx_addnumber;
7319
7320 DEBUG_TRACE ("\n\n");
7321 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7322 DEBUG_TRACE ("Enter md_apply_fix");
7323
7324 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7325
7326 /* Note whether this will delete the relocation. */
7327
7328 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7329 fixP->fx_done = 1;
7330
7331 /* Process the relocations. */
7332 switch (fixP->fx_r_type)
7333 {
7334 case BFD_RELOC_NONE:
7335 /* This will need to go in the object file. */
7336 fixP->fx_done = 0;
7337 break;
7338
7339 case BFD_RELOC_8:
7340 case BFD_RELOC_8_PCREL:
7341 if (fixP->fx_done || !seg->use_rela_p)
7342 md_number_to_chars (buf, value, 1);
7343 break;
7344
7345 case BFD_RELOC_16:
7346 case BFD_RELOC_16_PCREL:
7347 if (fixP->fx_done || !seg->use_rela_p)
7348 md_number_to_chars (buf, value, 2);
7349 break;
7350
7351 case BFD_RELOC_32:
7352 case BFD_RELOC_32_PCREL:
7353 if (fixP->fx_done || !seg->use_rela_p)
7354 md_number_to_chars (buf, value, 4);
7355 break;
7356
7357 case BFD_RELOC_64:
7358 case BFD_RELOC_64_PCREL:
7359 if (fixP->fx_done || !seg->use_rela_p)
7360 md_number_to_chars (buf, value, 8);
7361 break;
7362
7363 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7364 /* We claim that these fixups have been processed here, even if
7365 in fact we generate an error because we do not have a reloc
7366 for them, so tc_gen_reloc() will reject them. */
7367 fixP->fx_done = 1;
7368 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7369 {
7370 as_bad_where (fixP->fx_file, fixP->fx_line,
7371 _("undefined symbol %s used as an immediate value"),
7372 S_GET_NAME (fixP->fx_addsy));
7373 goto apply_fix_return;
7374 }
7375 fix_insn (fixP, flags, value);
7376 break;
7377
7378 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7379 if (fixP->fx_done || !seg->use_rela_p)
7380 {
7381 if (value & 3)
7382 as_bad_where (fixP->fx_file, fixP->fx_line,
7383 _("pc-relative load offset not word aligned"));
7384 if (signed_overflow (value, 21))
7385 as_bad_where (fixP->fx_file, fixP->fx_line,
7386 _("pc-relative load offset out of range"));
7387 insn = get_aarch64_insn (buf);
7388 insn |= encode_ld_lit_ofs_19 (value >> 2);
7389 put_aarch64_insn (buf, insn);
7390 }
7391 break;
7392
7393 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7394 if (fixP->fx_done || !seg->use_rela_p)
7395 {
7396 if (signed_overflow (value, 21))
7397 as_bad_where (fixP->fx_file, fixP->fx_line,
7398 _("pc-relative address offset out of range"));
7399 insn = get_aarch64_insn (buf);
7400 insn |= encode_adr_imm (value);
7401 put_aarch64_insn (buf, insn);
7402 }
7403 break;
7404
7405 case BFD_RELOC_AARCH64_BRANCH19:
7406 if (fixP->fx_done || !seg->use_rela_p)
7407 {
7408 if (value & 3)
7409 as_bad_where (fixP->fx_file, fixP->fx_line,
7410 _("conditional branch target not word aligned"));
7411 if (signed_overflow (value, 21))
7412 as_bad_where (fixP->fx_file, fixP->fx_line,
7413 _("conditional branch out of range"));
7414 insn = get_aarch64_insn (buf);
7415 insn |= encode_cond_branch_ofs_19 (value >> 2);
7416 put_aarch64_insn (buf, insn);
7417 }
7418 break;
7419
7420 case BFD_RELOC_AARCH64_TSTBR14:
7421 if (fixP->fx_done || !seg->use_rela_p)
7422 {
7423 if (value & 3)
7424 as_bad_where (fixP->fx_file, fixP->fx_line,
7425 _("conditional branch target not word aligned"));
7426 if (signed_overflow (value, 16))
7427 as_bad_where (fixP->fx_file, fixP->fx_line,
7428 _("conditional branch out of range"));
7429 insn = get_aarch64_insn (buf);
7430 insn |= encode_tst_branch_ofs_14 (value >> 2);
7431 put_aarch64_insn (buf, insn);
7432 }
7433 break;
7434
7435 case BFD_RELOC_AARCH64_CALL26:
7436 case BFD_RELOC_AARCH64_JUMP26:
7437 if (fixP->fx_done || !seg->use_rela_p)
7438 {
7439 if (value & 3)
7440 as_bad_where (fixP->fx_file, fixP->fx_line,
7441 _("branch target not word aligned"));
7442 if (signed_overflow (value, 28))
7443 as_bad_where (fixP->fx_file, fixP->fx_line,
7444 _("branch out of range"));
7445 insn = get_aarch64_insn (buf);
7446 insn |= encode_branch_ofs_26 (value >> 2);
7447 put_aarch64_insn (buf, insn);
7448 }
7449 break;
7450
7451 case BFD_RELOC_AARCH64_MOVW_G0:
7452 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7453 case BFD_RELOC_AARCH64_MOVW_G0_S:
7454 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7455 scale = 0;
7456 goto movw_common;
7457 case BFD_RELOC_AARCH64_MOVW_G1:
7458 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7459 case BFD_RELOC_AARCH64_MOVW_G1_S:
7460 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7461 scale = 16;
7462 goto movw_common;
7463 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7464 scale = 0;
7465 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7466 /* Should always be exported to object file, see
7467 aarch64_force_relocation(). */
7468 gas_assert (!fixP->fx_done);
7469 gas_assert (seg->use_rela_p);
7470 goto movw_common;
7471 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7472 scale = 16;
7473 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7474 /* Should always be exported to object file, see
7475 aarch64_force_relocation(). */
7476 gas_assert (!fixP->fx_done);
7477 gas_assert (seg->use_rela_p);
7478 goto movw_common;
7479 case BFD_RELOC_AARCH64_MOVW_G2:
7480 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7481 case BFD_RELOC_AARCH64_MOVW_G2_S:
7482 scale = 32;
7483 goto movw_common;
7484 case BFD_RELOC_AARCH64_MOVW_G3:
7485 scale = 48;
7486 movw_common:
7487 if (fixP->fx_done || !seg->use_rela_p)
7488 {
7489 insn = get_aarch64_insn (buf);
7490
7491 if (!fixP->fx_done)
7492 {
7493 /* REL signed addend must fit in 16 bits */
7494 if (signed_overflow (value, 16))
7495 as_bad_where (fixP->fx_file, fixP->fx_line,
7496 _("offset out of range"));
7497 }
7498 else
7499 {
7500 /* Check for overflow and scale. */
7501 switch (fixP->fx_r_type)
7502 {
7503 case BFD_RELOC_AARCH64_MOVW_G0:
7504 case BFD_RELOC_AARCH64_MOVW_G1:
7505 case BFD_RELOC_AARCH64_MOVW_G2:
7506 case BFD_RELOC_AARCH64_MOVW_G3:
7507 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7508 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7509 if (unsigned_overflow (value, scale + 16))
7510 as_bad_where (fixP->fx_file, fixP->fx_line,
7511 _("unsigned value out of range"));
7512 break;
7513 case BFD_RELOC_AARCH64_MOVW_G0_S:
7514 case BFD_RELOC_AARCH64_MOVW_G1_S:
7515 case BFD_RELOC_AARCH64_MOVW_G2_S:
7516 /* NOTE: We can only come here with movz or movn. */
7517 if (signed_overflow (value, scale + 16))
7518 as_bad_where (fixP->fx_file, fixP->fx_line,
7519 _("signed value out of range"));
7520 if (value < 0)
7521 {
7522 /* Force use of MOVN. */
7523 value = ~value;
7524 insn = reencode_movzn_to_movn (insn);
7525 }
7526 else
7527 {
7528 /* Force use of MOVZ. */
7529 insn = reencode_movzn_to_movz (insn);
7530 }
7531 break;
7532 default:
7533 /* Unchecked relocations. */
7534 break;
7535 }
7536 value >>= scale;
7537 }
7538
7539 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7540 insn |= encode_movw_imm (value & 0xffff);
7541
7542 put_aarch64_insn (buf, insn);
7543 }
7544 break;
7545
7546 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7547 fixP->fx_r_type = (ilp32_p
7548 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7549 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7550 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7551 /* Should always be exported to object file, see
7552 aarch64_force_relocation(). */
7553 gas_assert (!fixP->fx_done);
7554 gas_assert (seg->use_rela_p);
7555 break;
7556
7557 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7558 fixP->fx_r_type = (ilp32_p
7559 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7560 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
7561 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7562 /* Should always be exported to object file, see
7563 aarch64_force_relocation(). */
7564 gas_assert (!fixP->fx_done);
7565 gas_assert (seg->use_rela_p);
7566 break;
7567
7568 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7569 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7570 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7571 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7572 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7573 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7574 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7575 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7576 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7577 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7578 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7579 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7580 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7581 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7582 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7583 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7584 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7585 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7586 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7587 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7588 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7589 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7590 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7591 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7592 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7593 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7594 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7595 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7596 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7597 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7598 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7599 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7600 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7601 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7602 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7603 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7604 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7605 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7606 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7607 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7608 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7609 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7610 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7611 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7612 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7613 /* Should always be exported to object file, see
7614 aarch64_force_relocation(). */
7615 gas_assert (!fixP->fx_done);
7616 gas_assert (seg->use_rela_p);
7617 break;
7618
7619 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7620 /* Should always be exported to object file, see
7621 aarch64_force_relocation(). */
7622 fixP->fx_r_type = (ilp32_p
7623 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7624 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7625 gas_assert (!fixP->fx_done);
7626 gas_assert (seg->use_rela_p);
7627 break;
7628
7629 case BFD_RELOC_AARCH64_ADD_LO12:
7630 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7631 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7632 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7633 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7634 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7635 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7636 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7637 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7638 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7639 case BFD_RELOC_AARCH64_LDST128_LO12:
7640 case BFD_RELOC_AARCH64_LDST16_LO12:
7641 case BFD_RELOC_AARCH64_LDST32_LO12:
7642 case BFD_RELOC_AARCH64_LDST64_LO12:
7643 case BFD_RELOC_AARCH64_LDST8_LO12:
7644 /* Should always be exported to object file, see
7645 aarch64_force_relocation(). */
7646 gas_assert (!fixP->fx_done);
7647 gas_assert (seg->use_rela_p);
7648 break;
7649
7650 case BFD_RELOC_AARCH64_TLSDESC_ADD:
7651 case BFD_RELOC_AARCH64_TLSDESC_CALL:
7652 case BFD_RELOC_AARCH64_TLSDESC_LDR:
7653 break;
7654
7655 case BFD_RELOC_UNUSED:
7656 /* An error will already have been reported. */
7657 break;
7658
7659 default:
7660 as_bad_where (fixP->fx_file, fixP->fx_line,
7661 _("unexpected %s fixup"),
7662 bfd_get_reloc_code_name (fixP->fx_r_type));
7663 break;
7664 }
7665
7666 apply_fix_return:
7667 /* Free the allocated the struct aarch64_inst.
7668 N.B. currently there are very limited number of fix-up types actually use
7669 this field, so the impact on the performance should be minimal . */
7670 if (fixP->tc_fix_data.inst != NULL)
7671 free (fixP->tc_fix_data.inst);
7672
7673 return;
7674 }
7675
7676 /* Translate internal representation of relocation info to BFD target
7677 format. */
7678
7679 arelent *
7680 tc_gen_reloc (asection * section, fixS * fixp)
7681 {
7682 arelent *reloc;
7683 bfd_reloc_code_real_type code;
7684
7685 reloc = XNEW (arelent);
7686
7687 reloc->sym_ptr_ptr = XNEW (asymbol *);
7688 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7689 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7690
7691 if (fixp->fx_pcrel)
7692 {
7693 if (section->use_rela_p)
7694 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7695 else
7696 fixp->fx_offset = reloc->address;
7697 }
7698 reloc->addend = fixp->fx_offset;
7699
7700 code = fixp->fx_r_type;
7701 switch (code)
7702 {
7703 case BFD_RELOC_16:
7704 if (fixp->fx_pcrel)
7705 code = BFD_RELOC_16_PCREL;
7706 break;
7707
7708 case BFD_RELOC_32:
7709 if (fixp->fx_pcrel)
7710 code = BFD_RELOC_32_PCREL;
7711 break;
7712
7713 case BFD_RELOC_64:
7714 if (fixp->fx_pcrel)
7715 code = BFD_RELOC_64_PCREL;
7716 break;
7717
7718 default:
7719 break;
7720 }
7721
7722 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7723 if (reloc->howto == NULL)
7724 {
7725 as_bad_where (fixp->fx_file, fixp->fx_line,
7726 _
7727 ("cannot represent %s relocation in this object file format"),
7728 bfd_get_reloc_code_name (code));
7729 return NULL;
7730 }
7731
7732 return reloc;
7733 }
7734
7735 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
7736
7737 void
7738 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7739 {
7740 bfd_reloc_code_real_type type;
7741 int pcrel = 0;
7742
7743 /* Pick a reloc.
7744 FIXME: @@ Should look at CPU word size. */
7745 switch (size)
7746 {
7747 case 1:
7748 type = BFD_RELOC_8;
7749 break;
7750 case 2:
7751 type = BFD_RELOC_16;
7752 break;
7753 case 4:
7754 type = BFD_RELOC_32;
7755 break;
7756 case 8:
7757 type = BFD_RELOC_64;
7758 break;
7759 default:
7760 as_bad (_("cannot do %u-byte relocation"), size);
7761 type = BFD_RELOC_UNUSED;
7762 break;
7763 }
7764
7765 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7766 }
7767
/* Decide whether the relocation described by FIXP must be kept in the
   object file for the linker rather than resolved by the assembler.
   Returns non-zero to force emission of the relocation.  */
int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    /* GOT-, TLS- and PC-page-relative relocations can only be resolved
       at link time, so always export them.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  /* Anything else: fall back to the generic policy.  */
  return generic_force_reloc (fixp);
}
7855
7856 #ifdef OBJ_ELF
7857
7858 const char *
7859 elf64_aarch64_target_format (void)
7860 {
7861 if (strcmp (TARGET_OS, "cloudabi") == 0)
7862 {
7863 /* FIXME: What to do for ilp32_p ? */
7864 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
7865 }
7866 if (target_big_endian)
7867 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7868 else
7869 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7870 }
7871
/* Per-symbol hook run at the end of assembly; simply defers to the
   generic ELF symbol frobbing.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
7877 #endif
7878
7879 /* MD interface: Finalization. */
7880
7881 /* A good place to do this, although this was probably not intended
7882 for this kind of use. We need to dump the literal pool before
7883 references are made to a null symbol pointer. */
7884
7885 void
7886 aarch64_cleanup (void)
7887 {
7888 literal_pool *pool;
7889
7890 for (pool = list_of_pools; pool; pool = pool->next)
7891 {
7892 /* Put it at the end of the relevant section. */
7893 subseg_set (pool->section, pool->sub_section);
7894 s_ltorg (0);
7895 }
7896 }
7897
7898 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections that carry no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on a frag boundary: scan forward to decide
	 whether a later mapping symbol makes it redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
	         one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
	         it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
7962 #endif
7963
/* Adjust the symbol table.  Called late in assembly; on ELF targets it
   prunes redundant mapping symbols and then applies the generic ELF
   symbol-table adjustments.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
7976
/* Insert VALUE into hash TABLE under KEY.  An insertion failure (for
   instance a duplicate key) means the assembler's built-in operand
   tables are inconsistent, which is an internal error: report it
   fatally instead of printing to stdout and silently continuing.  */
static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err;

  hash_err = hash_insert (table, key, value);
  if (hash_err)
    as_fatal (_("internal error: can't hash `%s': %s"), key, hash_err);
}
7986
7987 static void
7988 fill_instruction_hash_table (void)
7989 {
7990 aarch64_opcode *opcode = aarch64_opcode_table;
7991
7992 while (opcode->name != NULL)
7993 {
7994 templates *templ, *new_templ;
7995 templ = hash_find (aarch64_ops_hsh, opcode->name);
7996
7997 new_templ = XNEW (templates);
7998 new_templ->opcode = opcode;
7999 new_templ->next = NULL;
8000
8001 if (!templ)
8002 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8003 else
8004 {
8005 new_templ->next = templ->next;
8006 templ->next = new_templ;
8007 }
8008 ++opcode;
8009 }
8010 }
8011
/* Copy at most NUM characters of the NUL-terminated string SRC to DST,
   converting each character to upper case, and NUL-terminate DST.
   DST must have room for NUM + 1 characters.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t i;

  /* Use size_t for the counter so NUM is not truncated on hosts where
     size_t is wider than unsigned int.  */
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
8020
8021 /* Assume STR point to a lower-case string, allocate, convert and return
8022 the corresponding upper-case string. */
8023 static inline const char*
8024 get_upper_str (const char *str)
8025 {
8026 char *ret;
8027 size_t len = strlen (str);
8028 ret = XNEWVEC (char, len + 1);
8029 convert_to_upper (ret, str, len);
8030 return ret;
8031 }
8032
8033 /* MD interface: Initialization. */
8034
8035 void
8036 md_begin (void)
8037 {
8038 unsigned mach;
8039 unsigned int i;
8040
8041 if ((aarch64_ops_hsh = hash_new ()) == NULL
8042 || (aarch64_cond_hsh = hash_new ()) == NULL
8043 || (aarch64_shift_hsh = hash_new ()) == NULL
8044 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8045 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8046 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8047 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8048 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8049 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8050 || (aarch64_reg_hsh = hash_new ()) == NULL
8051 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8052 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8053 || (aarch64_pldop_hsh = hash_new ()) == NULL
8054 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8055 as_fatal (_("virtual memory exhausted"));
8056
8057 fill_instruction_hash_table ();
8058
8059 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8060 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8061 (void *) (aarch64_sys_regs + i));
8062
8063 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8064 checked_hash_insert (aarch64_pstatefield_hsh,
8065 aarch64_pstatefields[i].name,
8066 (void *) (aarch64_pstatefields + i));
8067
8068 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8069 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8070 aarch64_sys_regs_ic[i].name,
8071 (void *) (aarch64_sys_regs_ic + i));
8072
8073 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8074 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8075 aarch64_sys_regs_dc[i].name,
8076 (void *) (aarch64_sys_regs_dc + i));
8077
8078 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8079 checked_hash_insert (aarch64_sys_regs_at_hsh,
8080 aarch64_sys_regs_at[i].name,
8081 (void *) (aarch64_sys_regs_at + i));
8082
8083 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8084 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8085 aarch64_sys_regs_tlbi[i].name,
8086 (void *) (aarch64_sys_regs_tlbi + i));
8087
8088 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8089 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8090 (void *) (reg_names + i));
8091
8092 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8093 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8094 (void *) (nzcv_names + i));
8095
8096 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8097 {
8098 const char *name = aarch64_operand_modifiers[i].name;
8099 checked_hash_insert (aarch64_shift_hsh, name,
8100 (void *) (aarch64_operand_modifiers + i));
8101 /* Also hash the name in the upper case. */
8102 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8103 (void *) (aarch64_operand_modifiers + i));
8104 }
8105
8106 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8107 {
8108 unsigned int j;
8109 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8110 the same condition code. */
8111 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8112 {
8113 const char *name = aarch64_conds[i].names[j];
8114 if (name == NULL)
8115 break;
8116 checked_hash_insert (aarch64_cond_hsh, name,
8117 (void *) (aarch64_conds + i));
8118 /* Also hash the name in the upper case. */
8119 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8120 (void *) (aarch64_conds + i));
8121 }
8122 }
8123
8124 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8125 {
8126 const char *name = aarch64_barrier_options[i].name;
8127 /* Skip xx00 - the unallocated values of option. */
8128 if ((i & 0x3) == 0)
8129 continue;
8130 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8131 (void *) (aarch64_barrier_options + i));
8132 /* Also hash the name in the upper case. */
8133 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8134 (void *) (aarch64_barrier_options + i));
8135 }
8136
8137 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8138 {
8139 const char* name = aarch64_prfops[i].name;
8140 /* Skip the unallocated hint encodings. */
8141 if (name == NULL)
8142 continue;
8143 checked_hash_insert (aarch64_pldop_hsh, name,
8144 (void *) (aarch64_prfops + i));
8145 /* Also hash the name in the upper case. */
8146 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8147 (void *) (aarch64_prfops + i));
8148 }
8149
8150 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8151 {
8152 const char* name = aarch64_hint_options[i].name;
8153
8154 checked_hash_insert (aarch64_hint_opt_hsh, name,
8155 (void *) (aarch64_hint_options + i));
8156 /* Also hash the name in the upper case. */
8157 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8158 (void *) (aarch64_hint_options + i));
8159 }
8160
8161 /* Set the cpu variant based on the command-line options. */
8162 if (!mcpu_cpu_opt)
8163 mcpu_cpu_opt = march_cpu_opt;
8164
8165 if (!mcpu_cpu_opt)
8166 mcpu_cpu_opt = &cpu_default;
8167
8168 cpu_variant = *mcpu_cpu_opt;
8169
8170 /* Record the CPU type. */
8171 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8172
8173 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8174 }
8175
/* Command line processing.  */

/* Short options all take an argument introduced by -m.  */
const char *md_shortopts = "m:";

/* -EB/-EL are only defined for the endiannesses this assembler was
   configured to support.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8202
/* Simple boolean-valued assembler options: matching OPTION stores VALUE
   into *VAR.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8225
/* A known CPU name and the feature set it implies.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Qualcomm QDF24XX"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  /* Sentinel.  */
  {NULL, AARCH64_ARCH_NONE, NULL}
};
8272
/* A known architecture name and the feature set it implies.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  /* Sentinel.  */
  {NULL, AARCH64_ARCH_NONE}
};
8288
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* Each named extension maps to the feature bits it enables and the
   features it depends on (used for the transitive closures below).  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_ARCH_NONE},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_ARCH_NONE},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
8322
/* An option that takes a sub-argument, decoded by FUNC (e.g. -mcpu=).  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
8330
8331 /* Transitive closure of features depending on set. */
8332 static aarch64_feature_set
8333 aarch64_feature_disable_set (aarch64_feature_set set)
8334 {
8335 const struct aarch64_option_cpu_value_table *opt;
8336 aarch64_feature_set prev = 0;
8337
8338 while (prev != set) {
8339 prev = set;
8340 for (opt = aarch64_features; opt->name != NULL; opt++)
8341 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8342 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8343 }
8344 return set;
8345 }
8346
8347 /* Transitive closure of dependencies of set. */
8348 static aarch64_feature_set
8349 aarch64_feature_enable_set (aarch64_feature_set set)
8350 {
8351 const struct aarch64_option_cpu_value_table *opt;
8352 aarch64_feature_set prev = 0;
8353
8354 while (prev != set) {
8355 prev = set;
8356 for (opt = aarch64_features; opt->name != NULL; opt++)
8357 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8358 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8359 }
8360 return set;
8361 }
8362
8363 static int
8364 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8365 bfd_boolean ext_only)
8366 {
8367 /* We insist on extensions being added before being removed. We achieve
8368 this by using the ADDING_VALUE variable to indicate whether we are
8369 adding an extension (1) or removing it (0) and only allowing it to
8370 change in the order -1 -> 1 -> 0. */
8371 int adding_value = -1;
8372 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8373
8374 /* Copy the feature set, so that we can modify it. */
8375 *ext_set = **opt_p;
8376 *opt_p = ext_set;
8377
8378 while (str != NULL && *str != 0)
8379 {
8380 const struct aarch64_option_cpu_value_table *opt;
8381 const char *ext = NULL;
8382 int optlen;
8383
8384 if (!ext_only)
8385 {
8386 if (*str != '+')
8387 {
8388 as_bad (_("invalid architectural extension"));
8389 return 0;
8390 }
8391
8392 ext = strchr (++str, '+');
8393 }
8394
8395 if (ext != NULL)
8396 optlen = ext - str;
8397 else
8398 optlen = strlen (str);
8399
8400 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8401 {
8402 if (adding_value != 0)
8403 adding_value = 0;
8404 optlen -= 2;
8405 str += 2;
8406 }
8407 else if (optlen > 0)
8408 {
8409 if (adding_value == -1)
8410 adding_value = 1;
8411 else if (adding_value != 1)
8412 {
8413 as_bad (_("must specify extensions to add before specifying "
8414 "those to remove"));
8415 return FALSE;
8416 }
8417 }
8418
8419 if (optlen == 0)
8420 {
8421 as_bad (_("missing architectural extension"));
8422 return 0;
8423 }
8424
8425 gas_assert (adding_value != -1);
8426
8427 for (opt = aarch64_features; opt->name != NULL; opt++)
8428 if (strncmp (opt->name, str, optlen) == 0)
8429 {
8430 aarch64_feature_set set;
8431
8432 /* Add or remove the extension. */
8433 if (adding_value)
8434 {
8435 set = aarch64_feature_enable_set (opt->value);
8436 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8437 }
8438 else
8439 {
8440 set = aarch64_feature_disable_set (opt->value);
8441 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8442 }
8443 break;
8444 }
8445
8446 if (opt->name == NULL)
8447 {
8448 as_bad (_("unknown architectural extension `%s'"), str);
8449 return 0;
8450 }
8451
8452 str = ext;
8453 };
8454
8455 return 1;
8456 }
8457
8458 static int
8459 aarch64_parse_cpu (const char *str)
8460 {
8461 const struct aarch64_cpu_option_table *opt;
8462 const char *ext = strchr (str, '+');
8463 size_t optlen;
8464
8465 if (ext != NULL)
8466 optlen = ext - str;
8467 else
8468 optlen = strlen (str);
8469
8470 if (optlen == 0)
8471 {
8472 as_bad (_("missing cpu name `%s'"), str);
8473 return 0;
8474 }
8475
8476 for (opt = aarch64_cpus; opt->name != NULL; opt++)
8477 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8478 {
8479 mcpu_cpu_opt = &opt->value;
8480 if (ext != NULL)
8481 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
8482
8483 return 1;
8484 }
8485
8486 as_bad (_("unknown cpu `%s'"), str);
8487 return 0;
8488 }
8489
8490 static int
8491 aarch64_parse_arch (const char *str)
8492 {
8493 const struct aarch64_arch_option_table *opt;
8494 const char *ext = strchr (str, '+');
8495 size_t optlen;
8496
8497 if (ext != NULL)
8498 optlen = ext - str;
8499 else
8500 optlen = strlen (str);
8501
8502 if (optlen == 0)
8503 {
8504 as_bad (_("missing architecture name `%s'"), str);
8505 return 0;
8506 }
8507
8508 for (opt = aarch64_archs; opt->name != NULL; opt++)
8509 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8510 {
8511 march_cpu_opt = &opt->value;
8512 if (ext != NULL)
8513 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
8514
8515 return 1;
8516 }
8517
8518 as_bad (_("unknown architecture `%s'\n"), str);
8519 return 0;
8520 }
8521
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* No sentinel entry: iterated with ARRAY_SIZE.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32",	AARCH64_ABI_ILP32},
  {"lp64",	AARCH64_ABI_LP64},
};
8533
8534 static int
8535 aarch64_parse_abi (const char *str)
8536 {
8537 unsigned int i;
8538
8539 if (str[0] == '\0')
8540 {
8541 as_bad (_("missing abi name `%s'"), str);
8542 return 0;
8543 }
8544
8545 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8546 if (strcmp (str, aarch64_abis[i].name) == 0)
8547 {
8548 aarch64_abi = aarch64_abis[i].value;
8549 return 1;
8550 }
8551
8552 as_bad (_("unknown abi `%s'\n"), str);
8553 return 0;
8554 }
8555
/* The -m options that take a sub-argument, with their decoders.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
8567
/* Handle a command-line option.  C is the option character (or long
   option id), ARG its argument if any.  Returns 1 if the option was
   recognized and handled, 0 otherwise.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
         ones.  */
      return 0;

    default:
      /* First try the simple boolean options...  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* ...then the options that take a sub-argument.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
8635
/* Print the AArch64-specific assembler options to FP for --help.  */
void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
 -EB assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
 -EL assemble code for a little-endian cpu\n"));
#endif
}
8662
8663 /* Parse a .cpu directive. */
8664
8665 static void
8666 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
8667 {
8668 const struct aarch64_cpu_option_table *opt;
8669 char saved_char;
8670 char *name;
8671 char *ext;
8672 size_t optlen;
8673
8674 name = input_line_pointer;
8675 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8676 input_line_pointer++;
8677 saved_char = *input_line_pointer;
8678 *input_line_pointer = 0;
8679
8680 ext = strchr (name, '+');
8681
8682 if (ext != NULL)
8683 optlen = ext - name;
8684 else
8685 optlen = strlen (name);
8686
8687 /* Skip the first "all" entry. */
8688 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
8689 if (strlen (opt->name) == optlen
8690 && strncmp (name, opt->name, optlen) == 0)
8691 {
8692 mcpu_cpu_opt = &opt->value;
8693 if (ext != NULL)
8694 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8695 return;
8696
8697 cpu_variant = *mcpu_cpu_opt;
8698
8699 *input_line_pointer = saved_char;
8700 demand_empty_rest_of_line ();
8701 return;
8702 }
8703 as_bad (_("unknown cpu `%s'"), name);
8704 *input_line_pointer = saved_char;
8705 ignore_rest_of_line ();
8706 }
8707
8708
8709 /* Parse a .arch directive. */
8710
8711 static void
8712 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
8713 {
8714 const struct aarch64_arch_option_table *opt;
8715 char saved_char;
8716 char *name;
8717 char *ext;
8718 size_t optlen;
8719
8720 name = input_line_pointer;
8721 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8722 input_line_pointer++;
8723 saved_char = *input_line_pointer;
8724 *input_line_pointer = 0;
8725
8726 ext = strchr (name, '+');
8727
8728 if (ext != NULL)
8729 optlen = ext - name;
8730 else
8731 optlen = strlen (name);
8732
8733 /* Skip the first "all" entry. */
8734 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8735 if (strlen (opt->name) == optlen
8736 && strncmp (name, opt->name, optlen) == 0)
8737 {
8738 mcpu_cpu_opt = &opt->value;
8739 if (ext != NULL)
8740 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8741 return;
8742
8743 cpu_variant = *mcpu_cpu_opt;
8744
8745 *input_line_pointer = saved_char;
8746 demand_empty_rest_of_line ();
8747 return;
8748 }
8749
8750 as_bad (_("unknown architecture `%s'\n"), name);
8751 *input_line_pointer = saved_char;
8752 ignore_rest_of_line ();
8753 }
8754
8755 /* Parse a .arch_extension directive. */
8756
8757 static void
8758 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8759 {
8760 char saved_char;
8761 char *ext = input_line_pointer;;
8762
8763 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8764 input_line_pointer++;
8765 saved_char = *input_line_pointer;
8766 *input_line_pointer = 0;
8767
8768 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8769 return;
8770
8771 cpu_variant = *mcpu_cpu_opt;
8772
8773 *input_line_pointer = saved_char;
8774 demand_empty_rest_of_line ();
8775 }
8776
8777 /* Copy symbol information. */
8778
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  /* Copy the AArch64 per-symbol flag word from SRC to DEST; invoked
     via the target's copy-symbol-attributes hook when gas clones a
     symbol.  NOTE(review): AARCH64_GET_FLAG's exact contents (e.g.
     mapping-state bits) are defined elsewhere — confirm there.  */
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}