[AArch64] use subseg_text_p to check .text
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32-bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
79 enum neon_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q
87 };
88
89 /* Bits for DEFINED field in neon_type_el. */
90 #define NTA_HASTYPE 1
91 #define NTA_HASINDEX 2
92
93 struct neon_type_el
94 {
95 enum neon_el_type type;
96 unsigned char defined;
97 unsigned width;
98 int64_t index;
99 };
100
101 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
102
103 struct reloc
104 {
105 bfd_reloc_code_real_type type;
106 expressionS exp;
107 int pc_rel;
108 enum aarch64_opnd opnd;
109 uint32_t flags;
110 unsigned need_libopcodes_p : 1;
111 };
112
113 struct aarch64_instruction
114 {
115 /* libopcodes structure for instruction intermediate representation. */
116 aarch64_inst base;
117 /* Record assembly errors found during the parsing. */
118 struct
119 {
120 enum aarch64_operand_error_kind kind;
121 const char *error;
122 } parsing_error;
123 /* The condition that appears in the assembly line. */
124 int cond;
125 /* Relocation information (including the GAS internal fixup). */
126 struct reloc reloc;
127 /* Need to generate an immediate in the literal pool. */
128 unsigned gen_lit_pool : 1;
129 };
130
131 typedef struct aarch64_instruction aarch64_instruction;
132
133 static aarch64_instruction inst;
134
135 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
136 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137
138 /* Diagnostics inline function utilities.
139
140 These are lightweight utilities which should only be called by parse_operands
141 and other parsers. GAS processes each assembly line by parsing it against
142 instruction template(s); in the case of multiple templates (for the same
143 mnemonic name), those templates are tried one by one until one succeeds or
144 all fail. An assembly line may fail a few templates before being
145 successfully parsed; an error saved here in most cases is not a user error
146 but an error indicating the current template is not the right template.
147 Therefore it is very important that errors can be saved at a low cost during
148 the parsing; we don't want to slow down the whole parsing by recording
149 non-user errors in detail.
150
151 Remember that the objective is to help GAS pick up the most appropriate
152 error message in the case of multiple templates, e.g. FMOV which has 8
153 templates. */
154
155 static inline void
156 clear_error (void)
157 {
158 inst.parsing_error.kind = AARCH64_OPDE_NIL;
159 inst.parsing_error.error = NULL;
160 }
161
162 static inline bfd_boolean
163 error_p (void)
164 {
165 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167
168 static inline const char *
169 get_error_message (void)
170 {
171 return inst.parsing_error.error;
172 }
173
174 static inline enum aarch64_operand_error_kind
175 get_error_kind (void)
176 {
177 return inst.parsing_error.kind;
178 }
179
180 static inline void
181 set_error (enum aarch64_operand_error_kind kind, const char *error)
182 {
183 inst.parsing_error.kind = kind;
184 inst.parsing_error.error = error;
185 }
186
187 static inline void
188 set_recoverable_error (const char *error)
189 {
190 set_error (AARCH64_OPDE_RECOVERABLE, error);
191 }
192
193 /* Use the DESC field of the corresponding aarch64_operand entry to compose
194 the error message. */
195 static inline void
196 set_default_error (void)
197 {
198 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
199 }
200
201 static inline void
202 set_syntax_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
205 }
206
207 static inline void
208 set_first_syntax_error (const char *error)
209 {
210 if (! error_p ())
211 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
212 }
213
214 static inline void
215 set_fatal_syntax_error (const char *error)
216 {
217 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
218 }
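/* A minimal usage sketch (illustrative, not part of the original source): an
   operand parser typically records a failure and bails out like

     if (exp.X_op != O_constant)
       {
         set_syntax_error (_("constant expression required"));
         return FALSE;
       }

   and the saved error only becomes a user diagnostic (via as_bad) once every
   template for the mnemonic has been tried.  */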
219 \f
220 /* Number of littlenums required to hold an extended precision number. */
221 #define MAX_LITTLENUMS 6
222
223 /* Return value for certain parsers when the parsing fails; those parsers
224 return the information of the parsed result, e.g. register number, on
225 success. */
226 #define PARSE_FAIL -1
227
228 /* This is an invalid condition code that means no conditional field is
229 present. */
230 #define COND_ALWAYS 0x10
231
232 typedef struct
233 {
234 const char *template;
235 unsigned long value;
236 } asm_barrier_opt;
237
238 typedef struct
239 {
240 const char *template;
241 uint32_t value;
242 } asm_nzcv;
243
244 struct reloc_entry
245 {
246 char *name;
247 bfd_reloc_code_real_type reloc;
248 };
249
250 /* Structure for a hash table entry for a register. */
251 typedef struct
252 {
253 const char *name;
254 unsigned char number;
255 unsigned char type;
256 unsigned char builtin;
257 } reg_entry;
258
259 /* Macros to define the register types and masks for the purpose
260 of parsing. */
261
262 #undef AARCH64_REG_TYPES
263 #define AARCH64_REG_TYPES \
264 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
265 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
266 BASIC_REG_TYPE(SP_32) /* wsp */ \
267 BASIC_REG_TYPE(SP_64) /* sp */ \
268 BASIC_REG_TYPE(Z_32) /* wzr */ \
269 BASIC_REG_TYPE(Z_64) /* xzr */ \
270 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
271 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
272 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
273 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
274 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
275 BASIC_REG_TYPE(CN) /* c[0-7] */ \
276 BASIC_REG_TYPE(VN) /* v[0-31] */ \
277 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
278 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
279 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
280 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
281 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
282 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
283 /* Typecheck: any [BHSDQ]P FP. */ \
284 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
285 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
286 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
287 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
289 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
290 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
291 /* Any integer register; used for error messages only. */ \
292 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
293 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Pseudo type to mark the end of the enumerator sequence. */ \
296 BASIC_REG_TYPE(MAX)
297
298 #undef BASIC_REG_TYPE
299 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
300 #undef MULTI_REG_TYPE
301 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
302
303 /* Register type enumerators. */
304 typedef enum
305 {
306 /* A list of REG_TYPE_*. */
307 AARCH64_REG_TYPES
308 } aarch64_reg_type;
309
310 #undef BASIC_REG_TYPE
311 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
312 #undef REG_TYPE
313 #define REG_TYPE(T) (1 << REG_TYPE_##T)
314 #undef MULTI_REG_TYPE
315 #define MULTI_REG_TYPE(T,V) V,
316
317 /* Values indexed by aarch64_reg_type to assist the type checking. */
318 static const unsigned reg_type_masks[] =
319 {
320 AARCH64_REG_TYPES
321 };
322
323 #undef BASIC_REG_TYPE
324 #undef REG_TYPE
325 #undef MULTI_REG_TYPE
326 #undef AARCH64_REG_TYPES
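/* Illustration of the X-macro expansion above (added for clarity): each
   BASIC_REG_TYPE(T) contributes the enumerator REG_TYPE_T and the single mask
   bit (1 << REG_TYPE_T), while each MULTI_REG_TYPE(T, V) contributes the
   enumerator REG_TYPE_T and the union mask V, e.g.

     reg_type_masks[REG_TYPE_R64_SP]
       == (1 << REG_TYPE_R_64) | (1 << REG_TYPE_SP_64)

   which lets aarch64_check_reg_type below test membership with a bit-and.  */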
327
328 /* Diagnostics used when we don't get a register of the expected type.
329 Note: this has to be synchronized with the aarch64_reg_type definitions
330 above. */
331 static const char *
332 get_reg_expected_msg (aarch64_reg_type reg_type)
333 {
334 const char *msg;
335
336 switch (reg_type)
337 {
338 case REG_TYPE_R_32:
339 msg = N_("integer 32-bit register expected");
340 break;
341 case REG_TYPE_R_64:
342 msg = N_("integer 64-bit register expected");
343 break;
344 case REG_TYPE_R_N:
345 msg = N_("integer register expected");
346 break;
347 case REG_TYPE_R_Z_SP:
348 msg = N_("integer, zero or SP register expected");
349 break;
350 case REG_TYPE_FP_B:
351 msg = N_("8-bit SIMD scalar register expected");
352 break;
353 case REG_TYPE_FP_H:
354 msg = N_("16-bit SIMD scalar or floating-point half precision "
355 "register expected");
356 break;
357 case REG_TYPE_FP_S:
358 msg = N_("32-bit SIMD scalar or floating-point single precision "
359 "register expected");
360 break;
361 case REG_TYPE_FP_D:
362 msg = N_("64-bit SIMD scalar or floating-point double precision "
363 "register expected");
364 break;
365 case REG_TYPE_FP_Q:
366 msg = N_("128-bit SIMD scalar or floating-point quad precision "
367 "register expected");
368 break;
369 case REG_TYPE_CN:
370 msg = N_("C0 - C15 expected");
371 break;
372 case REG_TYPE_R_Z_BHSDQ_V:
373 msg = N_("register expected");
374 break;
375 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
376 msg = N_("SIMD scalar or floating-point register expected");
377 break;
378 case REG_TYPE_VN: /* any V reg */
379 msg = N_("vector register expected");
380 break;
381 default:
382 as_fatal (_("invalid register type %d"), reg_type);
383 }
384 return msg;
385 }
386
387 /* Some well known registers that we refer to directly elsewhere. */
388 #define REG_SP 31
389
390 /* Instructions take 4 bytes in the object file. */
391 #define INSN_SIZE 4
392
393 /* Define some common error messages. */
394 #define BAD_SP _("SP not allowed here")
395
396 static struct hash_control *aarch64_ops_hsh;
397 static struct hash_control *aarch64_cond_hsh;
398 static struct hash_control *aarch64_shift_hsh;
399 static struct hash_control *aarch64_sys_regs_hsh;
400 static struct hash_control *aarch64_pstatefield_hsh;
401 static struct hash_control *aarch64_sys_regs_ic_hsh;
402 static struct hash_control *aarch64_sys_regs_dc_hsh;
403 static struct hash_control *aarch64_sys_regs_at_hsh;
404 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
405 static struct hash_control *aarch64_reg_hsh;
406 static struct hash_control *aarch64_barrier_opt_hsh;
407 static struct hash_control *aarch64_nzcv_hsh;
408 static struct hash_control *aarch64_pldop_hsh;
409
410 /* Stuff needed to resolve the label ambiguity
411 As:
412 ...
413 label: <insn>
414 may differ from:
415 ...
416 label:
417 <insn> */
418
419 static symbolS *last_label_seen;
420
421 /* Literal pool structure. Held on a per-section
422 and per-sub-section basis. */
423
424 #define MAX_LITERAL_POOL_SIZE 1024
425 typedef struct literal_expression
426 {
427 expressionS exp;
428 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
429 LITTLENUM_TYPE * bignum;
430 } literal_expression;
431
432 typedef struct literal_pool
433 {
434 literal_expression literals[MAX_LITERAL_POOL_SIZE];
435 unsigned int next_free_entry;
436 unsigned int id;
437 symbolS *symbol;
438 segT section;
439 subsegT sub_section;
440 int size;
441 struct literal_pool *next;
442 } literal_pool;
443
444 /* Pointer to a linked list of literal pools. */
445 static literal_pool *list_of_pools = NULL;
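/* Note (added for clarity): a separate pool is kept for every
   (section, sub-section, entry size) combination; find_literal_pool and
   find_or_make_literal_pool below walk and extend this list.  */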
446 \f
447 /* Pure syntax. */
448
449 /* This array holds the chars that always start a comment. If the
450 pre-processor is disabled, these aren't very useful. */
451 const char comment_chars[] = "";
452
453 /* This array holds the chars that only start a comment at the beginning of
454 a line. If the line seems to have the form '# 123 filename'
455 .line and .file directives will appear in the pre-processed output. */
456 /* Note that input_file.c hand checks for '#' at the beginning of the
457 first line of the input file. This is because the compiler outputs
458 #NO_APP at the beginning of its output. */
459 /* Also note that comments like this one will always work. */
460 const char line_comment_chars[] = "#";
461
462 const char line_separator_chars[] = ";";
463
464 /* Chars that can be used to separate the mantissa
465 from the exponent in floating point numbers. */
466 const char EXP_CHARS[] = "eE";
467
468 /* Chars that mean this number is a floating point constant. */
469 /* As in 0f12.456 */
470 /* or 0d1.2345e12 */
471
472 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
473
474 /* Prefix character that indicates the start of an immediate value. */
475 #define is_immediate_prefix(C) ((C) == '#')
476
477 /* Separator character handling. */
478
479 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
480
481 static inline bfd_boolean
482 skip_past_char (char **str, char c)
483 {
484 if (**str == c)
485 {
486 (*str)++;
487 return TRUE;
488 }
489 else
490 return FALSE;
491 }
492
493 #define skip_past_comma(str) skip_past_char (str, ',')
494
495 /* Arithmetic expressions (possibly involving symbols). */
496
497 static bfd_boolean in_my_get_expression_p = FALSE;
498
499 /* Third argument to my_get_expression. */
500 #define GE_NO_PREFIX 0
501 #define GE_OPT_PREFIX 1
502
503 /* Return TRUE if the string pointed to by *STR is successfully parsed
504 as a valid expression; *EP will be filled with the information of
505 such an expression. Otherwise return FALSE. */
506
507 static bfd_boolean
508 my_get_expression (expressionS * ep, char **str, int prefix_mode,
509 int reject_absent)
510 {
511 char *save_in;
512 segT seg;
513 int prefix_present_p = 0;
514
515 switch (prefix_mode)
516 {
517 case GE_NO_PREFIX:
518 break;
519 case GE_OPT_PREFIX:
520 if (is_immediate_prefix (**str))
521 {
522 (*str)++;
523 prefix_present_p = 1;
524 }
525 break;
526 default:
527 abort ();
528 }
529
530 memset (ep, 0, sizeof (expressionS));
531
532 save_in = input_line_pointer;
533 input_line_pointer = *str;
534 in_my_get_expression_p = TRUE;
535 seg = expression (ep);
536 in_my_get_expression_p = FALSE;
537
538 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
539 {
540 /* We found a bad expression in md_operand(). */
541 *str = input_line_pointer;
542 input_line_pointer = save_in;
543 if (prefix_present_p && ! error_p ())
544 set_fatal_syntax_error (_("bad expression"));
545 else
546 set_first_syntax_error (_("bad expression"));
547 return FALSE;
548 }
549
550 #ifdef OBJ_AOUT
551 if (seg != absolute_section
552 && seg != text_section
553 && seg != data_section
554 && seg != bss_section && seg != undefined_section)
555 {
556 set_syntax_error (_("bad segment"));
557 *str = input_line_pointer;
558 input_line_pointer = save_in;
559 return FALSE;
560 }
561 #else
562 (void) seg;
563 #endif
564
565 *str = input_line_pointer;
566 input_line_pointer = save_in;
567 return TRUE;
568 }
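/* Example (illustrative): with GE_OPT_PREFIX both "#16" and "16" yield an
   O_constant expression with value 16; with GE_NO_PREFIX a leading '#' is not
   consumed by this function.  */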
569
570 /* Turn a string in input_line_pointer into a floating point constant
571 of type TYPE, and store the appropriate bytes in *LITP. The number
572 of LITTLENUMS emitted is stored in *SIZEP. An error message is
573 returned, or NULL on OK. */
574
575 char *
576 md_atof (int type, char *litP, int *sizeP)
577 {
578 return ieee_md_atof (type, litP, sizeP, target_big_endian);
579 }
580
581 /* We handle all bad expressions here, so that we can report the faulty
582 instruction in the error message. */
583 void
584 md_operand (expressionS * exp)
585 {
586 if (in_my_get_expression_p)
587 exp->X_op = O_illegal;
588 }
589
590 /* Immediate values. */
591
592 /* Errors may be set multiple times during parsing or bit encoding
593 (particularly in the Neon bits), but usually the earliest error which is set
594 will be the most meaningful. Avoid overwriting it with later (cascading)
595 errors by calling this function. */
596
597 static void
598 first_error (const char *error)
599 {
600 if (! error_p ())
601 set_syntax_error (error);
602 }
603
604 /* Similar to first_error, but this function accepts a formatted error
605 message. */
606 static void
607 first_error_fmt (const char *format, ...)
608 {
609 va_list args;
610 enum
611 { size = 100 };
612 /* N.B. this single buffer will not cause error messages for different
613 instructions to pollute each other; this is because at the end of
614 processing of each assembly line, the error message, if any, will be
615 collected by as_bad. */
616 static char buffer[size];
617
618 if (! error_p ())
619 {
620 int ret ATTRIBUTE_UNUSED;
621 va_start (args, format);
622 ret = vsnprintf (buffer, size, format, args);
623 know (ret <= size - 1 && ret >= 0);
624 va_end (args);
625 set_syntax_error (buffer);
626 }
627 }
628
629 /* Register parsing. */
630
631 /* Generic register parser which is called by other specialized
632 register parsers.
633 CCP points to what should be the beginning of a register name.
634 If it is indeed a valid register name, advance CCP over it and
635 return the reg_entry structure; otherwise return NULL.
636 It does not issue diagnostics. */
637
638 static reg_entry *
639 parse_reg (char **ccp)
640 {
641 char *start = *ccp;
642 char *p;
643 reg_entry *reg;
644
645 #ifdef REGISTER_PREFIX
646 if (*start != REGISTER_PREFIX)
647 return NULL;
648 start++;
649 #endif
650
651 p = start;
652 if (!ISALPHA (*p) || !is_name_beginner (*p))
653 return NULL;
654
655 do
656 p++;
657 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
658
659 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
660
661 if (!reg)
662 return NULL;
663
664 *ccp = p;
665 return reg;
666 }
667
668 /* Return TRUE if REG->TYPE is compatible with the required type TYPE;
669 otherwise return FALSE. */
670 static bfd_boolean
671 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
672 {
673 if (reg->type == type)
674 return TRUE;
675
676 switch (type)
677 {
678 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
679 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
680 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
681 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
682 case REG_TYPE_VN: /* Vector register. */
683 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
684 return ((reg_type_masks[reg->type] & reg_type_masks[type])
685 == reg_type_masks[reg->type]);
686 default:
687 as_fatal ("unhandled type %d", type);
688 abort ();
689 }
690 }
691
692 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
693 Return the register number otherwise. *ISREG32 is set to one if the
694 register is 32-bit wide; *ISREGZERO is set to one if the register is
695 of type Z_32 or Z_64.
696 Note that this function does not issue any diagnostics. */
697
698 static int
699 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
700 int *isreg32, int *isregzero)
701 {
702 char *str = *ccp;
703 const reg_entry *reg = parse_reg (&str);
704
705 if (reg == NULL)
706 return PARSE_FAIL;
707
708 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
709 return PARSE_FAIL;
710
711 switch (reg->type)
712 {
713 case REG_TYPE_SP_32:
714 case REG_TYPE_SP_64:
715 if (reject_sp)
716 return PARSE_FAIL;
717 *isreg32 = reg->type == REG_TYPE_SP_32;
718 *isregzero = 0;
719 break;
720 case REG_TYPE_R_32:
721 case REG_TYPE_R_64:
722 *isreg32 = reg->type == REG_TYPE_R_32;
723 *isregzero = 0;
724 break;
725 case REG_TYPE_Z_32:
726 case REG_TYPE_Z_64:
727 if (reject_rz)
728 return PARSE_FAIL;
729 *isreg32 = reg->type == REG_TYPE_Z_32;
730 *isregzero = 1;
731 break;
732 default:
733 return PARSE_FAIL;
734 }
735
736 *ccp = str;
737
738 return reg->number;
739 }
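/* Example (illustrative): for "w3" this returns 3 with *ISREG32 = 1 and
   *ISREGZERO = 0; for "xzr" it returns 31 with *ISREG32 = 0 and
   *ISREGZERO = 1, or PARSE_FAIL if REJECT_RZ is non-zero.  */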
740
741 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
742 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
743 otherwise return FALSE.
744
745 Accept only one occurrence of:
746 8b 16b 4h 8h 2s 4s 1d 2d
747 b h s d q */
748 static bfd_boolean
749 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
750 {
751 char *ptr = *str;
752 unsigned width;
753 unsigned element_size;
754 enum neon_el_type type;
755
756 /* skip '.' */
757 ptr++;
758
759 if (!ISDIGIT (*ptr))
760 {
761 width = 0;
762 goto elt_size;
763 }
764 width = strtoul (ptr, &ptr, 10);
765 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
766 {
767 first_error_fmt (_("bad size %d in vector width specifier"), width);
768 return FALSE;
769 }
770
771 elt_size:
772 switch (TOLOWER (*ptr))
773 {
774 case 'b':
775 type = NT_b;
776 element_size = 8;
777 break;
778 case 'h':
779 type = NT_h;
780 element_size = 16;
781 break;
782 case 's':
783 type = NT_s;
784 element_size = 32;
785 break;
786 case 'd':
787 type = NT_d;
788 element_size = 64;
789 break;
790 case 'q':
791 if (width == 1)
792 {
793 type = NT_q;
794 element_size = 128;
795 break;
796 }
797 /* fall through. */
798 default:
799 if (*ptr != '\0')
800 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
801 else
802 first_error (_("missing element size"));
803 return FALSE;
804 }
805 if (width != 0 && width * element_size != 64 && width * element_size != 128)
806 {
807 first_error_fmt (_
808 ("invalid element size %d and vector size combination %c"),
809 width, *ptr);
810 return FALSE;
811 }
812 ptr++;
813
814 parsed_type->type = type;
815 parsed_type->width = width;
816
817 *str = ptr;
818
819 return TRUE;
820 }
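/* Examples (illustrative): ".4s" parses to type NT_s with width 4
   (4 x 32 = 128 bits, accepted); ".2s" gives width 2 (64 bits); ".3s" is
   rejected by the width check above and ".4d" by the 64/128-bit total vector
   size check.  */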
821
822 /* Parse a single type, e.g. ".8b", leading period included.
823 Only applicable to Vn registers.
824
825 Return TRUE on success; otherwise return FALSE. */
826 static bfd_boolean
827 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
828 {
829 char *str = *ccp;
830
831 if (*str == '.')
832 {
833 if (! parse_neon_type_for_operand (vectype, &str))
834 {
835 first_error (_("vector type expected"));
836 return FALSE;
837 }
838 }
839 else
840 return FALSE;
841
842 *ccp = str;
843
844 return TRUE;
845 }
846
847 /* Parse a register of the type TYPE.
848
849 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
850 name or the parsed register is not of TYPE.
851
852 Otherwise return the register number, and optionally fill in the actual
853 type of the register in *RTYPE when multiple alternatives were given, and
854 return the register shape and element index information in *TYPEINFO.
855
856 IN_REG_LIST should be set to TRUE if the caller is parsing a register
857 list. */
858
859 static int
860 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
861 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
862 {
863 char *str = *ccp;
864 const reg_entry *reg = parse_reg (&str);
865 struct neon_type_el atype;
866 struct neon_type_el parsetype;
867 bfd_boolean is_typed_vecreg = FALSE;
868
869 atype.defined = 0;
870 atype.type = NT_invtype;
871 atype.width = -1;
872 atype.index = 0;
873
874 if (reg == NULL)
875 {
876 if (typeinfo)
877 *typeinfo = atype;
878 set_default_error ();
879 return PARSE_FAIL;
880 }
881
882 if (! aarch64_check_reg_type (reg, type))
883 {
884 DEBUG_TRACE ("reg type check failed");
885 set_default_error ();
886 return PARSE_FAIL;
887 }
888 type = reg->type;
889
890 if (type == REG_TYPE_VN
891 && parse_neon_operand_type (&parsetype, &str))
892 {
893 /* Register is of the form Vn.[bhsdq]. */
894 is_typed_vecreg = TRUE;
895
896 if (parsetype.width == 0)
897 /* Expect index. In the new scheme we cannot have
898 Vn.[bhsdq] represent a scalar. Therefore any
899 Vn.[bhsdq] should have an index following it.
900 Except in reglists, of course. */
901 atype.defined |= NTA_HASINDEX;
902 else
903 atype.defined |= NTA_HASTYPE;
904
905 atype.type = parsetype.type;
906 atype.width = parsetype.width;
907 }
908
909 if (skip_past_char (&str, '['))
910 {
911 expressionS exp;
912
913 /* Reject Sn[index] syntax. */
914 if (!is_typed_vecreg)
915 {
916 first_error (_("this type of register can't be indexed"));
917 return PARSE_FAIL;
918 }
919
920 if (in_reg_list == TRUE)
921 {
922 first_error (_("index not allowed inside register list"));
923 return PARSE_FAIL;
924 }
925
926 atype.defined |= NTA_HASINDEX;
927
928 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
929
930 if (exp.X_op != O_constant)
931 {
932 first_error (_("constant expression required"));
933 return PARSE_FAIL;
934 }
935
936 if (! skip_past_char (&str, ']'))
937 return PARSE_FAIL;
938
939 atype.index = exp.X_add_number;
940 }
941 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
942 {
943 /* Indexed vector register expected. */
944 first_error (_("indexed vector register expected"));
945 return PARSE_FAIL;
946 }
947
948 /* A vector reg Vn should be typed or indexed. */
949 if (type == REG_TYPE_VN && atype.defined == 0)
950 {
951 first_error (_("invalid use of vector register"));
952 }
953
954 if (typeinfo)
955 *typeinfo = atype;
956
957 if (rtype)
958 *rtype = type;
959
960 *ccp = str;
961
962 return reg->number;
963 }
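/* Example (illustrative): "v2.4s[1]" returns 2 with *TYPEINFO describing
   type NT_s, width 4, index 1 and both NTA_HASTYPE and NTA_HASINDEX set,
   whereas "b0[1]" fails with "this type of register can't be indexed".  */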
964
965 /* Parse register.
966
967 Return the register number on success; return PARSE_FAIL otherwise.
968
969 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
970 the register (e.g. NEON double or quad reg when either has been requested).
971
972 If this is a NEON vector register with additional type information, fill
973 in the struct pointed to by VECTYPE (if non-NULL).
974
975 This parser does not handle register lists. */
976
977 static int
978 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
979 aarch64_reg_type *rtype, struct neon_type_el *vectype)
980 {
981 struct neon_type_el atype;
982 char *str = *ccp;
983 int reg = parse_typed_reg (&str, type, rtype, &atype,
984 /*in_reg_list= */ FALSE);
985
986 if (reg == PARSE_FAIL)
987 return PARSE_FAIL;
988
989 if (vectype)
990 *vectype = atype;
991
992 *ccp = str;
993
994 return reg;
995 }
996
997 static inline bfd_boolean
998 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
999 {
1000 return
1001 e1.type == e2.type
1002 && e1.defined == e2.defined
1003 && e1.width == e2.width && e1.index == e2.index;
1004 }
1005
1006 /* This function parses the NEON register list. On success, it returns
1007 the parsed register list information in the following encoded format:
1008
1009 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1010 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1011
1012 The information of the register shape and/or index is returned in
1013 *VECTYPE.
1014
1015 It returns PARSE_FAIL if the register list is invalid.
1016
1017 The list contains one to four registers.
1018 Each register can be one of:
1019 <Vt>.<T>[<index>]
1020 <Vt>.<T>
1021 All <T> should be identical.
1022 All <index> should be identical.
1023 There are restrictions on <Vt> numbers which are checked later
1024 (by reg_list_valid_p). */
1025
1026 static int
1027 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1028 {
1029 char *str = *ccp;
1030 int nb_regs;
1031 struct neon_type_el typeinfo, typeinfo_first;
1032 int val, val_range;
1033 int in_range;
1034 int ret_val;
1035 int i;
1036 bfd_boolean error = FALSE;
1037 bfd_boolean expect_index = FALSE;
1038
1039 if (*str != '{')
1040 {
1041 set_syntax_error (_("expecting {"));
1042 return PARSE_FAIL;
1043 }
1044 str++;
1045
1046 nb_regs = 0;
1047 typeinfo_first.defined = 0;
1048 typeinfo_first.type = NT_invtype;
1049 typeinfo_first.width = -1;
1050 typeinfo_first.index = 0;
1051 ret_val = 0;
1052 val = -1;
1053 val_range = -1;
1054 in_range = 0;
1055 do
1056 {
1057 if (in_range)
1058 {
1059 str++; /* skip over '-' */
1060 val_range = val;
1061 }
1062 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1063 /*in_reg_list= */ TRUE);
1064 if (val == PARSE_FAIL)
1065 {
1066 set_first_syntax_error (_("invalid vector register in list"));
1067 error = TRUE;
1068 continue;
1069 }
1070 /* reject [bhsd]n */
1071 if (typeinfo.defined == 0)
1072 {
1073 set_first_syntax_error (_("invalid scalar register in list"));
1074 error = TRUE;
1075 continue;
1076 }
1077
1078 if (typeinfo.defined & NTA_HASINDEX)
1079 expect_index = TRUE;
1080
1081 if (in_range)
1082 {
1083 if (val < val_range)
1084 {
1085 set_first_syntax_error
1086 (_("invalid range in vector register list"));
1087 error = TRUE;
1088 }
1089 val_range++;
1090 }
1091 else
1092 {
1093 val_range = val;
1094 if (nb_regs == 0)
1095 typeinfo_first = typeinfo;
1096 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1097 {
1098 set_first_syntax_error
1099 (_("type mismatch in vector register list"));
1100 error = TRUE;
1101 }
1102 }
1103 if (! error)
1104 for (i = val_range; i <= val; i++)
1105 {
1106 ret_val |= i << (5 * nb_regs);
1107 nb_regs++;
1108 }
1109 in_range = 0;
1110 }
1111 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1112
1113 skip_whitespace (str);
1114 if (*str != '}')
1115 {
1116 set_first_syntax_error (_("end of vector register list not found"));
1117 error = TRUE;
1118 }
1119 str++;
1120
1121 skip_whitespace (str);
1122
1123 if (expect_index)
1124 {
1125 if (skip_past_char (&str, '['))
1126 {
1127 expressionS exp;
1128
1129 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1130 if (exp.X_op != O_constant)
1131 {
1132 set_first_syntax_error (_("constant expression required."));
1133 error = TRUE;
1134 }
1135 if (! skip_past_char (&str, ']'))
1136 error = TRUE;
1137 else
1138 typeinfo_first.index = exp.X_add_number;
1139 }
1140 else
1141 {
1142 set_first_syntax_error (_("expected index"));
1143 error = TRUE;
1144 }
1145 }
1146
1147 if (nb_regs > 4)
1148 {
1149 set_first_syntax_error (_("too many registers in vector register list"));
1150 error = TRUE;
1151 }
1152 else if (nb_regs == 0)
1153 {
1154 set_first_syntax_error (_("empty vector register list"));
1155 error = TRUE;
1156 }
1157
1158 *ccp = str;
1159 if (! error)
1160 *vectype = typeinfo_first;
1161
1162 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1163 }
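/* Worked example (illustrative): "{v2.8b, v3.8b}" gives nb_regs = 2, so the
   return value is (2 << 2) | (3 << 7) | (2 - 1) = 0x189 and *VECTYPE is NT_b
   with width 8.  */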
1164
1165 /* Directives: register aliases. */
1166
1167 static reg_entry *
1168 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1169 {
1170 reg_entry *new;
1171 const char *name;
1172
1173 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1174 {
1175 if (new->builtin)
1176 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1177 str);
1178
1179 /* Only warn about a redefinition if it's not defined as the
1180 same register. */
1181 else if (new->number != number || new->type != type)
1182 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1183
1184 return NULL;
1185 }
1186
1187 name = xstrdup (str);
1188 new = xmalloc (sizeof (reg_entry));
1189
1190 new->name = name;
1191 new->number = number;
1192 new->type = type;
1193 new->builtin = FALSE;
1194
1195 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1196 abort ();
1197
1198 return new;
1199 }
1200
1201 /* Look for the .req directive. This is of the form:
1202
1203 new_register_name .req existing_register_name
1204
1205 If we find one, or if it looks sufficiently like one that we want to
1206 handle any error here, return TRUE. Otherwise return FALSE. */
1207
1208 static bfd_boolean
1209 create_register_alias (char *newname, char *p)
1210 {
1211 const reg_entry *old;
1212 char *oldname, *nbuf;
1213 size_t nlen;
1214
1215 /* The input scrubber ensures that whitespace after the mnemonic is
1216 collapsed to single spaces. */
1217 oldname = p;
1218 if (strncmp (oldname, " .req ", 6) != 0)
1219 return FALSE;
1220
1221 oldname += 6;
1222 if (*oldname == '\0')
1223 return FALSE;
1224
1225 old = hash_find (aarch64_reg_hsh, oldname);
1226 if (!old)
1227 {
1228 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1229 return TRUE;
1230 }
1231
1232 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1233 the desired alias name, and p points to its end. If not, then
1234 the desired alias name is in the global original_case_string. */
1235 #ifdef TC_CASE_SENSITIVE
1236 nlen = p - newname;
1237 #else
1238 newname = original_case_string;
1239 nlen = strlen (newname);
1240 #endif
1241
1242 nbuf = alloca (nlen + 1);
1243 memcpy (nbuf, newname, nlen);
1244 nbuf[nlen] = '\0';
1245
1246 /* Create aliases under the new name as stated; an all-lowercase
1247 version of the new name; and an all-uppercase version of the new
1248 name. */
1249 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1250 {
1251 for (p = nbuf; *p; p++)
1252 *p = TOUPPER (*p);
1253
1254 if (strncmp (nbuf, newname, nlen))
1255 {
1256 /* If this attempt to create an additional alias fails, do not bother
1257 trying to create the all-lower case alias. We will fail and issue
1258 a second, duplicate error message. This situation arises when the
1259 programmer does something like:
1260 foo .req r0
1261 Foo .req r1
1262 The second .req creates the "Foo" alias but then fails to create
1263 the artificial FOO alias because it has already been created by the
1264 first .req. */
1265 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1266 return TRUE;
1267 }
1268
1269 for (p = nbuf; *p; p++)
1270 *p = TOLOWER (*p);
1271
1272 if (strncmp (nbuf, newname, nlen))
1273 insert_reg_alias (nbuf, old->number, old->type);
1274 }
1275
1276 return TRUE;
1277 }
1278
1279 /* Should never be called, as .req goes between the alias and the
1280 register name, not at the beginning of the line. */
1281 static void
1282 s_req (int a ATTRIBUTE_UNUSED)
1283 {
1284 as_bad (_("invalid syntax for .req directive"));
1285 }
1286
1287 /* The .unreq directive deletes an alias which was previously defined
1288 by .req. For example:
1289
1290 my_alias .req r11
1291 .unreq my_alias */
1292
1293 static void
1294 s_unreq (int a ATTRIBUTE_UNUSED)
1295 {
1296 char *name;
1297 char saved_char;
1298
1299 name = input_line_pointer;
1300
1301 while (*input_line_pointer != 0
1302 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1303 ++input_line_pointer;
1304
1305 saved_char = *input_line_pointer;
1306 *input_line_pointer = 0;
1307
1308 if (!*name)
1309 as_bad (_("invalid syntax for .unreq directive"));
1310 else
1311 {
1312 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1313
1314 if (!reg)
1315 as_bad (_("unknown register alias '%s'"), name);
1316 else if (reg->builtin)
1317 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1318 name);
1319 else
1320 {
1321 char *p;
1322 char *nbuf;
1323
1324 hash_delete (aarch64_reg_hsh, name, FALSE);
1325 free ((char *) reg->name);
1326 free (reg);
1327
1328 /* Also locate the all upper case and all lower case versions.
1329 Do not complain if we cannot find one or the other as it
1330 was probably deleted above. */
1331
1332 nbuf = strdup (name);
1333 for (p = nbuf; *p; p++)
1334 *p = TOUPPER (*p);
1335 reg = hash_find (aarch64_reg_hsh, nbuf);
1336 if (reg)
1337 {
1338 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1339 free ((char *) reg->name);
1340 free (reg);
1341 }
1342
1343 for (p = nbuf; *p; p++)
1344 *p = TOLOWER (*p);
1345 reg = hash_find (aarch64_reg_hsh, nbuf);
1346 if (reg)
1347 {
1348 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1349 free ((char *) reg->name);
1350 free (reg);
1351 }
1352
1353 free (nbuf);
1354 }
1355 }
1356
1357 *input_line_pointer = saved_char;
1358 demand_empty_rest_of_line ();
1359 }
1360
1361 /* Directives: Instruction set selection. */
1362
1363 #ifdef OBJ_ELF
1364 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1365 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1366 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1367 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1368
1369 /* Create a new mapping symbol for the transition to STATE. */
1370
1371 static void
1372 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1373 {
1374 symbolS *symbolP;
1375 const char *symname;
1376 int type;
1377
1378 switch (state)
1379 {
1380 case MAP_DATA:
1381 symname = "$d";
1382 type = BSF_NO_FLAGS;
1383 break;
1384 case MAP_INSN:
1385 symname = "$x";
1386 type = BSF_NO_FLAGS;
1387 break;
1388 default:
1389 abort ();
1390 }
1391
1392 symbolP = symbol_new (symname, now_seg, value, frag);
1393 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1394
1395 /* Save the mapping symbols for future reference. Also check that
1396 we do not place two mapping symbols at the same offset within a
1397 frag. We'll handle overlap between frags in
1398 check_mapping_symbols.
1399
1400 If .fill or other data filling directive generates zero sized data,
1401 the mapping symbol for the following code will have the same value
1402 as the one generated for the data filling directive. In this case,
1403 we replace the old symbol with the new one at the same address. */
1404 if (value == 0)
1405 {
1406 if (frag->tc_frag_data.first_map != NULL)
1407 {
1408 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1409 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1410 &symbol_lastP);
1411 }
1412 frag->tc_frag_data.first_map = symbolP;
1413 }
1414 if (frag->tc_frag_data.last_map != NULL)
1415 {
1416 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1417 S_GET_VALUE (symbolP));
1418 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1419 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1420 &symbol_lastP);
1421 }
1422 frag->tc_frag_data.last_map = symbolP;
1423 }
1424
1425 /* We must sometimes convert a region marked as code to data during
1426 code alignment, if an odd number of bytes have to be padded. The
1427 code mapping symbol is pushed to an aligned address. */
1428
1429 static void
1430 insert_data_mapping_symbol (enum mstate state,
1431 valueT value, fragS * frag, offsetT bytes)
1432 {
1433 /* If there was already a mapping symbol, remove it. */
1434 if (frag->tc_frag_data.last_map != NULL
1435 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1436 frag->fr_address + value)
1437 {
1438 symbolS *symp = frag->tc_frag_data.last_map;
1439
1440 if (value == 0)
1441 {
1442 know (frag->tc_frag_data.first_map == symp);
1443 frag->tc_frag_data.first_map = NULL;
1444 }
1445 frag->tc_frag_data.last_map = NULL;
1446 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1447 }
1448
1449 make_mapping_symbol (MAP_DATA, value, frag);
1450 make_mapping_symbol (state, value + bytes, frag);
1451 }
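/* Example (illustrative): if two bytes of padding are inserted to reach
   4-byte alignment, the caller uses this to mark the padding with a $d
   mapping symbol and the code BYTES further on with a fresh code mapping
   symbol.  */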
1452
1453 static void mapping_state_2 (enum mstate state, int max_chars);
1454
1455 /* Set the mapping state to STATE. Only call this when about to
1456 emit some STATE bytes to the file. */
1457
1458 void
1459 mapping_state (enum mstate state)
1460 {
1461 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1462
1463 if (mapstate == state)
1464 /* The mapping symbol has already been emitted.
1465 There is nothing else to do. */
1466 return;
1467
1468 if (state == MAP_INSN)
1469 /* AArch64 instructions require 4-byte alignment. When emitting
1470 instructions into any section, record the appropriate section
1471 alignment. */
1472 record_alignment (now_seg, 2);
1473
1474 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1475 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1476 /* Only emit a MAP_DATA mapping symbol within an executable section; otherwise
1477 skip it and let the MAP_UNDEFINED -> MAP_INSN case below handle it later. */
1478 return;
1479 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1480 {
1481 /* Only add the symbol if the offset is > 0:
1482 if we're at the first frag, check its size > 0;
1483 if we're not at the first frag, then for sure
1484 the offset is > 0. */
1485 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1486 const int add_symbol = (frag_now != frag_first)
1487 || (frag_now_fix () > 0);
1488
1489 if (add_symbol)
1490 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1491 }
1492 #undef TRANSITION
1493
1494 mapping_state_2 (state, 0);
1495 }
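/* Example (illustrative): a leading .word in a non-executable section such as
   .rodata (mapstate still MAP_UNDEFINED) takes the early return above and
   emits no $d symbol, while the same directive in .text emits one via
   mapping_state_2.  */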
1496
1497 /* Same as mapping_state, but MAX_CHARS bytes have already been
1498 allocated. Put the mapping symbol that far back. */
1499
1500 static void
1501 mapping_state_2 (enum mstate state, int max_chars)
1502 {
1503 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1504
1505 if (!SEG_NORMAL (now_seg))
1506 return;
1507
1508 if (mapstate == state)
1509 /* The mapping symbol has already been emitted.
1510 There is nothing else to do. */
1511 return;
1512
1513 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1514 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1515 }
1516 #else
1517 #define mapping_state(x) /* nothing */
1518 #define mapping_state_2(x, y) /* nothing */
1519 #endif
1520
1521 /* Directives: sectioning and alignment. */
1522
1523 static void
1524 s_bss (int ignore ATTRIBUTE_UNUSED)
1525 {
1526 /* We don't support putting frags in the BSS segment; we fake it by
1527 marking in_bss, then looking at s_skip for clues. */
1528 subseg_set (bss_section, 0);
1529 demand_empty_rest_of_line ();
1530 mapping_state (MAP_DATA);
1531 }
1532
1533 static void
1534 s_even (int ignore ATTRIBUTE_UNUSED)
1535 {
1536 /* Never make a frag if we expect an extra pass. */
1537 if (!need_pass_2)
1538 frag_align (1, 0, 0);
1539
1540 record_alignment (now_seg, 1);
1541
1542 demand_empty_rest_of_line ();
1543 }
1544
1545 /* Directives: Literal pools. */
1546
1547 static literal_pool *
1548 find_literal_pool (int size)
1549 {
1550 literal_pool *pool;
1551
1552 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1553 {
1554 if (pool->section == now_seg
1555 && pool->sub_section == now_subseg && pool->size == size)
1556 break;
1557 }
1558
1559 return pool;
1560 }
1561
1562 static literal_pool *
1563 find_or_make_literal_pool (int size)
1564 {
1565 /* Next literal pool ID number. */
1566 static unsigned int latest_pool_num = 1;
1567 literal_pool *pool;
1568
1569 pool = find_literal_pool (size);
1570
1571 if (pool == NULL)
1572 {
1573 /* Create a new pool. */
1574 pool = xmalloc (sizeof (*pool));
1575 if (!pool)
1576 return NULL;
1577
1578 /* Currently we always put the literal pool in the current text
1579 section. If we were generating "small" model code where we
1580 knew that all code and initialised data was within 1MB then
1581 we could output literals to mergeable, read-only data
1582 sections. */
1583
1584 pool->next_free_entry = 0;
1585 pool->section = now_seg;
1586 pool->sub_section = now_subseg;
1587 pool->size = size;
1588 pool->next = list_of_pools;
1589 pool->symbol = NULL;
1590
1591 /* Add it to the list. */
1592 list_of_pools = pool;
1593 }
1594
1595 /* New pools, and emptied pools, will have a NULL symbol. */
1596 if (pool->symbol == NULL)
1597 {
1598 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1599 (valueT) 0, &zero_address_frag);
1600 pool->id = latest_pool_num++;
1601 }
1602
1603 /* Done. */
1604 return pool;
1605 }
1606
1607 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1608 Return TRUE on success, otherwise return FALSE. */
1609 static bfd_boolean
1610 add_to_lit_pool (expressionS *exp, int size)
1611 {
1612 literal_pool *pool;
1613 unsigned int entry;
1614
1615 pool = find_or_make_literal_pool (size);
1616
1617 /* Check if this literal value is already in the pool. */
1618 for (entry = 0; entry < pool->next_free_entry; entry++)
1619 {
1620 expressionS * litexp = & pool->literals[entry].exp;
1621
1622 if ((litexp->X_op == exp->X_op)
1623 && (exp->X_op == O_constant)
1624 && (litexp->X_add_number == exp->X_add_number)
1625 && (litexp->X_unsigned == exp->X_unsigned))
1626 break;
1627
1628 if ((litexp->X_op == exp->X_op)
1629 && (exp->X_op == O_symbol)
1630 && (litexp->X_add_number == exp->X_add_number)
1631 && (litexp->X_add_symbol == exp->X_add_symbol)
1632 && (litexp->X_op_symbol == exp->X_op_symbol))
1633 break;
1634 }
1635
1636 /* Do we need to create a new entry? */
1637 if (entry == pool->next_free_entry)
1638 {
1639 if (entry >= MAX_LITERAL_POOL_SIZE)
1640 {
1641 set_syntax_error (_("literal pool overflow"));
1642 return FALSE;
1643 }
1644
1645 pool->literals[entry].exp = *exp;
1646 pool->next_free_entry += 1;
1647 if (exp->X_op == O_big)
1648 {
1649 /* PR 16688: Bignums are held in a single global array. We must
1650 copy and preserve that value now, before it is overwritten. */
1651 pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
1652 memcpy (pool->literals[entry].bignum, generic_bignum,
1653 CHARS_PER_LITTLENUM * exp->X_add_number);
1654 }
1655 else
1656 pool->literals[entry].bignum = NULL;
1657 }
1658
1659 exp->X_op = O_symbol;
1660 exp->X_add_number = ((int) entry) * size;
1661 exp->X_add_symbol = pool->symbol;
1662
1663 return TRUE;
1664 }
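/* Note (added for clarity): on success *EXP is rewritten to refer to the pool
   symbol plus ENTRY * SIZE, so the caller's fixup ends up addressing the slot
   that s_ltorg later emits for this literal.  */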
1665
1666 /* Can't use symbol_new here, so have to create a symbol and then at
1667 a later date assign it a value. That's what these functions do. */
1668
1669 static void
1670 symbol_locate (symbolS * symbolP,
1671 const char *name,/* It is copied, the caller can modify. */
1672 segT segment, /* Segment identifier (SEG_<something>). */
1673 valueT valu, /* Symbol value. */
1674 fragS * frag) /* Associated fragment. */
1675 {
1676 size_t name_length;
1677 char *preserved_copy_of_name;
1678
1679 name_length = strlen (name) + 1; /* +1 for \0. */
1680 obstack_grow (&notes, name, name_length);
1681 preserved_copy_of_name = obstack_finish (&notes);
1682
1683 #ifdef tc_canonicalize_symbol_name
1684 preserved_copy_of_name =
1685 tc_canonicalize_symbol_name (preserved_copy_of_name);
1686 #endif
1687
1688 S_SET_NAME (symbolP, preserved_copy_of_name);
1689
1690 S_SET_SEGMENT (symbolP, segment);
1691 S_SET_VALUE (symbolP, valu);
1692 symbol_clear_list_pointers (symbolP);
1693
1694 symbol_set_frag (symbolP, frag);
1695
1696 /* Link to end of symbol chain. */
1697 {
1698 extern int symbol_table_frozen;
1699
1700 if (symbol_table_frozen)
1701 abort ();
1702 }
1703
1704 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1705
1706 obj_symbol_new_hook (symbolP);
1707
1708 #ifdef tc_symbol_new_hook
1709 tc_symbol_new_hook (symbolP);
1710 #endif
1711
1712 #ifdef DEBUG_SYMS
1713 verify_symbol_chain (symbol_rootP, symbol_lastP);
1714 #endif /* DEBUG_SYMS */
1715 }
1716
1717
1718 static void
1719 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1720 {
1721 unsigned int entry;
1722 literal_pool *pool;
1723 char sym_name[20];
1724 int align;
1725
1726 for (align = 2; align <= 4; align++)
1727 {
1728 int size = 1 << align;
1729
1730 pool = find_literal_pool (size);
1731 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1732 continue;
1733
1734 mapping_state (MAP_DATA);
1735
1736 /* Align the pool for word accesses.
1737 Only make a frag if we have to. */
1738 if (!need_pass_2)
1739 frag_align (align, 0, 0);
1740
1741 record_alignment (now_seg, align);
1742
1743 sprintf (sym_name, "$$lit_\002%x", pool->id);
1744
1745 symbol_locate (pool->symbol, sym_name, now_seg,
1746 (valueT) frag_now_fix (), frag_now);
1747 symbol_table_insert (pool->symbol);
1748
1749 for (entry = 0; entry < pool->next_free_entry; entry++)
1750 {
1751 expressionS * exp = & pool->literals[entry].exp;
1752
1753 if (exp->X_op == O_big)
1754 {
1755 /* PR 16688: Restore the global bignum value. */
1756 gas_assert (pool->literals[entry].bignum != NULL);
1757 memcpy (generic_bignum, pool->literals[entry].bignum,
1758 CHARS_PER_LITTLENUM * exp->X_add_number);
1759 }
1760
1761 /* First output the expression in the instruction to the pool. */
1762 emit_expr (exp, size); /* .word|.xword */
1763
1764 if (exp->X_op == O_big)
1765 {
1766 free (pool->literals[entry].bignum);
1767 pool->literals[entry].bignum = NULL;
1768 }
1769 }
1770
1771 /* Mark the pool as empty. */
1772 pool->next_free_entry = 0;
1773 pool->symbol = NULL;
1774 }
1775 }
1776
1777 #ifdef OBJ_ELF
1778 /* Forward declarations for functions below, in the MD interface
1779 section. */
1780 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1781 static struct reloc_table_entry * find_reloc_table_entry (char **);
1782
1783 /* Directives: Data. */
1784 /* N.B. the support for relocation suffix in this directive needs to be
1785 implemented properly. */
1786
1787 static void
1788 s_aarch64_elf_cons (int nbytes)
1789 {
1790 expressionS exp;
1791
1792 #ifdef md_flush_pending_output
1793 md_flush_pending_output ();
1794 #endif
1795
1796 if (is_it_end_of_statement ())
1797 {
1798 demand_empty_rest_of_line ();
1799 return;
1800 }
1801
1802 #ifdef md_cons_align
1803 md_cons_align (nbytes);
1804 #endif
1805
1806 mapping_state (MAP_DATA);
1807 do
1808 {
1809 struct reloc_table_entry *reloc;
1810
1811 expression (&exp);
1812
1813 if (exp.X_op != O_symbol)
1814 emit_expr (&exp, (unsigned int) nbytes);
1815 else
1816 {
1817 skip_past_char (&input_line_pointer, '#');
1818 if (skip_past_char (&input_line_pointer, ':'))
1819 {
1820 reloc = find_reloc_table_entry (&input_line_pointer);
1821 if (reloc == NULL)
1822 as_bad (_("unrecognized relocation suffix"));
1823 else
1824 as_bad (_("unimplemented relocation suffix"));
1825 ignore_rest_of_line ();
1826 return;
1827 }
1828 else
1829 emit_expr (&exp, (unsigned int) nbytes);
1830 }
1831 }
1832 while (*input_line_pointer++ == ',');
1833
1834 /* Put terminator back into stream. */
1835 input_line_pointer--;
1836 demand_empty_rest_of_line ();
1837 }
1838
1839 #endif /* OBJ_ELF */
1840
1841 /* Output a 32-bit word, but mark as an instruction. */
1842
1843 static void
1844 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1845 {
1846 expressionS exp;
1847
1848 #ifdef md_flush_pending_output
1849 md_flush_pending_output ();
1850 #endif
1851
1852 if (is_it_end_of_statement ())
1853 {
1854 demand_empty_rest_of_line ();
1855 return;
1856 }
1857
1858 /* Sections are assumed to start aligned. In an executable section, there is
1859 no MAP_DATA symbol pending. So we only align the address during the
1860 MAP_DATA --> MAP_INSN transition.
1861 For other sections, this is not guaranteed, so align it anyway. */
1862 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1863 if (!need_pass_2 && ((subseg_text_p (now_seg) && mapstate == MAP_DATA)
1864 || !subseg_text_p (now_seg)))
1865 frag_align_code (2, 0);
1866
1867 #ifdef OBJ_ELF
1868 mapping_state (MAP_INSN);
1869 #endif
1870
1871 do
1872 {
1873 expression (&exp);
1874 if (exp.X_op != O_constant)
1875 {
1876 as_bad (_("constant expression required"));
1877 ignore_rest_of_line ();
1878 return;
1879 }
1880
1881 if (target_big_endian)
1882 {
1883 unsigned int val = exp.X_add_number;
1884 exp.X_add_number = SWAP_32 (val);
1885 }
1886 emit_expr (&exp, 4);
1887 }
1888 while (*input_line_pointer++ == ',');
1889
1890 /* Put terminator back into stream. */
1891 input_line_pointer--;
1892 demand_empty_rest_of_line ();
1893 }
1894
1895 #ifdef OBJ_ELF
1896 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1897
1898 static void
1899 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1900 {
1901 expressionS exp;
1902
1903 /* Since we're just labelling the code, there's no need to define a
1904 mapping symbol. */
1905 expression (&exp);
1906 /* Make sure there is enough room in this frag for the following
1907 blr. This trick only works if the blr follows immediately after
1908 the .tlsdesccall directive. */
1909 frag_grow (4);
1910 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1911 BFD_RELOC_AARCH64_TLSDESC_CALL);
1912
1913 demand_empty_rest_of_line ();
1914 }
1915 #endif /* OBJ_ELF */
1916
1917 static void s_aarch64_arch (int);
1918 static void s_aarch64_cpu (int);
1919 static void s_aarch64_arch_extension (int);
1920
1921 /* This table describes all the machine specific pseudo-ops the assembler
1922 has to support. The fields are:
1923 pseudo-op name without dot
1924 function to call to execute this pseudo-op
1925 Integer arg to pass to the function. */
1926
1927 const pseudo_typeS md_pseudo_table[] = {
1928 /* Never called because '.req' does not start a line. */
1929 {"req", s_req, 0},
1930 {"unreq", s_unreq, 0},
1931 {"bss", s_bss, 0},
1932 {"even", s_even, 0},
1933 {"ltorg", s_ltorg, 0},
1934 {"pool", s_ltorg, 0},
1935 {"cpu", s_aarch64_cpu, 0},
1936 {"arch", s_aarch64_arch, 0},
1937 {"arch_extension", s_aarch64_arch_extension, 0},
1938 {"inst", s_aarch64_inst, 0},
1939 #ifdef OBJ_ELF
1940 {"tlsdesccall", s_tlsdesccall, 0},
1941 {"word", s_aarch64_elf_cons, 4},
1942 {"long", s_aarch64_elf_cons, 4},
1943 {"xword", s_aarch64_elf_cons, 8},
1944 {"dword", s_aarch64_elf_cons, 8},
1945 #endif
1946 {0, 0, 0}
1947 };
1948 \f
1949
1950 /* Check whether STR points to a register name followed by a comma or the
1951 end of line; REG_TYPE indicates which register types are checked
1952 against. Return TRUE if STR is such a register name; otherwise return
1953 FALSE. The function does not intend to produce any diagnostics, but since
1954 the register parser aarch64_reg_parse, which is called by this function,
1955 does produce diagnostics, we call clear_error to clear any diagnostics
1956 that may be generated by aarch64_reg_parse.
1957 Also, the function returns FALSE directly if there is any user error
1958 present at the function entry. This prevents the existing diagnostics
1959 state from being spoiled.
1960 The function currently serves parse_constant_immediate and
1961 parse_big_immediate only. */
1962 static bfd_boolean
1963 reg_name_p (char *str, aarch64_reg_type reg_type)
1964 {
1965 int reg;
1966
1967 /* Prevent the diagnostics state from being spoiled. */
1968 if (error_p ())
1969 return FALSE;
1970
1971 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1972
1973 /* Clear the parsing error that may be set by the reg parser. */
1974 clear_error ();
1975
1976 if (reg == PARSE_FAIL)
1977 return FALSE;
1978
1979 skip_whitespace (str);
1980 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1981 return TRUE;
1982
1983 return FALSE;
1984 }
1985
1986 /* Parser functions used exclusively in instruction operands. */
1987
1988 /* Parse an immediate expression which may not be constant.
1989
1990 To prevent the expression parser from pushing a register name
1991 into the symbol table as an undefined symbol, firstly a check is
1992 done to find out whether STR is a valid register name followed
1993 by a comma or the end of line. Return FALSE if STR is such a
1994 string. */
1995
1996 static bfd_boolean
1997 parse_immediate_expression (char **str, expressionS *exp)
1998 {
1999 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
2000 {
2001 set_recoverable_error (_("immediate operand required"));
2002 return FALSE;
2003 }
2004
2005 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2006
2007 if (exp->X_op == O_absent)
2008 {
2009 set_fatal_syntax_error (_("missing immediate expression"));
2010 return FALSE;
2011 }
2012
2013 return TRUE;
2014 }
2015
2016 /* Constant immediate-value read function for use in insn parsing.
2017 STR points to the beginning of the immediate (with the optional
2018 leading #); *VAL receives the value.
2019
2020 Return TRUE on success; otherwise return FALSE. */
2021
2022 static bfd_boolean
2023 parse_constant_immediate (char **str, int64_t * val)
2024 {
2025 expressionS exp;
2026
2027 if (! parse_immediate_expression (str, &exp))
2028 return FALSE;
2029
2030 if (exp.X_op != O_constant)
2031 {
2032 set_syntax_error (_("constant expression required"));
2033 return FALSE;
2034 }
2035
2036 *val = exp.X_add_number;
2037 return TRUE;
2038 }
2039
2040 static uint32_t
2041 encode_imm_float_bits (uint32_t imm)
2042 {
2043 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2044 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2045 }
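/* A worked example, for illustration only: the single-precision value
   1.0 is 0x3f800000; its bits [25:19] are 1110000 and bit [31] is 0,
   so encode_imm_float_bits (0x3f800000) returns 0x70, the imm8 value
   that FMOV uses for 1.0.  */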
2046
2047 /* Return TRUE if the single-precision floating-point value encoded in IMM
2048 can be expressed in the AArch64 8-bit signed floating-point format with
2049 3-bit exponent and normalized 4 bits of precision; in other words, the
2050    floating-point value must be expressible as
2051      (+/-) n / 16 * power (2, r)
2052    where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */
2053
2054 static bfd_boolean
2055 aarch64_imm_float_p (uint32_t imm)
2056 {
2057 /* If a single-precision floating-point value has the following bit
2058 pattern, it can be expressed in the AArch64 8-bit floating-point
2059 format:
2060
2061 3 32222222 2221111111111
2062 1 09876543 21098765432109876543210
2063 n Eeeeeexx xxxx0000000000000000000
2064
2065 where n, e and each x are either 0 or 1 independently, with
2066 E == ~ e. */
2067
2068 uint32_t pattern;
2069
2070 /* Prepare the pattern for 'Eeeeee'. */
2071 if (((imm >> 30) & 0x1) == 0)
2072 pattern = 0x3e000000;
2073 else
2074 pattern = 0x40000000;
2075
2076 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2077 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2078 }
2079
2080 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2081
2082 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2083 8-bit signed floating-point format with 3-bit exponent and normalized 4
2084 bits of precision (i.e. can be used in an FMOV instruction); return the
2085 equivalent single-precision encoding in *FPWORD.
2086
2087 Otherwise return FALSE. */
2088
2089 static bfd_boolean
2090 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2091 {
2092 /* If a double-precision floating-point value has the following bit
2093 pattern, it can be expressed in the AArch64 8-bit floating-point
2094 format:
2095
2096 6 66655555555 554444444...21111111111
2097 3 21098765432 109876543...098765432109876543210
2098 n Eeeeeeeeexx xxxx00000...000000000000000000000
2099
2100 where n, e and each x are either 0 or 1 independently, with
2101 E == ~ e. */
2102
2103 uint32_t pattern;
2104 uint32_t high32 = imm >> 32;
2105
2106 /* Lower 32 bits need to be 0s. */
2107 if ((imm & 0xffffffff) != 0)
2108 return FALSE;
2109
2110 /* Prepare the pattern for 'Eeeeeeeee'. */
2111 if (((high32 >> 30) & 0x1) == 0)
2112 pattern = 0x3fc00000;
2113 else
2114 pattern = 0x40000000;
2115
2116 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2117 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2118 {
2119 /* Convert to the single-precision encoding.
2120 i.e. convert
2121 n Eeeeeeeeexx xxxx00000...000000000000000000000
2122 to
2123 n Eeeeeexx xxxx0000000000000000000. */
2124 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2125 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2126 return TRUE;
2127 }
2128 else
2129 return FALSE;
2130 }
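/* For illustration: the double-precision value 1.0 is
   0x3ff0000000000000.  Its low 32 bits are zero and its high word
   matches the pattern above, so the function returns TRUE and stores
   the equivalent single-precision encoding 0x3f800000 in *FPWORD.  */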
2131
2132 /* Parse a floating-point immediate. Return TRUE on success and return the
2133 value in *IMMED in the format of IEEE754 single-precision encoding.
2134 *CCP points to the start of the string; DP_P is TRUE when the immediate
2135 is expected to be in double-precision (N.B. this only matters when
2136 hexadecimal representation is involved).
2137
2138 N.B. 0.0 is accepted by this function. */
2139
2140 static bfd_boolean
2141 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2142 {
2143 char *str = *ccp;
2144 char *fpnum;
2145 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2146 int found_fpchar = 0;
2147 int64_t val = 0;
2148 unsigned fpword = 0;
2149 bfd_boolean hex_p = FALSE;
2150
2151 skip_past_char (&str, '#');
2152
2153 fpnum = str;
2154 skip_whitespace (fpnum);
2155
2156 if (strncmp (fpnum, "0x", 2) == 0)
2157 {
2158 /* Support the hexadecimal representation of the IEEE754 encoding.
2159 Double-precision is expected when DP_P is TRUE, otherwise the
2160 representation should be in single-precision. */
2161 if (! parse_constant_immediate (&str, &val))
2162 goto invalid_fp;
2163
2164 if (dp_p)
2165 {
2166 if (! aarch64_double_precision_fmovable (val, &fpword))
2167 goto invalid_fp;
2168 }
2169 else if ((uint64_t) val > 0xffffffff)
2170 goto invalid_fp;
2171 else
2172 fpword = val;
2173
2174 hex_p = TRUE;
2175 }
2176 else
2177 {
2178 /* We must not accidentally parse an integer as a floating-point number.
2179 Make sure that the value we parse is not an integer by checking for
2180 special characters '.' or 'e'. */
2181 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2182 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2183 {
2184 found_fpchar = 1;
2185 break;
2186 }
2187
2188 if (!found_fpchar)
2189 return FALSE;
2190 }
2191
2192 if (! hex_p)
2193 {
2194 int i;
2195
2196 if ((str = atof_ieee (str, 's', words)) == NULL)
2197 goto invalid_fp;
2198
2199 /* Our FP word must be 32 bits (single-precision FP). */
2200 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2201 {
2202 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2203 fpword |= words[i];
2204 }
2205 }
2206
2207 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2208 {
2209 *immed = fpword;
2210 *ccp = str;
2211 return TRUE;
2212 }
2213
2214 invalid_fp:
2215 set_fatal_syntax_error (_("invalid floating-point constant"));
2216 return FALSE;
2217 }
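/* For example, both "#1.0" and (in the single-precision case) the
   hexadecimal IEEE754 encoding "#0x3f800000" are accepted here and
   yield 0x3f800000 in *IMMED, whereas "#1" is rejected because it
   contains no '.' or exponent character, leaving it to be parsed as an
   integer immediate by the caller.  */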
2218
2219 /* Less-generic immediate-value read function with the possibility of loading
2220 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2221 instructions.
2222
2223 To prevent the expression parser from pushing a register name into the
2224    symbol table as an undefined symbol, a check is first made to find
2225 out whether STR is a valid register name followed by a comma or the end
2226 of line. Return FALSE if STR is such a register. */
2227
2228 static bfd_boolean
2229 parse_big_immediate (char **str, int64_t *imm)
2230 {
2231 char *ptr = *str;
2232
2233 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2234 {
2235 set_syntax_error (_("immediate operand required"));
2236 return FALSE;
2237 }
2238
2239 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2240
2241 if (inst.reloc.exp.X_op == O_constant)
2242 *imm = inst.reloc.exp.X_add_number;
2243
2244 *str = ptr;
2245
2246 return TRUE;
2247 }
2248
2249 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2250 if NEED_LIBOPCODES is non-zero, the fixup will need
2251 assistance from the libopcodes. */
2252
2253 static inline void
2254 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2255 const aarch64_opnd_info *operand,
2256 int need_libopcodes_p)
2257 {
2258 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2259 reloc->opnd = operand->type;
2260 if (need_libopcodes_p)
2261 reloc->need_libopcodes_p = 1;
2262 }
2263
2264 /* Return TRUE if the instruction needs to be fixed up later internally by
2265 the GAS; otherwise return FALSE. */
2266
2267 static inline bfd_boolean
2268 aarch64_gas_internal_fixup_p (void)
2269 {
2270 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2271 }
2272
2273 /* Assign the immediate value to the relevant field in *OPERAND if
2274 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2275 needs an internal fixup in a later stage.
2276 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2277 IMM.VALUE that may get assigned with the constant. */
2278 static inline void
2279 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2280 aarch64_opnd_info *operand,
2281 int addr_off_p,
2282 int need_libopcodes_p,
2283 int skip_p)
2284 {
2285 if (reloc->exp.X_op == O_constant)
2286 {
2287 if (addr_off_p)
2288 operand->addr.offset.imm = reloc->exp.X_add_number;
2289 else
2290 operand->imm.value = reloc->exp.X_add_number;
2291 reloc->type = BFD_RELOC_UNUSED;
2292 }
2293 else
2294 {
2295 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2296 /* Tell libopcodes to ignore this operand or not. This is helpful
2297 when one of the operands needs to be fixed up later but we need
2298 libopcodes to check the other operands. */
2299 operand->skip = skip_p;
2300 }
2301 }
2302
2303 /* Relocation modifiers. Each entry in the table contains the textual
2304 name for the relocation which may be placed before a symbol used as
2305    a load/store offset or an add immediate.  It must be surrounded by a
2306 leading and trailing colon, for example:
2307
2308 ldr x0, [x1, #:rello:varsym]
2309 add x0, x1, #:rello:varsym */
2310
2311 struct reloc_table_entry
2312 {
2313 const char *name;
2314 int pc_rel;
2315 bfd_reloc_code_real_type adr_type;
2316 bfd_reloc_code_real_type adrp_type;
2317 bfd_reloc_code_real_type movw_type;
2318 bfd_reloc_code_real_type add_type;
2319 bfd_reloc_code_real_type ldst_type;
2320 bfd_reloc_code_real_type ld_literal_type;
2321 };
2322
2323 static struct reloc_table_entry reloc_table[] = {
2324 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2325 {"lo12", 0,
2326 0, /* adr_type */
2327 0,
2328 0,
2329 BFD_RELOC_AARCH64_ADD_LO12,
2330 BFD_RELOC_AARCH64_LDST_LO12,
2331 0},
2332
2333 /* Higher 21 bits of pc-relative page offset: ADRP */
2334 {"pg_hi21", 1,
2335 0, /* adr_type */
2336 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2337 0,
2338 0,
2339 0,
2340 0},
2341
2342 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2343 {"pg_hi21_nc", 1,
2344 0, /* adr_type */
2345 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2346 0,
2347 0,
2348 0,
2349 0},
2350
2351 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2352 {"abs_g0", 0,
2353 0, /* adr_type */
2354 0,
2355 BFD_RELOC_AARCH64_MOVW_G0,
2356 0,
2357 0,
2358 0},
2359
2360 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2361 {"abs_g0_s", 0,
2362 0, /* adr_type */
2363 0,
2364 BFD_RELOC_AARCH64_MOVW_G0_S,
2365 0,
2366 0,
2367 0},
2368
2369 /* Less significant bits 0-15 of address/value: MOVK, no check */
2370 {"abs_g0_nc", 0,
2371 0, /* adr_type */
2372 0,
2373 BFD_RELOC_AARCH64_MOVW_G0_NC,
2374 0,
2375 0,
2376 0},
2377
2378 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2379 {"abs_g1", 0,
2380 0, /* adr_type */
2381 0,
2382 BFD_RELOC_AARCH64_MOVW_G1,
2383 0,
2384 0,
2385 0},
2386
2387 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2388 {"abs_g1_s", 0,
2389 0, /* adr_type */
2390 0,
2391 BFD_RELOC_AARCH64_MOVW_G1_S,
2392 0,
2393 0,
2394 0},
2395
2396 /* Less significant bits 16-31 of address/value: MOVK, no check */
2397 {"abs_g1_nc", 0,
2398 0, /* adr_type */
2399 0,
2400 BFD_RELOC_AARCH64_MOVW_G1_NC,
2401 0,
2402 0,
2403 0},
2404
2405 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2406 {"abs_g2", 0,
2407 0, /* adr_type */
2408 0,
2409 BFD_RELOC_AARCH64_MOVW_G2,
2410 0,
2411 0,
2412 0},
2413
2414 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2415 {"abs_g2_s", 0,
2416 0, /* adr_type */
2417 0,
2418 BFD_RELOC_AARCH64_MOVW_G2_S,
2419 0,
2420 0,
2421 0},
2422
2423 /* Less significant bits 32-47 of address/value: MOVK, no check */
2424 {"abs_g2_nc", 0,
2425 0, /* adr_type */
2426 0,
2427 BFD_RELOC_AARCH64_MOVW_G2_NC,
2428 0,
2429 0,
2430 0},
2431
2432 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2433 {"abs_g3", 0,
2434 0, /* adr_type */
2435 0,
2436 BFD_RELOC_AARCH64_MOVW_G3,
2437 0,
2438 0,
2439 0},
2440
2441 /* Get to the page containing GOT entry for a symbol. */
2442 {"got", 1,
2443 0, /* adr_type */
2444 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2445 0,
2446 0,
2447 0,
2448 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2449
2450 /* 12 bit offset into the page containing GOT entry for that symbol. */
2451 {"got_lo12", 0,
2452 0, /* adr_type */
2453 0,
2454 0,
2455 0,
2456 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2457 0},
2458
2459 /* Get to the page containing GOT TLS entry for a symbol */
2460 {"tlsgd", 0,
2461 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2462 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2463 0,
2464 0,
2465 0,
2466 0},
2467
2468 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2469 {"tlsgd_lo12", 0,
2470 0, /* adr_type */
2471 0,
2472 0,
2473 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2474 0,
2475 0},
2476
2477 /* Get to the page containing GOT TLS entry for a symbol */
2478 {"tlsdesc", 0,
2479 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2480 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2481 0,
2482 0,
2483 0,
2484 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2485
2486 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2487 {"tlsdesc_lo12", 0,
2488 0, /* adr_type */
2489 0,
2490 0,
2491 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2492 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2493 0},
2494
2495 /* Get to the page containing GOT TLS entry for a symbol */
2496 {"gottprel", 0,
2497 0, /* adr_type */
2498 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2499 0,
2500 0,
2501 0,
2502 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2503
2504 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2505 {"gottprel_lo12", 0,
2506 0, /* adr_type */
2507 0,
2508 0,
2509 0,
2510 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2511 0},
2512
2513 /* Get tp offset for a symbol. */
2514 {"tprel", 0,
2515 0, /* adr_type */
2516 0,
2517 0,
2518 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2519 0,
2520 0},
2521
2522 /* Get tp offset for a symbol. */
2523 {"tprel_lo12", 0,
2524 0, /* adr_type */
2525 0,
2526 0,
2527 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2528 0,
2529 0},
2530
2531 /* Get tp offset for a symbol. */
2532 {"tprel_hi12", 0,
2533 0, /* adr_type */
2534 0,
2535 0,
2536 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2537 0,
2538 0},
2539
2540 /* Get tp offset for a symbol. */
2541 {"tprel_lo12_nc", 0,
2542 0, /* adr_type */
2543 0,
2544 0,
2545 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2546 0,
2547 0},
2548
2549 /* Most significant bits 32-47 of address/value: MOVZ. */
2550 {"tprel_g2", 0,
2551 0, /* adr_type */
2552 0,
2553 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2554 0,
2555 0,
2556 0},
2557
2558 /* Most significant bits 16-31 of address/value: MOVZ. */
2559 {"tprel_g1", 0,
2560 0, /* adr_type */
2561 0,
2562 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2563 0,
2564 0,
2565 0},
2566
2567 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2568 {"tprel_g1_nc", 0,
2569 0, /* adr_type */
2570 0,
2571 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2572 0,
2573 0,
2574 0},
2575
2576 /* Most significant bits 0-15 of address/value: MOVZ. */
2577 {"tprel_g0", 0,
2578 0, /* adr_type */
2579 0,
2580 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2581 0,
2582 0,
2583 0},
2584
2585 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2586 {"tprel_g0_nc", 0,
2587 0, /* adr_type */
2588 0,
2589 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2590 0,
2591 0,
2592 0},
2593 };
2594
2595 /* Given the address of a pointer pointing to the textual name of a
2596 relocation as may appear in assembler source, attempt to find its
2597 details in reloc_table. The pointer will be updated to the character
2598 after the trailing colon. On failure, NULL will be returned;
2599 otherwise return the reloc_table_entry. */
2600
2601 static struct reloc_table_entry *
2602 find_reloc_table_entry (char **str)
2603 {
2604 unsigned int i;
2605 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2606 {
2607 int length = strlen (reloc_table[i].name);
2608
2609 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2610 && (*str)[length] == ':')
2611 {
2612 *str += (length + 1);
2613 return &reloc_table[i];
2614 }
2615 }
2616
2617 return NULL;
2618 }
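/* For example, given *STR pointing at "lo12:sym", the entry for "lo12"
   is returned and *STR is advanced past the trailing colon to "sym";
   the caller then selects the ADD, LDST, MOVW, etc. variant that suits
   the instruction being assembled.  */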
2619
2620 /* Mode argument to parse_shift and parser_shifter_operand. */
2621 enum parse_shift_mode
2622 {
2623 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2624 "#imm{,lsl #n}" */
2625 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2626 "#imm" */
2627 SHIFTED_LSL, /* bare "lsl #n" */
2628 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2629 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2630 };
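/* Some illustrative operand fragments for the modes above:
     SHIFTED_ARITH_IMM    "x2, lsl #4" or "#1, lsl #12"  (add/sub)
     SHIFTED_LOGIC_IMM    "x2, ror #8" or a bare "#imm"  (logical ops)
     SHIFTED_LSL          the bare "lsl #12" following an arithmetic #imm
     SHIFTED_LSL_MSL      "msl #8" in an AdvSIMD modified immediate
     SHIFTED_REG_OFFSET   "x2, sxtx #3" in a register offset address.  */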
2631
2632 /* Parse a <shift> operator on an AArch64 data processing instruction.
2633 Return TRUE on success; otherwise return FALSE. */
2634 static bfd_boolean
2635 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2636 {
2637 const struct aarch64_name_value_pair *shift_op;
2638 enum aarch64_modifier_kind kind;
2639 expressionS exp;
2640 int exp_has_prefix;
2641 char *s = *str;
2642 char *p = s;
2643
2644 for (p = *str; ISALPHA (*p); p++)
2645 ;
2646
2647 if (p == *str)
2648 {
2649 set_syntax_error (_("shift expression expected"));
2650 return FALSE;
2651 }
2652
2653 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2654
2655 if (shift_op == NULL)
2656 {
2657 set_syntax_error (_("shift operator expected"));
2658 return FALSE;
2659 }
2660
2661 kind = aarch64_get_operand_modifier (shift_op);
2662
2663 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2664 {
2665 set_syntax_error (_("invalid use of 'MSL'"));
2666 return FALSE;
2667 }
2668
2669 switch (mode)
2670 {
2671 case SHIFTED_LOGIC_IMM:
2672 if (aarch64_extend_operator_p (kind) == TRUE)
2673 {
2674 set_syntax_error (_("extending shift is not permitted"));
2675 return FALSE;
2676 }
2677 break;
2678
2679 case SHIFTED_ARITH_IMM:
2680 if (kind == AARCH64_MOD_ROR)
2681 {
2682 set_syntax_error (_("'ROR' shift is not permitted"));
2683 return FALSE;
2684 }
2685 break;
2686
2687 case SHIFTED_LSL:
2688 if (kind != AARCH64_MOD_LSL)
2689 {
2690 set_syntax_error (_("only 'LSL' shift is permitted"));
2691 return FALSE;
2692 }
2693 break;
2694
2695 case SHIFTED_REG_OFFSET:
2696 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2697 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2698 {
2699 set_fatal_syntax_error
2700 (_("invalid shift for the register offset addressing mode"));
2701 return FALSE;
2702 }
2703 break;
2704
2705 case SHIFTED_LSL_MSL:
2706 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2707 {
2708 set_syntax_error (_("invalid shift operator"));
2709 return FALSE;
2710 }
2711 break;
2712
2713 default:
2714 abort ();
2715 }
2716
2717 /* Whitespace can appear here if the next thing is a bare digit. */
2718 skip_whitespace (p);
2719
2720 /* Parse shift amount. */
2721 exp_has_prefix = 0;
2722 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2723 exp.X_op = O_absent;
2724 else
2725 {
2726 if (is_immediate_prefix (*p))
2727 {
2728 p++;
2729 exp_has_prefix = 1;
2730 }
2731 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2732 }
2733 if (exp.X_op == O_absent)
2734 {
2735 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2736 {
2737 set_syntax_error (_("missing shift amount"));
2738 return FALSE;
2739 }
2740 operand->shifter.amount = 0;
2741 }
2742 else if (exp.X_op != O_constant)
2743 {
2744 set_syntax_error (_("constant shift amount required"));
2745 return FALSE;
2746 }
2747 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2748 {
2749 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2750 return FALSE;
2751 }
2752 else
2753 {
2754 operand->shifter.amount = exp.X_add_number;
2755 operand->shifter.amount_present = 1;
2756 }
2757
2758 operand->shifter.operator_present = 1;
2759 operand->shifter.kind = kind;
2760
2761 *str = p;
2762 return TRUE;
2763 }
2764
2765 /* Parse a <shifter_operand> for a data processing instruction:
2766
2767 #<immediate>
2768 #<immediate>, LSL #imm
2769
2770 Validation of immediate operands is deferred to md_apply_fix.
2771
2772 Return TRUE on success; otherwise return FALSE. */
2773
2774 static bfd_boolean
2775 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2776 enum parse_shift_mode mode)
2777 {
2778 char *p;
2779
2780 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2781 return FALSE;
2782
2783 p = *str;
2784
2785 /* Accept an immediate expression. */
2786 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2787 return FALSE;
2788
2789 /* Accept optional LSL for arithmetic immediate values. */
2790 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2791 if (! parse_shift (&p, operand, SHIFTED_LSL))
2792 return FALSE;
2793
2794   /* Do not accept any shifter for logical immediate values.  */
2795 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2796 && parse_shift (&p, operand, mode))
2797 {
2798 set_syntax_error (_("unexpected shift operator"));
2799 return FALSE;
2800 }
2801
2802 *str = p;
2803 return TRUE;
2804 }
2805
2806 /* Parse a <shifter_operand> for a data processing instruction:
2807
2808 <Rm>
2809 <Rm>, <shift>
2810 #<immediate>
2811 #<immediate>, LSL #imm
2812
2813 where <shift> is handled by parse_shift above, and the last two
2814 cases are handled by the function above.
2815
2816 Validation of immediate operands is deferred to md_apply_fix.
2817
2818 Return TRUE on success; otherwise return FALSE. */
2819
2820 static bfd_boolean
2821 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2822 enum parse_shift_mode mode)
2823 {
2824 int reg;
2825 int isreg32, isregzero;
2826 enum aarch64_operand_class opd_class
2827 = aarch64_get_operand_class (operand->type);
2828
2829 if ((reg =
2830 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2831 {
2832 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2833 {
2834 set_syntax_error (_("unexpected register in the immediate operand"));
2835 return FALSE;
2836 }
2837
2838 if (!isregzero && reg == REG_SP)
2839 {
2840 set_syntax_error (BAD_SP);
2841 return FALSE;
2842 }
2843
2844 operand->reg.regno = reg;
2845 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2846
2847 /* Accept optional shift operation on register. */
2848 if (! skip_past_comma (str))
2849 return TRUE;
2850
2851 if (! parse_shift (str, operand, mode))
2852 return FALSE;
2853
2854 return TRUE;
2855 }
2856 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2857 {
2858 set_syntax_error
2859 (_("integer register expected in the extended/shifted operand "
2860 "register"));
2861 return FALSE;
2862 }
2863
2864 /* We have a shifted immediate variable. */
2865 return parse_shifter_operand_imm (str, operand, mode);
2866 }
2867
2868 /* Return TRUE on success; return FALSE otherwise. */
2869
2870 static bfd_boolean
2871 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2872 enum parse_shift_mode mode)
2873 {
2874 char *p = *str;
2875
2876 /* Determine if we have the sequence of characters #: or just :
2877 coming next. If we do, then we check for a :rello: relocation
2878 modifier. If we don't, punt the whole lot to
2879 parse_shifter_operand. */
2880
2881 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2882 {
2883 struct reloc_table_entry *entry;
2884
2885 if (p[0] == '#')
2886 p += 2;
2887 else
2888 p++;
2889 *str = p;
2890
2891 /* Try to parse a relocation. Anything else is an error. */
2892 if (!(entry = find_reloc_table_entry (str)))
2893 {
2894 set_syntax_error (_("unknown relocation modifier"));
2895 return FALSE;
2896 }
2897
2898 if (entry->add_type == 0)
2899 {
2900 set_syntax_error
2901 (_("this relocation modifier is not allowed on this instruction"));
2902 return FALSE;
2903 }
2904
2905 /* Save str before we decompose it. */
2906 p = *str;
2907
2908 /* Next, we parse the expression. */
2909 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2910 return FALSE;
2911
2912 /* Record the relocation type (use the ADD variant here). */
2913 inst.reloc.type = entry->add_type;
2914 inst.reloc.pc_rel = entry->pc_rel;
2915
2916 /* If str is empty, we've reached the end, stop here. */
2917 if (**str == '\0')
2918 return TRUE;
2919
2920 /* Otherwise, we have a shifted reloc modifier, so rewind to
2921 recover the variable name and continue parsing for the shifter. */
2922 *str = p;
2923 return parse_shifter_operand_imm (str, operand, mode);
2924 }
2925
2926 return parse_shifter_operand (str, operand, mode);
2927 }
2928
2929 /* Parse all forms of an address expression. Information is written
2930 to *OPERAND and/or inst.reloc.
2931
2932 The A64 instruction set has the following addressing modes:
2933
2934 Offset
2935 [base] // in SIMD ld/st structure
2936 [base{,#0}] // in ld/st exclusive
2937 [base{,#imm}]
2938 [base,Xm{,LSL #imm}]
2939 [base,Xm,SXTX {#imm}]
2940 [base,Wm,(S|U)XTW {#imm}]
2941 Pre-indexed
2942 [base,#imm]!
2943 Post-indexed
2944 [base],#imm
2945 [base],Xm // in SIMD ld/st structure
2946 PC-relative (literal)
2947 label
2948 =immediate
2949
2950 (As a convenience, the notation "=immediate" is permitted in conjunction
2951 with the pc-relative literal load instructions to automatically place an
2952 immediate value or symbolic address in a nearby literal pool and generate
2953 a hidden label which references it.)
2954
2955 Upon a successful parsing, the address structure in *OPERAND will be
2956 filled in the following way:
2957
2958 .base_regno = <base>
2959 .offset.is_reg // 1 if the offset is a register
2960 .offset.imm = <imm>
2961 .offset.regno = <Rm>
2962
2963 For different addressing modes defined in the A64 ISA:
2964
2965 Offset
2966 .pcrel=0; .preind=1; .postind=0; .writeback=0
2967 Pre-indexed
2968 .pcrel=0; .preind=1; .postind=0; .writeback=1
2969 Post-indexed
2970 .pcrel=0; .preind=0; .postind=1; .writeback=1
2971 PC-relative (literal)
2972 .pcrel=1; .preind=1; .postind=0; .writeback=0
2973
2974 The shift/extension information, if any, will be stored in .shifter.
2975
2976 It is the caller's responsibility to check for addressing modes not
2977 supported by the instruction, and to set inst.reloc.type. */
2978
2979 static bfd_boolean
2980 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2981 int accept_reg_post_index)
2982 {
2983 char *p = *str;
2984 int reg;
2985 int isreg32, isregzero;
2986 expressionS *exp = &inst.reloc.exp;
2987
2988 if (! skip_past_char (&p, '['))
2989 {
2990 /* =immediate or label. */
2991 operand->addr.pcrel = 1;
2992 operand->addr.preind = 1;
2993
2994 /* #:<reloc_op>:<symbol> */
2995 skip_past_char (&p, '#');
2996 if (reloc && skip_past_char (&p, ':'))
2997 {
2998 bfd_reloc_code_real_type ty;
2999 struct reloc_table_entry *entry;
3000
3001 /* Try to parse a relocation modifier. Anything else is
3002 an error. */
3003 entry = find_reloc_table_entry (&p);
3004 if (! entry)
3005 {
3006 set_syntax_error (_("unknown relocation modifier"));
3007 return FALSE;
3008 }
3009
3010 switch (operand->type)
3011 {
3012 case AARCH64_OPND_ADDR_PCREL21:
3013 /* adr */
3014 ty = entry->adr_type;
3015 break;
3016
3017 default:
3018 ty = entry->ld_literal_type;
3019 break;
3020 }
3021
3022 if (ty == 0)
3023 {
3024 set_syntax_error
3025 (_("this relocation modifier is not allowed on this "
3026 "instruction"));
3027 return FALSE;
3028 }
3029
3030 /* #:<reloc_op>: */
3031 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3032 {
3033 set_syntax_error (_("invalid relocation expression"));
3034 return FALSE;
3035 }
3036
3037 /* #:<reloc_op>:<expr> */
3038 /* Record the relocation type. */
3039 inst.reloc.type = ty;
3040 inst.reloc.pc_rel = entry->pc_rel;
3041 }
3042 else
3043 {
3044
3045 if (skip_past_char (&p, '='))
3046 /* =immediate; need to generate the literal in the literal pool. */
3047 inst.gen_lit_pool = 1;
3048
3049 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3050 {
3051 set_syntax_error (_("invalid address"));
3052 return FALSE;
3053 }
3054 }
3055
3056 *str = p;
3057 return TRUE;
3058 }
3059
3060 /* [ */
3061
3062 /* Accept SP and reject ZR */
3063 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
3064 if (reg == PARSE_FAIL || isreg32)
3065 {
3066 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3067 return FALSE;
3068 }
3069 operand->addr.base_regno = reg;
3070
3071 /* [Xn */
3072 if (skip_past_comma (&p))
3073 {
3074 /* [Xn, */
3075 operand->addr.preind = 1;
3076
3077 /* Reject SP and accept ZR */
3078 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3079 if (reg != PARSE_FAIL)
3080 {
3081 /* [Xn,Rm */
3082 operand->addr.offset.regno = reg;
3083 operand->addr.offset.is_reg = 1;
3084 /* Shifted index. */
3085 if (skip_past_comma (&p))
3086 {
3087 /* [Xn,Rm, */
3088 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3089             /* Use the diagnostics set in parse_shift, so do not set a new
3090                error message here.  */
3091 return FALSE;
3092 }
3093 /* We only accept:
3094 [base,Xm{,LSL #imm}]
3095 [base,Xm,SXTX {#imm}]
3096 [base,Wm,(S|U)XTW {#imm}] */
3097 if (operand->shifter.kind == AARCH64_MOD_NONE
3098 || operand->shifter.kind == AARCH64_MOD_LSL
3099 || operand->shifter.kind == AARCH64_MOD_SXTX)
3100 {
3101 if (isreg32)
3102 {
3103 set_syntax_error (_("invalid use of 32-bit register offset"));
3104 return FALSE;
3105 }
3106 }
3107 else if (!isreg32)
3108 {
3109 set_syntax_error (_("invalid use of 64-bit register offset"));
3110 return FALSE;
3111 }
3112 }
3113 else
3114 {
3115 /* [Xn,#:<reloc_op>:<symbol> */
3116 skip_past_char (&p, '#');
3117 if (reloc && skip_past_char (&p, ':'))
3118 {
3119 struct reloc_table_entry *entry;
3120
3121 /* Try to parse a relocation modifier. Anything else is
3122 an error. */
3123 if (!(entry = find_reloc_table_entry (&p)))
3124 {
3125 set_syntax_error (_("unknown relocation modifier"));
3126 return FALSE;
3127 }
3128
3129 if (entry->ldst_type == 0)
3130 {
3131 set_syntax_error
3132 (_("this relocation modifier is not allowed on this "
3133 "instruction"));
3134 return FALSE;
3135 }
3136
3137 /* [Xn,#:<reloc_op>: */
3138 /* We now have the group relocation table entry corresponding to
3139 the name in the assembler source. Next, we parse the
3140 expression. */
3141 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3142 {
3143 set_syntax_error (_("invalid relocation expression"));
3144 return FALSE;
3145 }
3146
3147 /* [Xn,#:<reloc_op>:<expr> */
3148 /* Record the load/store relocation type. */
3149 inst.reloc.type = entry->ldst_type;
3150 inst.reloc.pc_rel = entry->pc_rel;
3151 }
3152 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3153 {
3154 set_syntax_error (_("invalid expression in the address"));
3155 return FALSE;
3156 }
3157 /* [Xn,<expr> */
3158 }
3159 }
3160
3161 if (! skip_past_char (&p, ']'))
3162 {
3163 set_syntax_error (_("']' expected"));
3164 return FALSE;
3165 }
3166
3167 if (skip_past_char (&p, '!'))
3168 {
3169 if (operand->addr.preind && operand->addr.offset.is_reg)
3170 {
3171 set_syntax_error (_("register offset not allowed in pre-indexed "
3172 "addressing mode"));
3173 return FALSE;
3174 }
3175 /* [Xn]! */
3176 operand->addr.writeback = 1;
3177 }
3178 else if (skip_past_comma (&p))
3179 {
3180 /* [Xn], */
3181 operand->addr.postind = 1;
3182 operand->addr.writeback = 1;
3183
3184 if (operand->addr.preind)
3185 {
3186 set_syntax_error (_("cannot combine pre- and post-indexing"));
3187 return FALSE;
3188 }
3189
3190 if (accept_reg_post_index
3191 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3192 &isregzero)) != PARSE_FAIL)
3193 {
3194 /* [Xn],Xm */
3195 if (isreg32)
3196 {
3197 set_syntax_error (_("invalid 32-bit register offset"));
3198 return FALSE;
3199 }
3200 operand->addr.offset.regno = reg;
3201 operand->addr.offset.is_reg = 1;
3202 }
3203 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3204 {
3205 /* [Xn],#expr */
3206 set_syntax_error (_("invalid expression in the address"));
3207 return FALSE;
3208 }
3209 }
3210
3211 /* If at this point neither .preind nor .postind is set, we have a
3212 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3213 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3214 {
3215 if (operand->addr.writeback)
3216 {
3217 /* Reject [Rn]! */
3218 set_syntax_error (_("missing offset in the pre-indexed address"));
3219 return FALSE;
3220 }
3221 operand->addr.preind = 1;
3222 inst.reloc.exp.X_op = O_constant;
3223 inst.reloc.exp.X_add_number = 0;
3224 }
3225
3226 *str = p;
3227 return TRUE;
3228 }
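/* For example, "[x1, #8]!" is parsed with .base_regno = 1, .preind = 1
   and .writeback = 1 (the immediate 8 is recorded in inst.reloc.exp),
   while "[x1], x2" sets .postind = 1, .writeback = 1, .offset.regno = 2
   and .offset.is_reg = 1, provided ACCEPT_REG_POST_INDEX is non-zero.  */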
3229
3230 /* Return TRUE on success; otherwise return FALSE. */
3231 static bfd_boolean
3232 parse_address (char **str, aarch64_opnd_info *operand,
3233 int accept_reg_post_index)
3234 {
3235 return parse_address_main (str, operand, 0, accept_reg_post_index);
3236 }
3237
3238 /* Return TRUE on success; otherwise return FALSE. */
3239 static bfd_boolean
3240 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3241 {
3242 return parse_address_main (str, operand, 1, 0);
3243 }
3244
3245 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3246 Return TRUE on success; otherwise return FALSE. */
3247 static bfd_boolean
3248 parse_half (char **str, int *internal_fixup_p)
3249 {
3250 char *p, *saved;
3251 int dummy;
3252
3253 p = *str;
3254 skip_past_char (&p, '#');
3255
3256 gas_assert (internal_fixup_p);
3257 *internal_fixup_p = 0;
3258
3259 if (*p == ':')
3260 {
3261 struct reloc_table_entry *entry;
3262
3263 /* Try to parse a relocation. Anything else is an error. */
3264 ++p;
3265 if (!(entry = find_reloc_table_entry (&p)))
3266 {
3267 set_syntax_error (_("unknown relocation modifier"));
3268 return FALSE;
3269 }
3270
3271 if (entry->movw_type == 0)
3272 {
3273 set_syntax_error
3274 (_("this relocation modifier is not allowed on this instruction"));
3275 return FALSE;
3276 }
3277
3278 inst.reloc.type = entry->movw_type;
3279 }
3280 else
3281 *internal_fixup_p = 1;
3282
3283 /* Avoid parsing a register as a general symbol. */
3284 saved = p;
3285 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3286 return FALSE;
3287 p = saved;
3288
3289 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3290 return FALSE;
3291
3292 *str = p;
3293 return TRUE;
3294 }
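/* For example, in "movz x0, #:abs_g1:sym" the ":abs_g1:" modifier is
   looked up in reloc_table and its movw_type
   (BFD_RELOC_AARCH64_MOVW_G1) is recorded in inst.reloc.type, while a
   plain "movz x0, #0x1234" takes the *INTERNAL_FIXUP_P route instead.  */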
3295
3296 /* Parse an operand for an ADRP instruction:
3297 ADRP <Xd>, <label>
3298 Return TRUE on success; otherwise return FALSE. */
3299
3300 static bfd_boolean
3301 parse_adrp (char **str)
3302 {
3303 char *p;
3304
3305 p = *str;
3306 if (*p == ':')
3307 {
3308 struct reloc_table_entry *entry;
3309
3310 /* Try to parse a relocation. Anything else is an error. */
3311 ++p;
3312 if (!(entry = find_reloc_table_entry (&p)))
3313 {
3314 set_syntax_error (_("unknown relocation modifier"));
3315 return FALSE;
3316 }
3317
3318 if (entry->adrp_type == 0)
3319 {
3320 set_syntax_error
3321 (_("this relocation modifier is not allowed on this instruction"));
3322 return FALSE;
3323 }
3324
3325 inst.reloc.type = entry->adrp_type;
3326 }
3327 else
3328 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3329
3330 inst.reloc.pc_rel = 1;
3331
3332 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3333 return FALSE;
3334
3335 *str = p;
3336 return TRUE;
3337 }
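/* For example, "adrp x0, sym" uses the default
   BFD_RELOC_AARCH64_ADR_HI21_PCREL, whereas "adrp x0, :got:sym" picks
   up the adrp_type of the ":got:" table entry,
   BFD_RELOC_AARCH64_ADR_GOT_PAGE.  */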
3338
3339 /* Miscellaneous. */
3340
3341 /* Parse an option for a preload instruction. Returns the encoding for the
3342 option, or PARSE_FAIL. */
3343
3344 static int
3345 parse_pldop (char **str)
3346 {
3347 char *p, *q;
3348 const struct aarch64_name_value_pair *o;
3349
3350 p = q = *str;
3351 while (ISALNUM (*q))
3352 q++;
3353
3354 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3355 if (!o)
3356 return PARSE_FAIL;
3357
3358 *str = q;
3359 return o->value;
3360 }
3361
3362 /* Parse an option for a barrier instruction. Returns the encoding for the
3363 option, or PARSE_FAIL. */
3364
3365 static int
3366 parse_barrier (char **str)
3367 {
3368 char *p, *q;
3369 const asm_barrier_opt *o;
3370
3371 p = q = *str;
3372 while (ISALPHA (*q))
3373 q++;
3374
3375 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3376 if (!o)
3377 return PARSE_FAIL;
3378
3379 *str = q;
3380 return o->value;
3381 }
3382
3383 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3384 Returns the encoding for the option, or PARSE_FAIL.
3385
3386 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3387 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3388
3389 static int
3390 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3391 {
3392 char *p, *q;
3393 char buf[32];
3394 const aarch64_sys_reg *o;
3395 int value;
3396
3397 p = buf;
3398 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3399 if (p < buf + 31)
3400 *p++ = TOLOWER (*q);
3401 *p = '\0';
3402   /* Assert that BUF is large enough, i.e. that the name was not truncated.  */
3403 gas_assert (p - buf == q - *str);
3404
3405 o = hash_find (sys_regs, buf);
3406 if (!o)
3407 {
3408 if (!imple_defined_p)
3409 return PARSE_FAIL;
3410 else
3411 {
3412 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3413 unsigned int op0, op1, cn, cm, op2;
3414
3415 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3416 != 5)
3417 return PARSE_FAIL;
3418 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3419 return PARSE_FAIL;
3420 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3421 }
3422 }
3423 else
3424 {
3425 if (aarch64_sys_reg_deprecated_p (o))
3426 as_warn (_("system register name '%s' is deprecated and may be "
3427 "removed in a future release"), buf);
3428 value = o->value;
3429 }
3430
3431 *str = q;
3432 return value;
3433 }
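/* For example, "s3_0_c15_c2_0" (an implementation defined register
   name) is accepted when IMPLE_DEFINED_P is non-zero and encodes as
   (3 << 14) | (0 << 11) | (15 << 7) | (2 << 3) | 0, i.e. 0xc790.  */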
3434
3435 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3436 for the option, or NULL. */
3437
3438 static const aarch64_sys_ins_reg *
3439 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3440 {
3441 char *p, *q;
3442 char buf[32];
3443 const aarch64_sys_ins_reg *o;
3444
3445 p = buf;
3446 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3447 if (p < buf + 31)
3448 *p++ = TOLOWER (*q);
3449 *p = '\0';
3450
3451 o = hash_find (sys_ins_regs, buf);
3452 if (!o)
3453 return NULL;
3454
3455 *str = q;
3456 return o;
3457 }
3458 \f
3459 #define po_char_or_fail(chr) do { \
3460 if (! skip_past_char (&str, chr)) \
3461 goto failure; \
3462 } while (0)
3463
3464 #define po_reg_or_fail(regtype) do { \
3465 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3466 if (val == PARSE_FAIL) \
3467 { \
3468 set_default_error (); \
3469 goto failure; \
3470 } \
3471 } while (0)
3472
3473 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3474 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3475 &isreg32, &isregzero); \
3476 if (val == PARSE_FAIL) \
3477 { \
3478 set_default_error (); \
3479 goto failure; \
3480 } \
3481 info->reg.regno = val; \
3482 if (isreg32) \
3483 info->qualifier = AARCH64_OPND_QLF_W; \
3484 else \
3485 info->qualifier = AARCH64_OPND_QLF_X; \
3486 } while (0)
3487
3488 #define po_imm_nc_or_fail() do { \
3489 if (! parse_constant_immediate (&str, &val)) \
3490 goto failure; \
3491 } while (0)
3492
3493 #define po_imm_or_fail(min, max) do { \
3494 if (! parse_constant_immediate (&str, &val)) \
3495 goto failure; \
3496 if (val < min || val > max) \
3497 { \
3498 set_fatal_syntax_error (_("immediate value out of range "\
3499 #min " to "#max)); \
3500 goto failure; \
3501 } \
3502 } while (0)
3503
3504 #define po_misc_or_fail(expr) do { \
3505 if (!expr) \
3506 goto failure; \
3507 } while (0)
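/* These macros assume that the surrounding parser provides the locals
   they reference (str, val, rtype, isreg32, isregzero, info) and a
   "failure" label.  A simplified, illustrative use inside such a
   parser might look like:

       po_char_or_fail ('[');
       po_int_reg_or_fail (0, 1);
       po_char_or_fail (']');

   which accepts a bracketed integer register (SP allowed, ZR rejected)
   and transfers control to "failure" as soon as any step fails to
   match.  */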
3508 \f
3509 /* Encode the 12-bit imm field of Add/sub immediate.  */
3510 static inline uint32_t
3511 encode_addsub_imm (uint32_t imm)
3512 {
3513 return imm << 10;
3514 }
3515
3516 /* Encode the shift amount field of Add/sub immediate.  */
3517 static inline uint32_t
3518 encode_addsub_imm_shift_amount (uint32_t cnt)
3519 {
3520 return cnt << 22;
3521 }
3522
3523
3524 /* Encode the imm field of Adr instruction.  */
3525 static inline uint32_t
3526 encode_adr_imm (uint32_t imm)
3527 {
3528 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3529 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3530 }
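/* For illustration: an ADR immediate of 0x1005 is split into
   immlo = 0x1 (placed in bits [30:29]) and immhi = 0x401 (placed in
   bits [23:5]), so encode_adr_imm (0x1005) == 0x20008020.  */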
3531
3532 /* Encode the immediate field of Move wide immediate.  */
3533 static inline uint32_t
3534 encode_movw_imm (uint32_t imm)
3535 {
3536 return imm << 5;
3537 }
3538
3539 /* Encode the 26-bit offset of unconditional branch.  */
3540 static inline uint32_t
3541 encode_branch_ofs_26 (uint32_t ofs)
3542 {
3543 return ofs & ((1 << 26) - 1);
3544 }
3545
3546 /* Encode the 19-bit offset of conditional branch and compare & branch.  */
3547 static inline uint32_t
3548 encode_cond_branch_ofs_19 (uint32_t ofs)
3549 {
3550 return (ofs & ((1 << 19) - 1)) << 5;
3551 }
3552
3553 /* Encode the 19-bit offset of ld literal.  */
3554 static inline uint32_t
3555 encode_ld_lit_ofs_19 (uint32_t ofs)
3556 {
3557 return (ofs & ((1 << 19) - 1)) << 5;
3558 }
3559
3560 /* Encode the 14-bit offset of test & branch. */
3561 static inline uint32_t
3562 encode_tst_branch_ofs_14 (uint32_t ofs)
3563 {
3564 return (ofs & ((1 << 14) - 1)) << 5;
3565 }
3566
3567 /* Encode the 16-bit imm field of svc/hvc/smc. */
3568 static inline uint32_t
3569 encode_svc_imm (uint32_t imm)
3570 {
3571 return imm << 5;
3572 }
3573
3574 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3575 static inline uint32_t
3576 reencode_addsub_switch_add_sub (uint32_t opcode)
3577 {
3578 return opcode ^ (1 << 30);
3579 }
3580
3581 static inline uint32_t
3582 reencode_movzn_to_movz (uint32_t opcode)
3583 {
3584 return opcode | (1 << 30);
3585 }
3586
3587 static inline uint32_t
3588 reencode_movzn_to_movn (uint32_t opcode)
3589 {
3590 return opcode & ~(1 << 30);
3591 }
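/* These re-encoding helpers rely on the fact that the ADD/SUB and
   MOVZ/MOVN instruction pairs differ only in bit 30 of their
   encodings, so flipping or forcing that bit switches between the two
   forms.  */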
3592
3593 /* Overall per-instruction processing. */
3594
3595 /* We need to be able to fix up arbitrary expressions in some statements.
3596 This is so that we can handle symbols that are an arbitrary distance from
3597 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3598 which returns part of an address in a form which will be valid for
3599 a data instruction. We do this by pushing the expression into a symbol
3600 in the expr_section, and creating a fix for that. */
3601
3602 static fixS *
3603 fix_new_aarch64 (fragS * frag,
3604 int where,
3605 short int size, expressionS * exp, int pc_rel, int reloc)
3606 {
3607 fixS *new_fix;
3608
3609 switch (exp->X_op)
3610 {
3611 case O_constant:
3612 case O_symbol:
3613 case O_add:
3614 case O_subtract:
3615 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3616 break;
3617
3618 default:
3619 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3620 pc_rel, reloc);
3621 break;
3622 }
3623 return new_fix;
3624 }
3625 \f
3626 /* Diagnostics on operands errors. */
3627
3628 /* By default, output a verbose error message.
3629    Verbose error messages can be disabled with -mno-verbose-error.  */
3630 static int verbose_error_p = 1;
3631
3632 #ifdef DEBUG_AARCH64
3633 /* N.B. this is only for the purpose of debugging. */
3634 const char* operand_mismatch_kind_names[] =
3635 {
3636 "AARCH64_OPDE_NIL",
3637 "AARCH64_OPDE_RECOVERABLE",
3638 "AARCH64_OPDE_SYNTAX_ERROR",
3639 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3640 "AARCH64_OPDE_INVALID_VARIANT",
3641 "AARCH64_OPDE_OUT_OF_RANGE",
3642 "AARCH64_OPDE_UNALIGNED",
3643 "AARCH64_OPDE_REG_LIST",
3644 "AARCH64_OPDE_OTHER_ERROR",
3645 };
3646 #endif /* DEBUG_AARCH64 */
3647
3648 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3649
3650 When multiple errors of different kinds are found in the same assembly
3651 line, only the error of the highest severity will be picked up for
3652 issuing the diagnostics. */
3653
3654 static inline bfd_boolean
3655 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3656 enum aarch64_operand_error_kind rhs)
3657 {
3658 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3659 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3660 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3661 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3662 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3663 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3664 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3665 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3666 return lhs > rhs;
3667 }
3668
3669 /* Helper routine to get the mnemonic name from the assembly instruction
3670    line; it should only be called for diagnostic purposes, as a string
3671    copy operation is involved, which may affect runtime performance if
3672    used elsewhere.  */
3673
3674 static const char*
3675 get_mnemonic_name (const char *str)
3676 {
3677 static char mnemonic[32];
3678 char *ptr;
3679
3680   /* Get the first 31 bytes and assume that the full name is included.  */
3681 strncpy (mnemonic, str, 31);
3682 mnemonic[31] = '\0';
3683
3684 /* Scan up to the end of the mnemonic, which must end in white space,
3685 '.', or end of string. */
3686 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3687 ;
3688
3689 *ptr = '\0';
3690
3691 /* Append '...' to the truncated long name. */
3692 if (ptr - mnemonic == 31)
3693 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3694
3695 return mnemonic;
3696 }
3697
3698 static void
3699 reset_aarch64_instruction (aarch64_instruction *instruction)
3700 {
3701 memset (instruction, '\0', sizeof (aarch64_instruction));
3702 instruction->reloc.type = BFD_RELOC_UNUSED;
3703 }
3704
3705 /* Data structures storing one user error in the assembly code related to
3706 operands. */
3707
3708 struct operand_error_record
3709 {
3710 const aarch64_opcode *opcode;
3711 aarch64_operand_error detail;
3712 struct operand_error_record *next;
3713 };
3714
3715 typedef struct operand_error_record operand_error_record;
3716
3717 struct operand_errors
3718 {
3719 operand_error_record *head;
3720 operand_error_record *tail;
3721 };
3722
3723 typedef struct operand_errors operand_errors;
3724
3725 /* Top-level data structure reporting user errors for the current line of
3726 the assembly code.
3727 The way md_assemble works is that all opcodes sharing the same mnemonic
3728 name are iterated to find a match to the assembly line. In this data
3729    structure, each such opcode will have one operand_error_record
3730    allocated and inserted.  In other words, excessive errors related to
3731    a single opcode are disregarded.  */
3732 operand_errors operand_error_report;
3733
3734 /* Free record nodes. */
3735 static operand_error_record *free_opnd_error_record_nodes = NULL;
3736
3737 /* Initialize the data structure that stores the operand mismatch
3738 information on assembling one line of the assembly code. */
3739 static void
3740 init_operand_error_report (void)
3741 {
3742 if (operand_error_report.head != NULL)
3743 {
3744 gas_assert (operand_error_report.tail != NULL);
3745 operand_error_report.tail->next = free_opnd_error_record_nodes;
3746 free_opnd_error_record_nodes = operand_error_report.head;
3747 operand_error_report.head = NULL;
3748 operand_error_report.tail = NULL;
3749 return;
3750 }
3751 gas_assert (operand_error_report.tail == NULL);
3752 }
3753
3754 /* Return TRUE if some operand error has been recorded during the
3755 parsing of the current assembly line using the opcode *OPCODE;
3756 otherwise return FALSE. */
3757 static inline bfd_boolean
3758 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3759 {
3760 operand_error_record *record = operand_error_report.head;
3761 return record && record->opcode == opcode;
3762 }
3763
3764 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3765 OPCODE field is initialized with OPCODE.
3766    N.B. only one record for each opcode, i.e. at most one error is
3767    recorded for each instruction template.  */
3768
3769 static void
3770 add_operand_error_record (const operand_error_record* new_record)
3771 {
3772 const aarch64_opcode *opcode = new_record->opcode;
3773 operand_error_record* record = operand_error_report.head;
3774
3775 /* The record may have been created for this opcode. If not, we need
3776 to prepare one. */
3777 if (! opcode_has_operand_error_p (opcode))
3778 {
3779 /* Get one empty record. */
3780 if (free_opnd_error_record_nodes == NULL)
3781 {
3782 record = xmalloc (sizeof (operand_error_record));
3783 if (record == NULL)
3784 abort ();
3785 }
3786 else
3787 {
3788 record = free_opnd_error_record_nodes;
3789 free_opnd_error_record_nodes = record->next;
3790 }
3791 record->opcode = opcode;
3792 /* Insert at the head. */
3793 record->next = operand_error_report.head;
3794 operand_error_report.head = record;
3795 if (operand_error_report.tail == NULL)
3796 operand_error_report.tail = record;
3797 }
3798 else if (record->detail.kind != AARCH64_OPDE_NIL
3799 && record->detail.index <= new_record->detail.index
3800 && operand_error_higher_severity_p (record->detail.kind,
3801 new_record->detail.kind))
3802 {
3803       /* In the case of multiple errors found on operands related to a
3804          single opcode, only record the error of the leftmost operand,
3805          and only if the error is of higher severity.  */
3806 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3807 " the existing error %s on operand %d",
3808 operand_mismatch_kind_names[new_record->detail.kind],
3809 new_record->detail.index,
3810 operand_mismatch_kind_names[record->detail.kind],
3811 record->detail.index);
3812 return;
3813 }
3814
3815 record->detail = new_record->detail;
3816 }
3817
3818 static inline void
3819 record_operand_error_info (const aarch64_opcode *opcode,
3820 aarch64_operand_error *error_info)
3821 {
3822 operand_error_record record;
3823 record.opcode = opcode;
3824 record.detail = *error_info;
3825 add_operand_error_record (&record);
3826 }
3827
3828 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3829 error message *ERROR, for operand IDX (count from 0). */
3830
3831 static void
3832 record_operand_error (const aarch64_opcode *opcode, int idx,
3833 enum aarch64_operand_error_kind kind,
3834 const char* error)
3835 {
3836 aarch64_operand_error info;
3837 memset(&info, 0, sizeof (info));
3838 info.index = idx;
3839 info.kind = kind;
3840 info.error = error;
3841 record_operand_error_info (opcode, &info);
3842 }
3843
3844 static void
3845 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3846 enum aarch64_operand_error_kind kind,
3847 const char* error, const int *extra_data)
3848 {
3849 aarch64_operand_error info;
3850 info.index = idx;
3851 info.kind = kind;
3852 info.error = error;
3853 info.data[0] = extra_data[0];
3854 info.data[1] = extra_data[1];
3855 info.data[2] = extra_data[2];
3856 record_operand_error_info (opcode, &info);
3857 }
3858
3859 static void
3860 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3861 const char* error, int lower_bound,
3862 int upper_bound)
3863 {
3864 int data[3] = {lower_bound, upper_bound, 0};
3865 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3866 error, data);
3867 }
3868
3869 /* Remove the operand error record for *OPCODE. */
3870 static void ATTRIBUTE_UNUSED
3871 remove_operand_error_record (const aarch64_opcode *opcode)
3872 {
3873 if (opcode_has_operand_error_p (opcode))
3874 {
3875 operand_error_record* record = operand_error_report.head;
3876 gas_assert (record != NULL && operand_error_report.tail != NULL);
3877 operand_error_report.head = record->next;
3878 record->next = free_opnd_error_record_nodes;
3879 free_opnd_error_record_nodes = record;
3880 if (operand_error_report.head == NULL)
3881 {
3882 gas_assert (operand_error_report.tail == record);
3883 operand_error_report.tail = NULL;
3884 }
3885 }
3886 }
3887
3888 /* Given the instruction in *INSTR, return the index of the best matched
3889 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3890
3891 Return -1 if there is no qualifier sequence; return the first match
3892    if multiple matches are found.  */
3893
3894 static int
3895 find_best_match (const aarch64_inst *instr,
3896 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3897 {
3898 int i, num_opnds, max_num_matched, idx;
3899
3900 num_opnds = aarch64_num_of_operands (instr->opcode);
3901 if (num_opnds == 0)
3902 {
3903 DEBUG_TRACE ("no operand");
3904 return -1;
3905 }
3906
3907 max_num_matched = 0;
3908 idx = -1;
3909
3910 /* For each pattern. */
3911 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3912 {
3913 int j, num_matched;
3914 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3915
3916       /* Most opcodes have far fewer patterns in the list.  */
3917 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3918 {
3919 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3920 if (i != 0 && idx == -1)
3921 /* If nothing has been matched, return the 1st sequence. */
3922 idx = 0;
3923 break;
3924 }
3925
3926 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3927 if (*qualifiers == instr->operands[j].qualifier)
3928 ++num_matched;
3929
3930 if (num_matched > max_num_matched)
3931 {
3932 max_num_matched = num_matched;
3933 idx = i;
3934 }
3935 }
3936
3937 DEBUG_TRACE ("return with %d", idx);
3938 return idx;
3939 }
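/* A sketch of the matching above: if QUALIFIERS_LIST holds the two
   sequences {W, W, W} and {X, X, X} and the parsed operands are all X
   qualified, the second sequence scores three matches against none and
   its index (1) is returned.  */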
3940
3941 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3942 corresponding operands in *INSTR. */
3943
3944 static inline void
3945 assign_qualifier_sequence (aarch64_inst *instr,
3946 const aarch64_opnd_qualifier_t *qualifiers)
3947 {
3948 int i = 0;
3949 int num_opnds = aarch64_num_of_operands (instr->opcode);
3950 gas_assert (num_opnds);
3951 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3952 instr->operands[i].qualifier = *qualifiers;
3953 }
3954
3955 /* Print operands for diagnostic purposes.  */
3956
3957 static void
3958 print_operands (char *buf, const aarch64_opcode *opcode,
3959 const aarch64_opnd_info *opnds)
3960 {
3961 int i;
3962
3963 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3964 {
3965 const size_t size = 128;
3966 char str[size];
3967
3968       /* We rely primarily on the opcode operand info; however, we also
3969          look into inst->operands to support the printing of an optional
3970          operand.
3971          The two operand codes should be the same in all cases, apart from
3972          when the operand can be optional.  */
3973 if (opcode->operands[i] == AARCH64_OPND_NIL
3974 || opnds[i].type == AARCH64_OPND_NIL)
3975 break;
3976
3977 /* Generate the operand string in STR. */
3978 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3979
3980 /* Delimiter. */
3981 if (str[0] != '\0')
3982 strcat (buf, i == 0 ? " " : ",");
3983
3984 /* Append the operand string. */
3985 strcat (buf, str);
3986 }
3987 }
3988
3989 /* Send to stderr a string as information. */
3990
3991 static void
3992 output_info (const char *format, ...)
3993 {
3994 char *file;
3995 unsigned int line;
3996 va_list args;
3997
3998 as_where (&file, &line);
3999 if (file)
4000 {
4001 if (line != 0)
4002 fprintf (stderr, "%s:%u: ", file, line);
4003 else
4004 fprintf (stderr, "%s: ", file);
4005 }
4006 fprintf (stderr, _("Info: "));
4007 va_start (args, format);
4008 vfprintf (stderr, format, args);
4009 va_end (args);
4010 (void) putc ('\n', stderr);
4011 }
4012
4013 /* Output one operand error record. */
4014
4015 static void
4016 output_operand_error_record (const operand_error_record *record, char *str)
4017 {
4018 const aarch64_operand_error *detail = &record->detail;
4019 int idx = detail->index;
4020 const aarch64_opcode *opcode = record->opcode;
4021 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4022 : AARCH64_OPND_NIL);
4023
4024 switch (detail->kind)
4025 {
4026 case AARCH64_OPDE_NIL:
4027 gas_assert (0);
4028 break;
4029
4030 case AARCH64_OPDE_SYNTAX_ERROR:
4031 case AARCH64_OPDE_RECOVERABLE:
4032 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4033 case AARCH64_OPDE_OTHER_ERROR:
4034       /* Use the prepared error message if there is one; otherwise use the
4035          operand description string to describe the error.  */
4036 if (detail->error != NULL)
4037 {
4038 if (idx < 0)
4039 as_bad (_("%s -- `%s'"), detail->error, str);
4040 else
4041 as_bad (_("%s at operand %d -- `%s'"),
4042 detail->error, idx + 1, str);
4043 }
4044 else
4045 {
4046 gas_assert (idx >= 0);
4047 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4048 aarch64_get_operand_desc (opd_code), str);
4049 }
4050 break;
4051
4052 case AARCH64_OPDE_INVALID_VARIANT:
4053 as_bad (_("operand mismatch -- `%s'"), str);
4054 if (verbose_error_p)
4055 {
4056 /* We will try to correct the erroneous instruction and also provide
4057 more information e.g. all other valid variants.
4058
4059 The string representation of the corrected instruction and other
4060 valid variants are generated by
4061
4062 1) obtaining the intermediate representation of the erroneous
4063 instruction;
4064 2) manipulating the IR, e.g. replacing the operand qualifier;
4065 3) printing out the instruction by calling the printer functions
4066 shared with the disassembler.
4067
4068 The limitation of this method is that the exact input assembly
4069 line cannot be accurately reproduced in some cases, for example an
4070 optional operand present in the actual assembly line will be
4071 omitted in the output; likewise for the optional syntax rules,
4072 e.g. the # before the immediate. Another limitation is that the
4073 assembly symbols and relocation operations in the assembly line
4074 currently cannot be printed out in the error report. Last but not
4075 least, when other errors co-exist with this one, the 'corrected'
4076 instruction may still be incorrect, e.g. given
4077 'ldnp h0,h1,[x0,#6]!'
4078 this diagnosis will provide the version:
4079 'ldnp s0,s1,[x0,#6]!'
4080 which is still not right. */
4081 size_t len = strlen (get_mnemonic_name (str));
4082 int i, qlf_idx;
4083 bfd_boolean result;
4084 const size_t size = 2048;
4085 char buf[size];
4086 aarch64_inst *inst_base = &inst.base;
4087 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4088
4089 /* Init inst. */
4090 reset_aarch64_instruction (&inst);
4091 inst_base->opcode = opcode;
4092
4093 /* Reset the error report so that there is no side effect on the
4094 following operand parsing. */
4095 init_operand_error_report ();
4096
4097 /* Fill inst. */
4098 result = parse_operands (str + len, opcode)
4099 && programmer_friendly_fixup (&inst);
4100 gas_assert (result);
4101 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4102 NULL, NULL);
4103 gas_assert (!result);
4104
4105 /* Find the best-matching qualifier sequence. */
4106 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4107 gas_assert (qlf_idx > -1);
4108
4109 /* Assign the qualifiers. */
4110 assign_qualifier_sequence (inst_base,
4111 opcode->qualifiers_list[qlf_idx]);
4112
4113 /* Print the hint. */
4114 output_info (_(" did you mean this?"));
4115 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4116 print_operands (buf, opcode, inst_base->operands);
4117 output_info (_(" %s"), buf);
4118
4119 /* Print out other variant(s) if there are any. */
4120 if (qlf_idx != 0 ||
4121 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4122 output_info (_(" other valid variant(s):"));
4123
4124 /* For each pattern. */
4125 qualifiers_list = opcode->qualifiers_list;
4126 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4127 {
4128 /* Most opcodes have far fewer patterns in the list; the first
4129 NIL qualifier sequence indicates the end of the list. */
4130 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4131 break;
4132
4133 if (i != qlf_idx)
4134 {
4135 /* Mnemonic name. */
4136 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4137
4138 /* Assign the qualifiers. */
4139 assign_qualifier_sequence (inst_base, *qualifiers_list);
4140
4141 /* Print instruction. */
4142 print_operands (buf, opcode, inst_base->operands);
4143
4144 output_info (_(" %s"), buf);
4145 }
4146 }
4147 }
4148 break;
4149
4150 case AARCH64_OPDE_OUT_OF_RANGE:
4151 if (detail->data[0] != detail->data[1])
4152 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4153 detail->error ? detail->error : _("immediate value"),
4154 detail->data[0], detail->data[1], idx + 1, str);
4155 else
4156 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4157 detail->error ? detail->error : _("immediate value"),
4158 detail->data[0], idx + 1, str);
4159 break;
4160
4161 case AARCH64_OPDE_REG_LIST:
4162 if (detail->data[0] == 1)
4163 as_bad (_("invalid number of registers in the list; "
4164 "only 1 register is expected at operand %d -- `%s'"),
4165 idx + 1, str);
4166 else
4167 as_bad (_("invalid number of registers in the list; "
4168 "%d registers are expected at operand %d -- `%s'"),
4169 detail->data[0], idx + 1, str);
4170 break;
4171
4172 case AARCH64_OPDE_UNALIGNED:
4173 as_bad (_("immediate value should be a multiple of "
4174 "%d at operand %d -- `%s'"),
4175 detail->data[0], idx + 1, str);
4176 break;
4177
4178 default:
4179 gas_assert (0);
4180 break;
4181 }
4182 }
4183
4184 /* Process and output the error message about the operand mismatching.
4185
4186 When this function is called, the operand error information has
4187 been collected for an assembly line and there will be multiple
4188 errors in the case of multiple instruction templates; output the
4189 error message that most closely describes the problem. */
4190
4191 static void
4192 output_operand_error_report (char *str)
4193 {
4194 int largest_error_pos;
4195 const char *msg = NULL;
4196 enum aarch64_operand_error_kind kind;
4197 operand_error_record *curr;
4198 operand_error_record *head = operand_error_report.head;
4199 operand_error_record *record = NULL;
4200
4201 /* No error to report. */
4202 if (head == NULL)
4203 return;
4204
4205 gas_assert (head != NULL && operand_error_report.tail != NULL);
4206
4207 /* Only one error. */
4208 if (head == operand_error_report.tail)
4209 {
4210 DEBUG_TRACE ("single opcode entry with error kind: %s",
4211 operand_mismatch_kind_names[head->detail.kind]);
4212 output_operand_error_record (head, str);
4213 return;
4214 }
4215
4216 /* Find the error kind of the highest severity. */
4217 DEBUG_TRACE ("multiple opcode entries with error kind");
4218 kind = AARCH64_OPDE_NIL;
4219 for (curr = head; curr != NULL; curr = curr->next)
4220 {
4221 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4222 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4223 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4224 kind = curr->detail.kind;
4225 }
4226 gas_assert (kind != AARCH64_OPDE_NIL);
4227
4228 /* Pick one of the errors of KIND to report. */
4229 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4230 for (curr = head; curr != NULL; curr = curr->next)
4231 {
4232 if (curr->detail.kind != kind)
4233 continue;
4234 /* If there are multiple errors, pick up the one with the highest
4235 mismatching operand index. In the case of multiple errors with
4236 the same highest operand index, pick the first one, or the first
4237 one with a non-NULL error message. */
4238 if (curr->detail.index > largest_error_pos
4239 || (curr->detail.index == largest_error_pos && msg == NULL
4240 && curr->detail.error != NULL))
4241 {
4242 largest_error_pos = curr->detail.index;
4243 record = curr;
4244 msg = record->detail.error;
4245 }
4246 }
4247
4248 gas_assert (largest_error_pos != -2 && record != NULL);
4249 DEBUG_TRACE ("Pick up error kind %s to report",
4250 operand_mismatch_kind_names[record->detail.kind]);
4251
4252 /* Output. */
4253 output_operand_error_record (record, str);
4254 }
4255 \f
4256 /* Write an AARCH64 instruction to buf - always little-endian. */
4257 static void
4258 put_aarch64_insn (char *buf, uint32_t insn)
4259 {
4260 unsigned char *where = (unsigned char *) buf;
4261 where[0] = insn;
4262 where[1] = insn >> 8;
4263 where[2] = insn >> 16;
4264 where[3] = insn >> 24;
4265 }
4266
4267 static uint32_t
4268 get_aarch64_insn (char *buf)
4269 {
4270 unsigned char *where = (unsigned char *) buf;
4271 uint32_t result;
4272 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4273 return result;
4274 }
4275
4276 static void
4277 output_inst (struct aarch64_inst *new_inst)
4278 {
4279 char *to = NULL;
4280
4281 to = frag_more (INSN_SIZE);
4282
4283 frag_now->tc_frag_data.recorded = 1;
4284
4285 put_aarch64_insn (to, inst.base.value);
4286
4287 if (inst.reloc.type != BFD_RELOC_UNUSED)
4288 {
4289 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4290 INSN_SIZE, &inst.reloc.exp,
4291 inst.reloc.pc_rel,
4292 inst.reloc.type);
4293 DEBUG_TRACE ("Prepared relocation fix up");
4294 /* Don't check the addend value against the instruction size,
4295 that's the job of our code in md_apply_fix(). */
4296 fixp->fx_no_overflow = 1;
4297 if (new_inst != NULL)
4298 fixp->tc_fix_data.inst = new_inst;
4299 if (aarch64_gas_internal_fixup_p ())
4300 {
4301 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4302 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4303 fixp->fx_addnumber = inst.reloc.flags;
4304 }
4305 }
4306
4307 dwarf2_emit_insn (INSN_SIZE);
4308 }
4309
4310 /* Link together opcodes of the same name. */
4311
4312 struct templates
4313 {
4314 aarch64_opcode *opcode;
4315 struct templates *next;
4316 };
4317
4318 typedef struct templates templates;
4319
4320 static templates *
4321 lookup_mnemonic (const char *start, int len)
4322 {
4323 templates *templ = NULL;
4324
4325 templ = hash_find_n (aarch64_ops_hsh, start, len);
4326 return templ;
4327 }
4328
4329 /* Subroutine of md_assemble, responsible for looking up the primary
4330 opcode from the mnemonic the user wrote. STR points to the
4331 beginning of the mnemonic. */
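/* For example, for "b.eq" the mnemonic proper is "b"; the two condition
   characters after the '.' are looked up in aarch64_cond_hsh, inst.cond
   is set to the EQ condition value, and the opcode table lookup is then
   done on the internal name "b.c".  */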
4332
4333 static templates *
4334 opcode_lookup (char **str)
4335 {
4336 char *end, *base;
4337 const aarch64_cond *cond;
4338 char condname[16];
4339 int len;
4340
4341 /* Scan up to the end of the mnemonic, which must end in white space,
4342 '.', or end of string. */
4343 for (base = end = *str; is_part_of_name(*end); end++)
4344 if (*end == '.')
4345 break;
4346
4347 if (end == base)
4348 return 0;
4349
4350 inst.cond = COND_ALWAYS;
4351
4352 /* Handle a possible condition. */
4353 if (end[0] == '.')
4354 {
4355 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4356 if (cond)
4357 {
4358 inst.cond = cond->value;
4359 *str = end + 3;
4360 }
4361 else
4362 {
4363 *str = end;
4364 return 0;
4365 }
4366 }
4367 else
4368 *str = end;
4369
4370 len = end - base;
4371
4372 if (inst.cond == COND_ALWAYS)
4373 {
4374 /* Look for unaffixed mnemonic. */
4375 return lookup_mnemonic (base, len);
4376 }
4377 else if (len <= 13)
4378 {
4379 /* Append ".c" to the mnemonic if conditional. */
4380 memcpy (condname, base, len);
4381 memcpy (condname + len, ".c", 2);
4382 base = condname;
4383 len += 2;
4384 return lookup_mnemonic (base, len);
4385 }
4386
4387 return NULL;
4388 }
4389
4390 /* Internal helper routine converting a vector neon_type_el structure
4391 *VECTYPE to a corresponding operand qualifier. */
4392
4393 static inline aarch64_opnd_qualifier_t
4394 vectype_to_qualifier (const struct neon_type_el *vectype)
4395 {
4396 /* Element size in bytes indexed by neon_el_type. */
4397 const unsigned char ele_size[5]
4398 = {1, 2, 4, 8, 16};
4399
4400 if (!vectype->defined || vectype->type == NT_invtype)
4401 goto vectype_conversion_fail;
4402
4403 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4404
4405 if (vectype->defined & NTA_HASINDEX)
4406 /* Vector element register. */
4407 return AARCH64_OPND_QLF_S_B + vectype->type;
4408 else
4409 {
4410 /* Vector register. */
4411 int reg_size = ele_size[vectype->type] * vectype->width;
4412 unsigned offset;
4413 if (reg_size != 16 && reg_size != 8)
4414 goto vectype_conversion_fail;
4415 /* The conversion is calculated based on the relation of the order of
4416 qualifiers to the vector element size and vector register size. */
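/* For example, .4h has element size 2 and width 4, giving reg_size 8
   and offset (NT_h << 1) + 0 = 2, i.e. AARCH64_OPND_QLF_V_4H; .2d has
   element size 8 and width 2, giving reg_size 16 and offset
   (NT_d << 1) + 1 = 7, i.e. AARCH64_OPND_QLF_V_2D; .1q maps directly
   to offset 8, i.e. AARCH64_OPND_QLF_V_1Q.  */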
4417 offset = (vectype->type == NT_q)
4418 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4419 gas_assert (offset <= 8);
4420 return AARCH64_OPND_QLF_V_8B + offset;
4421 }
4422
4423 vectype_conversion_fail:
4424 first_error (_("bad vector arrangement type"));
4425 return AARCH64_OPND_QLF_NIL;
4426 }
4427
4428 /* Process an optional operand that has been omitted from the assembly line.
4429 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4430 instruction's opcode entry while IDX is the index of this omitted operand.
4431 */
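/* For example, "ret" with its optional Rn omitted gets the opcode's
   default value for that operand (register 30, i.e. X30) filled in via
   get_optional_operand_default_value.  */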
4432
4433 static void
4434 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4435 int idx, aarch64_opnd_info *operand)
4436 {
4437 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4438 gas_assert (optional_operand_p (opcode, idx));
4439 gas_assert (!operand->present);
4440
4441 switch (type)
4442 {
4443 case AARCH64_OPND_Rd:
4444 case AARCH64_OPND_Rn:
4445 case AARCH64_OPND_Rm:
4446 case AARCH64_OPND_Rt:
4447 case AARCH64_OPND_Rt2:
4448 case AARCH64_OPND_Rs:
4449 case AARCH64_OPND_Ra:
4450 case AARCH64_OPND_Rt_SYS:
4451 case AARCH64_OPND_Rd_SP:
4452 case AARCH64_OPND_Rn_SP:
4453 case AARCH64_OPND_Fd:
4454 case AARCH64_OPND_Fn:
4455 case AARCH64_OPND_Fm:
4456 case AARCH64_OPND_Fa:
4457 case AARCH64_OPND_Ft:
4458 case AARCH64_OPND_Ft2:
4459 case AARCH64_OPND_Sd:
4460 case AARCH64_OPND_Sn:
4461 case AARCH64_OPND_Sm:
4462 case AARCH64_OPND_Vd:
4463 case AARCH64_OPND_Vn:
4464 case AARCH64_OPND_Vm:
4465 case AARCH64_OPND_VdD1:
4466 case AARCH64_OPND_VnD1:
4467 operand->reg.regno = default_value;
4468 break;
4469
4470 case AARCH64_OPND_Ed:
4471 case AARCH64_OPND_En:
4472 case AARCH64_OPND_Em:
4473 operand->reglane.regno = default_value;
4474 break;
4475
4476 case AARCH64_OPND_IDX:
4477 case AARCH64_OPND_BIT_NUM:
4478 case AARCH64_OPND_IMMR:
4479 case AARCH64_OPND_IMMS:
4480 case AARCH64_OPND_SHLL_IMM:
4481 case AARCH64_OPND_IMM_VLSL:
4482 case AARCH64_OPND_IMM_VLSR:
4483 case AARCH64_OPND_CCMP_IMM:
4484 case AARCH64_OPND_FBITS:
4485 case AARCH64_OPND_UIMM4:
4486 case AARCH64_OPND_UIMM3_OP1:
4487 case AARCH64_OPND_UIMM3_OP2:
4488 case AARCH64_OPND_IMM:
4489 case AARCH64_OPND_WIDTH:
4490 case AARCH64_OPND_UIMM7:
4491 case AARCH64_OPND_NZCV:
4492 operand->imm.value = default_value;
4493 break;
4494
4495 case AARCH64_OPND_EXCEPTION:
4496 inst.reloc.type = BFD_RELOC_UNUSED;
4497 break;
4498
4499 case AARCH64_OPND_BARRIER_ISB:
4500 operand->barrier = aarch64_barrier_options + default_value;
4501 break;
4502 default:
4503 break;
4504 }
4505 }
4506
4507 /* Process the relocation type for move wide instructions.
4508 Return TRUE on success; otherwise return FALSE. */
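/* For example, "movz x0, #:abs_g1:sym" arrives here with
   BFD_RELOC_AARCH64_MOVW_G1 and gets an implicit LSL #16, while a
   G2 or G3 relocation on a 32-bit register is rejected below.  */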
4509
4510 static bfd_boolean
4511 process_movw_reloc_info (void)
4512 {
4513 int is32;
4514 unsigned shift;
4515
4516 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4517
4518 if (inst.base.opcode->op == OP_MOVK)
4519 switch (inst.reloc.type)
4520 {
4521 case BFD_RELOC_AARCH64_MOVW_G0_S:
4522 case BFD_RELOC_AARCH64_MOVW_G1_S:
4523 case BFD_RELOC_AARCH64_MOVW_G2_S:
4524 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4525 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4526 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4527 set_syntax_error
4528 (_("the specified relocation type is not allowed for MOVK"));
4529 return FALSE;
4530 default:
4531 break;
4532 }
4533
4534 switch (inst.reloc.type)
4535 {
4536 case BFD_RELOC_AARCH64_MOVW_G0:
4537 case BFD_RELOC_AARCH64_MOVW_G0_S:
4538 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4539 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4540 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4541 shift = 0;
4542 break;
4543 case BFD_RELOC_AARCH64_MOVW_G1:
4544 case BFD_RELOC_AARCH64_MOVW_G1_S:
4545 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4546 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4547 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4548 shift = 16;
4549 break;
4550 case BFD_RELOC_AARCH64_MOVW_G2:
4551 case BFD_RELOC_AARCH64_MOVW_G2_S:
4552 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4553 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4554 if (is32)
4555 {
4556 set_fatal_syntax_error
4557 (_("the specified relocation type is not allowed for 32-bit "
4558 "register"));
4559 return FALSE;
4560 }
4561 shift = 32;
4562 break;
4563 case BFD_RELOC_AARCH64_MOVW_G3:
4564 if (is32)
4565 {
4566 set_fatal_syntax_error
4567 (_("the specified relocation type is not allowed for 32-bit "
4568 "register"));
4569 return FALSE;
4570 }
4571 shift = 48;
4572 break;
4573 default:
4574 /* More cases should be added when more MOVW-related relocation types
4575 are supported in GAS. */
4576 gas_assert (aarch64_gas_internal_fixup_p ());
4577 /* The shift amount should have already been set by the parser. */
4578 return TRUE;
4579 }
4580 inst.base.operands[1].shifter.amount = shift;
4581 return TRUE;
4582 }
4583
4584 /* A primitive base-2 log calculator. */
4585
4586 static inline unsigned int
4587 get_logsz (unsigned int size)
4588 {
4589 const unsigned char ls[16] =
4590 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4591 if (size > 16)
4592 {
4593 gas_assert (0);
4594 return -1;
4595 }
4596 gas_assert (ls[size - 1] != (unsigned char)-1);
4597 return ls[size - 1];
4598 }
4599
4600 /* Determine and return the real reloc type code for an instruction
4601 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
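/* For example, in "ldr x2, [x0, #:lo12:sym]" the X destination register
   implies an 8-byte access, so logsz is 3 and the reloc becomes
   BFD_RELOC_AARCH64_LDST64_LO12.  */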
4602
4603 static inline bfd_reloc_code_real_type
4604 ldst_lo12_determine_real_reloc_type (void)
4605 {
4606 int logsz;
4607 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4608 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4609
4610 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4611 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4612 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4613 BFD_RELOC_AARCH64_LDST128_LO12
4614 };
4615
4616 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4617 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4618
4619 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4620 opd1_qlf =
4621 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4622 1, opd0_qlf, 0);
4623 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4624
4625 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4626 gas_assert (logsz >= 0 && logsz <= 4);
4627
4628 return reloc_ldst_lo12[logsz];
4629 }
4630
4631 /* Check whether a register list REGINFO is valid. The registers must be
4632 numbered in increasing order (modulo 32), in increments of one or two.
4633
4634 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4635 increments of two.
4636
4637 Return FALSE if such a register list is invalid, otherwise return TRUE. */
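/* REGINFO is laid out by parse_neon_reg_list: bits [1:0] hold the number
   of registers minus one, and each successive 5-bit field above them holds
   a register number, lowest register first.  For example { v1.4s, v2.4s }
   is represented as (2 << 7) | (1 << 2) | 1.  */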
4638
4639 static bfd_boolean
4640 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4641 {
4642 uint32_t i, nb_regs, prev_regno, incr;
4643
4644 nb_regs = 1 + (reginfo & 0x3);
4645 reginfo >>= 2;
4646 prev_regno = reginfo & 0x1f;
4647 incr = accept_alternate ? 2 : 1;
4648
4649 for (i = 1; i < nb_regs; ++i)
4650 {
4651 uint32_t curr_regno;
4652 reginfo >>= 5;
4653 curr_regno = reginfo & 0x1f;
4654 if (curr_regno != ((prev_regno + incr) & 0x1f))
4655 return FALSE;
4656 prev_regno = curr_regno;
4657 }
4658
4659 return TRUE;
4660 }
4661
4662 /* Generic instruction operand parser. This does no encoding and no
4663 semantic validation; it merely squirrels values away in the inst
4664 structure. Returns TRUE or FALSE depending on whether the
4665 specified grammar matched. */
4666
4667 static bfd_boolean
4668 parse_operands (char *str, const aarch64_opcode *opcode)
4669 {
4670 int i;
4671 char *backtrack_pos = 0;
4672 const enum aarch64_opnd *operands = opcode->operands;
4673
4674 clear_error ();
4675 skip_whitespace (str);
4676
4677 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4678 {
4679 int64_t val;
4680 int isreg32, isregzero;
4681 int comma_skipped_p = 0;
4682 aarch64_reg_type rtype;
4683 struct neon_type_el vectype;
4684 aarch64_opnd_info *info = &inst.base.operands[i];
4685
4686 DEBUG_TRACE ("parse operand %d", i);
4687
4688 /* Assign the operand code. */
4689 info->type = operands[i];
4690
4691 if (optional_operand_p (opcode, i))
4692 {
4693 /* Remember where we are in case we need to backtrack. */
4694 gas_assert (!backtrack_pos);
4695 backtrack_pos = str;
4696 }
4697
4698 /* Expect a comma between operands; the backtrack mechanism will take
4699 care of the case of an omitted optional operand. */
4700 if (i > 0 && ! skip_past_char (&str, ','))
4701 {
4702 set_syntax_error (_("comma expected between operands"));
4703 goto failure;
4704 }
4705 else
4706 comma_skipped_p = 1;
4707
4708 switch (operands[i])
4709 {
4710 case AARCH64_OPND_Rd:
4711 case AARCH64_OPND_Rn:
4712 case AARCH64_OPND_Rm:
4713 case AARCH64_OPND_Rt:
4714 case AARCH64_OPND_Rt2:
4715 case AARCH64_OPND_Rs:
4716 case AARCH64_OPND_Ra:
4717 case AARCH64_OPND_Rt_SYS:
4718 case AARCH64_OPND_PAIRREG:
4719 po_int_reg_or_fail (1, 0);
4720 break;
4721
4722 case AARCH64_OPND_Rd_SP:
4723 case AARCH64_OPND_Rn_SP:
4724 po_int_reg_or_fail (0, 1);
4725 break;
4726
4727 case AARCH64_OPND_Rm_EXT:
4728 case AARCH64_OPND_Rm_SFT:
4729 po_misc_or_fail (parse_shifter_operand
4730 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4731 ? SHIFTED_ARITH_IMM
4732 : SHIFTED_LOGIC_IMM)));
4733 if (!info->shifter.operator_present)
4734 {
4735 /* Default to LSL if not present. Libopcodes prefers shifter
4736 kind to be explicit. */
4737 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4738 info->shifter.kind = AARCH64_MOD_LSL;
4739 /* For Rm_EXT, libopcodes will carry out further check on whether
4740 or not stack pointer is used in the instruction (Recall that
4741 "the extend operator is not optional unless at least one of
4742 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4743 }
4744 break;
4745
4746 case AARCH64_OPND_Fd:
4747 case AARCH64_OPND_Fn:
4748 case AARCH64_OPND_Fm:
4749 case AARCH64_OPND_Fa:
4750 case AARCH64_OPND_Ft:
4751 case AARCH64_OPND_Ft2:
4752 case AARCH64_OPND_Sd:
4753 case AARCH64_OPND_Sn:
4754 case AARCH64_OPND_Sm:
4755 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4756 if (val == PARSE_FAIL)
4757 {
4758 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4759 goto failure;
4760 }
4761 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4762
4763 info->reg.regno = val;
4764 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4765 break;
4766
4767 case AARCH64_OPND_Vd:
4768 case AARCH64_OPND_Vn:
4769 case AARCH64_OPND_Vm:
4770 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4771 if (val == PARSE_FAIL)
4772 {
4773 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4774 goto failure;
4775 }
4776 if (vectype.defined & NTA_HASINDEX)
4777 goto failure;
4778
4779 info->reg.regno = val;
4780 info->qualifier = vectype_to_qualifier (&vectype);
4781 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4782 goto failure;
4783 break;
4784
4785 case AARCH64_OPND_VdD1:
4786 case AARCH64_OPND_VnD1:
4787 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4788 if (val == PARSE_FAIL)
4789 {
4790 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4791 goto failure;
4792 }
4793 if (vectype.type != NT_d || vectype.index != 1)
4794 {
4795 set_fatal_syntax_error
4796 (_("the top half of a 128-bit FP/SIMD register is expected"));
4797 goto failure;
4798 }
4799 info->reg.regno = val;
4800 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
4801 here; this is correct for the purpose of encoding/decoding, since
4802 only the register number is explicitly encoded in the related
4803 instructions, although this appears a bit hacky. */
4804 info->qualifier = AARCH64_OPND_QLF_S_D;
4805 break;
4806
4807 case AARCH64_OPND_Ed:
4808 case AARCH64_OPND_En:
4809 case AARCH64_OPND_Em:
4810 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4811 if (val == PARSE_FAIL)
4812 {
4813 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4814 goto failure;
4815 }
4816 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4817 goto failure;
4818
4819 info->reglane.regno = val;
4820 info->reglane.index = vectype.index;
4821 info->qualifier = vectype_to_qualifier (&vectype);
4822 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4823 goto failure;
4824 break;
4825
4826 case AARCH64_OPND_LVn:
4827 case AARCH64_OPND_LVt:
4828 case AARCH64_OPND_LVt_AL:
4829 case AARCH64_OPND_LEt:
4830 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4831 goto failure;
4832 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4833 {
4834 set_fatal_syntax_error (_("invalid register list"));
4835 goto failure;
4836 }
4837 info->reglist.first_regno = (val >> 2) & 0x1f;
4838 info->reglist.num_regs = (val & 0x3) + 1;
4839 if (operands[i] == AARCH64_OPND_LEt)
4840 {
4841 if (!(vectype.defined & NTA_HASINDEX))
4842 goto failure;
4843 info->reglist.has_index = 1;
4844 info->reglist.index = vectype.index;
4845 }
4846 else if (!(vectype.defined & NTA_HASTYPE))
4847 goto failure;
4848 info->qualifier = vectype_to_qualifier (&vectype);
4849 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4850 goto failure;
4851 break;
4852
4853 case AARCH64_OPND_Cn:
4854 case AARCH64_OPND_Cm:
4855 po_reg_or_fail (REG_TYPE_CN);
4856 if (val > 15)
4857 {
4858 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4859 goto failure;
4860 }
4861 inst.base.operands[i].reg.regno = val;
4862 break;
4863
4864 case AARCH64_OPND_SHLL_IMM:
4865 case AARCH64_OPND_IMM_VLSR:
4866 po_imm_or_fail (1, 64);
4867 info->imm.value = val;
4868 break;
4869
4870 case AARCH64_OPND_CCMP_IMM:
4871 case AARCH64_OPND_FBITS:
4872 case AARCH64_OPND_UIMM4:
4873 case AARCH64_OPND_UIMM3_OP1:
4874 case AARCH64_OPND_UIMM3_OP2:
4875 case AARCH64_OPND_IMM_VLSL:
4876 case AARCH64_OPND_IMM:
4877 case AARCH64_OPND_WIDTH:
4878 po_imm_nc_or_fail ();
4879 info->imm.value = val;
4880 break;
4881
4882 case AARCH64_OPND_UIMM7:
4883 po_imm_or_fail (0, 127);
4884 info->imm.value = val;
4885 break;
4886
4887 case AARCH64_OPND_IDX:
4888 case AARCH64_OPND_BIT_NUM:
4889 case AARCH64_OPND_IMMR:
4890 case AARCH64_OPND_IMMS:
4891 po_imm_or_fail (0, 63);
4892 info->imm.value = val;
4893 break;
4894
4895 case AARCH64_OPND_IMM0:
4896 po_imm_nc_or_fail ();
4897 if (val != 0)
4898 {
4899 set_fatal_syntax_error (_("immediate zero expected"));
4900 goto failure;
4901 }
4902 info->imm.value = 0;
4903 break;
4904
4905 case AARCH64_OPND_FPIMM0:
4906 {
4907 int qfloat;
4908 bfd_boolean res1 = FALSE, res2 = FALSE;
4909 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4910 it is probably not worth the effort to support it. */
4911 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4912 && !(res2 = parse_constant_immediate (&str, &val)))
4913 goto failure;
4914 if ((res1 && qfloat == 0) || (res2 && val == 0))
4915 {
4916 info->imm.value = 0;
4917 info->imm.is_fp = 1;
4918 break;
4919 }
4920 set_fatal_syntax_error (_("immediate zero expected"));
4921 goto failure;
4922 }
4923
4924 case AARCH64_OPND_IMM_MOV:
4925 {
4926 char *saved = str;
4927 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
4928 reg_name_p (str, REG_TYPE_VN))
4929 goto failure;
4930 str = saved;
4931 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4932 GE_OPT_PREFIX, 1));
4933 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4934 later. fix_mov_imm_insn will try to determine a machine
4935 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4936 message if the immediate cannot be moved by a single
4937 instruction. */
4938 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4939 inst.base.operands[i].skip = 1;
4940 }
4941 break;
4942
4943 case AARCH64_OPND_SIMD_IMM:
4944 case AARCH64_OPND_SIMD_IMM_SFT:
4945 if (! parse_big_immediate (&str, &val))
4946 goto failure;
4947 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4948 /* addr_off_p */ 0,
4949 /* need_libopcodes_p */ 1,
4950 /* skip_p */ 1);
4951 /* Parse shift.
4952 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4953 shift, we don't check it here; we leave the checking to
4954 the libopcodes (operand_general_constraint_met_p). By
4955 doing this, we achieve better diagnostics. */
4956 if (skip_past_comma (&str)
4957 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4958 goto failure;
4959 if (!info->shifter.operator_present
4960 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4961 {
4962 /* Default to LSL if not present. Libopcodes prefers shifter
4963 kind to be explicit. */
4964 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4965 info->shifter.kind = AARCH64_MOD_LSL;
4966 }
4967 break;
4968
4969 case AARCH64_OPND_FPIMM:
4970 case AARCH64_OPND_SIMD_FPIMM:
4971 {
4972 int qfloat;
4973 bfd_boolean dp_p
4974 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
4975 == 8);
4976 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
4977 goto failure;
4978 if (qfloat == 0)
4979 {
4980 set_fatal_syntax_error (_("invalid floating-point constant"));
4981 goto failure;
4982 }
4983 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4984 inst.base.operands[i].imm.is_fp = 1;
4985 }
4986 break;
4987
4988 case AARCH64_OPND_LIMM:
4989 po_misc_or_fail (parse_shifter_operand (&str, info,
4990 SHIFTED_LOGIC_IMM));
4991 if (info->shifter.operator_present)
4992 {
4993 set_fatal_syntax_error
4994 (_("shift not allowed for bitmask immediate"));
4995 goto failure;
4996 }
4997 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4998 /* addr_off_p */ 0,
4999 /* need_libopcodes_p */ 1,
5000 /* skip_p */ 1);
5001 break;
5002
5003 case AARCH64_OPND_AIMM:
5004 if (opcode->op == OP_ADD)
5005 /* ADD may have relocation types. */
5006 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5007 SHIFTED_ARITH_IMM));
5008 else
5009 po_misc_or_fail (parse_shifter_operand (&str, info,
5010 SHIFTED_ARITH_IMM));
5011 switch (inst.reloc.type)
5012 {
5013 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5014 info->shifter.amount = 12;
5015 break;
5016 case BFD_RELOC_UNUSED:
5017 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5018 if (info->shifter.kind != AARCH64_MOD_NONE)
5019 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5020 inst.reloc.pc_rel = 0;
5021 break;
5022 default:
5023 break;
5024 }
5025 info->imm.value = 0;
5026 if (!info->shifter.operator_present)
5027 {
5028 /* Default to LSL if not present. Libopcodes prefers shifter
5029 kind to be explicit. */
5030 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5031 info->shifter.kind = AARCH64_MOD_LSL;
5032 }
5033 break;
5034
5035 case AARCH64_OPND_HALF:
5036 {
5037 /* #<imm16> or relocation. */
5038 int internal_fixup_p;
5039 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5040 if (internal_fixup_p)
5041 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5042 skip_whitespace (str);
5043 if (skip_past_comma (&str))
5044 {
5045 /* {, LSL #<shift>} */
5046 if (! aarch64_gas_internal_fixup_p ())
5047 {
5048 set_fatal_syntax_error (_("can't mix relocation modifier "
5049 "with explicit shift"));
5050 goto failure;
5051 }
5052 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5053 }
5054 else
5055 inst.base.operands[i].shifter.amount = 0;
5056 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5057 inst.base.operands[i].imm.value = 0;
5058 if (! process_movw_reloc_info ())
5059 goto failure;
5060 }
5061 break;
5062
5063 case AARCH64_OPND_EXCEPTION:
5064 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
5065 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5066 /* addr_off_p */ 0,
5067 /* need_libopcodes_p */ 0,
5068 /* skip_p */ 1);
5069 break;
5070
5071 case AARCH64_OPND_NZCV:
5072 {
5073 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5074 if (nzcv != NULL)
5075 {
5076 str += 4;
5077 info->imm.value = nzcv->value;
5078 break;
5079 }
5080 po_imm_or_fail (0, 15);
5081 info->imm.value = val;
5082 }
5083 break;
5084
5085 case AARCH64_OPND_COND:
5086 case AARCH64_OPND_COND1:
5087 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5088 str += 2;
5089 if (info->cond == NULL)
5090 {
5091 set_syntax_error (_("invalid condition"));
5092 goto failure;
5093 }
5094 else if (operands[i] == AARCH64_OPND_COND1
5095 && (info->cond->value & 0xe) == 0xe)
5096 {
5097 /* Do not allow AL or NV. */
5098 set_default_error ();
5099 goto failure;
5100 }
5101 break;
5102
5103 case AARCH64_OPND_ADDR_ADRP:
5104 po_misc_or_fail (parse_adrp (&str));
5105 /* Clear the value as operand needs to be relocated. */
5106 info->imm.value = 0;
5107 break;
5108
5109 case AARCH64_OPND_ADDR_PCREL14:
5110 case AARCH64_OPND_ADDR_PCREL19:
5111 case AARCH64_OPND_ADDR_PCREL21:
5112 case AARCH64_OPND_ADDR_PCREL26:
5113 po_misc_or_fail (parse_address_reloc (&str, info));
5114 if (!info->addr.pcrel)
5115 {
5116 set_syntax_error (_("invalid pc-relative address"));
5117 goto failure;
5118 }
5119 if (inst.gen_lit_pool
5120 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5121 {
5122 /* Only permit "=value" in the literal load instructions.
5123 The literal will be generated by programmer_friendly_fixup. */
5124 set_syntax_error (_("invalid use of \"=immediate\""));
5125 goto failure;
5126 }
5127 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5128 {
5129 set_syntax_error (_("unrecognized relocation suffix"));
5130 goto failure;
5131 }
5132 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5133 {
5134 info->imm.value = inst.reloc.exp.X_add_number;
5135 inst.reloc.type = BFD_RELOC_UNUSED;
5136 }
5137 else
5138 {
5139 info->imm.value = 0;
5140 if (inst.reloc.type == BFD_RELOC_UNUSED)
5141 switch (opcode->iclass)
5142 {
5143 case compbranch:
5144 case condbranch:
5145 /* e.g. CBZ or B.COND */
5146 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5147 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5148 break;
5149 case testbranch:
5150 /* e.g. TBZ */
5151 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5152 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5153 break;
5154 case branch_imm:
5155 /* e.g. B or BL */
5156 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5157 inst.reloc.type =
5158 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5159 : BFD_RELOC_AARCH64_JUMP26;
5160 break;
5161 case loadlit:
5162 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5163 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5164 break;
5165 case pcreladdr:
5166 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5167 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5168 break;
5169 default:
5170 gas_assert (0);
5171 abort ();
5172 }
5173 inst.reloc.pc_rel = 1;
5174 }
5175 break;
5176
5177 case AARCH64_OPND_ADDR_SIMPLE:
5178 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5179 /* [<Xn|SP>{, #<simm>}] */
5180 po_char_or_fail ('[');
5181 po_reg_or_fail (REG_TYPE_R64_SP);
5182 /* Accept optional ", #0". */
5183 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5184 && skip_past_char (&str, ','))
5185 {
5186 skip_past_char (&str, '#');
5187 if (! skip_past_char (&str, '0'))
5188 {
5189 set_fatal_syntax_error
5190 (_("the optional immediate offset can only be 0"));
5191 goto failure;
5192 }
5193 }
5194 po_char_or_fail (']');
5195 info->addr.base_regno = val;
5196 break;
5197
5198 case AARCH64_OPND_ADDR_REGOFF:
5199 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5200 po_misc_or_fail (parse_address (&str, info, 0));
5201 if (info->addr.pcrel || !info->addr.offset.is_reg
5202 || !info->addr.preind || info->addr.postind
5203 || info->addr.writeback)
5204 {
5205 set_syntax_error (_("invalid addressing mode"));
5206 goto failure;
5207 }
5208 if (!info->shifter.operator_present)
5209 {
5210 /* Default to LSL if not present. Libopcodes prefers shifter
5211 kind to be explicit. */
5212 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5213 info->shifter.kind = AARCH64_MOD_LSL;
5214 }
5215 /* Qualifier to be deduced by libopcodes. */
5216 break;
5217
5218 case AARCH64_OPND_ADDR_SIMM7:
5219 po_misc_or_fail (parse_address (&str, info, 0));
5220 if (info->addr.pcrel || info->addr.offset.is_reg
5221 || (!info->addr.preind && !info->addr.postind))
5222 {
5223 set_syntax_error (_("invalid addressing mode"));
5224 goto failure;
5225 }
5226 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5227 /* addr_off_p */ 1,
5228 /* need_libopcodes_p */ 1,
5229 /* skip_p */ 0);
5230 break;
5231
5232 case AARCH64_OPND_ADDR_SIMM9:
5233 case AARCH64_OPND_ADDR_SIMM9_2:
5234 po_misc_or_fail (parse_address_reloc (&str, info));
5235 if (info->addr.pcrel || info->addr.offset.is_reg
5236 || (!info->addr.preind && !info->addr.postind)
5237 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5238 && info->addr.writeback))
5239 {
5240 set_syntax_error (_("invalid addressing mode"));
5241 goto failure;
5242 }
5243 if (inst.reloc.type != BFD_RELOC_UNUSED)
5244 {
5245 set_syntax_error (_("relocation not allowed"));
5246 goto failure;
5247 }
5248 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5249 /* addr_off_p */ 1,
5250 /* need_libopcodes_p */ 1,
5251 /* skip_p */ 0);
5252 break;
5253
5254 case AARCH64_OPND_ADDR_UIMM12:
5255 po_misc_or_fail (parse_address_reloc (&str, info));
5256 if (info->addr.pcrel || info->addr.offset.is_reg
5257 || !info->addr.preind || info->addr.writeback)
5258 {
5259 set_syntax_error (_("invalid addressing mode"));
5260 goto failure;
5261 }
5262 if (inst.reloc.type == BFD_RELOC_UNUSED)
5263 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5264 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5265 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5266 /* Leave qualifier to be determined by libopcodes. */
5267 break;
5268
5269 case AARCH64_OPND_SIMD_ADDR_POST:
5270 /* [<Xn|SP>], <Xm|#<amount>> */
5271 po_misc_or_fail (parse_address (&str, info, 1));
5272 if (!info->addr.postind || !info->addr.writeback)
5273 {
5274 set_syntax_error (_("invalid addressing mode"));
5275 goto failure;
5276 }
5277 if (!info->addr.offset.is_reg)
5278 {
5279 if (inst.reloc.exp.X_op == O_constant)
5280 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5281 else
5282 {
5283 set_fatal_syntax_error
5284 (_("writeback value should be an immediate constant"));
5285 goto failure;
5286 }
5287 }
5288 /* No qualifier. */
5289 break;
5290
5291 case AARCH64_OPND_SYSREG:
5292 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5293 == PARSE_FAIL)
5294 {
5295 set_syntax_error (_("unknown or missing system register name"));
5296 goto failure;
5297 }
5298 inst.base.operands[i].sysreg = val;
5299 break;
5300
5301 case AARCH64_OPND_PSTATEFIELD:
5302 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5303 == PARSE_FAIL)
5304 {
5305 set_syntax_error (_("unknown or missing PSTATE field name"));
5306 goto failure;
5307 }
5308 inst.base.operands[i].pstatefield = val;
5309 break;
5310
5311 case AARCH64_OPND_SYSREG_IC:
5312 inst.base.operands[i].sysins_op =
5313 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5314 goto sys_reg_ins;
5315 case AARCH64_OPND_SYSREG_DC:
5316 inst.base.operands[i].sysins_op =
5317 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5318 goto sys_reg_ins;
5319 case AARCH64_OPND_SYSREG_AT:
5320 inst.base.operands[i].sysins_op =
5321 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5322 goto sys_reg_ins;
5323 case AARCH64_OPND_SYSREG_TLBI:
5324 inst.base.operands[i].sysins_op =
5325 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5326 sys_reg_ins:
5327 if (inst.base.operands[i].sysins_op == NULL)
5328 {
5329 set_fatal_syntax_error ( _("unknown or missing operation name"));
5330 goto failure;
5331 }
5332 break;
5333
5334 case AARCH64_OPND_BARRIER:
5335 case AARCH64_OPND_BARRIER_ISB:
5336 val = parse_barrier (&str);
5337 if (val != PARSE_FAIL
5338 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5339 {
5340 /* ISB only accepts the option name 'sy'. */
5341 set_syntax_error
5342 (_("the specified option is not accepted in ISB"));
5343 /* Turn off backtrack as this optional operand is present. */
5344 backtrack_pos = 0;
5345 goto failure;
5346 }
5347 /* This is an extension to accept a 0..15 immediate. */
5348 if (val == PARSE_FAIL)
5349 po_imm_or_fail (0, 15);
5350 info->barrier = aarch64_barrier_options + val;
5351 break;
5352
5353 case AARCH64_OPND_PRFOP:
5354 val = parse_pldop (&str);
5355 /* This is an extension to accept a 0..31 immediate. */
5356 if (val == PARSE_FAIL)
5357 po_imm_or_fail (0, 31);
5358 inst.base.operands[i].prfop = aarch64_prfops + val;
5359 break;
5360
5361 default:
5362 as_fatal (_("unhandled operand code %d"), operands[i]);
5363 }
5364
5365 /* If we get here, this operand was successfully parsed. */
5366 inst.base.operands[i].present = 1;
5367 continue;
5368
5369 failure:
5370 /* The parse routine should already have set the error, but in case
5371 not, set a default one here. */
5372 if (! error_p ())
5373 set_default_error ();
5374
5375 if (! backtrack_pos)
5376 goto parse_operands_return;
5377
5378 {
5379 /* We reach here because this operand is marked as optional, and
5380 either no operand was supplied or the operand was supplied but it
5381 was syntactically incorrect. In the latter case we report an
5382 error. In the former case we perform a few more checks before
5383 dropping through to the code to insert the default operand. */
5384
5385 char *tmp = backtrack_pos;
5386 char endchar = END_OF_INSN;
5387
5388 if (i != (aarch64_num_of_operands (opcode) - 1))
5389 endchar = ',';
5390 skip_past_char (&tmp, ',');
5391
5392 if (*tmp != endchar)
5393 /* The user has supplied an operand in the wrong format. */
5394 goto parse_operands_return;
5395
5396 /* Make sure there is not a comma before the optional operand.
5397 For example the fifth operand of 'sys' is optional:
5398
5399 sys #0,c0,c0,#0, <--- wrong
5400 sys #0,c0,c0,#0 <--- correct. */
5401 if (comma_skipped_p && i && endchar == END_OF_INSN)
5402 {
5403 set_fatal_syntax_error
5404 (_("unexpected comma before the omitted optional operand"));
5405 goto parse_operands_return;
5406 }
5407 }
5408
5409 /* Reaching here means we are dealing with an optional operand that is
5410 omitted from the assembly line. */
5411 gas_assert (optional_operand_p (opcode, i));
5412 info->present = 0;
5413 process_omitted_operand (operands[i], opcode, i, info);
5414
5415 /* Try again, skipping the optional operand at backtrack_pos. */
5416 str = backtrack_pos;
5417 backtrack_pos = 0;
5418
5419 /* Clear any error record after the omitted optional operand has been
5420 successfully handled. */
5421 clear_error ();
5422 }
5423
5424 /* Check if we have parsed all the operands. */
5425 if (*str != '\0' && ! error_p ())
5426 {
5427 /* Set I to the index of the last present operand; this is
5428 for the purpose of diagnostics. */
5429 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5430 ;
5431 set_fatal_syntax_error
5432 (_("unexpected characters following instruction"));
5433 }
5434
5435 parse_operands_return:
5436
5437 if (error_p ())
5438 {
5439 DEBUG_TRACE ("parsing FAIL: %s - %s",
5440 operand_mismatch_kind_names[get_error_kind ()],
5441 get_error_message ());
5442 /* Record the operand error properly; this is useful when there
5443 are multiple instruction templates for a mnemonic name, so that
5444 later on, we can select the error that most closely describes
5445 the problem. */
5446 record_operand_error (opcode, i, get_error_kind (),
5447 get_error_message ());
5448 return FALSE;
5449 }
5450 else
5451 {
5452 DEBUG_TRACE ("parsing SUCCESS");
5453 return TRUE;
5454 }
5455 }
5456
5457 /* Do some fix-ups to provide programmer-friendly features while keeping
5458 libopcodes happy, i.e. libopcodes only accepts the preferred
5459 architectural syntax.
5460 Return FALSE if there is any failure; otherwise return TRUE. */
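/* For example, "tbz w0, #3, lab" is accepted and re-qualified so that the
   register is encoded as an X register, and "uxtb x1, w2" is accepted by
   re-qualifying the destination to W before encoding.  */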
5461
5462 static bfd_boolean
5463 programmer_friendly_fixup (aarch64_instruction *instr)
5464 {
5465 aarch64_inst *base = &instr->base;
5466 const aarch64_opcode *opcode = base->opcode;
5467 enum aarch64_op op = opcode->op;
5468 aarch64_opnd_info *operands = base->operands;
5469
5470 DEBUG_TRACE ("enter");
5471
5472 switch (opcode->iclass)
5473 {
5474 case testbranch:
5475 /* TBNZ Xn|Wn, #uimm6, label
5476 Test and Branch Not Zero: conditionally jumps to label if bit number
5477 uimm6 in register Xn is not zero. The bit number implies the width of
5478 the register, which may be written and should be disassembled as Wn if
5479 uimm is less than 32. */
5480 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5481 {
5482 if (operands[1].imm.value >= 32)
5483 {
5484 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5485 0, 31);
5486 return FALSE;
5487 }
5488 operands[0].qualifier = AARCH64_OPND_QLF_X;
5489 }
5490 break;
5491 case loadlit:
5492 /* LDR Wt, label | =value
5493 As a convenience assemblers will typically permit the notation
5494 "=value" in conjunction with the pc-relative literal load instructions
5495 to automatically place an immediate value or symbolic address in a
5496 nearby literal pool and generate a hidden label which references it.
5497 ISREG has been set to 0 in the case of =value. */
5498 if (instr->gen_lit_pool
5499 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5500 {
5501 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5502 if (op == OP_LDRSW_LIT)
5503 size = 4;
5504 if (instr->reloc.exp.X_op != O_constant
5505 && instr->reloc.exp.X_op != O_big
5506 && instr->reloc.exp.X_op != O_symbol)
5507 {
5508 record_operand_error (opcode, 1,
5509 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5510 _("constant expression expected"));
5511 return FALSE;
5512 }
5513 if (! add_to_lit_pool (&instr->reloc.exp, size))
5514 {
5515 record_operand_error (opcode, 1,
5516 AARCH64_OPDE_OTHER_ERROR,
5517 _("literal pool insertion failed"));
5518 return FALSE;
5519 }
5520 }
5521 break;
5522 case log_shift:
5523 case bitfield:
5524 /* UXT[BHW] Wd, Wn
5525 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5526 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5527 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5528 A programmer-friendly assembler should accept a destination Xd in
5529 place of Wd, however that is not the preferred form for disassembly.
5530 */
5531 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5532 && operands[1].qualifier == AARCH64_OPND_QLF_W
5533 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5534 operands[0].qualifier = AARCH64_OPND_QLF_W;
5535 break;
5536
5537 case addsub_ext:
5538 {
5539 /* In the 64-bit form, the final register operand is written as Wm
5540 for all but the (possibly omitted) UXTX/LSL and SXTX
5541 operators.
5542 As a programmer-friendly assembler, we accept e.g.
5543 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5544 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5545 int idx = aarch64_operand_index (opcode->operands,
5546 AARCH64_OPND_Rm_EXT);
5547 gas_assert (idx == 1 || idx == 2);
5548 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5549 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5550 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5551 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5552 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5553 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5554 }
5555 break;
5556
5557 default:
5558 break;
5559 }
5560
5561 DEBUG_TRACE ("exit with SUCCESS");
5562 return TRUE;
5563 }
5564
5565 /* Check for loads and stores that will cause unpredictable behavior. */
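/* For example, "ldr x0, [x0], #8" (base register also transferred, with
   writeback) and "ldp x0, x0, [x1]" (load pair into the same register)
   both get a warning here.  */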
5566
5567 static void
5568 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
5569 {
5570 aarch64_inst *base = &instr->base;
5571 const aarch64_opcode *opcode = base->opcode;
5572 const aarch64_opnd_info *opnds = base->operands;
5573 switch (opcode->iclass)
5574 {
5575 case ldst_pos:
5576 case ldst_imm9:
5577 case ldst_unscaled:
5578 case ldst_unpriv:
5579 /* Loading/storing the base register is unpredictable if writeback. */
5580 if ((aarch64_get_operand_class (opnds[0].type)
5581 == AARCH64_OPND_CLASS_INT_REG)
5582 && opnds[0].reg.regno == opnds[1].addr.base_regno
5583 && opnds[1].addr.base_regno != REG_SP
5584 && opnds[1].addr.writeback)
5585 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5586 break;
5587 case ldstpair_off:
5588 case ldstnapair_offs:
5589 case ldstpair_indexed:
5590 /* Loading/storing the base register is unpredictable if writeback. */
5591 if ((aarch64_get_operand_class (opnds[0].type)
5592 == AARCH64_OPND_CLASS_INT_REG)
5593 && (opnds[0].reg.regno == opnds[2].addr.base_regno
5594 || opnds[1].reg.regno == opnds[2].addr.base_regno)
5595 && opnds[2].addr.base_regno != REG_SP
5596 && opnds[2].addr.writeback)
5597 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5598 /* Load operations must load different registers. */
5599 if ((opcode->opcode & (1 << 22))
5600 && opnds[0].reg.regno == opnds[1].reg.regno)
5601 as_warn (_("unpredictable load of register pair -- `%s'"), str);
5602 break;
5603 default:
5604 break;
5605 }
5606 }
5607
5608 /* A wrapper function to interface with libopcodes on encoding and
5609 record the error message if there is any.
5610
5611 Return TRUE on success; otherwise return FALSE. */
5612
5613 static bfd_boolean
5614 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5615 aarch64_insn *code)
5616 {
5617 aarch64_operand_error error_info;
5618 error_info.kind = AARCH64_OPDE_NIL;
5619 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5620 return TRUE;
5621 else
5622 {
5623 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5624 record_operand_error_info (opcode, &error_info);
5625 return FALSE;
5626 }
5627 }
5628
5629 #ifdef DEBUG_AARCH64
5630 static inline void
5631 dump_opcode_operands (const aarch64_opcode *opcode)
5632 {
5633 int i = 0;
5634 while (opcode->operands[i] != AARCH64_OPND_NIL)
5635 {
5636 aarch64_verbose ("\t\t opnd%d: %s", i,
5637 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5638 ? aarch64_get_operand_name (opcode->operands[i])
5639 : aarch64_get_operand_desc (opcode->operands[i]));
5640 ++i;
5641 }
5642 }
5643 #endif /* DEBUG_AARCH64 */
5644
5645 /* This is the guts of the machine-dependent assembler. STR points to a
5646 machine dependent instruction. This function is supposed to emit
5647 the frags/bytes it assembles to. */
5648
5649 void
5650 md_assemble (char *str)
5651 {
5652 char *p = str;
5653 templates *template;
5654 aarch64_opcode *opcode;
5655 aarch64_inst *inst_base;
5656 unsigned saved_cond;
5657
5658 /* Align the previous label if needed. */
5659 if (last_label_seen != NULL)
5660 {
5661 symbol_set_frag (last_label_seen, frag_now);
5662 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5663 S_SET_SEGMENT (last_label_seen, now_seg);
5664 }
5665
5666 inst.reloc.type = BFD_RELOC_UNUSED;
5667
5668 DEBUG_TRACE ("\n\n");
5669 DEBUG_TRACE ("==============================");
5670 DEBUG_TRACE ("Enter md_assemble with %s", str);
5671
5672 template = opcode_lookup (&p);
5673 if (!template)
5674 {
5675 /* It wasn't an instruction, but it might be a register alias created
5676 by a directive of the form 'alias .req reg'. */
5677 if (!create_register_alias (str, p))
5678 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5679 str);
5680 return;
5681 }
5682
5683 skip_whitespace (p);
5684 if (*p == ',')
5685 {
5686 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5687 get_mnemonic_name (str), str);
5688 return;
5689 }
5690
5691 init_operand_error_report ();
5692
5693 saved_cond = inst.cond;
5694 reset_aarch64_instruction (&inst);
5695 inst.cond = saved_cond;
5696
5697 /* Iterate through all opcode entries with the same mnemonic name. */
5698 do
5699 {
5700 opcode = template->opcode;
5701
5702 DEBUG_TRACE ("opcode %s found", opcode->name);
5703 #ifdef DEBUG_AARCH64
5704 if (debug_dump)
5705 dump_opcode_operands (opcode);
5706 #endif /* DEBUG_AARCH64 */
5707
5708 /* Sections are assumed to start aligned. In an executable section there
5709 is no MAP_DATA symbol pending, so we only align the address during the
5710 MAP_DATA --> MAP_INSN transition.
5711 For other sections this is not guaranteed, so align anyway. */
5712 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
5713 if (!need_pass_2 && ((subseg_text_p (now_seg) && mapstate == MAP_DATA)
5714 || !subseg_text_p (now_seg)))
5715 frag_align_code (2, 0);
5716
5717 mapping_state (MAP_INSN);
5718
5719 inst_base = &inst.base;
5720 inst_base->opcode = opcode;
5721
5722 /* Truly conditionally executed instructions, e.g. b.cond. */
5723 if (opcode->flags & F_COND)
5724 {
5725 gas_assert (inst.cond != COND_ALWAYS);
5726 inst_base->cond = get_cond_from_value (inst.cond);
5727 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5728 }
5729 else if (inst.cond != COND_ALWAYS)
5730 {
5731 /* We shouldn't get here: the assembly looks like a conditional
5732 instruction, but the opcode found is unconditional. */
5733 gas_assert (0);
5734 continue;
5735 }
5736
5737 if (parse_operands (p, opcode)
5738 && programmer_friendly_fixup (&inst)
5739 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5740 {
5741 /* Check that this instruction is supported for this CPU. */
5742 if (!opcode->avariant
5743 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5744 {
5745 as_bad (_("selected processor does not support `%s'"), str);
5746 return;
5747 }
5748
5749 warn_unpredictable_ldst (&inst, str);
5750
5751 if (inst.reloc.type == BFD_RELOC_UNUSED
5752 || !inst.reloc.need_libopcodes_p)
5753 output_inst (NULL);
5754 else
5755 {
5756 /* If there is relocation generated for the instruction,
5757 store the instruction information for the future fix-up. */
5758 struct aarch64_inst *copy;
5759 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5760 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5761 abort ();
5762 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5763 output_inst (copy);
5764 }
5765 return;
5766 }
5767
5768 template = template->next;
5769 if (template != NULL)
5770 {
5771 reset_aarch64_instruction (&inst);
5772 inst.cond = saved_cond;
5773 }
5774 }
5775 while (template != NULL);
5776
5777 /* Issue the error messages if any. */
5778 output_operand_error_report (str);
5779 }
5780
5781 /* Various frobbings of labels and their addresses. */
5782
5783 void
5784 aarch64_start_line_hook (void)
5785 {
5786 last_label_seen = NULL;
5787 }
5788
5789 void
5790 aarch64_frob_label (symbolS * sym)
5791 {
5792 last_label_seen = sym;
5793
5794 dwarf2_emit_label (sym);
5795 }
5796
5797 int
5798 aarch64_data_in_code (void)
5799 {
5800 if (!strncmp (input_line_pointer + 1, "data:", 5))
5801 {
5802 *input_line_pointer = '/';
5803 input_line_pointer += 5;
5804 *input_line_pointer = 0;
5805 return 1;
5806 }
5807
5808 return 0;
5809 }
5810
5811 char *
5812 aarch64_canonicalize_symbol_name (char *name)
5813 {
5814 int len;
5815
5816 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5817 *(name + len - 5) = 0;
5818
5819 return name;
5820 }
5821 \f
5822 /* Table of all register names defined by default. The user can
5823 define additional names with .req. Note that all register names
5824 should appear in both upper and lowercase variants. Some registers
5825 also have mixed-case names. */
5826
5827 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5828 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5829 #define REGSET31(p,t) \
5830 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5831 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5832 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5833 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5834 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5835 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5836 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5837 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5838 #define REGSET(p,t) \
5839 REGSET31(p,t), REGNUM(p,31,t)
5840
5841 /* These go into aarch64_reg_hsh hash-table. */
5842 static const reg_entry reg_names[] = {
5843 /* Integer registers. */
5844 REGSET31 (x, R_64), REGSET31 (X, R_64),
5845 REGSET31 (w, R_32), REGSET31 (W, R_32),
5846
5847 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5848 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5849
5850 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5851 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5852
5853 /* Coprocessor register numbers. */
5854 REGSET (c, CN), REGSET (C, CN),
5855
5856 /* Floating-point single precision registers. */
5857 REGSET (s, FP_S), REGSET (S, FP_S),
5858
5859 /* Floating-point double precision registers. */
5860 REGSET (d, FP_D), REGSET (D, FP_D),
5861
5862 /* Floating-point half precision registers. */
5863 REGSET (h, FP_H), REGSET (H, FP_H),
5864
5865 /* Floating-point byte precision registers. */
5866 REGSET (b, FP_B), REGSET (B, FP_B),
5867
5868 /* Floating-point quad precision registers. */
5869 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5870
5871 /* FP/SIMD registers. */
5872 REGSET (v, VN), REGSET (V, VN),
5873 };
5874
5875 #undef REGDEF
5876 #undef REGNUM
5877 #undef REGSET
5878
5879 #define N 1
5880 #define n 0
5881 #define Z 1
5882 #define z 0
5883 #define C 1
5884 #define c 0
5885 #define V 1
5886 #define v 0
5887 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5888 static const asm_nzcv nzcv_names[] = {
5889 {"nzcv", B (n, z, c, v)},
5890 {"nzcV", B (n, z, c, V)},
5891 {"nzCv", B (n, z, C, v)},
5892 {"nzCV", B (n, z, C, V)},
5893 {"nZcv", B (n, Z, c, v)},
5894 {"nZcV", B (n, Z, c, V)},
5895 {"nZCv", B (n, Z, C, v)},
5896 {"nZCV", B (n, Z, C, V)},
5897 {"Nzcv", B (N, z, c, v)},
5898 {"NzcV", B (N, z, c, V)},
5899 {"NzCv", B (N, z, C, v)},
5900 {"NzCV", B (N, z, C, V)},
5901 {"NZcv", B (N, Z, c, v)},
5902 {"NZcV", B (N, Z, c, V)},
5903 {"NZCv", B (N, Z, C, v)},
5904 {"NZCV", B (N, Z, C, V)}
5905 };
5906
5907 #undef N
5908 #undef n
5909 #undef Z
5910 #undef z
5911 #undef C
5912 #undef c
5913 #undef V
5914 #undef v
5915 #undef B
5916 \f
5917 /* MD interface: bits in the object file. */
5918
5919 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5920 for use in the a.out file, and store them in the array pointed to by buf.
5921 This knows about the endianness of the target machine and does
5922 THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
5923 2 (short) and 4 (long).  Floating-point numbers are put out as a series of
5924 LITTLENUMS (shorts, here at least).  */
5925
5926 void
5927 md_number_to_chars (char *buf, valueT val, int n)
5928 {
5929 if (target_big_endian)
5930 number_to_chars_bigendian (buf, val, n);
5931 else
5932 number_to_chars_littleendian (buf, val, n);
5933 }
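
/* For example, writing the 32-bit value 0x11223344 with n == 4 stores the
   byte sequence 44 33 22 11 on a little-endian target and 11 22 33 44 on a
   big-endian one.  A stand-alone sketch of the little-endian case follows;
   the function name is made up and the block is illustrative only.  */
#if 0
static void
example_store_le32 (char *buf, valueT val)
{
  int i;

  /* Least significant byte first.  */
  for (i = 0; i < 4; i++)
    buf[i] = (char) ((val >> (8 * i)) & 0xff);
}
#endif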
5934
5935 /* MD interface: Sections. */
5936
5937 /* Estimate the size of a frag before relaxing. Assume everything fits in
5938 4 bytes. */
5939
5940 int
5941 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5942 {
5943 fragp->fr_var = 4;
5944 return 4;
5945 }
5946
5947 /* Round up a section size to the appropriate boundary. */
5948
5949 valueT
5950 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5951 {
5952 return size;
5953 }
5954
5955 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5956 of an rs_align_code fragment.
5957
5958 Here we fill the frag with the appropriate info for padding the
5959 output stream. The resulting frag will consist of a fixed (fr_fix)
5960 and of a repeating (fr_var) part.
5961
5962 The fixed content is always emitted before the repeating content and
5963 these two parts are used as follows in constructing the output:
5964 - the fixed part will be used to align to a valid instruction word
5965 boundary, in case we start at a misaligned address; as no
5966 executable instruction can live at the misaligned location, we
5967 simply fill with zeros;
5968 - the variable part will be used to cover the remaining padding and
5969 we fill using the AArch64 NOP instruction.
5970
5971 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
5972 enough storage space for up to 3 bytes for padding back to a valid
5973 instruction alignment and exactly 4 bytes to store the NOP pattern. */
5974
5975 void
5976 aarch64_handle_align (fragS * fragP)
5977 {
5978 /* NOP = d503201f */
5979 /* AArch64 instructions are always little-endian. */
5980 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5981
5982 int bytes, fix, noop_size;
5983 char *p;
5984
5985 if (fragP->fr_type != rs_align_code)
5986 return;
5987
5988 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5989 p = fragP->fr_literal + fragP->fr_fix;
5990
5991 #ifdef OBJ_ELF
5992 gas_assert (fragP->tc_frag_data.recorded);
5993 #endif
5994
5995 noop_size = sizeof (aarch64_noop);
5996
5997 fix = bytes & (noop_size - 1);
5998 if (fix)
5999 {
6000 #ifdef OBJ_ELF
6001 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6002 #endif
6003 memset (p, 0, fix);
6004 p += fix;
6005 fragP->fr_fix += fix;
6006 }
6007
6008 if (noop_size)
6009 memcpy (p, aarch64_noop, noop_size);
6010 fragP->fr_var = noop_size;
6011 }
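
/* Worked example of the split above: if the frag ends 6 bytes short of the
   requested boundary, then fix = 6 & 3 = 2, so two zero bytes restore
   4-byte instruction alignment and the remaining 4 bytes are covered by one
   copy of the NOP pattern (fr_var == 4).  A stand-alone sketch of the same
   arithmetic (the function name is made up; illustrative only, not built):  */
#if 0
static void
example_split_padding (int bytes, int *zero_bytes, int *nop_bytes)
{
  int fix = bytes & 3;		/* Bytes needed to reach a 4-byte boundary.  */

  *zero_bytes = fix;		/* Filled with zeros (fixed part).  */
  *nop_bytes = bytes - fix;	/* Covered by repeating the 4-byte NOP.  */
}
#endif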
6012
6013 /* Perform target specific initialisation of a frag.
6014 Note - despite the name this initialisation is not done when the frag
6015 is created, but only when its type is assigned. A frag can be created
6016 and used a long time before its type is set, so beware of assuming that
6017 this initialisation is performed first.  */
6018
6019 #ifndef OBJ_ELF
6020 void
6021 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6022 int max_chars ATTRIBUTE_UNUSED)
6023 {
6024 }
6025
6026 #else /* OBJ_ELF is defined. */
6027 void
6028 aarch64_init_frag (fragS * fragP, int max_chars)
6029 {
6030 /* Record a mapping symbol for alignment frags. We will delete this
6031 later if the alignment ends up empty. */
6032 if (!fragP->tc_frag_data.recorded)
6033 {
6034 fragP->tc_frag_data.recorded = 1;
6035 switch (fragP->fr_type)
6036 {
6037 case rs_align:
6038 case rs_align_test:
6039 case rs_fill:
6040 mapping_state_2 (MAP_DATA, max_chars);
6041 break;
6042 case rs_align_code:
6043 mapping_state_2 (MAP_INSN, max_chars);
6044 break;
6045 default:
6046 break;
6047 }
6048 }
6049 }
6050 \f
6051 /* Initialize the DWARF-2 unwind information for this procedure. */
6052
6053 void
6054 tc_aarch64_frame_initial_instructions (void)
6055 {
6056 cfi_add_CFA_def_cfa (REG_SP, 0);
6057 }
6058 #endif /* OBJ_ELF */
6059
6060 /* Convert REGNAME to a DWARF-2 register number. */
6061
6062 int
6063 tc_aarch64_regname_to_dw2regnum (char *regname)
6064 {
6065 const reg_entry *reg = parse_reg (&regname);
6066 if (reg == NULL)
6067 return -1;
6068
6069 switch (reg->type)
6070 {
6071 case REG_TYPE_SP_32:
6072 case REG_TYPE_SP_64:
6073 case REG_TYPE_R_32:
6074 case REG_TYPE_R_64:
6075 return reg->number;
6076
6077 case REG_TYPE_FP_B:
6078 case REG_TYPE_FP_H:
6079 case REG_TYPE_FP_S:
6080 case REG_TYPE_FP_D:
6081 case REG_TYPE_FP_Q:
6082 return reg->number + 64;
6083
6084 default:
6085 break;
6086 }
6087 return -1;
6088 }
6089
6090 /* Implement DWARF2_ADDR_SIZE. */
6091
6092 int
6093 aarch64_dwarf2_addr_size (void)
6094 {
6095 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6096 if (ilp32_p)
6097 return 4;
6098 #endif
6099 return bfd_arch_bits_per_address (stdoutput) / 8;
6100 }
6101
6102 /* MD interface: Symbol and relocation handling. */
6103
6104 /* Return the address within the segment that a PC-relative fixup is
6105 relative to.  For AArch64, PC-relative fixups applied to instructions
6106 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
6107
6108 long
6109 md_pcrel_from_section (fixS * fixP, segT seg)
6110 {
6111 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6112
6113 /* If this is pc-relative and we are going to emit a relocation
6114 then we just want to put out any pipeline compensation that the linker
6115 will need. Otherwise we want to use the calculated base. */
6116 if (fixP->fx_pcrel
6117 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6118 || aarch64_force_relocation (fixP)))
6119 base = 0;
6120
6121 /* AArch64 should be consistent for all pc-relative relocations. */
6122 return base + AARCH64_PCREL_OFFSET;
6123 }
6124
6125 /* Under ELF we need to provide a default for _GLOBAL_OFFSET_TABLE_.
6126 Otherwise we have no need to default the values of symbols.  */
6127
6128 symbolS *
6129 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6130 {
6131 #ifdef OBJ_ELF
6132 if (name[0] == '_' && name[1] == 'G'
6133 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6134 {
6135 if (!GOT_symbol)
6136 {
6137 if (symbol_find (name))
6138 as_bad (_("GOT already in the symbol table"));
6139
6140 GOT_symbol = symbol_new (name, undefined_section,
6141 (valueT) 0, &zero_address_frag);
6142 }
6143
6144 return GOT_symbol;
6145 }
6146 #endif
6147
6148 return 0;
6149 }
6150
6151 /* Return non-zero if the indicated VALUE has overflowed the maximum
6152 range expressible by an unsigned number with the indicated number of
6153 BITS. */
6154
6155 static bfd_boolean
6156 unsigned_overflow (valueT value, unsigned bits)
6157 {
6158 valueT lim;
6159 if (bits >= sizeof (valueT) * 8)
6160 return FALSE;
6161 lim = (valueT) 1 << bits;
6162 return (value >= lim);
6163 }
6164
6165
6166 /* Return non-zero if the indicated VALUE has overflowed the maximum
6167 range expressible by a signed number with the indicated number of
6168 BITS. */
6169
6170 static bfd_boolean
6171 signed_overflow (offsetT value, unsigned bits)
6172 {
6173 offsetT lim;
6174 if (bits >= sizeof (offsetT) * 8)
6175 return FALSE;
6176 lim = (offsetT) 1 << (bits - 1);
6177 return (value < -lim || value >= lim);
6178 }
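
/* For instance, with BITS == 21 (as used by the pc-relative checks further
   down) the representable signed range is [-0x100000, 0xfffff]; so
   signed_overflow (0x100000, 21) is TRUE while signed_overflow (-0x100000, 21)
   is FALSE.  A stand-alone sketch of the same check, phrased as "fits"
   rather than "overflows" (the name is made up; illustrative only):  */
#if 0
static int
example_fits_signed (int64_t value, unsigned bits)
{
  int64_t lim = (int64_t) 1 << (bits - 1);

  return value >= -lim && value < lim;	/* 1 iff VALUE fits in BITS bits.  */
}
#endif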
6179
6180 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6181 unsigned immediate offset load/store instruction, try to encode it as
6182 an unscaled, 9-bit, signed immediate offset load/store instruction.
6183 Return TRUE if it is successful; otherwise return FALSE.
6184
6185 As a programmer-friendly assembler, GAS generates LDUR/STUR instructions
6186 in response to the standard LDR/STR mnemonics when the immediate offset is
6187 unambiguous, i.e. when it is negative or unaligned (see the example below).  */
6188
6189 static bfd_boolean
6190 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6191 {
6192 int idx;
6193 enum aarch64_op new_op;
6194 const aarch64_opcode *new_opcode;
6195
6196 gas_assert (instr->opcode->iclass == ldst_pos);
6197
6198 switch (instr->opcode->op)
6199 {
6200 case OP_LDRB_POS: new_op = OP_LDURB; break;
6201 case OP_STRB_POS: new_op = OP_STURB; break;
6202 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6203 case OP_LDRH_POS: new_op = OP_LDURH; break;
6204 case OP_STRH_POS: new_op = OP_STURH; break;
6205 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6206 case OP_LDR_POS: new_op = OP_LDUR; break;
6207 case OP_STR_POS: new_op = OP_STUR; break;
6208 case OP_LDRF_POS: new_op = OP_LDURV; break;
6209 case OP_STRF_POS: new_op = OP_STURV; break;
6210 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6211 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6212 default: new_op = OP_NIL; break;
6213 }
6214
6215 if (new_op == OP_NIL)
6216 return FALSE;
6217
6218 new_opcode = aarch64_get_opcode (new_op);
6219 gas_assert (new_opcode != NULL);
6220
6221 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6222 instr->opcode->op, new_opcode->op);
6223
6224 aarch64_replace_opcode (instr, new_opcode);
6225
6226 /* Clear the ADDR_SIMM9 operand's qualifier; otherwise the
6227 qualifier matching may fail because the out-of-date qualifier will
6228 prevent the operand from being updated with a new and correct qualifier.  */
6229 idx = aarch64_operand_index (instr->opcode->operands,
6230 AARCH64_OPND_ADDR_SIMM9);
6231 gas_assert (idx == 1);
6232 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6233
6234 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6235
6236 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6237 return FALSE;
6238
6239 return TRUE;
6240 }
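
/* Illustrative assembly (not taken from the sources or the testsuite)
   showing when the conversion above applies:

     ldr x0, [x1, #-8]	// negative offset: re-encoded as ldur x0, [x1, #-8]
     ldr x0, [x1, #1]	// offset not a multiple of 8: re-encoded as ldur
     ldr x0, [x1, #8]	// fits the scaled, unsigned 12-bit form: stays ldr

   The first two offsets cannot use the scaled LDR encoding, so the standard
   mnemonic is accepted and quietly encoded with the unscaled LDUR form.  */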
6241
6242 /* Called by fix_insn to fix a MOV immediate alias instruction.
6243
6244 Operand for a generic move immediate instruction, which is an alias
6245 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6246 a 32-bit/64-bit immediate value into a general register.  An assembler error
6247 shall result if the immediate cannot be created by a single one of these
6248 instructions.  If there is a choice, then to ensure reversibility an
6249 assembler must prefer MOVZ to MOVN, and MOVZ or MOVN to ORR.  */
6250
6251 static void
6252 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6253 {
6254 const aarch64_opcode *opcode;
6255
6256 /* Need to check if the destination is SP/ZR. The check has to be done
6257 before any aarch64_replace_opcode. */
6258 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6259 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6260
6261 instr->operands[1].imm.value = value;
6262 instr->operands[1].skip = 0;
6263
6264 if (try_mov_wide_p)
6265 {
6266 /* Try the MOVZ alias. */
6267 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6268 aarch64_replace_opcode (instr, opcode);
6269 if (aarch64_opcode_encode (instr->opcode, instr,
6270 &instr->value, NULL, NULL))
6271 {
6272 put_aarch64_insn (buf, instr->value);
6273 return;
6274 }
6275 /* Try the MOVK alias. */
6276 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6277 aarch64_replace_opcode (instr, opcode);
6278 if (aarch64_opcode_encode (instr->opcode, instr,
6279 &instr->value, NULL, NULL))
6280 {
6281 put_aarch64_insn (buf, instr->value);
6282 return;
6283 }
6284 }
6285
6286 if (try_mov_bitmask_p)
6287 {
6288 /* Try the ORR alias. */
6289 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6290 aarch64_replace_opcode (instr, opcode);
6291 if (aarch64_opcode_encode (instr->opcode, instr,
6292 &instr->value, NULL, NULL))
6293 {
6294 put_aarch64_insn (buf, instr->value);
6295 return;
6296 }
6297 }
6298
6299 as_bad_where (fixP->fx_file, fixP->fx_line,
6300 _("immediate cannot be moved by a single instruction"));
6301 }
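
/* Illustrative examples of the preference order implemented above (the
   constants are chosen for the example only):

     mov x0, #0x10000	    // MOVZ x0, #0x1, lsl #16
     mov x0, #-1	    // MOVN x0, #0
     mov x0, #0xffff00	    // no MOVZ/MOVN form fits: ORR x0, xzr, #0xffff00
     mov x0, #0x1234567890  // not encodable by one instruction: error  */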
6302
6303 /* An immediate-related instruction operand may refer to a symbol in
6304 the assembly, e.g.
6305
6306 mov w0, u32
6307 .set u32, 0x00ffff00
6308
6309 At the time the assembly instruction is parsed, a referenced symbol
6310 like 'u32' in the above example may not yet have been seen; a fixS is created
6311 in such a case and is handled here after symbols have been resolved.
6312 The instruction is fixed up with VALUE using the information in *FIXP plus
6313 extra information in FLAGS.
6314
6315 This function is called by md_apply_fix to fix up instructions that need
6316 a fix-up as described above but do not involve any linker-time relocation.  */
6317
6318 static void
6319 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6320 {
6321 int idx;
6322 uint32_t insn;
6323 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6324 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6325 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6326
6327 if (new_inst)
6328 {
6329 /* Now the instruction is about to be fixed-up, so the operand that
6330 was previously marked as 'ignored' needs to be unmarked in order
6331 to get the encoding done properly. */
6332 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6333 new_inst->operands[idx].skip = 0;
6334 }
6335
6336 gas_assert (opnd != AARCH64_OPND_NIL);
6337
6338 switch (opnd)
6339 {
6340 case AARCH64_OPND_EXCEPTION:
6341 if (unsigned_overflow (value, 16))
6342 as_bad_where (fixP->fx_file, fixP->fx_line,
6343 _("immediate out of range"));
6344 insn = get_aarch64_insn (buf);
6345 insn |= encode_svc_imm (value);
6346 put_aarch64_insn (buf, insn);
6347 break;
6348
6349 case AARCH64_OPND_AIMM:
6350 /* ADD or SUB with immediate.
6351 NOTE this assumes we come here with a add/sub shifted reg encoding
6352 3 322|2222|2 2 2 21111 111111
6353 1 098|7654|3 2 1 09876 543210 98765 43210
6354 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6355 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6356 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6357 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6358 ->
6359 3 322|2222|2 2 221111111111
6360 1 098|7654|3 2 109876543210 98765 43210
6361 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6362 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6363 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6364 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6365 Fields sf Rn Rd are already set. */
6366 insn = get_aarch64_insn (buf);
6367 if (value < 0)
6368 {
6369 /* Add <-> sub. */
6370 insn = reencode_addsub_switch_add_sub (insn);
6371 value = -value;
6372 }
6373
6374 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6375 && unsigned_overflow (value, 12))
6376 {
6377 /* Try to shift the value by 12 to make it fit. */
6378 if (((value >> 12) << 12) == value
6379 && ! unsigned_overflow (value, 12 + 12))
6380 {
6381 value >>= 12;
6382 insn |= encode_addsub_imm_shift_amount (1);
6383 }
6384 }
6385
6386 if (unsigned_overflow (value, 12))
6387 as_bad_where (fixP->fx_file, fixP->fx_line,
6388 _("immediate out of range"));
6389
6390 insn |= encode_addsub_imm (value);
6391
6392 put_aarch64_insn (buf, insn);
6393 break;
6394
6395 case AARCH64_OPND_SIMD_IMM:
6396 case AARCH64_OPND_SIMD_IMM_SFT:
6397 case AARCH64_OPND_LIMM:
6398 /* Bit mask immediate. */
6399 gas_assert (new_inst != NULL);
6400 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6401 new_inst->operands[idx].imm.value = value;
6402 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6403 &new_inst->value, NULL, NULL))
6404 put_aarch64_insn (buf, new_inst->value);
6405 else
6406 as_bad_where (fixP->fx_file, fixP->fx_line,
6407 _("invalid immediate"));
6408 break;
6409
6410 case AARCH64_OPND_HALF:
6411 /* 16-bit unsigned immediate. */
6412 if (unsigned_overflow (value, 16))
6413 as_bad_where (fixP->fx_file, fixP->fx_line,
6414 _("immediate out of range"));
6415 insn = get_aarch64_insn (buf);
6416 insn |= encode_movw_imm (value & 0xffff);
6417 put_aarch64_insn (buf, insn);
6418 break;
6419
6420 case AARCH64_OPND_IMM_MOV:
6421 /* Operand for a generic move immediate instruction, which is
6422 an alias instruction that generates a single MOVZ, MOVN or ORR
6423 instruction to load a 32-bit/64-bit immediate value into a general
6424 register.  An assembler error shall result if the immediate cannot be
6425 created by a single one of these instructions.  If there is a choice,
6426 then to ensure reversibility an assembler must prefer MOVZ to MOVN,
6427 and MOVZ or MOVN to ORR.  */
6428 gas_assert (new_inst != NULL);
6429 fix_mov_imm_insn (fixP, buf, new_inst, value);
6430 break;
6431
6432 case AARCH64_OPND_ADDR_SIMM7:
6433 case AARCH64_OPND_ADDR_SIMM9:
6434 case AARCH64_OPND_ADDR_SIMM9_2:
6435 case AARCH64_OPND_ADDR_UIMM12:
6436 /* Immediate offset in an address. */
6437 insn = get_aarch64_insn (buf);
6438
6439 gas_assert (new_inst != NULL && new_inst->value == insn);
6440 gas_assert (new_inst->opcode->operands[1] == opnd
6441 || new_inst->opcode->operands[2] == opnd);
6442
6443 /* Get the index of the address operand. */
6444 if (new_inst->opcode->operands[1] == opnd)
6445 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6446 idx = 1;
6447 else
6448 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6449 idx = 2;
6450
6451 /* Update the resolved offset value. */
6452 new_inst->operands[idx].addr.offset.imm = value;
6453
6454 /* Encode/fix-up. */
6455 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6456 &new_inst->value, NULL, NULL))
6457 {
6458 put_aarch64_insn (buf, new_inst->value);
6459 break;
6460 }
6461 else if (new_inst->opcode->iclass == ldst_pos
6462 && try_to_encode_as_unscaled_ldst (new_inst))
6463 {
6464 put_aarch64_insn (buf, new_inst->value);
6465 break;
6466 }
6467
6468 as_bad_where (fixP->fx_file, fixP->fx_line,
6469 _("immediate offset out of range"));
6470 break;
6471
6472 default:
6473 gas_assert (0);
6474 as_fatal (_("unhandled operand code %d"), opnd);
6475 }
6476 }
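
/* Worked example for the AARCH64_OPND_AIMM case above, assuming no explicit
   shift was written: a resolved value of 0x12000 does not fit in 12 bits,
   but its low 12 bits are zero and 0x12000 >> 12 == 0x12 does fit, so the
   instruction is encoded as ADD ..., #0x12, lsl #12; a value such as 0x1001
   is rejected with "immediate out of range".  (A negative value is first
   handled by flipping ADD <-> SUB.)  A stand-alone sketch of that decision
   (the function name is made up; illustrative only, not built):  */
#if 0
static int
example_encode_aimm (uint64_t value, unsigned *imm12, unsigned *shift)
{
  if (value < 0x1000)
    {
      *imm12 = (unsigned) value;		/* Fits directly.  */
      *shift = 0;
      return 1;
    }
  if ((value & 0xfff) == 0 && value < ((uint64_t) 0x1000 << 12))
    {
      *imm12 = (unsigned) (value >> 12);	/* Use the LSL #12 form.  */
      *shift = 12;
      return 1;
    }
  return 0;			/* Needs more than one instruction.  */
}
#endif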
6477
6478 /* Apply a fixup (fixP) to segment data, once it has been determined
6479 by our caller that we have all the info we need to fix it up.
6480
6481 Parameter valP is the pointer to the value of the bits. */
6482
6483 void
6484 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6485 {
6486 offsetT value = *valP;
6487 uint32_t insn;
6488 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6489 int scale;
6490 unsigned flags = fixP->fx_addnumber;
6491
6492 DEBUG_TRACE ("\n\n");
6493 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6494 DEBUG_TRACE ("Enter md_apply_fix");
6495
6496 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6497
6498 /* Note whether this will delete the relocation. */
6499
6500 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6501 fixP->fx_done = 1;
6502
6503 /* Process the relocations. */
6504 switch (fixP->fx_r_type)
6505 {
6506 case BFD_RELOC_NONE:
6507 /* This will need to go in the object file. */
6508 fixP->fx_done = 0;
6509 break;
6510
6511 case BFD_RELOC_8:
6512 case BFD_RELOC_8_PCREL:
6513 if (fixP->fx_done || !seg->use_rela_p)
6514 md_number_to_chars (buf, value, 1);
6515 break;
6516
6517 case BFD_RELOC_16:
6518 case BFD_RELOC_16_PCREL:
6519 if (fixP->fx_done || !seg->use_rela_p)
6520 md_number_to_chars (buf, value, 2);
6521 break;
6522
6523 case BFD_RELOC_32:
6524 case BFD_RELOC_32_PCREL:
6525 if (fixP->fx_done || !seg->use_rela_p)
6526 md_number_to_chars (buf, value, 4);
6527 break;
6528
6529 case BFD_RELOC_64:
6530 case BFD_RELOC_64_PCREL:
6531 if (fixP->fx_done || !seg->use_rela_p)
6532 md_number_to_chars (buf, value, 8);
6533 break;
6534
6535 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6536 /* We claim that these fixups have been processed here, even if
6537 in fact we generate an error because we do not have a reloc
6538 for them, so tc_gen_reloc() will reject them. */
6539 fixP->fx_done = 1;
6540 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6541 {
6542 as_bad_where (fixP->fx_file, fixP->fx_line,
6543 _("undefined symbol %s used as an immediate value"),
6544 S_GET_NAME (fixP->fx_addsy));
6545 goto apply_fix_return;
6546 }
6547 fix_insn (fixP, flags, value);
6548 break;
6549
6550 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6551 if (fixP->fx_done || !seg->use_rela_p)
6552 {
6553 if (value & 3)
6554 as_bad_where (fixP->fx_file, fixP->fx_line,
6555 _("pc-relative load offset not word aligned"));
6556 if (signed_overflow (value, 21))
6557 as_bad_where (fixP->fx_file, fixP->fx_line,
6558 _("pc-relative load offset out of range"));
6559 insn = get_aarch64_insn (buf);
6560 insn |= encode_ld_lit_ofs_19 (value >> 2);
6561 put_aarch64_insn (buf, insn);
6562 }
6563 break;
6564
6565 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6566 if (fixP->fx_done || !seg->use_rela_p)
6567 {
6568 if (signed_overflow (value, 21))
6569 as_bad_where (fixP->fx_file, fixP->fx_line,
6570 _("pc-relative address offset out of range"));
6571 insn = get_aarch64_insn (buf);
6572 insn |= encode_adr_imm (value);
6573 put_aarch64_insn (buf, insn);
6574 }
6575 break;
6576
6577 case BFD_RELOC_AARCH64_BRANCH19:
6578 if (fixP->fx_done || !seg->use_rela_p)
6579 {
6580 if (value & 3)
6581 as_bad_where (fixP->fx_file, fixP->fx_line,
6582 _("conditional branch target not word aligned"));
6583 if (signed_overflow (value, 21))
6584 as_bad_where (fixP->fx_file, fixP->fx_line,
6585 _("conditional branch out of range"));
6586 insn = get_aarch64_insn (buf);
6587 insn |= encode_cond_branch_ofs_19 (value >> 2);
6588 put_aarch64_insn (buf, insn);
6589 }
6590 break;
6591
6592 case BFD_RELOC_AARCH64_TSTBR14:
6593 if (fixP->fx_done || !seg->use_rela_p)
6594 {
6595 if (value & 3)
6596 as_bad_where (fixP->fx_file, fixP->fx_line,
6597 _("conditional branch target not word aligned"));
6598 if (signed_overflow (value, 16))
6599 as_bad_where (fixP->fx_file, fixP->fx_line,
6600 _("conditional branch out of range"));
6601 insn = get_aarch64_insn (buf);
6602 insn |= encode_tst_branch_ofs_14 (value >> 2);
6603 put_aarch64_insn (buf, insn);
6604 }
6605 break;
6606
6607 case BFD_RELOC_AARCH64_JUMP26:
6608 case BFD_RELOC_AARCH64_CALL26:
6609 if (fixP->fx_done || !seg->use_rela_p)
6610 {
6611 if (value & 3)
6612 as_bad_where (fixP->fx_file, fixP->fx_line,
6613 _("branch target not word aligned"));
6614 if (signed_overflow (value, 28))
6615 as_bad_where (fixP->fx_file, fixP->fx_line,
6616 _("branch out of range"));
6617 insn = get_aarch64_insn (buf);
6618 insn |= encode_branch_ofs_26 (value >> 2);
6619 put_aarch64_insn (buf, insn);
6620 }
6621 break;
6622
6623 case BFD_RELOC_AARCH64_MOVW_G0:
6624 case BFD_RELOC_AARCH64_MOVW_G0_S:
6625 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6626 scale = 0;
6627 goto movw_common;
6628 case BFD_RELOC_AARCH64_MOVW_G1:
6629 case BFD_RELOC_AARCH64_MOVW_G1_S:
6630 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6631 scale = 16;
6632 goto movw_common;
6633 case BFD_RELOC_AARCH64_MOVW_G2:
6634 case BFD_RELOC_AARCH64_MOVW_G2_S:
6635 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6636 scale = 32;
6637 goto movw_common;
6638 case BFD_RELOC_AARCH64_MOVW_G3:
6639 scale = 48;
6640 movw_common:
6641 if (fixP->fx_done || !seg->use_rela_p)
6642 {
6643 insn = get_aarch64_insn (buf);
6644
6645 if (!fixP->fx_done)
6646 {
6647 /* REL signed addend must fit in 16 bits */
6648 if (signed_overflow (value, 16))
6649 as_bad_where (fixP->fx_file, fixP->fx_line,
6650 _("offset out of range"));
6651 }
6652 else
6653 {
6654 /* Check for overflow and scale. */
6655 switch (fixP->fx_r_type)
6656 {
6657 case BFD_RELOC_AARCH64_MOVW_G0:
6658 case BFD_RELOC_AARCH64_MOVW_G1:
6659 case BFD_RELOC_AARCH64_MOVW_G2:
6660 case BFD_RELOC_AARCH64_MOVW_G3:
6661 if (unsigned_overflow (value, scale + 16))
6662 as_bad_where (fixP->fx_file, fixP->fx_line,
6663 _("unsigned value out of range"));
6664 break;
6665 case BFD_RELOC_AARCH64_MOVW_G0_S:
6666 case BFD_RELOC_AARCH64_MOVW_G1_S:
6667 case BFD_RELOC_AARCH64_MOVW_G2_S:
6668 /* NOTE: We can only come here with movz or movn. */
6669 if (signed_overflow (value, scale + 16))
6670 as_bad_where (fixP->fx_file, fixP->fx_line,
6671 _("signed value out of range"));
6672 if (value < 0)
6673 {
6674 /* Force use of MOVN. */
6675 value = ~value;
6676 insn = reencode_movzn_to_movn (insn);
6677 }
6678 else
6679 {
6680 /* Force use of MOVZ. */
6681 insn = reencode_movzn_to_movz (insn);
6682 }
6683 break;
6684 default:
6685 /* Unchecked relocations. */
6686 break;
6687 }
6688 value >>= scale;
6689 }
6690
6691 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6692 insn |= encode_movw_imm (value & 0xffff);
6693
6694 put_aarch64_insn (buf, insn);
6695 }
6696 break;
6697
6698 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6699 fixP->fx_r_type = (ilp32_p
6700 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6701 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6702 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6703 /* Should always be exported to object file, see
6704 aarch64_force_relocation(). */
6705 gas_assert (!fixP->fx_done);
6706 gas_assert (seg->use_rela_p);
6707 break;
6708
6709 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6710 fixP->fx_r_type = (ilp32_p
6711 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6712 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6713 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6714 /* Should always be exported to object file, see
6715 aarch64_force_relocation(). */
6716 gas_assert (!fixP->fx_done);
6717 gas_assert (seg->use_rela_p);
6718 break;
6719
6720 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6721 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6722 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6723 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6724 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6725 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6726 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6727 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6728 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6729 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6730 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6731 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6732 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6733 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6734 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6735 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6736 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6737 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6738 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6739 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6740 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6741 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6742 /* Should always be exported to object file, see
6743 aarch64_force_relocation(). */
6744 gas_assert (!fixP->fx_done);
6745 gas_assert (seg->use_rela_p);
6746 break;
6747
6748 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6749 /* Should always be exported to object file, see
6750 aarch64_force_relocation(). */
6751 fixP->fx_r_type = (ilp32_p
6752 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6753 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6754 gas_assert (!fixP->fx_done);
6755 gas_assert (seg->use_rela_p);
6756 break;
6757
6758 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6759 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6760 case BFD_RELOC_AARCH64_ADD_LO12:
6761 case BFD_RELOC_AARCH64_LDST8_LO12:
6762 case BFD_RELOC_AARCH64_LDST16_LO12:
6763 case BFD_RELOC_AARCH64_LDST32_LO12:
6764 case BFD_RELOC_AARCH64_LDST64_LO12:
6765 case BFD_RELOC_AARCH64_LDST128_LO12:
6766 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6767 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6768 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6769 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6770 /* Should always be exported to object file, see
6771 aarch64_force_relocation(). */
6772 gas_assert (!fixP->fx_done);
6773 gas_assert (seg->use_rela_p);
6774 break;
6775
6776 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6777 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6778 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6779 break;
6780
6781 case BFD_RELOC_UNUSED:
6782 /* An error will already have been reported. */
6783 break;
6784
6785 default:
6786 as_bad_where (fixP->fx_file, fixP->fx_line,
6787 _("unexpected %s fixup"),
6788 bfd_get_reloc_code_name (fixP->fx_r_type));
6789 break;
6790 }
6791
6792 apply_fix_return:
6793 /* Free the allocated struct aarch64_inst.
6794 N.B. currently only a limited number of fix-up types actually use
6795 this field, so the impact on performance should be minimal.  */
6796 if (fixP->tc_fix_data.inst != NULL)
6797 free (fixP->tc_fix_data.inst);
6798
6799 return;
6800 }
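
/* Illustrative example of the MOVW group handling above: for a value that
   resolves at assembly time to 0x12345678, a G1 relocation (scale == 16)
   first checks that the value fits in scale + 16 == 32 bits and then
   inserts (0x12345678 >> 16) & 0xffff == 0x1234 into the MOVZ/MOVK
   instruction, while a G0_NC relocation takes the unchecked path and
   inserts 0x5678.  */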
6801
6802 /* Translate internal representation of relocation info to BFD target
6803 format. */
6804
6805 arelent *
6806 tc_gen_reloc (asection * section, fixS * fixp)
6807 {
6808 arelent *reloc;
6809 bfd_reloc_code_real_type code;
6810
6811 reloc = xmalloc (sizeof (arelent));
6812
6813 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6814 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6815 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6816
6817 if (fixp->fx_pcrel)
6818 {
6819 if (section->use_rela_p)
6820 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6821 else
6822 fixp->fx_offset = reloc->address;
6823 }
6824 reloc->addend = fixp->fx_offset;
6825
6826 code = fixp->fx_r_type;
6827 switch (code)
6828 {
6829 case BFD_RELOC_16:
6830 if (fixp->fx_pcrel)
6831 code = BFD_RELOC_16_PCREL;
6832 break;
6833
6834 case BFD_RELOC_32:
6835 if (fixp->fx_pcrel)
6836 code = BFD_RELOC_32_PCREL;
6837 break;
6838
6839 case BFD_RELOC_64:
6840 if (fixp->fx_pcrel)
6841 code = BFD_RELOC_64_PCREL;
6842 break;
6843
6844 default:
6845 break;
6846 }
6847
6848 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6849 if (reloc->howto == NULL)
6850 {
6851 as_bad_where (fixp->fx_file, fixp->fx_line,
6852 _
6853 ("cannot represent %s relocation in this object file format"),
6854 bfd_get_reloc_code_name (code));
6855 return NULL;
6856 }
6857
6858 return reloc;
6859 }
6860
6861 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6862
6863 void
6864 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6865 {
6866 bfd_reloc_code_real_type type;
6867 int pcrel = 0;
6868
6869 /* Pick a reloc.
6870 FIXME: @@ Should look at CPU word size. */
6871 switch (size)
6872 {
6873 case 1:
6874 type = BFD_RELOC_8;
6875 break;
6876 case 2:
6877 type = BFD_RELOC_16;
6878 break;
6879 case 4:
6880 type = BFD_RELOC_32;
6881 break;
6882 case 8:
6883 type = BFD_RELOC_64;
6884 break;
6885 default:
6886 as_bad (_("cannot do %u-byte relocation"), size);
6887 type = BFD_RELOC_UNUSED;
6888 break;
6889 }
6890
6891 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6892 }
6893
6894 int
6895 aarch64_force_relocation (struct fix *fixp)
6896 {
6897 switch (fixp->fx_r_type)
6898 {
6899 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6900 /* Perform these "immediate" internal relocations
6901 even if the symbol is extern or weak. */
6902 return 0;
6903
6904 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6905 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6906 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6907 /* Pseudo relocs that need to be fixed up according to
6908 ilp32_p. */
6909 return 0;
6910
6911 case BFD_RELOC_AARCH64_ADD_LO12:
6912 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6913 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6914 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6915 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6916 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6917 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6918 case BFD_RELOC_AARCH64_LDST128_LO12:
6919 case BFD_RELOC_AARCH64_LDST16_LO12:
6920 case BFD_RELOC_AARCH64_LDST32_LO12:
6921 case BFD_RELOC_AARCH64_LDST64_LO12:
6922 case BFD_RELOC_AARCH64_LDST8_LO12:
6923 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6924 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6925 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6926 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6927 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6928 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6929 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6930 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6931 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6932 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6933 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6934 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6935 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6936 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6937 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6938 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6939 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6940 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6941 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6942 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6943 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6944 /* Always leave these relocations for the linker. */
6945 return 1;
6946
6947 default:
6948 break;
6949 }
6950
6951 return generic_force_reloc (fixp);
6952 }
6953
6954 #ifdef OBJ_ELF
6955
6956 const char *
6957 elf64_aarch64_target_format (void)
6958 {
6959 if (target_big_endian)
6960 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
6961 else
6962 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
6963 }
6964
6965 void
6966 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6967 {
6968 elf_frob_symbol (symp, puntp);
6969 }
6970 #endif
6971
6972 /* MD interface: Finalization. */
6973
6974 /* A good place to do this, although this was probably not intended
6975 for this kind of use. We need to dump the literal pool before
6976 references are made to a null symbol pointer. */
6977
6978 void
6979 aarch64_cleanup (void)
6980 {
6981 literal_pool *pool;
6982
6983 for (pool = list_of_pools; pool; pool = pool->next)
6984 {
6985 /* Put it at the end of the relevant section. */
6986 subseg_set (pool->section, pool->sub_section);
6987 s_ltorg (0);
6988 }
6989 }
6990
6991 #ifdef OBJ_ELF
6992 /* Remove any excess mapping symbols generated for alignment frags in
6993 SEC. We may have created a mapping symbol before a zero byte
6994 alignment; remove it if there's a mapping symbol after the
6995 alignment. */
6996 static void
6997 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6998 void *dummy ATTRIBUTE_UNUSED)
6999 {
7000 segment_info_type *seginfo = seg_info (sec);
7001 fragS *fragp;
7002
7003 if (seginfo == NULL || seginfo->frchainP == NULL)
7004 return;
7005
7006 for (fragp = seginfo->frchainP->frch_root;
7007 fragp != NULL; fragp = fragp->fr_next)
7008 {
7009 symbolS *sym = fragp->tc_frag_data.last_map;
7010 fragS *next = fragp->fr_next;
7011
7012 /* Variable-sized frags have been converted to fixed size by
7013 this point. But if this was variable-sized to start with,
7014 there will be a fixed-size frag after it. So don't handle
7015 next == NULL. */
7016 if (sym == NULL || next == NULL)
7017 continue;
7018
7019 if (S_GET_VALUE (sym) < next->fr_address)
7020 /* Not at the end of this frag. */
7021 continue;
7022 know (S_GET_VALUE (sym) == next->fr_address);
7023
7024 do
7025 {
7026 if (next->tc_frag_data.first_map != NULL)
7027 {
7028 /* Next frag starts with a mapping symbol. Discard this
7029 one. */
7030 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7031 break;
7032 }
7033
7034 if (next->fr_next == NULL)
7035 {
7036 /* This mapping symbol is at the end of the section. Discard
7037 it. */
7038 know (next->fr_fix == 0 && next->fr_var == 0);
7039 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7040 break;
7041 }
7042
7043 /* As long as we have empty frags without any mapping symbols,
7044 keep looking. */
7045 /* If the next frag is non-empty and does not start with a
7046 mapping symbol, then this mapping symbol is required. */
7047 if (next->fr_address != next->fr_next->fr_address)
7048 break;
7049
7050 next = next->fr_next;
7051 }
7052 while (next != NULL);
7053 }
7054 }
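
/* For example (illustrative assembly), a ".p2align 2" placed between two
   instructions in .text is already satisfied, so its alignment frag is
   empty; the mapping symbol recorded for that frag then falls at the same
   address as the one starting the following frag and is discarded as
   redundant by the loop above.  */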
7055 #endif
7056
7057 /* Adjust the symbol table. */
7058
7059 void
7060 aarch64_adjust_symtab (void)
7061 {
7062 #ifdef OBJ_ELF
7063 /* Remove any overlapping mapping symbols generated by alignment frags. */
7064 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7065 /* Now do generic ELF adjustments. */
7066 elf_adjust_symtab ();
7067 #endif
7068 }
7069
7070 static void
7071 checked_hash_insert (struct hash_control *table, const char *key, void *value)
7072 {
7073 const char *hash_err;
7074
7075 hash_err = hash_insert (table, key, value);
7076 if (hash_err)
7077 printf ("Internal Error: Can't hash %s\n", key);
7078 }
7079
7080 static void
7081 fill_instruction_hash_table (void)
7082 {
7083 aarch64_opcode *opcode = aarch64_opcode_table;
7084
7085 while (opcode->name != NULL)
7086 {
7087 templates *templ, *new_templ;
7088 templ = hash_find (aarch64_ops_hsh, opcode->name);
7089
7090 new_templ = (templates *) xmalloc (sizeof (templates));
7091 new_templ->opcode = opcode;
7092 new_templ->next = NULL;
7093
7094 if (!templ)
7095 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7096 else
7097 {
7098 new_templ->next = templ->next;
7099 templ->next = new_templ;
7100 }
7101 ++opcode;
7102 }
7103 }
7104
7105 static inline void
7106 convert_to_upper (char *dst, const char *src, size_t num)
7107 {
7108 unsigned int i;
7109 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7110 *dst = TOUPPER (*src);
7111 *dst = '\0';
7112 }
7113
7114 /* Assume STR points to a lower-case string; allocate, convert and return
7115 the corresponding upper-case string. */
7116 static inline const char*
7117 get_upper_str (const char *str)
7118 {
7119 char *ret;
7120 size_t len = strlen (str);
7121 if ((ret = xmalloc (len + 1)) == NULL)
7122 abort ();
7123 convert_to_upper (ret, str, len);
7124 return ret;
7125 }
7126
7127 /* MD interface: Initialization. */
7128
7129 void
7130 md_begin (void)
7131 {
7132 unsigned mach;
7133 unsigned int i;
7134
7135 if ((aarch64_ops_hsh = hash_new ()) == NULL
7136 || (aarch64_cond_hsh = hash_new ()) == NULL
7137 || (aarch64_shift_hsh = hash_new ()) == NULL
7138 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7139 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7140 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7141 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7142 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7143 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7144 || (aarch64_reg_hsh = hash_new ()) == NULL
7145 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7146 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7147 || (aarch64_pldop_hsh = hash_new ()) == NULL)
7148 as_fatal (_("virtual memory exhausted"));
7149
7150 fill_instruction_hash_table ();
7151
7152 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7153 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7154 (void *) (aarch64_sys_regs + i));
7155
7156 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7157 checked_hash_insert (aarch64_pstatefield_hsh,
7158 aarch64_pstatefields[i].name,
7159 (void *) (aarch64_pstatefields + i));
7160
7161 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
7162 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7163 aarch64_sys_regs_ic[i].template,
7164 (void *) (aarch64_sys_regs_ic + i));
7165
7166 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
7167 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7168 aarch64_sys_regs_dc[i].template,
7169 (void *) (aarch64_sys_regs_dc + i));
7170
7171 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
7172 checked_hash_insert (aarch64_sys_regs_at_hsh,
7173 aarch64_sys_regs_at[i].template,
7174 (void *) (aarch64_sys_regs_at + i));
7175
7176 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
7177 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7178 aarch64_sys_regs_tlbi[i].template,
7179 (void *) (aarch64_sys_regs_tlbi + i));
7180
7181 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7182 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7183 (void *) (reg_names + i));
7184
7185 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7186 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7187 (void *) (nzcv_names + i));
7188
7189 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7190 {
7191 const char *name = aarch64_operand_modifiers[i].name;
7192 checked_hash_insert (aarch64_shift_hsh, name,
7193 (void *) (aarch64_operand_modifiers + i));
7194 /* Also hash the name in the upper case. */
7195 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7196 (void *) (aarch64_operand_modifiers + i));
7197 }
7198
7199 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7200 {
7201 unsigned int j;
7202 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7203 the same condition code. */
7204 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7205 {
7206 const char *name = aarch64_conds[i].names[j];
7207 if (name == NULL)
7208 break;
7209 checked_hash_insert (aarch64_cond_hsh, name,
7210 (void *) (aarch64_conds + i));
7211 /* Also hash the name in the upper case. */
7212 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7213 (void *) (aarch64_conds + i));
7214 }
7215 }
7216
7217 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7218 {
7219 const char *name = aarch64_barrier_options[i].name;
7220 /* Skip xx00 - the unallocated values of the option field.  */
7221 if ((i & 0x3) == 0)
7222 continue;
7223 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7224 (void *) (aarch64_barrier_options + i));
7225 /* Also hash the name in the upper case. */
7226 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7227 (void *) (aarch64_barrier_options + i));
7228 }
7229
7230 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7231 {
7232 const char* name = aarch64_prfops[i].name;
7233 /* Skip the unallocated hint encodings. */
7234 if (name == NULL)
7235 continue;
7236 checked_hash_insert (aarch64_pldop_hsh, name,
7237 (void *) (aarch64_prfops + i));
7238 /* Also hash the name in the upper case. */
7239 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7240 (void *) (aarch64_prfops + i));
7241 }
7242
7243 /* Set the cpu variant based on the command-line options. */
7244 if (!mcpu_cpu_opt)
7245 mcpu_cpu_opt = march_cpu_opt;
7246
7247 if (!mcpu_cpu_opt)
7248 mcpu_cpu_opt = &cpu_default;
7249
7250 cpu_variant = *mcpu_cpu_opt;
7251
7252 /* Record the CPU type. */
7253 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7254
7255 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7256 }
7257
7258 /* Command line processing. */
7259
7260 const char *md_shortopts = "m:";
7261
7262 #ifdef AARCH64_BI_ENDIAN
7263 #define OPTION_EB (OPTION_MD_BASE + 0)
7264 #define OPTION_EL (OPTION_MD_BASE + 1)
7265 #else
7266 #if TARGET_BYTES_BIG_ENDIAN
7267 #define OPTION_EB (OPTION_MD_BASE + 0)
7268 #else
7269 #define OPTION_EL (OPTION_MD_BASE + 1)
7270 #endif
7271 #endif
7272
7273 struct option md_longopts[] = {
7274 #ifdef OPTION_EB
7275 {"EB", no_argument, NULL, OPTION_EB},
7276 #endif
7277 #ifdef OPTION_EL
7278 {"EL", no_argument, NULL, OPTION_EL},
7279 #endif
7280 {NULL, no_argument, NULL, 0}
7281 };
7282
7283 size_t md_longopts_size = sizeof (md_longopts);
7284
7285 struct aarch64_option_table
7286 {
7287 char *option; /* Option name to match. */
7288 char *help; /* Help information. */
7289 int *var; /* Variable to change. */
7290 int value; /* What to change it to. */
7291 char *deprecated; /* If non-null, print this message. */
7292 };
7293
7294 static struct aarch64_option_table aarch64_opts[] = {
7295 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7296 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7297 NULL},
7298 #ifdef DEBUG_AARCH64
7299 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7300 #endif /* DEBUG_AARCH64 */
7301 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7302 NULL},
7303 {"mno-verbose-error", N_("do not output verbose error messages"),
7304 &verbose_error_p, 0, NULL},
7305 {NULL, NULL, NULL, 0, NULL}
7306 };
7307
7308 struct aarch64_cpu_option_table
7309 {
7310 char *name;
7311 const aarch64_feature_set value;
7312 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7313 case. */
7314 const char *canonical_name;
7315 };
7316
7317 /* This list should, at a minimum, contain all the cpu names
7318 recognized by GCC. */
7319 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7320 {"all", AARCH64_ANY, NULL},
7321 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
7322 AARCH64_FEATURE_CRC), "Cortex-A53"},
7323 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
7324 AARCH64_FEATURE_CRC), "Cortex-A57"},
7325 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
7326 AARCH64_FEATURE_CRC), "Cortex-A72"},
7327 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
7328 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7329 "Samsung Exynos M1"},
7330 {"thunderx", AARCH64_ARCH_V8, "Cavium ThunderX"},
7331 /* The 'xgene-1' name is an older name for 'xgene1', which was used
7332 in earlier releases and is superseded by 'xgene1' in all
7333 tools. */
7334 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7335 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7336 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
7337 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
7338 {"generic", AARCH64_ARCH_V8, NULL},
7339
7340 {NULL, AARCH64_ARCH_NONE, NULL}
7341 };
7342
7343 struct aarch64_arch_option_table
7344 {
7345 char *name;
7346 const aarch64_feature_set value;
7347 };
7348
7349 /* This list should, at a minimum, contain all the architecture names
7350 recognized by GCC. */
7351 static const struct aarch64_arch_option_table aarch64_archs[] = {
7352 {"all", AARCH64_ANY},
7353 {"armv8-a", AARCH64_ARCH_V8},
7354 {NULL, AARCH64_ARCH_NONE}
7355 };
7356
7357 /* ISA extensions. */
7358 struct aarch64_option_cpu_value_table
7359 {
7360 char *name;
7361 const aarch64_feature_set value;
7362 };
7363
7364 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7365 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7366 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7367 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7368 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
7369 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7370 {NULL, AARCH64_ARCH_NONE}
7371 };
7372
7373 struct aarch64_long_option_table
7374 {
7375 char *option; /* Substring to match. */
7376 char *help; /* Help information. */
7377 int (*func) (char *subopt); /* Function to decode sub-option. */
7378 char *deprecated; /* If non-null, print this message. */
7379 };
7380
7381 static int
7382 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p,
7383 bfd_boolean ext_only)
7384 {
7385 /* We insist on extensions being added before being removed. We achieve
7386 this by using the ADDING_VALUE variable to indicate whether we are
7387 adding an extension (1) or removing it (0) and only allowing it to
7388 change in the order -1 -> 1 -> 0. */
7389 int adding_value = -1;
7390 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7391
7392 /* Copy the feature set, so that we can modify it. */
7393 *ext_set = **opt_p;
7394 *opt_p = ext_set;
7395
7396 while (str != NULL && *str != 0)
7397 {
7398 const struct aarch64_option_cpu_value_table *opt;
7399 char *ext = NULL;
7400 int optlen;
7401
7402 if (!ext_only)
7403 {
7404 if (*str != '+')
7405 {
7406 as_bad (_("invalid architectural extension"));
7407 return 0;
7408 }
7409
7410 ext = strchr (++str, '+');
7411 }
7412
7413 if (ext != NULL)
7414 optlen = ext - str;
7415 else
7416 optlen = strlen (str);
7417
7418 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7419 {
7420 if (adding_value != 0)
7421 adding_value = 0;
7422 optlen -= 2;
7423 str += 2;
7424 }
7425 else if (optlen > 0)
7426 {
7427 if (adding_value == -1)
7428 adding_value = 1;
7429 else if (adding_value != 1)
7430 {
7431 as_bad (_("must specify extensions to add before specifying "
7432 "those to remove"));
7433 return FALSE;
7434 }
7435 }
7436
7437 if (optlen == 0)
7438 {
7439 as_bad (_("missing architectural extension"));
7440 return 0;
7441 }
7442
7443 gas_assert (adding_value != -1);
7444
7445 for (opt = aarch64_features; opt->name != NULL; opt++)
7446 if (strncmp (opt->name, str, optlen) == 0)
7447 {
7448 /* Add or remove the extension. */
7449 if (adding_value)
7450 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7451 else
7452 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7453 break;
7454 }
7455
7456 if (opt->name == NULL)
7457 {
7458 as_bad (_("unknown architectural extension `%s'"), str);
7459 return 0;
7460 }
7461
7462 str = ext;
7463 };
7464
7465 return 1;
7466 }
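
/* For example (made-up command line), "-mcpu=generic+crc+nocrypto" hands
   this function the extension string "+crc+nocrypto": the CRC feature bits
   are merged in first and the crypto bits are then cleared.  A string such
   as "+nofp+simd" is rejected, because extensions to add must be listed
   before those to remove.  */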
7467
7468 static int
7469 aarch64_parse_cpu (char *str)
7470 {
7471 const struct aarch64_cpu_option_table *opt;
7472 char *ext = strchr (str, '+');
7473 size_t optlen;
7474
7475 if (ext != NULL)
7476 optlen = ext - str;
7477 else
7478 optlen = strlen (str);
7479
7480 if (optlen == 0)
7481 {
7482 as_bad (_("missing cpu name `%s'"), str);
7483 return 0;
7484 }
7485
7486 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7487 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7488 {
7489 mcpu_cpu_opt = &opt->value;
7490 if (ext != NULL)
7491 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7492
7493 return 1;
7494 }
7495
7496 as_bad (_("unknown cpu `%s'"), str);
7497 return 0;
7498 }
7499
7500 static int
7501 aarch64_parse_arch (char *str)
7502 {
7503 const struct aarch64_arch_option_table *opt;
7504 char *ext = strchr (str, '+');
7505 size_t optlen;
7506
7507 if (ext != NULL)
7508 optlen = ext - str;
7509 else
7510 optlen = strlen (str);
7511
7512 if (optlen == 0)
7513 {
7514 as_bad (_("missing architecture name `%s'"), str);
7515 return 0;
7516 }
7517
7518 for (opt = aarch64_archs; opt->name != NULL; opt++)
7519 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7520 {
7521 march_cpu_opt = &opt->value;
7522 if (ext != NULL)
7523 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
7524
7525 return 1;
7526 }
7527
7528 as_bad (_("unknown architecture `%s'\n"), str);
7529 return 0;
7530 }
7531
7532 /* ABIs. */
7533 struct aarch64_option_abi_value_table
7534 {
7535 char *name;
7536 enum aarch64_abi_type value;
7537 };
7538
7539 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7540 {"ilp32", AARCH64_ABI_ILP32},
7541 {"lp64", AARCH64_ABI_LP64},
7542 {NULL, 0}
7543 };
7544
7545 static int
7546 aarch64_parse_abi (char *str)
7547 {
7548 const struct aarch64_option_abi_value_table *opt;
7549 size_t optlen = strlen (str);
7550
7551 if (optlen == 0)
7552 {
7553 as_bad (_("missing abi name `%s'"), str);
7554 return 0;
7555 }
7556
7557 for (opt = aarch64_abis; opt->name != NULL; opt++)
7558 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7559 {
7560 aarch64_abi = opt->value;
7561 return 1;
7562 }
7563
7564 as_bad (_("unknown abi `%s'\n"), str);
7565 return 0;
7566 }
7567
7568 static struct aarch64_long_option_table aarch64_long_opts[] = {
7569 #ifdef OBJ_ELF
7570 {"mabi=", N_("<abi name>\t specify ABI <abi name>"),
7571 aarch64_parse_abi, NULL},
7572 #endif /* OBJ_ELF */
7573 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7574 aarch64_parse_cpu, NULL},
7575 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7576 aarch64_parse_arch, NULL},
7577 {NULL, NULL, 0, NULL}
7578 };
7579
7580 int
7581 md_parse_option (int c, char *arg)
7582 {
7583 struct aarch64_option_table *opt;
7584 struct aarch64_long_option_table *lopt;
7585
7586 switch (c)
7587 {
7588 #ifdef OPTION_EB
7589 case OPTION_EB:
7590 target_big_endian = 1;
7591 break;
7592 #endif
7593
7594 #ifdef OPTION_EL
7595 case OPTION_EL:
7596 target_big_endian = 0;
7597 break;
7598 #endif
7599
7600 case 'a':
7601 /* Listing option. Just ignore these, we don't support additional
7602 ones. */
7603 return 0;
7604
7605 default:
7606 for (opt = aarch64_opts; opt->option != NULL; opt++)
7607 {
7608 if (c == opt->option[0]
7609 && ((arg == NULL && opt->option[1] == 0)
7610 || streq (arg, opt->option + 1)))
7611 {
7612 /* If the option is deprecated, tell the user. */
7613 if (opt->deprecated != NULL)
7614 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7615 arg ? arg : "", _(opt->deprecated));
7616
7617 if (opt->var != NULL)
7618 *opt->var = opt->value;
7619
7620 return 1;
7621 }
7622 }
7623
7624 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7625 {
7626 /* These options are expected to have an argument. */
7627 if (c == lopt->option[0]
7628 && arg != NULL
7629 && strncmp (arg, lopt->option + 1,
7630 strlen (lopt->option + 1)) == 0)
7631 {
7632 /* If the option is deprecated, tell the user. */
7633 if (lopt->deprecated != NULL)
7634 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7635 _(lopt->deprecated));
7636
7637 /* Call the sub-option parser.  */
7638 return lopt->func (arg + strlen (lopt->option) - 1);
7639 }
7640 }
7641
7642 return 0;
7643 }
7644
7645 return 1;
7646 }
7647
7648 void
7649 md_show_usage (FILE * fp)
7650 {
7651 struct aarch64_option_table *opt;
7652 struct aarch64_long_option_table *lopt;
7653
7654 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7655
7656 for (opt = aarch64_opts; opt->option != NULL; opt++)
7657 if (opt->help != NULL)
7658 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7659
7660 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7661 if (lopt->help != NULL)
7662 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7663
7664 #ifdef OPTION_EB
7665 fprintf (fp, _("\
7666 -EB assemble code for a big-endian cpu\n"));
7667 #endif
7668
7669 #ifdef OPTION_EL
7670 fprintf (fp, _("\
7671 -EL assemble code for a little-endian cpu\n"));
7672 #endif
7673 }
7674
7675 /* Parse a .cpu directive. */
7676
7677 static void
7678 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7679 {
7680 const struct aarch64_cpu_option_table *opt;
7681 char saved_char;
7682 char *name;
7683 char *ext;
7684 size_t optlen;
7685
7686 name = input_line_pointer;
7687 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7688 input_line_pointer++;
7689 saved_char = *input_line_pointer;
7690 *input_line_pointer = 0;
7691
7692 ext = strchr (name, '+');
7693
7694 if (ext != NULL)
7695 optlen = ext - name;
7696 else
7697 optlen = strlen (name);
7698
7699 /* Skip the first "all" entry. */
7700 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7701 if (strlen (opt->name) == optlen
7702 && strncmp (name, opt->name, optlen) == 0)
7703 {
7704 mcpu_cpu_opt = &opt->value;
7705 if (ext != NULL)
7706 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7707 return;
7708
7709 cpu_variant = *mcpu_cpu_opt;
7710
7711 *input_line_pointer = saved_char;
7712 demand_empty_rest_of_line ();
7713 return;
7714 }
7715 as_bad (_("unknown cpu `%s'"), name);
7716 *input_line_pointer = saved_char;
7717 ignore_rest_of_line ();
7718 }
7719
7720
7721 /* Parse a .arch directive. */
7722
7723 static void
7724 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7725 {
7726 const struct aarch64_arch_option_table *opt;
7727 char saved_char;
7728 char *name;
7729 char *ext;
7730 size_t optlen;
7731
7732 name = input_line_pointer;
7733 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7734 input_line_pointer++;
7735 saved_char = *input_line_pointer;
7736 *input_line_pointer = 0;
7737
7738 ext = strchr (name, '+');
7739
7740 if (ext != NULL)
7741 optlen = ext - name;
7742 else
7743 optlen = strlen (name);
7744
7745 /* Skip the first "all" entry. */
7746 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7747 if (strlen (opt->name) == optlen
7748 && strncmp (name, opt->name, optlen) == 0)
7749 {
7750 mcpu_cpu_opt = &opt->value;
7751 if (ext != NULL)
7752 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7753 return;
7754
7755 cpu_variant = *mcpu_cpu_opt;
7756
7757 *input_line_pointer = saved_char;
7758 demand_empty_rest_of_line ();
7759 return;
7760 }
7761
7762 as_bad (_("unknown architecture `%s'\n"), name);
7763 *input_line_pointer = saved_char;
7764 ignore_rest_of_line ();
7765 }
7766
7767 /* Parse a .arch_extension directive. */
7768
7769 static void
7770 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
7771 {
7772 char saved_char;
7773 char *ext = input_line_pointer;
7774
7775 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7776 input_line_pointer++;
7777 saved_char = *input_line_pointer;
7778 *input_line_pointer = 0;
7779
7780 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
7781 return;
7782
7783 cpu_variant = *mcpu_cpu_opt;
7784
7785 *input_line_pointer = saved_char;
7786 demand_empty_rest_of_line ();
7787 }
7788
7789 /* Copy symbol information. */
7790
7791 void
7792 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7793 {
7794 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7795 }