1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_LP64 = 0,
66 AARCH64_ABI_ILP32 = 1
67 };
68
69 /* AArch64 ABI for the output file. */
70 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72 /* When non-zero, program to a 32-bit model, in which the C data types
73 int, long and all pointer types are 32-bit objects (ILP32); or to a
74 64-bit model, in which the C int type is 32 bits but the C long type
75 and all pointer types are 64-bit objects (LP64). */
76 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
77 #endif
78
79 enum neon_el_type
80 {
81 NT_invtype = -1,
82 NT_b,
83 NT_h,
84 NT_s,
85 NT_d,
86 NT_q
87 };
88
89 /* Bits for DEFINED field in neon_type_el. */
90 #define NTA_HASTYPE 1
91 #define NTA_HASINDEX 2
92
93 struct neon_type_el
94 {
95 enum neon_el_type type;
96 unsigned char defined;
97 unsigned width;
98 int64_t index;
99 };
100
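/* Illustrative example (a sketch based on the parsers further below): the
   qualifier in "v0.4s" would be recorded as type == NT_s, width == 4 with
   NTA_HASTYPE set in DEFINED, while an element reference such as
   "v0.s[1]" would be recorded with NTA_HASINDEX set and index == 1.  */
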
101 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
102
103 struct reloc
104 {
105 bfd_reloc_code_real_type type;
106 expressionS exp;
107 int pc_rel;
108 enum aarch64_opnd opnd;
109 uint32_t flags;
110 unsigned need_libopcodes_p : 1;
111 };
112
113 struct aarch64_instruction
114 {
115 /* libopcodes structure for instruction intermediate representation. */
116 aarch64_inst base;
117 /* Record assembly errors found during the parsing. */
118 struct
119 {
120 enum aarch64_operand_error_kind kind;
121 const char *error;
122 } parsing_error;
123 /* The condition that appears in the assembly line. */
124 int cond;
125 /* Relocation information (including the GAS internal fixup). */
126 struct reloc reloc;
127 /* Need to generate an immediate in the literal pool. */
128 unsigned gen_lit_pool : 1;
129 };
130
131 typedef struct aarch64_instruction aarch64_instruction;
132
133 static aarch64_instruction inst;
134
135 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
136 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
137
138 /* Diagnostics inline function utilities.
139
140 These are lightweight utilities which should only be called by parse_operands
141 and other parsers. GAS processes each assembly line by parsing it against
142 instruction template(s); when there are multiple templates (for the same
143 mnemonic name), they are tried one by one until one succeeds or
144 all fail. An assembly line may fail a few templates before being
145 successfully parsed; an error saved here in most cases is not a user error
146 but an error indicating the current template is not the right template.
147 Therefore it is very important that errors can be saved at a low cost during
148 the parsing; we don't want to slow down the whole parsing by recording
149 non-user errors in detail.
150
151 Remember that the objective is to help GAS pick up the most appropriate
152 error message in the case of multiple templates, e.g. FMOV which has 8
153 templates. */
154
155 static inline void
156 clear_error (void)
157 {
158 inst.parsing_error.kind = AARCH64_OPDE_NIL;
159 inst.parsing_error.error = NULL;
160 }
161
162 static inline bfd_boolean
163 error_p (void)
164 {
165 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
166 }
167
168 static inline const char *
169 get_error_message (void)
170 {
171 return inst.parsing_error.error;
172 }
173
174 static inline enum aarch64_operand_error_kind
175 get_error_kind (void)
176 {
177 return inst.parsing_error.kind;
178 }
179
180 static inline void
181 set_error (enum aarch64_operand_error_kind kind, const char *error)
182 {
183 inst.parsing_error.kind = kind;
184 inst.parsing_error.error = error;
185 }
186
187 static inline void
188 set_recoverable_error (const char *error)
189 {
190 set_error (AARCH64_OPDE_RECOVERABLE, error);
191 }
192
193 /* Use the DESC field of the corresponding aarch64_operand entry to compose
194 the error message. */
195 static inline void
196 set_default_error (void)
197 {
198 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
199 }
200
201 static inline void
202 set_syntax_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
205 }
206
207 static inline void
208 set_first_syntax_error (const char *error)
209 {
210 if (! error_p ())
211 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
212 }
213
214 static inline void
215 set_fatal_syntax_error (const char *error)
216 {
217 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
218 }
219 \f
220 /* Number of littlenums required to hold an extended precision number. */
221 #define MAX_LITTLENUMS 6
222
223 /* Return value for certain parsers when the parsing fails; those parsers
224 return the information of the parsed result, e.g. register number, on
225 success. */
226 #define PARSE_FAIL -1
227
228 /* This is an invalid condition code that means no conditional field is
229 present. */
230 #define COND_ALWAYS 0x10
231
232 typedef struct
233 {
234 const char *template;
235 unsigned long value;
236 } asm_barrier_opt;
237
238 typedef struct
239 {
240 const char *template;
241 uint32_t value;
242 } asm_nzcv;
243
244 struct reloc_entry
245 {
246 char *name;
247 bfd_reloc_code_real_type reloc;
248 };
249
250 /* Structure for a hash table entry for a register. */
251 typedef struct
252 {
253 const char *name;
254 unsigned char number;
255 unsigned char type;
256 unsigned char builtin;
257 } reg_entry;
258
259 /* Macros to define the register types and masks for the purpose
260 of parsing. */
261
262 #undef AARCH64_REG_TYPES
263 #define AARCH64_REG_TYPES \
264 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
265 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
266 BASIC_REG_TYPE(SP_32) /* wsp */ \
267 BASIC_REG_TYPE(SP_64) /* sp */ \
268 BASIC_REG_TYPE(Z_32) /* wzr */ \
269 BASIC_REG_TYPE(Z_64) /* xzr */ \
270 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
271 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
272 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
273 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
274 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
275 BASIC_REG_TYPE(CN) /* c[0-7] */ \
276 BASIC_REG_TYPE(VN) /* v[0-31] */ \
277 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
278 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
279 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
280 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
281 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
282 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
283 /* Typecheck: any [BHSDQ]P FP. */ \
284 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
285 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
286 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
287 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
289 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
290 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
291 /* Any integer register; used for error messages only. */ \
292 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
293 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Pseudo type to mark the end of the enumerator sequence. */ \
296 BASIC_REG_TYPE(MAX)
297
298 #undef BASIC_REG_TYPE
299 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
300 #undef MULTI_REG_TYPE
301 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
302
303 /* Register type enumerators. */
304 typedef enum
305 {
306 /* A list of REG_TYPE_*. */
307 AARCH64_REG_TYPES
308 } aarch64_reg_type;
309
310 #undef BASIC_REG_TYPE
311 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
312 #undef REG_TYPE
313 #define REG_TYPE(T) (1 << REG_TYPE_##T)
314 #undef MULTI_REG_TYPE
315 #define MULTI_REG_TYPE(T,V) V,
316
317 /* Values indexed by aarch64_reg_type to assist the type checking. */
318 static const unsigned reg_type_masks[] =
319 {
320 AARCH64_REG_TYPES
321 };
322
323 #undef BASIC_REG_TYPE
324 #undef REG_TYPE
325 #undef MULTI_REG_TYPE
326 #undef AARCH64_REG_TYPES
327
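/* Illustrative expansion of the X-macro above (abridged sketch):

     reg_type_masks[REG_TYPE_R_64]   == (1 << REG_TYPE_R_64)
     reg_type_masks[REG_TYPE_R64_SP] == (1 << REG_TYPE_R_64)
                                        | (1 << REG_TYPE_SP_64)

   A register of type T is therefore acceptable where type U is expected
   iff (reg_type_masks[T] & reg_type_masks[U]) == reg_type_masks[T]; see
   aarch64_check_reg_type below.  */
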
328 /* Diagnostics used when we don't get a register of the expected type.
329 Note: this has to be synchronized with the aarch64_reg_type definitions
330 above. */
331 static const char *
332 get_reg_expected_msg (aarch64_reg_type reg_type)
333 {
334 const char *msg;
335
336 switch (reg_type)
337 {
338 case REG_TYPE_R_32:
339 msg = N_("integer 32-bit register expected");
340 break;
341 case REG_TYPE_R_64:
342 msg = N_("integer 64-bit register expected");
343 break;
344 case REG_TYPE_R_N:
345 msg = N_("integer register expected");
346 break;
347 case REG_TYPE_R_Z_SP:
348 msg = N_("integer, zero or SP register expected");
349 break;
350 case REG_TYPE_FP_B:
351 msg = N_("8-bit SIMD scalar register expected");
352 break;
353 case REG_TYPE_FP_H:
354 msg = N_("16-bit SIMD scalar or floating-point half precision "
355 "register expected");
356 break;
357 case REG_TYPE_FP_S:
358 msg = N_("32-bit SIMD scalar or floating-point single precision "
359 "register expected");
360 break;
361 case REG_TYPE_FP_D:
362 msg = N_("64-bit SIMD scalar or floating-point double precision "
363 "register expected");
364 break;
365 case REG_TYPE_FP_Q:
366 msg = N_("128-bit SIMD scalar or floating-point quad precision "
367 "register expected");
368 break;
369 case REG_TYPE_CN:
370 msg = N_("C0 - C15 expected");
371 break;
372 case REG_TYPE_R_Z_BHSDQ_V:
373 msg = N_("register expected");
374 break;
375 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
376 msg = N_("SIMD scalar or floating-point register expected");
377 break;
378 case REG_TYPE_VN: /* any V reg */
379 msg = N_("vector register expected");
380 break;
381 default:
382 as_fatal (_("invalid register type %d"), reg_type);
383 }
384 return msg;
385 }
386
387 /* Some well known registers that we refer to directly elsewhere. */
388 #define REG_SP 31
389
390 /* Instructions take 4 bytes in the object file. */
391 #define INSN_SIZE 4
392
393 /* Define some common error messages. */
394 #define BAD_SP _("SP not allowed here")
395
396 static struct hash_control *aarch64_ops_hsh;
397 static struct hash_control *aarch64_cond_hsh;
398 static struct hash_control *aarch64_shift_hsh;
399 static struct hash_control *aarch64_sys_regs_hsh;
400 static struct hash_control *aarch64_pstatefield_hsh;
401 static struct hash_control *aarch64_sys_regs_ic_hsh;
402 static struct hash_control *aarch64_sys_regs_dc_hsh;
403 static struct hash_control *aarch64_sys_regs_at_hsh;
404 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
405 static struct hash_control *aarch64_reg_hsh;
406 static struct hash_control *aarch64_barrier_opt_hsh;
407 static struct hash_control *aarch64_nzcv_hsh;
408 static struct hash_control *aarch64_pldop_hsh;
409
410 /* Stuff needed to resolve the label ambiguity
411 As:
412 ...
413 label: <insn>
414 may differ from:
415 ...
416 label:
417 <insn> */
418
419 static symbolS *last_label_seen;
420
421 /* Literal pool structure. Held on a per-section
422 and per-sub-section basis. */
423
424 #define MAX_LITERAL_POOL_SIZE 1024
425 typedef struct literal_expression
426 {
427 expressionS exp;
428 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
429 LITTLENUM_TYPE * bignum;
430 } literal_expression;
431
432 typedef struct literal_pool
433 {
434 literal_expression literals[MAX_LITERAL_POOL_SIZE];
435 unsigned int next_free_entry;
436 unsigned int id;
437 symbolS *symbol;
438 segT section;
439 subsegT sub_section;
440 int size;
441 struct literal_pool *next;
442 } literal_pool;
443
444 /* Pointer to a linked list of literal pools. */
445 static literal_pool *list_of_pools = NULL;
446 \f
447 /* Pure syntax. */
448
449 /* This array holds the chars that always start a comment. If the
450 pre-processor is disabled, these aren't very useful. */
451 const char comment_chars[] = "";
452
453 /* This array holds the chars that only start a comment at the beginning of
454 a line. If the line seems to have the form '# 123 filename'
455 .line and .file directives will appear in the pre-processed output. */
456 /* Note that input_file.c hand checks for '#' at the beginning of the
457 first line of the input file. This is because the compiler outputs
458 #NO_APP at the beginning of its output. */
459 /* Also note that comments like this one will always work. */
460 const char line_comment_chars[] = "#";
461
462 const char line_separator_chars[] = ";";
463
464 /* Chars that can be used to separate the mantissa
465 from the exponent in floating point numbers. */
466 const char EXP_CHARS[] = "eE";
467
468 /* Chars that mean this number is a floating point constant. */
469 /* As in 0f12.456 */
470 /* or 0d1.2345e12 */
471
472 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
473
474 /* Prefix character that indicates the start of an immediate value. */
475 #define is_immediate_prefix(C) ((C) == '#')
476
477 /* Separator character handling. */
478
479 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
480
481 static inline bfd_boolean
482 skip_past_char (char **str, char c)
483 {
484 if (**str == c)
485 {
486 (*str)++;
487 return TRUE;
488 }
489 else
490 return FALSE;
491 }
492
493 #define skip_past_comma(str) skip_past_char (str, ',')
494
495 /* Arithmetic expressions (possibly involving symbols). */
496
497 static bfd_boolean in_my_get_expression_p = FALSE;
498
499 /* Third argument to my_get_expression. */
500 #define GE_NO_PREFIX 0
501 #define GE_OPT_PREFIX 1
502
503 /* Return TRUE if the string pointed to by *STR is successfully parsed
504 as a valid expression; *EP will be filled with the information of
505 such an expression. Otherwise return FALSE. */
506
507 static bfd_boolean
508 my_get_expression (expressionS * ep, char **str, int prefix_mode,
509 int reject_absent)
510 {
511 char *save_in;
512 segT seg;
513 int prefix_present_p = 0;
514
515 switch (prefix_mode)
516 {
517 case GE_NO_PREFIX:
518 break;
519 case GE_OPT_PREFIX:
520 if (is_immediate_prefix (**str))
521 {
522 (*str)++;
523 prefix_present_p = 1;
524 }
525 break;
526 default:
527 abort ();
528 }
529
530 memset (ep, 0, sizeof (expressionS));
531
532 save_in = input_line_pointer;
533 input_line_pointer = *str;
534 in_my_get_expression_p = TRUE;
535 seg = expression (ep);
536 in_my_get_expression_p = FALSE;
537
538 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
539 {
540 /* We found a bad expression in md_operand(). */
541 *str = input_line_pointer;
542 input_line_pointer = save_in;
543 if (prefix_present_p && ! error_p ())
544 set_fatal_syntax_error (_("bad expression"));
545 else
546 set_first_syntax_error (_("bad expression"));
547 return FALSE;
548 }
549
550 #ifdef OBJ_AOUT
551 if (seg != absolute_section
552 && seg != text_section
553 && seg != data_section
554 && seg != bss_section && seg != undefined_section)
555 {
556 set_syntax_error (_("bad segment"));
557 *str = input_line_pointer;
558 input_line_pointer = save_in;
559 return FALSE;
560 }
561 #else
562 (void) seg;
563 #endif
564
565 *str = input_line_pointer;
566 input_line_pointer = save_in;
567 return TRUE;
568 }
569
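/* For example (illustrative): with GE_OPT_PREFIX both "#16" and "16"
   parse as the constant 16, because the optional '#' is skipped before
   expression () is called; GE_NO_PREFIX performs no such skipping.  */
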
570 /* Turn a string in input_line_pointer into a floating point constant
571 of type TYPE, and store the appropriate bytes in *LITP. The number
572 of LITTLENUMS emitted is stored in *SIZEP. An error message is
573 returned, or NULL on OK. */
574
575 char *
576 md_atof (int type, char *litP, int *sizeP)
577 {
578 return ieee_md_atof (type, litP, sizeP, target_big_endian);
579 }
580
581 /* We handle all bad expressions here, so that we can report the faulty
582 instruction in the error message. */
583 void
584 md_operand (expressionS * exp)
585 {
586 if (in_my_get_expression_p)
587 exp->X_op = O_illegal;
588 }
589
590 /* Immediate values. */
591
592 /* Errors may be set multiple times during parsing or bit encoding
593 (particularly in the Neon bits), but usually the earliest error which is set
594 will be the most meaningful. Avoid overwriting it with later (cascading)
595 errors by calling this function. */
596
597 static void
598 first_error (const char *error)
599 {
600 if (! error_p ())
601 set_syntax_error (error);
602 }
603
604 /* Similar to first_error, but this function accepts a formatted error
605 message. */
606 static void
607 first_error_fmt (const char *format, ...)
608 {
609 va_list args;
610 enum
611 { size = 100 };
612 /* N.B. this single buffer will not cause error messages for different
613 instructions to pollute each other; this is because at the end of
614 processing of each assembly line, any error message will be
615 collected by as_bad. */
616 static char buffer[size];
617
618 if (! error_p ())
619 {
620 int ret ATTRIBUTE_UNUSED;
621 va_start (args, format);
622 ret = vsnprintf (buffer, size, format, args);
623 know (ret <= size - 1 && ret >= 0);
624 va_end (args);
625 set_syntax_error (buffer);
626 }
627 }
628
629 /* Register parsing. */
630
631 /* Generic register parser which is called by other specialized
632 register parsers.
633 CCP points to what should be the beginning of a register name.
634 If it is indeed a valid register name, advance CCP over it and
635 return the reg_entry structure; otherwise return NULL.
636 It does not issue diagnostics. */
637
638 static reg_entry *
639 parse_reg (char **ccp)
640 {
641 char *start = *ccp;
642 char *p;
643 reg_entry *reg;
644
645 #ifdef REGISTER_PREFIX
646 if (*start != REGISTER_PREFIX)
647 return NULL;
648 start++;
649 #endif
650
651 p = start;
652 if (!ISALPHA (*p) || !is_name_beginner (*p))
653 return NULL;
654
655 do
656 p++;
657 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
658
659 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
660
661 if (!reg)
662 return NULL;
663
664 *ccp = p;
665 return reg;
666 }
667
668 /* Return TRUE if REG->TYPE is compatible with the required type TYPE;
669 otherwise return FALSE. */
670 static bfd_boolean
671 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
672 {
673 if (reg->type == type)
674 return TRUE;
675
676 switch (type)
677 {
678 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
679 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
680 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
681 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
682 case REG_TYPE_VN: /* Vector register. */
683 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
684 return ((reg_type_masks[reg->type] & reg_type_masks[type])
685 == reg_type_masks[reg->type]);
686 default:
687 as_fatal ("unhandled type %d", type);
688 abort ();
689 }
690 }
691
692 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
693 Return the register number otherwise. *ISREG32 is set to one if the
694 register is 32-bit wide; *ISREGZERO is set to one if the register is
695 of type Z_32 or Z_64.
696 Note that this function does not issue any diagnostics. */
697
698 static int
699 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
700 int *isreg32, int *isregzero)
701 {
702 char *str = *ccp;
703 const reg_entry *reg = parse_reg (&str);
704
705 if (reg == NULL)
706 return PARSE_FAIL;
707
708 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
709 return PARSE_FAIL;
710
711 switch (reg->type)
712 {
713 case REG_TYPE_SP_32:
714 case REG_TYPE_SP_64:
715 if (reject_sp)
716 return PARSE_FAIL;
717 *isreg32 = reg->type == REG_TYPE_SP_32;
718 *isregzero = 0;
719 break;
720 case REG_TYPE_R_32:
721 case REG_TYPE_R_64:
722 *isreg32 = reg->type == REG_TYPE_R_32;
723 *isregzero = 0;
724 break;
725 case REG_TYPE_Z_32:
726 case REG_TYPE_Z_64:
727 if (reject_rz)
728 return PARSE_FAIL;
729 *isreg32 = reg->type == REG_TYPE_Z_32;
730 *isregzero = 1;
731 break;
732 default:
733 return PARSE_FAIL;
734 }
735
736 *ccp = str;
737
738 return reg->number;
739 }
740
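/* For example (illustrative): parsing "x3" returns 3 with *ISREG32 == 0
   and *ISREGZERO == 0, while "wzr" returns 31 with *ISREG32 == 1 and
   *ISREGZERO == 1 (or PARSE_FAIL if REJECT_RZ is set).  */
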
741 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
742 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
743 otherwise return FALSE.
744
745 Accept only one occurrence of:
746 8b 16b 4h 8h 2s 4s 1d 2d
747 b h s d q */
748 static bfd_boolean
749 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
750 {
751 char *ptr = *str;
752 unsigned width;
753 unsigned element_size;
754 enum neon_el_type type;
755
756 /* skip '.' */
757 ptr++;
758
759 if (!ISDIGIT (*ptr))
760 {
761 width = 0;
762 goto elt_size;
763 }
764 width = strtoul (ptr, &ptr, 10);
765 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
766 {
767 first_error_fmt (_("bad size %d in vector width specifier"), width);
768 return FALSE;
769 }
770
771 elt_size:
772 switch (TOLOWER (*ptr))
773 {
774 case 'b':
775 type = NT_b;
776 element_size = 8;
777 break;
778 case 'h':
779 type = NT_h;
780 element_size = 16;
781 break;
782 case 's':
783 type = NT_s;
784 element_size = 32;
785 break;
786 case 'd':
787 type = NT_d;
788 element_size = 64;
789 break;
790 case 'q':
791 if (width == 1)
792 {
793 type = NT_q;
794 element_size = 128;
795 break;
796 }
797 /* fall through. */
798 default:
799 if (*ptr != '\0')
800 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
801 else
802 first_error (_("missing element size"));
803 return FALSE;
804 }
805 if (width != 0 && width * element_size != 64 && width * element_size != 128)
806 {
807 first_error_fmt (_
808 ("invalid element size %d and vector size combination %c"),
809 width, *ptr);
810 return FALSE;
811 }
812 ptr++;
813
814 parsed_type->type = type;
815 parsed_type->width = width;
816
817 *str = ptr;
818
819 return TRUE;
820 }
821
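/* Worked examples (illustrative): ".4s" gives width 4, type NT_s,
   element size 32 (4 * 32 == 128, accepted); ".1d" gives width 1, type
   NT_d (1 * 64 == 64, accepted); ".2h" is rejected because 2 * 16 is
   neither 64 nor 128; ".3s" is rejected because 3 is not a valid vector
   width.  */
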
822 /* Parse a single type, e.g. ".8b", leading period included.
823 Only applicable to Vn registers.
824
825 Return TRUE on success; otherwise return FALSE. */
826 static bfd_boolean
827 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
828 {
829 char *str = *ccp;
830
831 if (*str == '.')
832 {
833 if (! parse_neon_type_for_operand (vectype, &str))
834 {
835 first_error (_("vector type expected"));
836 return FALSE;
837 }
838 }
839 else
840 return FALSE;
841
842 *ccp = str;
843
844 return TRUE;
845 }
846
847 /* Parse a register of the type TYPE.
848
849 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
850 name or the parsed register is not of TYPE.
851
852 Otherwise return the register number, and optionally fill in the actual
853 type of the register in *RTYPE when multiple alternatives were given, and
854 return the register shape and element index information in *TYPEINFO.
855
856 IN_REG_LIST should be set with TRUE if the caller is parsing a register
857 list. */
858
859 static int
860 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
861 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
862 {
863 char *str = *ccp;
864 const reg_entry *reg = parse_reg (&str);
865 struct neon_type_el atype;
866 struct neon_type_el parsetype;
867 bfd_boolean is_typed_vecreg = FALSE;
868
869 atype.defined = 0;
870 atype.type = NT_invtype;
871 atype.width = -1;
872 atype.index = 0;
873
874 if (reg == NULL)
875 {
876 if (typeinfo)
877 *typeinfo = atype;
878 set_default_error ();
879 return PARSE_FAIL;
880 }
881
882 if (! aarch64_check_reg_type (reg, type))
883 {
884 DEBUG_TRACE ("reg type check failed");
885 set_default_error ();
886 return PARSE_FAIL;
887 }
888 type = reg->type;
889
890 if (type == REG_TYPE_VN
891 && parse_neon_operand_type (&parsetype, &str))
892 {
893 /* Register is of the form Vn.[bhsdq]. */
894 is_typed_vecreg = TRUE;
895
896 if (parsetype.width == 0)
897 /* Expect index. In the new scheme we cannot have
898 Vn.[bhsdq] represent a scalar. Therefore any
899 Vn.[bhsdq] should have an index following it.
900 Except in register lists, of course. */
901 atype.defined |= NTA_HASINDEX;
902 else
903 atype.defined |= NTA_HASTYPE;
904
905 atype.type = parsetype.type;
906 atype.width = parsetype.width;
907 }
908
909 if (skip_past_char (&str, '['))
910 {
911 expressionS exp;
912
913 /* Reject Sn[index] syntax. */
914 if (!is_typed_vecreg)
915 {
916 first_error (_("this type of register can't be indexed"));
917 return PARSE_FAIL;
918 }
919
920 if (in_reg_list == TRUE)
921 {
922 first_error (_("index not allowed inside register list"));
923 return PARSE_FAIL;
924 }
925
926 atype.defined |= NTA_HASINDEX;
927
928 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
929
930 if (exp.X_op != O_constant)
931 {
932 first_error (_("constant expression required"));
933 return PARSE_FAIL;
934 }
935
936 if (! skip_past_char (&str, ']'))
937 return PARSE_FAIL;
938
939 atype.index = exp.X_add_number;
940 }
941 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
942 {
943 /* Indexed vector register expected. */
944 first_error (_("indexed vector register expected"));
945 return PARSE_FAIL;
946 }
947
948 /* A vector reg Vn should be typed or indexed. */
949 if (type == REG_TYPE_VN && atype.defined == 0)
950 {
951 first_error (_("invalid use of vector register"));
952 }
953
954 if (typeinfo)
955 *typeinfo = atype;
956
957 if (rtype)
958 *rtype = type;
959
960 *ccp = str;
961
962 return reg->number;
963 }
964
965 /* Parse register.
966
967 Return the register number on success; return PARSE_FAIL otherwise.
968
969 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
970 the register (e.g. NEON double or quad reg when either has been requested).
971
972 If this is a NEON vector register with additional type information, fill
973 in the struct pointed to by VECTYPE (if non-NULL).
974
975 This parser does not handle register lists. */
976
977 static int
978 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
979 aarch64_reg_type *rtype, struct neon_type_el *vectype)
980 {
981 struct neon_type_el atype;
982 char *str = *ccp;
983 int reg = parse_typed_reg (&str, type, rtype, &atype,
984 /*in_reg_list= */ FALSE);
985
986 if (reg == PARSE_FAIL)
987 return PARSE_FAIL;
988
989 if (vectype)
990 *vectype = atype;
991
992 *ccp = str;
993
994 return reg;
995 }
996
997 static inline bfd_boolean
998 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
999 {
1000 return
1001 e1.type == e2.type
1002 && e1.defined == e2.defined
1003 && e1.width == e2.width && e1.index == e2.index;
1004 }
1005
1006 /* This function parses the NEON register list. On success, it returns
1007 the parsed register list information in the following encoded format:
1008
1009 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1010 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1011
1012 The information of the register shape and/or index is returned in
1013 *VECTYPE.
1014
1015 It returns PARSE_FAIL if the register list is invalid.
1016
1017 The list contains one to four registers.
1018 Each register can be one of:
1019 <Vt>.<T>[<index>]
1020 <Vt>.<T>
1021 All <T> should be identical.
1022 All <index> should be identical.
1023 There are restrictions on <Vt> numbers which are checked later
1024 (by reg_list_valid_p). */
1025
1026 static int
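/* Worked example (illustrative): for "{v2.4s, v3.4s}" the loop below
   collects register numbers 2 and 3, so ret_val == 2 | (3 << 5) == 98
   and the function returns (98 << 2) | (2 - 1) == 0x189: num_of_reg 2,
   1st regno 2, 2nd regno 3.  The ".4s" shape is returned in *VECTYPE.  */
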
1027 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1028 {
1029 char *str = *ccp;
1030 int nb_regs;
1031 struct neon_type_el typeinfo, typeinfo_first;
1032 int val, val_range;
1033 int in_range;
1034 int ret_val;
1035 int i;
1036 bfd_boolean error = FALSE;
1037 bfd_boolean expect_index = FALSE;
1038
1039 if (*str != '{')
1040 {
1041 set_syntax_error (_("expecting {"));
1042 return PARSE_FAIL;
1043 }
1044 str++;
1045
1046 nb_regs = 0;
1047 typeinfo_first.defined = 0;
1048 typeinfo_first.type = NT_invtype;
1049 typeinfo_first.width = -1;
1050 typeinfo_first.index = 0;
1051 ret_val = 0;
1052 val = -1;
1053 val_range = -1;
1054 in_range = 0;
1055 do
1056 {
1057 if (in_range)
1058 {
1059 str++; /* skip over '-' */
1060 val_range = val;
1061 }
1062 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1063 /*in_reg_list= */ TRUE);
1064 if (val == PARSE_FAIL)
1065 {
1066 set_first_syntax_error (_("invalid vector register in list"));
1067 error = TRUE;
1068 continue;
1069 }
1070 /* reject [bhsd]n */
1071 if (typeinfo.defined == 0)
1072 {
1073 set_first_syntax_error (_("invalid scalar register in list"));
1074 error = TRUE;
1075 continue;
1076 }
1077
1078 if (typeinfo.defined & NTA_HASINDEX)
1079 expect_index = TRUE;
1080
1081 if (in_range)
1082 {
1083 if (val < val_range)
1084 {
1085 set_first_syntax_error
1086 (_("invalid range in vector register list"));
1087 error = TRUE;
1088 }
1089 val_range++;
1090 }
1091 else
1092 {
1093 val_range = val;
1094 if (nb_regs == 0)
1095 typeinfo_first = typeinfo;
1096 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1097 {
1098 set_first_syntax_error
1099 (_("type mismatch in vector register list"));
1100 error = TRUE;
1101 }
1102 }
1103 if (! error)
1104 for (i = val_range; i <= val; i++)
1105 {
1106 ret_val |= i << (5 * nb_regs);
1107 nb_regs++;
1108 }
1109 in_range = 0;
1110 }
1111 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1112
1113 skip_whitespace (str);
1114 if (*str != '}')
1115 {
1116 set_first_syntax_error (_("end of vector register list not found"));
1117 error = TRUE;
1118 }
1119 str++;
1120
1121 skip_whitespace (str);
1122
1123 if (expect_index)
1124 {
1125 if (skip_past_char (&str, '['))
1126 {
1127 expressionS exp;
1128
1129 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1130 if (exp.X_op != O_constant)
1131 {
1132 set_first_syntax_error (_("constant expression required."));
1133 error = TRUE;
1134 }
1135 if (! skip_past_char (&str, ']'))
1136 error = TRUE;
1137 else
1138 typeinfo_first.index = exp.X_add_number;
1139 }
1140 else
1141 {
1142 set_first_syntax_error (_("expected index"));
1143 error = TRUE;
1144 }
1145 }
1146
1147 if (nb_regs > 4)
1148 {
1149 set_first_syntax_error (_("too many registers in vector register list"));
1150 error = TRUE;
1151 }
1152 else if (nb_regs == 0)
1153 {
1154 set_first_syntax_error (_("empty vector register list"));
1155 error = TRUE;
1156 }
1157
1158 *ccp = str;
1159 if (! error)
1160 *vectype = typeinfo_first;
1161
1162 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1163 }
1164
1165 /* Directives: register aliases. */
1166
1167 static reg_entry *
1168 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1169 {
1170 reg_entry *new;
1171 const char *name;
1172
1173 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1174 {
1175 if (new->builtin)
1176 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1177 str);
1178
1179 /* Only warn about a redefinition if it's not defined as the
1180 same register. */
1181 else if (new->number != number || new->type != type)
1182 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1183
1184 return NULL;
1185 }
1186
1187 name = xstrdup (str);
1188 new = xmalloc (sizeof (reg_entry));
1189
1190 new->name = name;
1191 new->number = number;
1192 new->type = type;
1193 new->builtin = FALSE;
1194
1195 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1196 abort ();
1197
1198 return new;
1199 }
1200
1201 /* Look for the .req directive. This is of the form:
1202
1203 new_register_name .req existing_register_name
1204
1205 If we find one, or if it looks sufficiently like one that we want to
1206 handle any error here, return TRUE. Otherwise return FALSE. */
1207
1208 static bfd_boolean
1209 create_register_alias (char *newname, char *p)
1210 {
1211 const reg_entry *old;
1212 char *oldname, *nbuf;
1213 size_t nlen;
1214
1215 /* The input scrubber ensures that whitespace after the mnemonic is
1216 collapsed to single spaces. */
1217 oldname = p;
1218 if (strncmp (oldname, " .req ", 6) != 0)
1219 return FALSE;
1220
1221 oldname += 6;
1222 if (*oldname == '\0')
1223 return FALSE;
1224
1225 old = hash_find (aarch64_reg_hsh, oldname);
1226 if (!old)
1227 {
1228 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1229 return TRUE;
1230 }
1231
1232 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1233 the desired alias name, and p points to its end. If not, then
1234 the desired alias name is in the global original_case_string. */
1235 #ifdef TC_CASE_SENSITIVE
1236 nlen = p - newname;
1237 #else
1238 newname = original_case_string;
1239 nlen = strlen (newname);
1240 #endif
1241
1242 nbuf = alloca (nlen + 1);
1243 memcpy (nbuf, newname, nlen);
1244 nbuf[nlen] = '\0';
1245
1246 /* Create aliases under the new name as stated; an all-lowercase
1247 version of the new name; and an all-uppercase version of the new
1248 name. */
1249 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1250 {
1251 for (p = nbuf; *p; p++)
1252 *p = TOUPPER (*p);
1253
1254 if (strncmp (nbuf, newname, nlen))
1255 {
1256 /* If this attempt to create an additional alias fails, do not bother
1257 trying to create the all-lower case alias. We will fail and issue
1258 a second, duplicate error message. This situation arises when the
1259 programmer does something like:
1260 foo .req x0
1261 Foo .req x1
1262 The second .req creates the "Foo" alias but then fails to create
1263 the artificial FOO alias because it has already been created by the
1264 first .req. */
1265 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1266 return TRUE;
1267 }
1268
1269 for (p = nbuf; *p; p++)
1270 *p = TOLOWER (*p);
1271
1272 if (strncmp (nbuf, newname, nlen))
1273 insert_reg_alias (nbuf, old->number, old->type);
1274 }
1275
1276 return TRUE;
1277 }
1278
1279 /* Should never be called, as .req goes between the alias and the
1280 register name, not at the beginning of the line. */
1281 static void
1282 s_req (int a ATTRIBUTE_UNUSED)
1283 {
1284 as_bad (_("invalid syntax for .req directive"));
1285 }
1286
1287 /* The .unreq directive deletes an alias which was previously defined
1288 by .req. For example:
1289
1290 my_alias .req x11
1291 .unreq my_alias */
1292
1293 static void
1294 s_unreq (int a ATTRIBUTE_UNUSED)
1295 {
1296 char *name;
1297 char saved_char;
1298
1299 name = input_line_pointer;
1300
1301 while (*input_line_pointer != 0
1302 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1303 ++input_line_pointer;
1304
1305 saved_char = *input_line_pointer;
1306 *input_line_pointer = 0;
1307
1308 if (!*name)
1309 as_bad (_("invalid syntax for .unreq directive"));
1310 else
1311 {
1312 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1313
1314 if (!reg)
1315 as_bad (_("unknown register alias '%s'"), name);
1316 else if (reg->builtin)
1317 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1318 name);
1319 else
1320 {
1321 char *p;
1322 char *nbuf;
1323
1324 hash_delete (aarch64_reg_hsh, name, FALSE);
1325 free ((char *) reg->name);
1326 free (reg);
1327
1328 /* Also locate the all upper case and all lower case versions.
1329 Do not complain if we cannot find one or the other as it
1330 was probably deleted above. */
1331
1332 nbuf = strdup (name);
1333 for (p = nbuf; *p; p++)
1334 *p = TOUPPER (*p);
1335 reg = hash_find (aarch64_reg_hsh, nbuf);
1336 if (reg)
1337 {
1338 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1339 free ((char *) reg->name);
1340 free (reg);
1341 }
1342
1343 for (p = nbuf; *p; p++)
1344 *p = TOLOWER (*p);
1345 reg = hash_find (aarch64_reg_hsh, nbuf);
1346 if (reg)
1347 {
1348 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1349 free ((char *) reg->name);
1350 free (reg);
1351 }
1352
1353 free (nbuf);
1354 }
1355 }
1356
1357 *input_line_pointer = saved_char;
1358 demand_empty_rest_of_line ();
1359 }
1360
1361 /* Directives: Instruction set selection. */
1362
1363 #ifdef OBJ_ELF
1364 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1365 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1366 Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
1367 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1368
1369 /* Create a new mapping symbol for the transition to STATE. */
1370
1371 static void
1372 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1373 {
1374 symbolS *symbolP;
1375 const char *symname;
1376 int type;
1377
1378 switch (state)
1379 {
1380 case MAP_DATA:
1381 symname = "$d";
1382 type = BSF_NO_FLAGS;
1383 break;
1384 case MAP_INSN:
1385 symname = "$x";
1386 type = BSF_NO_FLAGS;
1387 break;
1388 default:
1389 abort ();
1390 }
1391
1392 symbolP = symbol_new (symname, now_seg, value, frag);
1393 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1394
1395 /* Save the mapping symbols for future reference. Also check that
1396 we do not place two mapping symbols at the same offset within a
1397 frag. We'll handle overlap between frags in
1398 check_mapping_symbols.
1399
1400 If .fill or other data filling directive generates zero sized data,
1401 the mapping symbol for the following code will have the same value
1402 as the one generated for the data filling directive. In this case,
1403 we replace the old symbol with the new one at the same address. */
1404 if (value == 0)
1405 {
1406 if (frag->tc_frag_data.first_map != NULL)
1407 {
1408 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1409 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1410 &symbol_lastP);
1411 }
1412 frag->tc_frag_data.first_map = symbolP;
1413 }
1414 if (frag->tc_frag_data.last_map != NULL)
1415 {
1416 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1417 S_GET_VALUE (symbolP));
1418 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1419 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1420 &symbol_lastP);
1421 }
1422 frag->tc_frag_data.last_map = symbolP;
1423 }
1424
1425 /* We must sometimes convert a region marked as code to data during
1426 code alignment, if an odd number of bytes have to be padded. The
1427 code mapping symbol is pushed to an aligned address. */
1428
1429 static void
1430 insert_data_mapping_symbol (enum mstate state,
1431 valueT value, fragS * frag, offsetT bytes)
1432 {
1433 /* If there was already a mapping symbol, remove it. */
1434 if (frag->tc_frag_data.last_map != NULL
1435 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1436 frag->fr_address + value)
1437 {
1438 symbolS *symp = frag->tc_frag_data.last_map;
1439
1440 if (value == 0)
1441 {
1442 know (frag->tc_frag_data.first_map == symp);
1443 frag->tc_frag_data.first_map = NULL;
1444 }
1445 frag->tc_frag_data.last_map = NULL;
1446 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1447 }
1448
1449 make_mapping_symbol (MAP_DATA, value, frag);
1450 make_mapping_symbol (state, value + bytes, frag);
1451 }
1452
1453 static void mapping_state_2 (enum mstate state, int max_chars);
1454
1455 /* Set the mapping state to STATE. Only call this when about to
1456 emit some STATE bytes to the file. */
1457
1458 void
1459 mapping_state (enum mstate state)
1460 {
1461 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1462
1463 if (state == MAP_INSN)
1464 /* AArch64 instructions require 4-byte alignment. When emitting
1465 instructions into any section, record the appropriate section
1466 alignment. */
1467 record_alignment (now_seg, 2);
1468
1469 if (mapstate == state)
1470 /* The mapping symbol has already been emitted.
1471 There is nothing else to do. */
1472 return;
1473
1474 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1475 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1476 /* Emit MAP_DATA within executable section in order. Otherwise, it will be
1477 evaluated later in the next else. */
1478 return;
1479 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1480 {
1481 /* Only add the symbol if the offset is > 0:
1482 if we're at the first frag, check that its size is > 0;
1483 if we're not at the first frag, then for sure
1484 the offset is > 0. */
1485 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1486 const int add_symbol = (frag_now != frag_first)
1487 || (frag_now_fix () > 0);
1488
1489 if (add_symbol)
1490 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1491 }
1492 #undef TRANSITION
1493
1494 mapping_state_2 (state, 0);
1495 }
1496
1497 /* Same as mapping_state, but MAX_CHARS bytes have already been
1498 allocated. Put the mapping symbol that far back. */
1499
1500 static void
1501 mapping_state_2 (enum mstate state, int max_chars)
1502 {
1503 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1504
1505 if (!SEG_NORMAL (now_seg))
1506 return;
1507
1508 if (mapstate == state)
1509 /* The mapping symbol has already been emitted.
1510 There is nothing else to do. */
1511 return;
1512
1513 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1514 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1515 }
1516 #else
1517 #define mapping_state(x) /* nothing */
1518 #define mapping_state_2(x, y) /* nothing */
1519 #endif
1520
1521 /* Directives: sectioning and alignment. */
1522
1523 static void
1524 s_bss (int ignore ATTRIBUTE_UNUSED)
1525 {
1526 /* We don't support putting frags in the BSS segment, we fake it by
1527 marking in_bss, then looking at s_skip for clues. */
1528 subseg_set (bss_section, 0);
1529 demand_empty_rest_of_line ();
1530 mapping_state (MAP_DATA);
1531 }
1532
1533 static void
1534 s_even (int ignore ATTRIBUTE_UNUSED)
1535 {
1536 /* Never make a frag if we expect an extra pass. */
1537 if (!need_pass_2)
1538 frag_align (1, 0, 0);
1539
1540 record_alignment (now_seg, 1);
1541
1542 demand_empty_rest_of_line ();
1543 }
1544
1545 /* Directives: Literal pools. */
1546
1547 static literal_pool *
1548 find_literal_pool (int size)
1549 {
1550 literal_pool *pool;
1551
1552 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1553 {
1554 if (pool->section == now_seg
1555 && pool->sub_section == now_subseg && pool->size == size)
1556 break;
1557 }
1558
1559 return pool;
1560 }
1561
1562 static literal_pool *
1563 find_or_make_literal_pool (int size)
1564 {
1565 /* Next literal pool ID number. */
1566 static unsigned int latest_pool_num = 1;
1567 literal_pool *pool;
1568
1569 pool = find_literal_pool (size);
1570
1571 if (pool == NULL)
1572 {
1573 /* Create a new pool. */
1574 pool = xmalloc (sizeof (*pool));
1575 if (!pool)
1576 return NULL;
1577
1578 /* Currently we always put the literal pool in the current text
1579 section. If we were generating "small" model code where we
1580 knew that all code and initialised data was within 1MB then
1581 we could output literals to mergeable, read-only data
1582 sections. */
1583
1584 pool->next_free_entry = 0;
1585 pool->section = now_seg;
1586 pool->sub_section = now_subseg;
1587 pool->size = size;
1588 pool->next = list_of_pools;
1589 pool->symbol = NULL;
1590
1591 /* Add it to the list. */
1592 list_of_pools = pool;
1593 }
1594
1595 /* New pools, and emptied pools, will have a NULL symbol. */
1596 if (pool->symbol == NULL)
1597 {
1598 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1599 (valueT) 0, &zero_address_frag);
1600 pool->id = latest_pool_num++;
1601 }
1602
1603 /* Done. */
1604 return pool;
1605 }
1606
1607 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1608 Return TRUE on success, otherwise return FALSE. */
1609 static bfd_boolean
1610 add_to_lit_pool (expressionS *exp, int size)
1611 {
1612 literal_pool *pool;
1613 unsigned int entry;
1614
1615 pool = find_or_make_literal_pool (size);
1616
1617 /* Check if this literal value is already in the pool. */
1618 for (entry = 0; entry < pool->next_free_entry; entry++)
1619 {
1620 expressionS * litexp = & pool->literals[entry].exp;
1621
1622 if ((litexp->X_op == exp->X_op)
1623 && (exp->X_op == O_constant)
1624 && (litexp->X_add_number == exp->X_add_number)
1625 && (litexp->X_unsigned == exp->X_unsigned))
1626 break;
1627
1628 if ((litexp->X_op == exp->X_op)
1629 && (exp->X_op == O_symbol)
1630 && (litexp->X_add_number == exp->X_add_number)
1631 && (litexp->X_add_symbol == exp->X_add_symbol)
1632 && (litexp->X_op_symbol == exp->X_op_symbol))
1633 break;
1634 }
1635
1636 /* Do we need to create a new entry? */
1637 if (entry == pool->next_free_entry)
1638 {
1639 if (entry >= MAX_LITERAL_POOL_SIZE)
1640 {
1641 set_syntax_error (_("literal pool overflow"));
1642 return FALSE;
1643 }
1644
1645 pool->literals[entry].exp = *exp;
1646 pool->next_free_entry += 1;
1647 if (exp->X_op == O_big)
1648 {
1649 /* PR 16688: Bignums are held in a single global array. We must
1650 copy and preserve that value now, before it is overwritten. */
1651 pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
1652 memcpy (pool->literals[entry].bignum, generic_bignum,
1653 CHARS_PER_LITTLENUM * exp->X_add_number);
1654 }
1655 else
1656 pool->literals[entry].bignum = NULL;
1657 }
1658
1659 exp->X_op = O_symbol;
1660 exp->X_add_number = ((int) entry) * size;
1661 exp->X_add_symbol = pool->symbol;
1662
1663 return TRUE;
1664 }
1665
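/* For example (illustrative): the third distinct 8-byte literal added to
   a pool lands in entry 2, so *EXP is rewritten to refer to the pool
   symbol with an offset of 2 * 8 == 16; the data itself is emitted later
   by s_ltorg below.  */
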
1666 /* Can't use symbol_new here, so have to create a symbol and then at
1667 a later date assign it a value. That's what these functions do. */
1668
1669 static void
1670 symbol_locate (symbolS * symbolP,
1671 const char *name,/* It is copied, the caller can modify. */
1672 segT segment, /* Segment identifier (SEG_<something>). */
1673 valueT valu, /* Symbol value. */
1674 fragS * frag) /* Associated fragment. */
1675 {
1676 size_t name_length;
1677 char *preserved_copy_of_name;
1678
1679 name_length = strlen (name) + 1; /* +1 for \0. */
1680 obstack_grow (&notes, name, name_length);
1681 preserved_copy_of_name = obstack_finish (&notes);
1682
1683 #ifdef tc_canonicalize_symbol_name
1684 preserved_copy_of_name =
1685 tc_canonicalize_symbol_name (preserved_copy_of_name);
1686 #endif
1687
1688 S_SET_NAME (symbolP, preserved_copy_of_name);
1689
1690 S_SET_SEGMENT (symbolP, segment);
1691 S_SET_VALUE (symbolP, valu);
1692 symbol_clear_list_pointers (symbolP);
1693
1694 symbol_set_frag (symbolP, frag);
1695
1696 /* Link to end of symbol chain. */
1697 {
1698 extern int symbol_table_frozen;
1699
1700 if (symbol_table_frozen)
1701 abort ();
1702 }
1703
1704 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1705
1706 obj_symbol_new_hook (symbolP);
1707
1708 #ifdef tc_symbol_new_hook
1709 tc_symbol_new_hook (symbolP);
1710 #endif
1711
1712 #ifdef DEBUG_SYMS
1713 verify_symbol_chain (symbol_rootP, symbol_lastP);
1714 #endif /* DEBUG_SYMS */
1715 }
1716
1717
1718 static void
1719 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1720 {
1721 unsigned int entry;
1722 literal_pool *pool;
1723 char sym_name[20];
1724 int align;
1725
1726 for (align = 2; align <= 4; align++)
1727 {
1728 int size = 1 << align;
1729
1730 pool = find_literal_pool (size);
1731 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1732 continue;
1733
1734 mapping_state (MAP_DATA);
1735
1736 /* Align the pool for word accesses.
1737 Only make a frag if we have to. */
1738 if (!need_pass_2)
1739 frag_align (align, 0, 0);
1740
1741 record_alignment (now_seg, align);
1742
1743 sprintf (sym_name, "$$lit_\002%x", pool->id);
1744
1745 symbol_locate (pool->symbol, sym_name, now_seg,
1746 (valueT) frag_now_fix (), frag_now);
1747 symbol_table_insert (pool->symbol);
1748
1749 for (entry = 0; entry < pool->next_free_entry; entry++)
1750 {
1751 expressionS * exp = & pool->literals[entry].exp;
1752
1753 if (exp->X_op == O_big)
1754 {
1755 /* PR 16688: Restore the global bignum value. */
1756 gas_assert (pool->literals[entry].bignum != NULL);
1757 memcpy (generic_bignum, pool->literals[entry].bignum,
1758 CHARS_PER_LITTLENUM * exp->X_add_number);
1759 }
1760
1761 /* First output the expression in the instruction to the pool. */
1762 emit_expr (exp, size); /* .word|.xword */
1763
1764 if (exp->X_op == O_big)
1765 {
1766 free (pool->literals[entry].bignum);
1767 pool->literals[entry].bignum = NULL;
1768 }
1769 }
1770
1771 /* Mark the pool as empty. */
1772 pool->next_free_entry = 0;
1773 pool->symbol = NULL;
1774 }
1775 }
1776
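/* Typical use (illustrative sketch): a sequence such as

      ldr   x0, =some_symbol
      ...
      .ltorg

   queues an entry in the 8-byte pool for the load and then dumps the
   accumulated literal pools at the .ltorg.  Each non-empty pool is
   aligned, given its internal "$$lit_" label and emitted as data.  */
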
1777 #ifdef OBJ_ELF
1778 /* Forward declarations for functions below, in the MD interface
1779 section. */
1780 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1781 static struct reloc_table_entry * find_reloc_table_entry (char **);
1782
1783 /* Directives: Data. */
1784 /* N.B. the support for relocation suffix in this directive needs to be
1785 implemented properly. */
1786
1787 static void
1788 s_aarch64_elf_cons (int nbytes)
1789 {
1790 expressionS exp;
1791
1792 #ifdef md_flush_pending_output
1793 md_flush_pending_output ();
1794 #endif
1795
1796 if (is_it_end_of_statement ())
1797 {
1798 demand_empty_rest_of_line ();
1799 return;
1800 }
1801
1802 #ifdef md_cons_align
1803 md_cons_align (nbytes);
1804 #endif
1805
1806 mapping_state (MAP_DATA);
1807 do
1808 {
1809 struct reloc_table_entry *reloc;
1810
1811 expression (&exp);
1812
1813 if (exp.X_op != O_symbol)
1814 emit_expr (&exp, (unsigned int) nbytes);
1815 else
1816 {
1817 skip_past_char (&input_line_pointer, '#');
1818 if (skip_past_char (&input_line_pointer, ':'))
1819 {
1820 reloc = find_reloc_table_entry (&input_line_pointer);
1821 if (reloc == NULL)
1822 as_bad (_("unrecognized relocation suffix"));
1823 else
1824 as_bad (_("unimplemented relocation suffix"));
1825 ignore_rest_of_line ();
1826 return;
1827 }
1828 else
1829 emit_expr (&exp, (unsigned int) nbytes);
1830 }
1831 }
1832 while (*input_line_pointer++ == ',');
1833
1834 /* Put terminator back into stream. */
1835 input_line_pointer--;
1836 demand_empty_rest_of_line ();
1837 }
1838
1839 #endif /* OBJ_ELF */
1840
1841 /* Output a 32-bit word, but mark as an instruction. */
1842
1843 static void
1844 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1845 {
1846 expressionS exp;
1847
1848 #ifdef md_flush_pending_output
1849 md_flush_pending_output ();
1850 #endif
1851
1852 if (is_it_end_of_statement ())
1853 {
1854 demand_empty_rest_of_line ();
1855 return;
1856 }
1857
1858 /* Sections are assumed to start aligned. In an executable section, there is no
1859 MAP_DATA symbol pending. So we only align the address during
1860 MAP_DATA --> MAP_INSN transition.
1861 For other sections, this is not guaranteed. */
1862 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1863 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1864 frag_align_code (2, 0);
1865
1866 #ifdef OBJ_ELF
1867 mapping_state (MAP_INSN);
1868 #endif
1869
1870 do
1871 {
1872 expression (&exp);
1873 if (exp.X_op != O_constant)
1874 {
1875 as_bad (_("constant expression required"));
1876 ignore_rest_of_line ();
1877 return;
1878 }
1879
1880 if (target_big_endian)
1881 {
1882 unsigned int val = exp.X_add_number;
1883 exp.X_add_number = SWAP_32 (val);
1884 }
1885 emit_expr (&exp, 4);
1886 }
1887 while (*input_line_pointer++ == ',');
1888
1889 /* Put terminator back into stream. */
1890 input_line_pointer--;
1891 demand_empty_rest_of_line ();
1892 }
1893
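/* Typical use (illustrative sketch):

      .inst 0xd503201f

   emits the 32-bit NOP encoding but, unlike .word, marks it as code (a
   "$x" mapping symbol on ELF targets) and byte-swaps it when assembling
   for big-endian.  */
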
1894 #ifdef OBJ_ELF
1895 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1896
1897 static void
1898 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1899 {
1900 expressionS exp;
1901
1902 /* Since we're just labelling the code, there's no need to define a
1903 mapping symbol. */
1904 expression (&exp);
1905 /* Make sure there is enough room in this frag for the following
1906 blr. This trick only works if the blr follows immediately after
1907 the .tlsdesc directive. */
1908 frag_grow (4);
1909 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1910 BFD_RELOC_AARCH64_TLSDESC_CALL);
1911
1912 demand_empty_rest_of_line ();
1913 }
1914 #endif /* OBJ_ELF */
1915
1916 static void s_aarch64_arch (int);
1917 static void s_aarch64_cpu (int);
1918 static void s_aarch64_arch_extension (int);
1919
1920 /* This table describes all the machine specific pseudo-ops the assembler
1921 has to support. The fields are:
1922 pseudo-op name without dot
1923 function to call to execute this pseudo-op
1924 Integer arg to pass to the function. */
1925
1926 const pseudo_typeS md_pseudo_table[] = {
1927 /* Never called because '.req' does not start a line. */
1928 {"req", s_req, 0},
1929 {"unreq", s_unreq, 0},
1930 {"bss", s_bss, 0},
1931 {"even", s_even, 0},
1932 {"ltorg", s_ltorg, 0},
1933 {"pool", s_ltorg, 0},
1934 {"cpu", s_aarch64_cpu, 0},
1935 {"arch", s_aarch64_arch, 0},
1936 {"arch_extension", s_aarch64_arch_extension, 0},
1937 {"inst", s_aarch64_inst, 0},
1938 #ifdef OBJ_ELF
1939 {"tlsdesccall", s_tlsdesccall, 0},
1940 {"word", s_aarch64_elf_cons, 4},
1941 {"long", s_aarch64_elf_cons, 4},
1942 {"xword", s_aarch64_elf_cons, 8},
1943 {"dword", s_aarch64_elf_cons, 8},
1944 #endif
1945 {0, 0, 0}
1946 };
1947 \f
1948
1949 /* Check whether STR points to a register name followed by a comma or the
1950 end of line; REG_TYPE indicates which register types are checked
1951 against. Return TRUE if STR is such a register name; otherwise return
1952 FALSE. The function does not intend to produce any diagnostics, but since
1953 the register parser aarch64_reg_parse, which is called by this function,
1954 does produce diagnostics, we call clear_error to clear any diagnostics
1955 that may be generated by aarch64_reg_parse.
1956 Also, the function returns FALSE directly if there is any user error
1957 present at the function entry. This prevents the existing diagnostics
1958 state from being spoiled.
1959 The function currently serves parse_constant_immediate and
1960 parse_big_immediate only. */
1961 static bfd_boolean
1962 reg_name_p (char *str, aarch64_reg_type reg_type)
1963 {
1964 int reg;
1965
1966 /* Prevent the diagnostics state from being spoiled. */
1967 if (error_p ())
1968 return FALSE;
1969
1970 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1971
1972 /* Clear the parsing error that may be set by the reg parser. */
1973 clear_error ();
1974
1975 if (reg == PARSE_FAIL)
1976 return FALSE;
1977
1978 skip_whitespace (str);
1979 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1980 return TRUE;
1981
1982 return FALSE;
1983 }
1984
1985 /* Parser functions used exclusively in instruction operands. */
1986
1987 /* Parse an immediate expression which may not be constant.
1988
1989 To prevent the expression parser from pushing a register name
1990 into the symbol table as an undefined symbol, a check is first
1991 made to find out whether STR is a valid register name followed
1992 by a comma or the end of line. Return FALSE if STR is such a
1993 string. */
1994
1995 static bfd_boolean
1996 parse_immediate_expression (char **str, expressionS *exp)
1997 {
1998 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1999 {
2000 set_recoverable_error (_("immediate operand required"));
2001 return FALSE;
2002 }
2003
2004 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2005
2006 if (exp->X_op == O_absent)
2007 {
2008 set_fatal_syntax_error (_("missing immediate expression"));
2009 return FALSE;
2010 }
2011
2012 return TRUE;
2013 }
2014
2015 /* Constant immediate-value read function for use in insn parsing.
2016 STR points to the beginning of the immediate (with the optional
2017 leading #); *VAL receives the value.
2018
2019 Return TRUE on success; otherwise return FALSE. */
2020
2021 static bfd_boolean
2022 parse_constant_immediate (char **str, int64_t * val)
2023 {
2024 expressionS exp;
2025
2026 if (! parse_immediate_expression (str, &exp))
2027 return FALSE;
2028
2029 if (exp.X_op != O_constant)
2030 {
2031 set_syntax_error (_("constant expression required"));
2032 return FALSE;
2033 }
2034
2035 *val = exp.X_add_number;
2036 return TRUE;
2037 }
2038
2039 static uint32_t
2040 encode_imm_float_bits (uint32_t imm)
2041 {
2042 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2043 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2044 }
2045
2046 /* Return TRUE if the single-precision floating-point value encoded in IMM
2047 can be expressed in the AArch64 8-bit signed floating-point format with
2048 3-bit exponent and normalized 4 bits of precision; in other words, the
2049 floating-point value must be expressible as
2050 (+/-) n / 16 * power (2, r)
2051 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
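/* For example, single-precision 1.0 has the bit pattern 0x3f800000, which
   matches the format above (1.0 == 16/16 * 2^0); encode_imm_float_bits
   then maps it to the 8-bit immediate 0x70.  */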
2052
2053 static bfd_boolean
2054 aarch64_imm_float_p (uint32_t imm)
2055 {
2056 /* If a single-precision floating-point value has the following bit
2057 pattern, it can be expressed in the AArch64 8-bit floating-point
2058 format:
2059
2060 3 32222222 2221111111111
2061 1 09876543 21098765432109876543210
2062 n Eeeeeexx xxxx0000000000000000000
2063
2064 where n, e and each x are either 0 or 1 independently, with
2065 E == ~ e. */
2066
2067 uint32_t pattern;
2068
2069 /* Prepare the pattern for 'Eeeeee'. */
2070 if (((imm >> 30) & 0x1) == 0)
2071 pattern = 0x3e000000;
2072 else
2073 pattern = 0x40000000;
2074
2075 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2076 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2077 }
2078
2079 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2080
2081 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2082 8-bit signed floating-point format with 3-bit exponent and normalized 4
2083 bits of precision (i.e. can be used in an FMOV instruction); return the
2084 equivalent single-precision encoding in *FPWORD.
2085
2086 Otherwise return FALSE. */
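/* For example, double-precision 1.0 (0x3ff0000000000000) has all-zero low
   32 bits and a high word of 0x3ff00000 that matches the pattern below, so
   it is accepted and *FPWORD receives 0x3f800000, the single-precision
   encoding of 1.0.  */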
2087
2088 static bfd_boolean
2089 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2090 {
2091 /* If a double-precision floating-point value has the following bit
2092 pattern, it can be expressed in the AArch64 8-bit floating-point
2093 format:
2094
2095 6 66655555555 554444444...21111111111
2096 3 21098765432 109876543...098765432109876543210
2097 n Eeeeeeeeexx xxxx00000...000000000000000000000
2098
2099 where n, e and each x are either 0 or 1 independently, with
2100 E == ~ e. */
2101
2102 uint32_t pattern;
2103 uint32_t high32 = imm >> 32;
2104
2105 /* Lower 32 bits need to be 0s. */
2106 if ((imm & 0xffffffff) != 0)
2107 return FALSE;
2108
2109 /* Prepare the pattern for 'Eeeeeeeee'. */
2110 if (((high32 >> 30) & 0x1) == 0)
2111 pattern = 0x3fc00000;
2112 else
2113 pattern = 0x40000000;
2114
2115 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2116 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2117 {
2118 /* Convert to the single-precision encoding.
2119 i.e. convert
2120 n Eeeeeeeeexx xxxx00000...000000000000000000000
2121 to
2122 n Eeeeeexx xxxx0000000000000000000. */
2123 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2124 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2125 return TRUE;
2126 }
2127 else
2128 return FALSE;
2129 }
2130
2131 /* Parse a floating-point immediate. Return TRUE on success and return the
2132 value in *IMMED in the format of IEEE754 single-precision encoding.
2133 *CCP points to the start of the string; DP_P is TRUE when the immediate
2134 is expected to be in double-precision (N.B. this only matters when
2135 hexadecimal representation is involved).
2136
2137 N.B. 0.0 is accepted by this function. */
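/* For example, with DP_P false both "#1.0" and the equivalent hexadecimal
   IEEE754 form "#0x3f800000" are accepted and store 0x3f800000 in *IMMED.  */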
2138
2139 static bfd_boolean
2140 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2141 {
2142 char *str = *ccp;
2143 char *fpnum;
2144 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2145 int found_fpchar = 0;
2146 int64_t val = 0;
2147 unsigned fpword = 0;
2148 bfd_boolean hex_p = FALSE;
2149
2150 skip_past_char (&str, '#');
2151
2152 fpnum = str;
2153 skip_whitespace (fpnum);
2154
2155 if (strncmp (fpnum, "0x", 2) == 0)
2156 {
2157 /* Support the hexadecimal representation of the IEEE754 encoding.
2158 Double-precision is expected when DP_P is TRUE, otherwise the
2159 representation should be in single-precision. */
2160 if (! parse_constant_immediate (&str, &val))
2161 goto invalid_fp;
2162
2163 if (dp_p)
2164 {
2165 if (! aarch64_double_precision_fmovable (val, &fpword))
2166 goto invalid_fp;
2167 }
2168 else if ((uint64_t) val > 0xffffffff)
2169 goto invalid_fp;
2170 else
2171 fpword = val;
2172
2173 hex_p = TRUE;
2174 }
2175 else
2176 {
2177 /* We must not accidentally parse an integer as a floating-point number.
2178 Make sure that the value we parse is not an integer by checking for
2179 special characters '.', 'e' or 'E'. */
2180 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2181 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2182 {
2183 found_fpchar = 1;
2184 break;
2185 }
2186
2187 if (!found_fpchar)
2188 return FALSE;
2189 }
2190
2191 if (! hex_p)
2192 {
2193 int i;
2194
2195 if ((str = atof_ieee (str, 's', words)) == NULL)
2196 goto invalid_fp;
2197
2198 /* Our FP word must be 32 bits (single-precision FP). */
2199 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2200 {
2201 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2202 fpword |= words[i];
2203 }
2204 }
2205
2206 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2207 {
2208 *immed = fpword;
2209 *ccp = str;
2210 return TRUE;
2211 }
2212
2213 invalid_fp:
2214 set_fatal_syntax_error (_("invalid floating-point constant"));
2215 return FALSE;
2216 }
2217
2218 /* Less-generic immediate-value read function with the possibility of loading
2219 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2220 instructions.
2221
2222 To prevent the expression parser from pushing a register name into the
2223 symbol table as an undefined symbol, a check is firstly done to find
2224 out whether STR is a valid register name followed by a comma or the end
2225 of line. Return FALSE if STR is such a register. */
2226
2227 static bfd_boolean
2228 parse_big_immediate (char **str, int64_t *imm)
2229 {
2230 char *ptr = *str;
2231
2232 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2233 {
2234 set_syntax_error (_("immediate operand required"));
2235 return FALSE;
2236 }
2237
2238 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2239
2240 if (inst.reloc.exp.X_op == O_constant)
2241 *imm = inst.reloc.exp.X_add_number;
2242
2243 *str = ptr;
2244
2245 return TRUE;
2246 }
2247
2248 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2249 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2250 assistance from libopcodes. */
2251
2252 static inline void
2253 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2254 const aarch64_opnd_info *operand,
2255 int need_libopcodes_p)
2256 {
2257 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2258 reloc->opnd = operand->type;
2259 if (need_libopcodes_p)
2260 reloc->need_libopcodes_p = 1;
2261 }
2262
2263 /* Return TRUE if the instruction needs to be fixed up later internally by
2264 the GAS; otherwise return FALSE. */
2265
2266 static inline bfd_boolean
2267 aarch64_gas_internal_fixup_p (void)
2268 {
2269 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2270 }
2271
2272 /* Assign the immediate value to the relevant field in *OPERAND if
2273 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2274 needs an internal fixup in a later stage.
2275 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2276 IMM.VALUE that may get assigned with the constant. */
2277 static inline void
2278 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2279 aarch64_opnd_info *operand,
2280 int addr_off_p,
2281 int need_libopcodes_p,
2282 int skip_p)
2283 {
2284 if (reloc->exp.X_op == O_constant)
2285 {
2286 if (addr_off_p)
2287 operand->addr.offset.imm = reloc->exp.X_add_number;
2288 else
2289 operand->imm.value = reloc->exp.X_add_number;
2290 reloc->type = BFD_RELOC_UNUSED;
2291 }
2292 else
2293 {
2294 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2295 /* Tell libopcodes to ignore this operand or not. This is helpful
2296 when one of the operands needs to be fixed up later but we need
2297 libopcodes to check the other operands. */
2298 operand->skip = skip_p;
2299 }
2300 }
2301
2302 /* Relocation modifiers. Each entry in the table contains the textual
2303 name for the relocation which may be placed before a symbol used as
2304 a load/store offset, or add immediate. It must be surrounded by a
2305 leading and trailing colon, for example:
2306
2307 ldr x0, [x1, #:rello:varsym]
2308 add x0, x1, #:rello:varsym */
2309
2310 struct reloc_table_entry
2311 {
2312 const char *name;
2313 int pc_rel;
2314 bfd_reloc_code_real_type adr_type;
2315 bfd_reloc_code_real_type adrp_type;
2316 bfd_reloc_code_real_type movw_type;
2317 bfd_reloc_code_real_type add_type;
2318 bfd_reloc_code_real_type ldst_type;
2319 bfd_reloc_code_real_type ld_literal_type;
2320 };
2321
2322 static struct reloc_table_entry reloc_table[] = {
2323 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2324 {"lo12", 0,
2325 0, /* adr_type */
2326 0,
2327 0,
2328 BFD_RELOC_AARCH64_ADD_LO12,
2329 BFD_RELOC_AARCH64_LDST_LO12,
2330 0},
2331
2332 /* Higher 21 bits of pc-relative page offset: ADRP */
2333 {"pg_hi21", 1,
2334 0, /* adr_type */
2335 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2336 0,
2337 0,
2338 0,
2339 0},
2340
2341 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2342 {"pg_hi21_nc", 1,
2343 0, /* adr_type */
2344 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2345 0,
2346 0,
2347 0,
2348 0},
2349
2350 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2351 {"abs_g0", 0,
2352 0, /* adr_type */
2353 0,
2354 BFD_RELOC_AARCH64_MOVW_G0,
2355 0,
2356 0,
2357 0},
2358
2359 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2360 {"abs_g0_s", 0,
2361 0, /* adr_type */
2362 0,
2363 BFD_RELOC_AARCH64_MOVW_G0_S,
2364 0,
2365 0,
2366 0},
2367
2368 /* Less significant bits 0-15 of address/value: MOVK, no check */
2369 {"abs_g0_nc", 0,
2370 0, /* adr_type */
2371 0,
2372 BFD_RELOC_AARCH64_MOVW_G0_NC,
2373 0,
2374 0,
2375 0},
2376
2377 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2378 {"abs_g1", 0,
2379 0, /* adr_type */
2380 0,
2381 BFD_RELOC_AARCH64_MOVW_G1,
2382 0,
2383 0,
2384 0},
2385
2386 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2387 {"abs_g1_s", 0,
2388 0, /* adr_type */
2389 0,
2390 BFD_RELOC_AARCH64_MOVW_G1_S,
2391 0,
2392 0,
2393 0},
2394
2395 /* Less significant bits 16-31 of address/value: MOVK, no check */
2396 {"abs_g1_nc", 0,
2397 0, /* adr_type */
2398 0,
2399 BFD_RELOC_AARCH64_MOVW_G1_NC,
2400 0,
2401 0,
2402 0},
2403
2404 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2405 {"abs_g2", 0,
2406 0, /* adr_type */
2407 0,
2408 BFD_RELOC_AARCH64_MOVW_G2,
2409 0,
2410 0,
2411 0},
2412
2413 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2414 {"abs_g2_s", 0,
2415 0, /* adr_type */
2416 0,
2417 BFD_RELOC_AARCH64_MOVW_G2_S,
2418 0,
2419 0,
2420 0},
2421
2422 /* Less significant bits 32-47 of address/value: MOVK, no check */
2423 {"abs_g2_nc", 0,
2424 0, /* adr_type */
2425 0,
2426 BFD_RELOC_AARCH64_MOVW_G2_NC,
2427 0,
2428 0,
2429 0},
2430
2431 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2432 {"abs_g3", 0,
2433 0, /* adr_type */
2434 0,
2435 BFD_RELOC_AARCH64_MOVW_G3,
2436 0,
2437 0,
2438 0},
2439
2440 /* Get to the page containing GOT entry for a symbol. */
2441 {"got", 1,
2442 0, /* adr_type */
2443 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2444 0,
2445 0,
2446 0,
2447 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2448
2449 /* 12 bit offset into the page containing GOT entry for that symbol. */
2450 {"got_lo12", 0,
2451 0, /* adr_type */
2452 0,
2453 0,
2454 0,
2455 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2456 0},
2457
2458 /* 15 bit offset into the page containing GOT entry for that symbol. */
2459 {"gotoff_lo15", 0,
2460 0, /* adr_type */
2461 0,
2462 0,
2463 0,
2464 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2465 0},
2466
2467 /* Get to the page containing GOT TLS entry for a symbol */
2468 {"tlsgd", 0,
2469 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2470 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2471 0,
2472 0,
2473 0,
2474 0},
2475
2476 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2477 {"tlsgd_lo12", 0,
2478 0, /* adr_type */
2479 0,
2480 0,
2481 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2482 0,
2483 0},
2484
2485 /* Get to the page containing GOT TLS entry for a symbol */
2486 {"tlsdesc", 0,
2487 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2488 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2489 0,
2490 0,
2491 0,
2492 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2493
2494 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2495 {"tlsdesc_lo12", 0,
2496 0, /* adr_type */
2497 0,
2498 0,
2499 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2500 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2501 0},
2502
2503 /* Get to the page containing GOT TLS entry for a symbol.
2504 As with GD, we allocate two consecutive GOT slots
2505 for module index and module offset; the only difference
2506 from GD is that the module offset should be initialized to
2507 zero without any outstanding runtime relocation. */
2508 {"tlsldm", 0,
2509 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2510 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2511 0,
2512 0,
2513 0,
2514 0},
2515
2516 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2517 {"tlsldm_lo12_nc", 0,
2518 0, /* adr_type */
2519 0,
2520 0,
2521 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2522 0,
2523 0},
2524
2525 /* Get to the page containing GOT TLS entry for a symbol */
2526 {"gottprel", 0,
2527 0, /* adr_type */
2528 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2529 0,
2530 0,
2531 0,
2532 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2533
2534 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2535 {"gottprel_lo12", 0,
2536 0, /* adr_type */
2537 0,
2538 0,
2539 0,
2540 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2541 0},
2542
2543 /* Get tp offset for a symbol. */
2544 {"tprel", 0,
2545 0, /* adr_type */
2546 0,
2547 0,
2548 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2549 0,
2550 0},
2551
2552 /* Get tp offset for a symbol. */
2553 {"tprel_lo12", 0,
2554 0, /* adr_type */
2555 0,
2556 0,
2557 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2558 0,
2559 0},
2560
2561 /* Get tp offset for a symbol. */
2562 {"tprel_hi12", 0,
2563 0, /* adr_type */
2564 0,
2565 0,
2566 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2567 0,
2568 0},
2569
2570 /* Get tp offset for a symbol. */
2571 {"tprel_lo12_nc", 0,
2572 0, /* adr_type */
2573 0,
2574 0,
2575 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2576 0,
2577 0},
2578
2579 /* Most significant bits 32-47 of address/value: MOVZ. */
2580 {"tprel_g2", 0,
2581 0, /* adr_type */
2582 0,
2583 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2584 0,
2585 0,
2586 0},
2587
2588 /* Most significant bits 16-31 of address/value: MOVZ. */
2589 {"tprel_g1", 0,
2590 0, /* adr_type */
2591 0,
2592 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2593 0,
2594 0,
2595 0},
2596
2597 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2598 {"tprel_g1_nc", 0,
2599 0, /* adr_type */
2600 0,
2601 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2602 0,
2603 0,
2604 0},
2605
2606 /* Most significant bits 0-15 of address/value: MOVZ. */
2607 {"tprel_g0", 0,
2608 0, /* adr_type */
2609 0,
2610 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2611 0,
2612 0,
2613 0},
2614
2615 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2616 {"tprel_g0_nc", 0,
2617 0, /* adr_type */
2618 0,
2619 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2620 0,
2621 0,
2622 0},
2623
2624 /* 15-bit offset from GOT entry to base address of GOT table. */
2625 {"gotpage_lo15", 0,
2626 0,
2627 0,
2628 0,
2629 0,
2630 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2631 0},
2632
2633 /* 14-bit offset from GOT entry to base address of GOT table. */
2634 {"gotpage_lo14", 0,
2635 0,
2636 0,
2637 0,
2638 0,
2639 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2640 0},
2641 };
2642
2643 /* Given the address of a pointer pointing to the textual name of a
2644 relocation as may appear in assembler source, attempt to find its
2645 details in reloc_table. The pointer will be updated to the character
2646 after the trailing colon. On failure, NULL will be returned;
2647 otherwise return the reloc_table_entry. */
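/* For example, given a string pointing at "lo12:sym", the "lo12" entry is
   returned and the string pointer is advanced past the trailing colon to
   point at "sym".  */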
2648
2649 static struct reloc_table_entry *
2650 find_reloc_table_entry (char **str)
2651 {
2652 unsigned int i;
2653 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2654 {
2655 int length = strlen (reloc_table[i].name);
2656
2657 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2658 && (*str)[length] == ':')
2659 {
2660 *str += (length + 1);
2661 return &reloc_table[i];
2662 }
2663 }
2664
2665 return NULL;
2666 }
2667
2668 /* Mode argument to parse_shift and parser_shifter_operand. */
2669 enum parse_shift_mode
2670 {
2671 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2672 "#imm{,lsl #n}" */
2673 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2674 "#imm" */
2675 SHIFTED_LSL, /* bare "lsl #n" */
2676 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2677 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2678 };
2679
2680 /* Parse a <shift> operator on an AArch64 data processing instruction.
2681 Return TRUE on success; otherwise return FALSE. */
2682 static bfd_boolean
2683 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2684 {
2685 const struct aarch64_name_value_pair *shift_op;
2686 enum aarch64_modifier_kind kind;
2687 expressionS exp;
2688 int exp_has_prefix;
2689 char *s = *str;
2690 char *p = s;
2691
2692 for (p = *str; ISALPHA (*p); p++)
2693 ;
2694
2695 if (p == *str)
2696 {
2697 set_syntax_error (_("shift expression expected"));
2698 return FALSE;
2699 }
2700
2701 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2702
2703 if (shift_op == NULL)
2704 {
2705 set_syntax_error (_("shift operator expected"));
2706 return FALSE;
2707 }
2708
2709 kind = aarch64_get_operand_modifier (shift_op);
2710
2711 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2712 {
2713 set_syntax_error (_("invalid use of 'MSL'"));
2714 return FALSE;
2715 }
2716
2717 switch (mode)
2718 {
2719 case SHIFTED_LOGIC_IMM:
2720 if (aarch64_extend_operator_p (kind) == TRUE)
2721 {
2722 set_syntax_error (_("extending shift is not permitted"));
2723 return FALSE;
2724 }
2725 break;
2726
2727 case SHIFTED_ARITH_IMM:
2728 if (kind == AARCH64_MOD_ROR)
2729 {
2730 set_syntax_error (_("'ROR' shift is not permitted"));
2731 return FALSE;
2732 }
2733 break;
2734
2735 case SHIFTED_LSL:
2736 if (kind != AARCH64_MOD_LSL)
2737 {
2738 set_syntax_error (_("only 'LSL' shift is permitted"));
2739 return FALSE;
2740 }
2741 break;
2742
2743 case SHIFTED_REG_OFFSET:
2744 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2745 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2746 {
2747 set_fatal_syntax_error
2748 (_("invalid shift for the register offset addressing mode"));
2749 return FALSE;
2750 }
2751 break;
2752
2753 case SHIFTED_LSL_MSL:
2754 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2755 {
2756 set_syntax_error (_("invalid shift operator"));
2757 return FALSE;
2758 }
2759 break;
2760
2761 default:
2762 abort ();
2763 }
2764
2765 /* Whitespace can appear here if the next thing is a bare digit. */
2766 skip_whitespace (p);
2767
2768 /* Parse shift amount. */
2769 exp_has_prefix = 0;
2770 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2771 exp.X_op = O_absent;
2772 else
2773 {
2774 if (is_immediate_prefix (*p))
2775 {
2776 p++;
2777 exp_has_prefix = 1;
2778 }
2779 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2780 }
2781 if (exp.X_op == O_absent)
2782 {
2783 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2784 {
2785 set_syntax_error (_("missing shift amount"));
2786 return FALSE;
2787 }
2788 operand->shifter.amount = 0;
2789 }
2790 else if (exp.X_op != O_constant)
2791 {
2792 set_syntax_error (_("constant shift amount required"));
2793 return FALSE;
2794 }
2795 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2796 {
2797 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2798 return FALSE;
2799 }
2800 else
2801 {
2802 operand->shifter.amount = exp.X_add_number;
2803 operand->shifter.amount_present = 1;
2804 }
2805
2806 operand->shifter.operator_present = 1;
2807 operand->shifter.kind = kind;
2808
2809 *str = p;
2810 return TRUE;
2811 }
2812
2813 /* Parse a <shifter_operand> for a data processing instruction:
2814
2815 #<immediate>
2816 #<immediate>, LSL #imm
2817
2818 Validation of immediate operands is deferred to md_apply_fix.
2819
2820 Return TRUE on success; otherwise return FALSE. */
2821
2822 static bfd_boolean
2823 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2824 enum parse_shift_mode mode)
2825 {
2826 char *p;
2827
2828 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2829 return FALSE;
2830
2831 p = *str;
2832
2833 /* Accept an immediate expression. */
2834 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2835 return FALSE;
2836
2837 /* Accept optional LSL for arithmetic immediate values. */
2838 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2839 if (! parse_shift (&p, operand, SHIFTED_LSL))
2840 return FALSE;
2841
2842 /* Do not accept any shifter for logical immediate values. */
2843 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2844 && parse_shift (&p, operand, mode))
2845 {
2846 set_syntax_error (_("unexpected shift operator"));
2847 return FALSE;
2848 }
2849
2850 *str = p;
2851 return TRUE;
2852 }
2853
2854 /* Parse a <shifter_operand> for a data processing instruction:
2855
2856 <Rm>
2857 <Rm>, <shift>
2858 #<immediate>
2859 #<immediate>, LSL #imm
2860
2861 where <shift> is handled by parse_shift above, and the last two
2862 cases are handled by the function above.
2863
2864 Validation of immediate operands is deferred to md_apply_fix.
2865
2866 Return TRUE on success; otherwise return FALSE. */
2867
2868 static bfd_boolean
2869 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2870 enum parse_shift_mode mode)
2871 {
2872 int reg;
2873 int isreg32, isregzero;
2874 enum aarch64_operand_class opd_class
2875 = aarch64_get_operand_class (operand->type);
2876
2877 if ((reg =
2878 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2879 {
2880 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2881 {
2882 set_syntax_error (_("unexpected register in the immediate operand"));
2883 return FALSE;
2884 }
2885
2886 if (!isregzero && reg == REG_SP)
2887 {
2888 set_syntax_error (BAD_SP);
2889 return FALSE;
2890 }
2891
2892 operand->reg.regno = reg;
2893 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2894
2895 /* Accept optional shift operation on register. */
2896 if (! skip_past_comma (str))
2897 return TRUE;
2898
2899 if (! parse_shift (str, operand, mode))
2900 return FALSE;
2901
2902 return TRUE;
2903 }
2904 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2905 {
2906 set_syntax_error
2907 (_("integer register expected in the extended/shifted operand "
2908 "register"));
2909 return FALSE;
2910 }
2911
2912 /* We have a shifted immediate variable. */
2913 return parse_shifter_operand_imm (str, operand, mode);
2914 }
2915
2916 /* Return TRUE on success; return FALSE otherwise. */
2917
2918 static bfd_boolean
2919 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2920 enum parse_shift_mode mode)
2921 {
2922 char *p = *str;
2923
2924 /* Determine if we have the sequence of characters #: or just :
2925 coming next. If we do, then we check for a :rello: relocation
2926 modifier. If we don't, punt the whole lot to
2927 parse_shifter_operand. */
2928
2929 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2930 {
2931 struct reloc_table_entry *entry;
2932
2933 if (p[0] == '#')
2934 p += 2;
2935 else
2936 p++;
2937 *str = p;
2938
2939 /* Try to parse a relocation. Anything else is an error. */
2940 if (!(entry = find_reloc_table_entry (str)))
2941 {
2942 set_syntax_error (_("unknown relocation modifier"));
2943 return FALSE;
2944 }
2945
2946 if (entry->add_type == 0)
2947 {
2948 set_syntax_error
2949 (_("this relocation modifier is not allowed on this instruction"));
2950 return FALSE;
2951 }
2952
2953 /* Save str before we decompose it. */
2954 p = *str;
2955
2956 /* Next, we parse the expression. */
2957 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2958 return FALSE;
2959
2960 /* Record the relocation type (use the ADD variant here). */
2961 inst.reloc.type = entry->add_type;
2962 inst.reloc.pc_rel = entry->pc_rel;
2963
2964 /* If str is empty, we've reached the end, stop here. */
2965 if (**str == '\0')
2966 return TRUE;
2967
2968 /* Otherwise, we have a shifted reloc modifier, so rewind to
2969 recover the variable name and continue parsing for the shifter. */
2970 *str = p;
2971 return parse_shifter_operand_imm (str, operand, mode);
2972 }
2973
2974 return parse_shifter_operand (str, operand, mode);
2975 }
2976
2977 /* Parse all forms of an address expression. Information is written
2978 to *OPERAND and/or inst.reloc.
2979
2980 The A64 instruction set has the following addressing modes:
2981
2982 Offset
2983 [base] // in SIMD ld/st structure
2984 [base{,#0}] // in ld/st exclusive
2985 [base{,#imm}]
2986 [base,Xm{,LSL #imm}]
2987 [base,Xm,SXTX {#imm}]
2988 [base,Wm,(S|U)XTW {#imm}]
2989 Pre-indexed
2990 [base,#imm]!
2991 Post-indexed
2992 [base],#imm
2993 [base],Xm // in SIMD ld/st structure
2994 PC-relative (literal)
2995 label
2996 =immediate
2997
2998 (As a convenience, the notation "=immediate" is permitted in conjunction
2999 with the pc-relative literal load instructions to automatically place an
3000 immediate value or symbolic address in a nearby literal pool and generate
3001 a hidden label which references it.)
3002
3003 Upon a successful parsing, the address structure in *OPERAND will be
3004 filled in the following way:
3005
3006 .base_regno = <base>
3007 .offset.is_reg // 1 if the offset is a register
3008 .offset.imm = <imm>
3009 .offset.regno = <Rm>
3010
3011 For different addressing modes defined in the A64 ISA:
3012
3013 Offset
3014 .pcrel=0; .preind=1; .postind=0; .writeback=0
3015 Pre-indexed
3016 .pcrel=0; .preind=1; .postind=0; .writeback=1
3017 Post-indexed
3018 .pcrel=0; .preind=0; .postind=1; .writeback=1
3019 PC-relative (literal)
3020 .pcrel=1; .preind=1; .postind=0; .writeback=0
3021
3022 The shift/extension information, if any, will be stored in .shifter.
3023
3024 It is the caller's responsibility to check for addressing modes not
3025 supported by the instruction, and to set inst.reloc.type. */
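/* For example, for the operand "[x1,w2,sxtw #2]" the fields are filled in as
   .base_regno = 1, .offset.regno = 2, .offset.is_reg = 1,
   .shifter.kind = AARCH64_MOD_SXTW, .shifter.amount = 2, and
   .preind = 1 with .postind and .writeback both 0.  */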
3026
3027 static bfd_boolean
3028 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
3029 int accept_reg_post_index)
3030 {
3031 char *p = *str;
3032 int reg;
3033 int isreg32, isregzero;
3034 expressionS *exp = &inst.reloc.exp;
3035
3036 if (! skip_past_char (&p, '['))
3037 {
3038 /* =immediate or label. */
3039 operand->addr.pcrel = 1;
3040 operand->addr.preind = 1;
3041
3042 /* #:<reloc_op>:<symbol> */
3043 skip_past_char (&p, '#');
3044 if (reloc && skip_past_char (&p, ':'))
3045 {
3046 bfd_reloc_code_real_type ty;
3047 struct reloc_table_entry *entry;
3048
3049 /* Try to parse a relocation modifier. Anything else is
3050 an error. */
3051 entry = find_reloc_table_entry (&p);
3052 if (! entry)
3053 {
3054 set_syntax_error (_("unknown relocation modifier"));
3055 return FALSE;
3056 }
3057
3058 switch (operand->type)
3059 {
3060 case AARCH64_OPND_ADDR_PCREL21:
3061 /* adr */
3062 ty = entry->adr_type;
3063 break;
3064
3065 default:
3066 ty = entry->ld_literal_type;
3067 break;
3068 }
3069
3070 if (ty == 0)
3071 {
3072 set_syntax_error
3073 (_("this relocation modifier is not allowed on this "
3074 "instruction"));
3075 return FALSE;
3076 }
3077
3078 /* #:<reloc_op>: */
3079 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3080 {
3081 set_syntax_error (_("invalid relocation expression"));
3082 return FALSE;
3083 }
3084
3085 /* #:<reloc_op>:<expr> */
3086 /* Record the relocation type. */
3087 inst.reloc.type = ty;
3088 inst.reloc.pc_rel = entry->pc_rel;
3089 }
3090 else
3091 {
3092
3093 if (skip_past_char (&p, '='))
3094 /* =immediate; need to generate the literal in the literal pool. */
3095 inst.gen_lit_pool = 1;
3096
3097 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3098 {
3099 set_syntax_error (_("invalid address"));
3100 return FALSE;
3101 }
3102 }
3103
3104 *str = p;
3105 return TRUE;
3106 }
3107
3108 /* [ */
3109
3110 /* Accept SP and reject ZR */
3111 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
3112 if (reg == PARSE_FAIL || isreg32)
3113 {
3114 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3115 return FALSE;
3116 }
3117 operand->addr.base_regno = reg;
3118
3119 /* [Xn */
3120 if (skip_past_comma (&p))
3121 {
3122 /* [Xn, */
3123 operand->addr.preind = 1;
3124
3125 /* Reject SP and accept ZR */
3126 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3127 if (reg != PARSE_FAIL)
3128 {
3129 /* [Xn,Rm */
3130 operand->addr.offset.regno = reg;
3131 operand->addr.offset.is_reg = 1;
3132 /* Shifted index. */
3133 if (skip_past_comma (&p))
3134 {
3135 /* [Xn,Rm, */
3136 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3137 /* Use the diagnostics set in parse_shift, so do not set a new
3138 error message here. */
3139 return FALSE;
3140 }
3141 /* We only accept:
3142 [base,Xm{,LSL #imm}]
3143 [base,Xm,SXTX {#imm}]
3144 [base,Wm,(S|U)XTW {#imm}] */
3145 if (operand->shifter.kind == AARCH64_MOD_NONE
3146 || operand->shifter.kind == AARCH64_MOD_LSL
3147 || operand->shifter.kind == AARCH64_MOD_SXTX)
3148 {
3149 if (isreg32)
3150 {
3151 set_syntax_error (_("invalid use of 32-bit register offset"));
3152 return FALSE;
3153 }
3154 }
3155 else if (!isreg32)
3156 {
3157 set_syntax_error (_("invalid use of 64-bit register offset"));
3158 return FALSE;
3159 }
3160 }
3161 else
3162 {
3163 /* [Xn,#:<reloc_op>:<symbol> */
3164 skip_past_char (&p, '#');
3165 if (reloc && skip_past_char (&p, ':'))
3166 {
3167 struct reloc_table_entry *entry;
3168
3169 /* Try to parse a relocation modifier. Anything else is
3170 an error. */
3171 if (!(entry = find_reloc_table_entry (&p)))
3172 {
3173 set_syntax_error (_("unknown relocation modifier"));
3174 return FALSE;
3175 }
3176
3177 if (entry->ldst_type == 0)
3178 {
3179 set_syntax_error
3180 (_("this relocation modifier is not allowed on this "
3181 "instruction"));
3182 return FALSE;
3183 }
3184
3185 /* [Xn,#:<reloc_op>: */
3186 /* We now have the group relocation table entry corresponding to
3187 the name in the assembler source. Next, we parse the
3188 expression. */
3189 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3190 {
3191 set_syntax_error (_("invalid relocation expression"));
3192 return FALSE;
3193 }
3194
3195 /* [Xn,#:<reloc_op>:<expr> */
3196 /* Record the load/store relocation type. */
3197 inst.reloc.type = entry->ldst_type;
3198 inst.reloc.pc_rel = entry->pc_rel;
3199 }
3200 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3201 {
3202 set_syntax_error (_("invalid expression in the address"));
3203 return FALSE;
3204 }
3205 /* [Xn,<expr> */
3206 }
3207 }
3208
3209 if (! skip_past_char (&p, ']'))
3210 {
3211 set_syntax_error (_("']' expected"));
3212 return FALSE;
3213 }
3214
3215 if (skip_past_char (&p, '!'))
3216 {
3217 if (operand->addr.preind && operand->addr.offset.is_reg)
3218 {
3219 set_syntax_error (_("register offset not allowed in pre-indexed "
3220 "addressing mode"));
3221 return FALSE;
3222 }
3223 /* [Xn]! */
3224 operand->addr.writeback = 1;
3225 }
3226 else if (skip_past_comma (&p))
3227 {
3228 /* [Xn], */
3229 operand->addr.postind = 1;
3230 operand->addr.writeback = 1;
3231
3232 if (operand->addr.preind)
3233 {
3234 set_syntax_error (_("cannot combine pre- and post-indexing"));
3235 return FALSE;
3236 }
3237
3238 if (accept_reg_post_index
3239 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3240 &isregzero)) != PARSE_FAIL)
3241 {
3242 /* [Xn],Xm */
3243 if (isreg32)
3244 {
3245 set_syntax_error (_("invalid 32-bit register offset"));
3246 return FALSE;
3247 }
3248 operand->addr.offset.regno = reg;
3249 operand->addr.offset.is_reg = 1;
3250 }
3251 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3252 {
3253 /* [Xn],#expr */
3254 set_syntax_error (_("invalid expression in the address"));
3255 return FALSE;
3256 }
3257 }
3258
3259 /* If at this point neither .preind nor .postind is set, we have a
3260 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3261 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3262 {
3263 if (operand->addr.writeback)
3264 {
3265 /* Reject [Rn]! */
3266 set_syntax_error (_("missing offset in the pre-indexed address"));
3267 return FALSE;
3268 }
3269 operand->addr.preind = 1;
3270 inst.reloc.exp.X_op = O_constant;
3271 inst.reloc.exp.X_add_number = 0;
3272 }
3273
3274 *str = p;
3275 return TRUE;
3276 }
3277
3278 /* Return TRUE on success; otherwise return FALSE. */
3279 static bfd_boolean
3280 parse_address (char **str, aarch64_opnd_info *operand,
3281 int accept_reg_post_index)
3282 {
3283 return parse_address_main (str, operand, 0, accept_reg_post_index);
3284 }
3285
3286 /* Return TRUE on success; otherwise return FALSE. */
3287 static bfd_boolean
3288 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3289 {
3290 return parse_address_main (str, operand, 1, 0);
3291 }
3292
3293 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3294 Return TRUE on success; otherwise return FALSE. */
3295 static bfd_boolean
3296 parse_half (char **str, int *internal_fixup_p)
3297 {
3298 char *p, *saved;
3299 int dummy;
3300
3301 p = *str;
3302 skip_past_char (&p, '#');
3303
3304 gas_assert (internal_fixup_p);
3305 *internal_fixup_p = 0;
3306
3307 if (*p == ':')
3308 {
3309 struct reloc_table_entry *entry;
3310
3311 /* Try to parse a relocation. Anything else is an error. */
3312 ++p;
3313 if (!(entry = find_reloc_table_entry (&p)))
3314 {
3315 set_syntax_error (_("unknown relocation modifier"));
3316 return FALSE;
3317 }
3318
3319 if (entry->movw_type == 0)
3320 {
3321 set_syntax_error
3322 (_("this relocation modifier is not allowed on this instruction"));
3323 return FALSE;
3324 }
3325
3326 inst.reloc.type = entry->movw_type;
3327 }
3328 else
3329 *internal_fixup_p = 1;
3330
3331 /* Avoid parsing a register as a general symbol. */
3332 saved = p;
3333 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3334 return FALSE;
3335 p = saved;
3336
3337 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3338 return FALSE;
3339
3340 *str = p;
3341 return TRUE;
3342 }
3343
3344 /* Parse an operand for an ADRP instruction:
3345 ADRP <Xd>, <label>
3346 Return TRUE on success; otherwise return FALSE. */
3347
3348 static bfd_boolean
3349 parse_adrp (char **str)
3350 {
3351 char *p;
3352
3353 p = *str;
3354 if (*p == ':')
3355 {
3356 struct reloc_table_entry *entry;
3357
3358 /* Try to parse a relocation. Anything else is an error. */
3359 ++p;
3360 if (!(entry = find_reloc_table_entry (&p)))
3361 {
3362 set_syntax_error (_("unknown relocation modifier"));
3363 return FALSE;
3364 }
3365
3366 if (entry->adrp_type == 0)
3367 {
3368 set_syntax_error
3369 (_("this relocation modifier is not allowed on this instruction"));
3370 return FALSE;
3371 }
3372
3373 inst.reloc.type = entry->adrp_type;
3374 }
3375 else
3376 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3377
3378 inst.reloc.pc_rel = 1;
3379
3380 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3381 return FALSE;
3382
3383 *str = p;
3384 return TRUE;
3385 }
3386
3387 /* Miscellaneous. */
3388
3389 /* Parse an option for a preload instruction. Returns the encoding for the
3390 option, or PARSE_FAIL. */
3391
3392 static int
3393 parse_pldop (char **str)
3394 {
3395 char *p, *q;
3396 const struct aarch64_name_value_pair *o;
3397
3398 p = q = *str;
3399 while (ISALNUM (*q))
3400 q++;
3401
3402 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3403 if (!o)
3404 return PARSE_FAIL;
3405
3406 *str = q;
3407 return o->value;
3408 }
3409
3410 /* Parse an option for a barrier instruction. Returns the encoding for the
3411 option, or PARSE_FAIL. */
3412
3413 static int
3414 parse_barrier (char **str)
3415 {
3416 char *p, *q;
3417 const asm_barrier_opt *o;
3418
3419 p = q = *str;
3420 while (ISALPHA (*q))
3421 q++;
3422
3423 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3424 if (!o)
3425 return PARSE_FAIL;
3426
3427 *str = q;
3428 return o->value;
3429 }
3430
3431 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3432 Returns the encoding for the option, or PARSE_FAIL.
3433
3434 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3435 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3436
3437 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3438 field, otherwise as a system register.
3439 */
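/* For example, when IMPLE_DEFINED_P is non-zero, a name of the form
   S<op0>_<op1>_<Cn>_<Cm>_<op2> that is not found in SYS_REGS, such as
   s3_0_c13_c0_3, is accepted and encoded as
   (3 << 14) | (0 << 11) | (13 << 7) | (0 << 3) | 3, i.e. 0xc683.  */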
3440
3441 static int
3442 parse_sys_reg (char **str, struct hash_control *sys_regs,
3443 int imple_defined_p, int pstatefield_p)
3444 {
3445 char *p, *q;
3446 char buf[32];
3447 const aarch64_sys_reg *o;
3448 int value;
3449
3450 p = buf;
3451 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3452 if (p < buf + 31)
3453 *p++ = TOLOWER (*q);
3454 *p = '\0';
3455 /* Assert that BUF is large enough. */
3456 gas_assert (p - buf == q - *str);
3457
3458 o = hash_find (sys_regs, buf);
3459 if (!o)
3460 {
3461 if (!imple_defined_p)
3462 return PARSE_FAIL;
3463 else
3464 {
3465 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3466 unsigned int op0, op1, cn, cm, op2;
3467
3468 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3469 != 5)
3470 return PARSE_FAIL;
3471 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3472 return PARSE_FAIL;
3473 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3474 }
3475 }
3476 else
3477 {
3478 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3479 as_bad (_("selected processor does not support PSTATE field "
3480 "name '%s'"), buf);
3481 if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3482 as_bad (_("selected processor does not support system register "
3483 "name '%s'"), buf);
3484 if (aarch64_sys_reg_deprecated_p (o))
3485 as_warn (_("system register name '%s' is deprecated and may be "
3486 "removed in a future release"), buf);
3487 value = o->value;
3488 }
3489
3490 *str = q;
3491 return value;
3492 }
3493
3494 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3495 for the option, or NULL. */
3496
3497 static const aarch64_sys_ins_reg *
3498 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3499 {
3500 char *p, *q;
3501 char buf[32];
3502 const aarch64_sys_ins_reg *o;
3503
3504 p = buf;
3505 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3506 if (p < buf + 31)
3507 *p++ = TOLOWER (*q);
3508 *p = '\0';
3509
3510 o = hash_find (sys_ins_regs, buf);
3511 if (!o)
3512 return NULL;
3513
3514 *str = q;
3515 return o;
3516 }
3517 \f
3518 #define po_char_or_fail(chr) do { \
3519 if (! skip_past_char (&str, chr)) \
3520 goto failure; \
3521 } while (0)
3522
3523 #define po_reg_or_fail(regtype) do { \
3524 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3525 if (val == PARSE_FAIL) \
3526 { \
3527 set_default_error (); \
3528 goto failure; \
3529 } \
3530 } while (0)
3531
3532 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3533 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3534 &isreg32, &isregzero); \
3535 if (val == PARSE_FAIL) \
3536 { \
3537 set_default_error (); \
3538 goto failure; \
3539 } \
3540 info->reg.regno = val; \
3541 if (isreg32) \
3542 info->qualifier = AARCH64_OPND_QLF_W; \
3543 else \
3544 info->qualifier = AARCH64_OPND_QLF_X; \
3545 } while (0)
3546
3547 #define po_imm_nc_or_fail() do { \
3548 if (! parse_constant_immediate (&str, &val)) \
3549 goto failure; \
3550 } while (0)
3551
3552 #define po_imm_or_fail(min, max) do { \
3553 if (! parse_constant_immediate (&str, &val)) \
3554 goto failure; \
3555 if (val < min || val > max) \
3556 { \
3557 set_fatal_syntax_error (_("immediate value out of range "\
3558 #min " to "#max)); \
3559 goto failure; \
3560 } \
3561 } while (0)
3562
3563 #define po_misc_or_fail(expr) do { \
3564 if (!expr) \
3565 goto failure; \
3566 } while (0)
3567 \f
3568 /* encode the 12-bit imm field of Add/sub immediate */
3569 static inline uint32_t
3570 encode_addsub_imm (uint32_t imm)
3571 {
3572 return imm << 10;
3573 }
3574
3575 /* encode the shift amount field of Add/sub immediate */
3576 static inline uint32_t
3577 encode_addsub_imm_shift_amount (uint32_t cnt)
3578 {
3579 return cnt << 22;
3580 }
3581
3582
3583 /* encode the imm field of Adr instruction */
3584 static inline uint32_t
3585 encode_adr_imm (uint32_t imm)
3586 {
3587 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3588 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3589 }
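/* For example, encode_adr_imm (0x5) places the low two immediate bits at
   [30:29] and bits [20:2] at [23:5], giving 0x20000020.  */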
3590
3591 /* encode the immediate field of Move wide immediate */
3592 static inline uint32_t
3593 encode_movw_imm (uint32_t imm)
3594 {
3595 return imm << 5;
3596 }
3597
3598 /* encode the 26-bit offset of unconditional branch */
3599 static inline uint32_t
3600 encode_branch_ofs_26 (uint32_t ofs)
3601 {
3602 return ofs & ((1 << 26) - 1);
3603 }
3604
3605 /* encode the 19-bit offset of conditional branch and compare & branch */
3606 static inline uint32_t
3607 encode_cond_branch_ofs_19 (uint32_t ofs)
3608 {
3609 return (ofs & ((1 << 19) - 1)) << 5;
3610 }
3611
3612 /* encode the 19-bit offset of ld literal */
3613 static inline uint32_t
3614 encode_ld_lit_ofs_19 (uint32_t ofs)
3615 {
3616 return (ofs & ((1 << 19) - 1)) << 5;
3617 }
3618
3619 /* Encode the 14-bit offset of test & branch. */
3620 static inline uint32_t
3621 encode_tst_branch_ofs_14 (uint32_t ofs)
3622 {
3623 return (ofs & ((1 << 14) - 1)) << 5;
3624 }
3625
3626 /* Encode the 16-bit imm field of svc/hvc/smc. */
3627 static inline uint32_t
3628 encode_svc_imm (uint32_t imm)
3629 {
3630 return imm << 5;
3631 }
3632
3633 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3634 static inline uint32_t
3635 reencode_addsub_switch_add_sub (uint32_t opcode)
3636 {
3637 return opcode ^ (1 << 30);
3638 }
3639
3640 static inline uint32_t
3641 reencode_movzn_to_movz (uint32_t opcode)
3642 {
3643 return opcode | (1 << 30);
3644 }
3645
3646 static inline uint32_t
3647 reencode_movzn_to_movn (uint32_t opcode)
3648 {
3649 return opcode & ~(1 << 30);
3650 }
3651
3652 /* Overall per-instruction processing. */
3653
3654 /* We need to be able to fix up arbitrary expressions in some statements.
3655 This is so that we can handle symbols that are an arbitrary distance from
3656 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3657 which returns part of an address in a form which will be valid for
3658 a data instruction. We do this by pushing the expression into a symbol
3659 in the expr_section, and creating a fix for that. */
3660
3661 static fixS *
3662 fix_new_aarch64 (fragS * frag,
3663 int where,
3664 short int size, expressionS * exp, int pc_rel, int reloc)
3665 {
3666 fixS *new_fix;
3667
3668 switch (exp->X_op)
3669 {
3670 case O_constant:
3671 case O_symbol:
3672 case O_add:
3673 case O_subtract:
3674 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3675 break;
3676
3677 default:
3678 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3679 pc_rel, reloc);
3680 break;
3681 }
3682 return new_fix;
3683 }
3684 \f
3685 /* Diagnostics on operands errors. */
3686
3687 /* By default, output verbose error message.
3688 Disable the verbose error message by -mno-verbose-error. */
3689 static int verbose_error_p = 1;
3690
3691 #ifdef DEBUG_AARCH64
3692 /* N.B. this is only for the purpose of debugging. */
3693 const char* operand_mismatch_kind_names[] =
3694 {
3695 "AARCH64_OPDE_NIL",
3696 "AARCH64_OPDE_RECOVERABLE",
3697 "AARCH64_OPDE_SYNTAX_ERROR",
3698 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3699 "AARCH64_OPDE_INVALID_VARIANT",
3700 "AARCH64_OPDE_OUT_OF_RANGE",
3701 "AARCH64_OPDE_UNALIGNED",
3702 "AARCH64_OPDE_REG_LIST",
3703 "AARCH64_OPDE_OTHER_ERROR",
3704 };
3705 #endif /* DEBUG_AARCH64 */
3706
3707 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3708
3709 When multiple errors of different kinds are found in the same assembly
3710 line, only the error of the highest severity will be picked up for
3711 issuing the diagnostics. */
3712
3713 static inline bfd_boolean
3714 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3715 enum aarch64_operand_error_kind rhs)
3716 {
3717 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3718 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3719 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3720 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3721 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3722 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3723 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3724 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3725 return lhs > rhs;
3726 }
3727
3728 /* Helper routine to get the mnemonic name from the assembly instruction
3729 line; should only be called for diagnostic purposes, as there is a
3730 string copy operation involved, which may affect the runtime
3731 performance if used elsewhere. */
3732
3733 static const char*
3734 get_mnemonic_name (const char *str)
3735 {
3736 static char mnemonic[32];
3737 char *ptr;
3738
3739 /* Get the first 31 bytes and assume that the full name is included. */
3740 strncpy (mnemonic, str, 31);
3741 mnemonic[31] = '\0';
3742
3743 /* Scan up to the end of the mnemonic, which must end in white space,
3744 '.', or end of string. */
3745 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3746 ;
3747
3748 *ptr = '\0';
3749
3750 /* Append '...' to the truncated long name. */
3751 if (ptr - mnemonic == 31)
3752 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3753
3754 return mnemonic;
3755 }
3756
3757 static void
3758 reset_aarch64_instruction (aarch64_instruction *instruction)
3759 {
3760 memset (instruction, '\0', sizeof (aarch64_instruction));
3761 instruction->reloc.type = BFD_RELOC_UNUSED;
3762 }
3763
3764 /* Data structures storing one user error in the assembly code related to
3765 operands. */
3766
3767 struct operand_error_record
3768 {
3769 const aarch64_opcode *opcode;
3770 aarch64_operand_error detail;
3771 struct operand_error_record *next;
3772 };
3773
3774 typedef struct operand_error_record operand_error_record;
3775
3776 struct operand_errors
3777 {
3778 operand_error_record *head;
3779 operand_error_record *tail;
3780 };
3781
3782 typedef struct operand_errors operand_errors;
3783
3784 /* Top-level data structure reporting user errors for the current line of
3785 the assembly code.
3786 The way md_assemble works is that all opcodes sharing the same mnemonic
3787 name are iterated to find a match to the assembly line. In this data
3788 structure, each of the such opcodes will have one operand_error_record
3789 allocated and inserted. In other words, excessive errors related with
3790 a single opcode are disregarded. */
3791 operand_errors operand_error_report;
3792
3793 /* Free record nodes. */
3794 static operand_error_record *free_opnd_error_record_nodes = NULL;
3795
3796 /* Initialize the data structure that stores the operand mismatch
3797 information on assembling one line of the assembly code. */
3798 static void
3799 init_operand_error_report (void)
3800 {
3801 if (operand_error_report.head != NULL)
3802 {
3803 gas_assert (operand_error_report.tail != NULL);
3804 operand_error_report.tail->next = free_opnd_error_record_nodes;
3805 free_opnd_error_record_nodes = operand_error_report.head;
3806 operand_error_report.head = NULL;
3807 operand_error_report.tail = NULL;
3808 return;
3809 }
3810 gas_assert (operand_error_report.tail == NULL);
3811 }
3812
3813 /* Return TRUE if some operand error has been recorded during the
3814 parsing of the current assembly line using the opcode *OPCODE;
3815 otherwise return FALSE. */
3816 static inline bfd_boolean
3817 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3818 {
3819 operand_error_record *record = operand_error_report.head;
3820 return record && record->opcode == opcode;
3821 }
3822
3823 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3824 OPCODE field is initialized with OPCODE.
3825 N.B. only one record for each opcode, i.e. at most one error is
3826 recorded for each instruction template. */
3827
3828 static void
3829 add_operand_error_record (const operand_error_record* new_record)
3830 {
3831 const aarch64_opcode *opcode = new_record->opcode;
3832 operand_error_record* record = operand_error_report.head;
3833
3834 /* The record may have been created for this opcode. If not, we need
3835 to prepare one. */
3836 if (! opcode_has_operand_error_p (opcode))
3837 {
3838 /* Get one empty record. */
3839 if (free_opnd_error_record_nodes == NULL)
3840 {
3841 record = xmalloc (sizeof (operand_error_record));
3842 if (record == NULL)
3843 abort ();
3844 }
3845 else
3846 {
3847 record = free_opnd_error_record_nodes;
3848 free_opnd_error_record_nodes = record->next;
3849 }
3850 record->opcode = opcode;
3851 /* Insert at the head. */
3852 record->next = operand_error_report.head;
3853 operand_error_report.head = record;
3854 if (operand_error_report.tail == NULL)
3855 operand_error_report.tail = record;
3856 }
3857 else if (record->detail.kind != AARCH64_OPDE_NIL
3858 && record->detail.index <= new_record->detail.index
3859 && operand_error_higher_severity_p (record->detail.kind,
3860 new_record->detail.kind))
3861 {
3862 /* In the case of multiple errors found on operands related with a
3863 single opcode, only record the error of the leftmost operand and
3864 only if the error is of higher severity. */
3865 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3866 " the existing error %s on operand %d",
3867 operand_mismatch_kind_names[new_record->detail.kind],
3868 new_record->detail.index,
3869 operand_mismatch_kind_names[record->detail.kind],
3870 record->detail.index);
3871 return;
3872 }
3873
3874 record->detail = new_record->detail;
3875 }
3876
3877 static inline void
3878 record_operand_error_info (const aarch64_opcode *opcode,
3879 aarch64_operand_error *error_info)
3880 {
3881 operand_error_record record;
3882 record.opcode = opcode;
3883 record.detail = *error_info;
3884 add_operand_error_record (&record);
3885 }
3886
3887 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3888 error message *ERROR, for operand IDX (count from 0). */
3889
3890 static void
3891 record_operand_error (const aarch64_opcode *opcode, int idx,
3892 enum aarch64_operand_error_kind kind,
3893 const char* error)
3894 {
3895 aarch64_operand_error info;
3896 memset(&info, 0, sizeof (info));
3897 info.index = idx;
3898 info.kind = kind;
3899 info.error = error;
3900 record_operand_error_info (opcode, &info);
3901 }
3902
3903 static void
3904 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3905 enum aarch64_operand_error_kind kind,
3906 const char* error, const int *extra_data)
3907 {
3908 aarch64_operand_error info;
3909 info.index = idx;
3910 info.kind = kind;
3911 info.error = error;
3912 info.data[0] = extra_data[0];
3913 info.data[1] = extra_data[1];
3914 info.data[2] = extra_data[2];
3915 record_operand_error_info (opcode, &info);
3916 }
3917
3918 static void
3919 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3920 const char* error, int lower_bound,
3921 int upper_bound)
3922 {
3923 int data[3] = {lower_bound, upper_bound, 0};
3924 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3925 error, data);
3926 }
3927
3928 /* Remove the operand error record for *OPCODE. */
3929 static void ATTRIBUTE_UNUSED
3930 remove_operand_error_record (const aarch64_opcode *opcode)
3931 {
3932 if (opcode_has_operand_error_p (opcode))
3933 {
3934 operand_error_record* record = operand_error_report.head;
3935 gas_assert (record != NULL && operand_error_report.tail != NULL);
3936 operand_error_report.head = record->next;
3937 record->next = free_opnd_error_record_nodes;
3938 free_opnd_error_record_nodes = record;
3939 if (operand_error_report.head == NULL)
3940 {
3941 gas_assert (operand_error_report.tail == record);
3942 operand_error_report.tail = NULL;
3943 }
3944 }
3945 }
3946
3947 /* Given the instruction in *INSTR, return the index of the best matched
3948 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3949
3950 Return -1 if there is no qualifier sequence; return the first match
3951 if multiple matches are found. */
3952
3953 static int
3954 find_best_match (const aarch64_inst *instr,
3955 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3956 {
3957 int i, num_opnds, max_num_matched, idx;
3958
3959 num_opnds = aarch64_num_of_operands (instr->opcode);
3960 if (num_opnds == 0)
3961 {
3962 DEBUG_TRACE ("no operand");
3963 return -1;
3964 }
3965
3966 max_num_matched = 0;
3967 idx = -1;
3968
3969 /* For each pattern. */
3970 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3971 {
3972 int j, num_matched;
3973 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3974
3975 /* Most opcodes have far fewer patterns in the list. */
3976 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3977 {
3978 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3979 if (i != 0 && idx == -1)
3980 /* If nothing has been matched, return the 1st sequence. */
3981 idx = 0;
3982 break;
3983 }
3984
3985 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3986 if (*qualifiers == instr->operands[j].qualifier)
3987 ++num_matched;
3988
3989 if (num_matched > max_num_matched)
3990 {
3991 max_num_matched = num_matched;
3992 idx = i;
3993 }
3994 }
3995
3996 DEBUG_TRACE ("return with %d", idx);
3997 return idx;
3998 }
3999
4000 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4001 corresponding operands in *INSTR. */
4002
4003 static inline void
4004 assign_qualifier_sequence (aarch64_inst *instr,
4005 const aarch64_opnd_qualifier_t *qualifiers)
4006 {
4007 int i = 0;
4008 int num_opnds = aarch64_num_of_operands (instr->opcode);
4009 gas_assert (num_opnds);
4010 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4011 instr->operands[i].qualifier = *qualifiers;
4012 }
4013
4014 /* Print operands for diagnostic purposes. */
4015
4016 static void
4017 print_operands (char *buf, const aarch64_opcode *opcode,
4018 const aarch64_opnd_info *opnds)
4019 {
4020 int i;
4021
4022 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4023 {
4024 const size_t size = 128;
4025 char str[size];
4026
4027 /* We rely mainly on the opcode operand info; however, we also look into
4028 the inst->operands to support the printing of an optional
4029 operand.
4030 The two operand codes should be the same in all cases, apart from
4031 when the operand can be optional. */
4032 if (opcode->operands[i] == AARCH64_OPND_NIL
4033 || opnds[i].type == AARCH64_OPND_NIL)
4034 break;
4035
4036 /* Generate the operand string in STR. */
4037 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
4038
4039 /* Delimiter. */
4040 if (str[0] != '\0')
4041 strcat (buf, i == 0 ? " " : ",");
4042
4043 /* Append the operand string. */
4044 strcat (buf, str);
4045 }
4046 }
4047
4048 /* Send to stderr a string as information. */
4049
4050 static void
4051 output_info (const char *format, ...)
4052 {
4053 char *file;
4054 unsigned int line;
4055 va_list args;
4056
4057 as_where (&file, &line);
4058 if (file)
4059 {
4060 if (line != 0)
4061 fprintf (stderr, "%s:%u: ", file, line);
4062 else
4063 fprintf (stderr, "%s: ", file);
4064 }
4065 fprintf (stderr, _("Info: "));
4066 va_start (args, format);
4067 vfprintf (stderr, format, args);
4068 va_end (args);
4069 (void) putc ('\n', stderr);
4070 }
4071
4072 /* Output one operand error record. */
4073
4074 static void
4075 output_operand_error_record (const operand_error_record *record, char *str)
4076 {
4077 const aarch64_operand_error *detail = &record->detail;
4078 int idx = detail->index;
4079 const aarch64_opcode *opcode = record->opcode;
4080 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4081 : AARCH64_OPND_NIL);
4082
4083 switch (detail->kind)
4084 {
4085 case AARCH64_OPDE_NIL:
4086 gas_assert (0);
4087 break;
4088
4089 case AARCH64_OPDE_SYNTAX_ERROR:
4090 case AARCH64_OPDE_RECOVERABLE:
4091 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4092 case AARCH64_OPDE_OTHER_ERROR:
4093       /* Use the prepared error message if there is one; otherwise use the
4094 	 operand description string to describe the error.  */
4095 if (detail->error != NULL)
4096 {
4097 if (idx < 0)
4098 as_bad (_("%s -- `%s'"), detail->error, str);
4099 else
4100 as_bad (_("%s at operand %d -- `%s'"),
4101 detail->error, idx + 1, str);
4102 }
4103 else
4104 {
4105 gas_assert (idx >= 0);
4106 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
4107 aarch64_get_operand_desc (opd_code), str);
4108 }
4109 break;
4110
4111 case AARCH64_OPDE_INVALID_VARIANT:
4112 as_bad (_("operand mismatch -- `%s'"), str);
4113 if (verbose_error_p)
4114 {
4115 /* We will try to correct the erroneous instruction and also provide
4116 more information e.g. all other valid variants.
4117
4118 The string representation of the corrected instruction and other
4119 valid variants are generated by
4120
4121 1) obtaining the intermediate representation of the erroneous
4122 instruction;
4123 2) manipulating the IR, e.g. replacing the operand qualifier;
4124 3) printing out the instruction by calling the printer functions
4125 shared with the disassembler.
4126
4127 The limitation of this method is that the exact input assembly
4128 line cannot be accurately reproduced in some cases, for example an
4129 optional operand present in the actual assembly line will be
4130 omitted in the output; likewise for the optional syntax rules,
4131 e.g. the # before the immediate. Another limitation is that the
4132 assembly symbols and relocation operations in the assembly line
4133 currently cannot be printed out in the error report. Last but not
4134 	     least, when other errors co-exist with this one, the
4135 	     'corrected' instruction may still be incorrect, e.g. given
4136 'ldnp h0,h1,[x0,#6]!'
4137 this diagnosis will provide the version:
4138 'ldnp s0,s1,[x0,#6]!'
4139 which is still not right. */
4140 size_t len = strlen (get_mnemonic_name (str));
4141 int i, qlf_idx;
4142 bfd_boolean result;
4143 const size_t size = 2048;
4144 char buf[size];
4145 aarch64_inst *inst_base = &inst.base;
4146 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4147
4148 /* Init inst. */
4149 reset_aarch64_instruction (&inst);
4150 inst_base->opcode = opcode;
4151
4152 /* Reset the error report so that there is no side effect on the
4153 following operand parsing. */
4154 init_operand_error_report ();
4155
4156 /* Fill inst. */
4157 result = parse_operands (str + len, opcode)
4158 && programmer_friendly_fixup (&inst);
4159 gas_assert (result);
4160 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4161 NULL, NULL);
4162 gas_assert (!result);
4163
4164 /* Find the most matched qualifier sequence. */
4165 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4166 gas_assert (qlf_idx > -1);
4167
4168 /* Assign the qualifiers. */
4169 assign_qualifier_sequence (inst_base,
4170 opcode->qualifiers_list[qlf_idx]);
4171
4172 /* Print the hint. */
4173 output_info (_(" did you mean this?"));
4174 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4175 print_operands (buf, opcode, inst_base->operands);
4176 output_info (_(" %s"), buf);
4177
4178 	  /* Print out other variant(s) if there are any.  */
4179 if (qlf_idx != 0 ||
4180 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4181 output_info (_(" other valid variant(s):"));
4182
4183 /* For each pattern. */
4184 qualifiers_list = opcode->qualifiers_list;
4185 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4186 {
4187 	      /* Most opcodes have far fewer patterns in the list.
4188 		 The first NIL qualifier indicates the end of the list.  */
4189 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4190 break;
4191
4192 if (i != qlf_idx)
4193 {
4194 		  /* Mnemonic name.  */
4195 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4196
4197 /* Assign the qualifiers. */
4198 assign_qualifier_sequence (inst_base, *qualifiers_list);
4199
4200 /* Print instruction. */
4201 print_operands (buf, opcode, inst_base->operands);
4202
4203 output_info (_(" %s"), buf);
4204 }
4205 }
4206 }
4207 break;
4208
4209 case AARCH64_OPDE_OUT_OF_RANGE:
4210 if (detail->data[0] != detail->data[1])
4211 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4212 detail->error ? detail->error : _("immediate value"),
4213 detail->data[0], detail->data[1], idx + 1, str);
4214 else
4215 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4216 detail->error ? detail->error : _("immediate value"),
4217 detail->data[0], idx + 1, str);
4218 break;
4219
4220 case AARCH64_OPDE_REG_LIST:
4221 if (detail->data[0] == 1)
4222 as_bad (_("invalid number of registers in the list; "
4223 "only 1 register is expected at operand %d -- `%s'"),
4224 idx + 1, str);
4225 else
4226 as_bad (_("invalid number of registers in the list; "
4227 "%d registers are expected at operand %d -- `%s'"),
4228 detail->data[0], idx + 1, str);
4229 break;
4230
4231 case AARCH64_OPDE_UNALIGNED:
4232 as_bad (_("immediate value should be a multiple of "
4233 "%d at operand %d -- `%s'"),
4234 detail->data[0], idx + 1, str);
4235 break;
4236
4237 default:
4238 gas_assert (0);
4239 break;
4240 }
4241 }
4242
4243 /* Process and output the error message about the operand mismatching.
4244
4245    When this function is called, the operand error information has
4246    been collected for an assembly line and there will be multiple
4247    errors in the case of multiple instruction templates; output the
4248 error message that most closely describes the problem. */
4249
4250 static void
4251 output_operand_error_report (char *str)
4252 {
4253 int largest_error_pos;
4254 const char *msg = NULL;
4255 enum aarch64_operand_error_kind kind;
4256 operand_error_record *curr;
4257 operand_error_record *head = operand_error_report.head;
4258 operand_error_record *record = NULL;
4259
4260 /* No error to report. */
4261 if (head == NULL)
4262 return;
4263
4264 gas_assert (head != NULL && operand_error_report.tail != NULL);
4265
4266 /* Only one error. */
4267 if (head == operand_error_report.tail)
4268 {
4269 DEBUG_TRACE ("single opcode entry with error kind: %s",
4270 operand_mismatch_kind_names[head->detail.kind]);
4271 output_operand_error_record (head, str);
4272 return;
4273 }
4274
4275 /* Find the error kind of the highest severity. */
4276 DEBUG_TRACE ("multiple opcode entres with error kind");
4277 kind = AARCH64_OPDE_NIL;
4278 for (curr = head; curr != NULL; curr = curr->next)
4279 {
4280 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4281 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4282 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4283 kind = curr->detail.kind;
4284 }
4285 gas_assert (kind != AARCH64_OPDE_NIL);
4286
4287   /* Pick up one of the errors of KIND to report.  */
4288 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4289 for (curr = head; curr != NULL; curr = curr->next)
4290 {
4291 if (curr->detail.kind != kind)
4292 continue;
4293 /* If there are multiple errors, pick up the one with the highest
4294 mismatching operand index. In the case of multiple errors with
4295 the equally highest operand index, pick up the first one or the
4296 first one with non-NULL error message. */
4297 if (curr->detail.index > largest_error_pos
4298 || (curr->detail.index == largest_error_pos && msg == NULL
4299 && curr->detail.error != NULL))
4300 {
4301 largest_error_pos = curr->detail.index;
4302 record = curr;
4303 msg = record->detail.error;
4304 }
4305 }
4306
4307 gas_assert (largest_error_pos != -2 && record != NULL);
4308 DEBUG_TRACE ("Pick up error kind %s to report",
4309 operand_mismatch_kind_names[record->detail.kind]);
4310
4311 /* Output. */
4312 output_operand_error_record (record, str);
4313 }
4314 \f
4315 /* Write an AARCH64 instruction to buf - always little-endian. */
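/* For example, the AArch64 NOP encoding 0xd503201f is emitted as the byte
   sequence 1f 20 03 d5, regardless of the host byte order.  */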
4316 static void
4317 put_aarch64_insn (char *buf, uint32_t insn)
4318 {
4319 unsigned char *where = (unsigned char *) buf;
4320 where[0] = insn;
4321 where[1] = insn >> 8;
4322 where[2] = insn >> 16;
4323 where[3] = insn >> 24;
4324 }
4325
4326 static uint32_t
4327 get_aarch64_insn (char *buf)
4328 {
4329 unsigned char *where = (unsigned char *) buf;
4330 uint32_t result;
4331 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4332 return result;
4333 }
4334
4335 static void
4336 output_inst (struct aarch64_inst *new_inst)
4337 {
4338 char *to = NULL;
4339
4340 to = frag_more (INSN_SIZE);
4341
4342 frag_now->tc_frag_data.recorded = 1;
4343
4344 put_aarch64_insn (to, inst.base.value);
4345
4346 if (inst.reloc.type != BFD_RELOC_UNUSED)
4347 {
4348 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4349 INSN_SIZE, &inst.reloc.exp,
4350 inst.reloc.pc_rel,
4351 inst.reloc.type);
4352 DEBUG_TRACE ("Prepared relocation fix up");
4353 /* Don't check the addend value against the instruction size,
4354 that's the job of our code in md_apply_fix(). */
4355 fixp->fx_no_overflow = 1;
4356 if (new_inst != NULL)
4357 fixp->tc_fix_data.inst = new_inst;
4358 if (aarch64_gas_internal_fixup_p ())
4359 {
4360 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4361 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4362 fixp->fx_addnumber = inst.reloc.flags;
4363 }
4364 }
4365
4366 dwarf2_emit_insn (INSN_SIZE);
4367 }
4368
4369 /* Link together opcodes of the same name. */
4370
4371 struct templates
4372 {
4373 aarch64_opcode *opcode;
4374 struct templates *next;
4375 };
4376
4377 typedef struct templates templates;
4378
4379 static templates *
4380 lookup_mnemonic (const char *start, int len)
4381 {
4382 templates *templ = NULL;
4383
4384 templ = hash_find_n (aarch64_ops_hsh, start, len);
4385 return templ;
4386 }
4387
4388 /* Subroutine of md_assemble, responsible for looking up the primary
4389 opcode from the mnemonic the user wrote. STR points to the
4390 beginning of the mnemonic. */
4391
4392 static templates *
4393 opcode_lookup (char **str)
4394 {
4395 char *end, *base;
4396 const aarch64_cond *cond;
4397 char condname[16];
4398 int len;
4399
4400 /* Scan up to the end of the mnemonic, which must end in white space,
4401 '.', or end of string. */
4402 for (base = end = *str; is_part_of_name(*end); end++)
4403 if (*end == '.')
4404 break;
4405
4406 if (end == base)
4407 return 0;
4408
4409 inst.cond = COND_ALWAYS;
4410
4411 /* Handle a possible condition. */
4412 if (end[0] == '.')
4413 {
4414 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4415 if (cond)
4416 {
4417 inst.cond = cond->value;
4418 *str = end + 3;
4419 }
4420 else
4421 {
4422 *str = end;
4423 return 0;
4424 }
4425 }
4426 else
4427 *str = end;
4428
4429 len = end - base;
4430
4431 if (inst.cond == COND_ALWAYS)
4432 {
4433 /* Look for unaffixed mnemonic. */
4434 return lookup_mnemonic (base, len);
4435 }
4436 else if (len <= 13)
4437 {
4438       /* Append ".c" to the mnemonic if conditional.  */
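      /* For example, "b.eq" is looked up under the name "b.c"; the EQ
	 condition itself has already been recorded in inst.cond above.  */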
4439 memcpy (condname, base, len);
4440 memcpy (condname + len, ".c", 2);
4441 base = condname;
4442 len += 2;
4443 return lookup_mnemonic (base, len);
4444 }
4445
4446 return NULL;
4447 }
4448
4449 /* Internal helper routine converting a vector neon_type_el structure
4450 *VECTYPE to a corresponding operand qualifier. */
4451
4452 static inline aarch64_opnd_qualifier_t
4453 vectype_to_qualifier (const struct neon_type_el *vectype)
4454 {
4455 /* Element size in bytes indexed by neon_el_type. */
4456 const unsigned char ele_size[5]
4457 = {1, 2, 4, 8, 16};
4458
4459 if (!vectype->defined || vectype->type == NT_invtype)
4460 goto vectype_conversion_fail;
4461
4462 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4463
4464 if (vectype->defined & NTA_HASINDEX)
4465 /* Vector element register. */
4466 return AARCH64_OPND_QLF_S_B + vectype->type;
4467 else
4468 {
4469 /* Vector register. */
4470 int reg_size = ele_size[vectype->type] * vectype->width;
4471 unsigned offset;
4472 if (reg_size != 16 && reg_size != 8)
4473 goto vectype_conversion_fail;
4474 /* The conversion is calculated based on the relation of the order of
4475 qualifiers to the vector element size and vector register size. */
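      /* For example, ".4s" (NT_s, width 4) gives reg_size == 16 and hence
	 offset == 5, i.e. AARCH64_OPND_QLF_V_4S in the expected qualifier
	 ordering; ".8b" gives offset == 0, i.e. AARCH64_OPND_QLF_V_8B.  */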
4476 offset = (vectype->type == NT_q)
4477 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4478 gas_assert (offset <= 8);
4479 return AARCH64_OPND_QLF_V_8B + offset;
4480 }
4481
4482 vectype_conversion_fail:
4483 first_error (_("bad vector arrangement type"));
4484 return AARCH64_OPND_QLF_NIL;
4485 }
4486
4487 /* Process an optional operand that has been omitted from the assembly line.
4488 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4489 instruction's opcode entry while IDX is the index of this omitted operand.
4490 */
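/* For example, a plain "ret" reaches this code for its omitted Rn operand,
   which is then filled in with the default register number recorded in the
   opcode table (30, i.e. x30).  */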
4491
4492 static void
4493 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4494 int idx, aarch64_opnd_info *operand)
4495 {
4496 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4497 gas_assert (optional_operand_p (opcode, idx));
4498 gas_assert (!operand->present);
4499
4500 switch (type)
4501 {
4502 case AARCH64_OPND_Rd:
4503 case AARCH64_OPND_Rn:
4504 case AARCH64_OPND_Rm:
4505 case AARCH64_OPND_Rt:
4506 case AARCH64_OPND_Rt2:
4507 case AARCH64_OPND_Rs:
4508 case AARCH64_OPND_Ra:
4509 case AARCH64_OPND_Rt_SYS:
4510 case AARCH64_OPND_Rd_SP:
4511 case AARCH64_OPND_Rn_SP:
4512 case AARCH64_OPND_Fd:
4513 case AARCH64_OPND_Fn:
4514 case AARCH64_OPND_Fm:
4515 case AARCH64_OPND_Fa:
4516 case AARCH64_OPND_Ft:
4517 case AARCH64_OPND_Ft2:
4518 case AARCH64_OPND_Sd:
4519 case AARCH64_OPND_Sn:
4520 case AARCH64_OPND_Sm:
4521 case AARCH64_OPND_Vd:
4522 case AARCH64_OPND_Vn:
4523 case AARCH64_OPND_Vm:
4524 case AARCH64_OPND_VdD1:
4525 case AARCH64_OPND_VnD1:
4526 operand->reg.regno = default_value;
4527 break;
4528
4529 case AARCH64_OPND_Ed:
4530 case AARCH64_OPND_En:
4531 case AARCH64_OPND_Em:
4532 operand->reglane.regno = default_value;
4533 break;
4534
4535 case AARCH64_OPND_IDX:
4536 case AARCH64_OPND_BIT_NUM:
4537 case AARCH64_OPND_IMMR:
4538 case AARCH64_OPND_IMMS:
4539 case AARCH64_OPND_SHLL_IMM:
4540 case AARCH64_OPND_IMM_VLSL:
4541 case AARCH64_OPND_IMM_VLSR:
4542 case AARCH64_OPND_CCMP_IMM:
4543 case AARCH64_OPND_FBITS:
4544 case AARCH64_OPND_UIMM4:
4545 case AARCH64_OPND_UIMM3_OP1:
4546 case AARCH64_OPND_UIMM3_OP2:
4547 case AARCH64_OPND_IMM:
4548 case AARCH64_OPND_WIDTH:
4549 case AARCH64_OPND_UIMM7:
4550 case AARCH64_OPND_NZCV:
4551 operand->imm.value = default_value;
4552 break;
4553
4554 case AARCH64_OPND_EXCEPTION:
4555 inst.reloc.type = BFD_RELOC_UNUSED;
4556 break;
4557
4558 case AARCH64_OPND_BARRIER_ISB:
4559       operand->barrier = aarch64_barrier_options + default_value;
      break;
4560
4561 default:
4562 break;
4563 }
4564 }
4565
4566 /* Process the relocation type for move wide instructions.
4567 Return TRUE on success; otherwise return FALSE. */
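/* For example, "movz x0, #:abs_g1:sym" carries BFD_RELOC_AARCH64_MOVW_G1 and
   therefore gets an implicit LSL #16 applied to operand 1 below.  */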
4568
4569 static bfd_boolean
4570 process_movw_reloc_info (void)
4571 {
4572 int is32;
4573 unsigned shift;
4574
4575 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4576
4577 if (inst.base.opcode->op == OP_MOVK)
4578 switch (inst.reloc.type)
4579 {
4580 case BFD_RELOC_AARCH64_MOVW_G0_S:
4581 case BFD_RELOC_AARCH64_MOVW_G1_S:
4582 case BFD_RELOC_AARCH64_MOVW_G2_S:
4583 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4584 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4585 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4586 set_syntax_error
4587 (_("the specified relocation type is not allowed for MOVK"));
4588 return FALSE;
4589 default:
4590 break;
4591 }
4592
4593 switch (inst.reloc.type)
4594 {
4595 case BFD_RELOC_AARCH64_MOVW_G0:
4596 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4597 case BFD_RELOC_AARCH64_MOVW_G0_S:
4598 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4599 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4600 shift = 0;
4601 break;
4602 case BFD_RELOC_AARCH64_MOVW_G1:
4603 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4604 case BFD_RELOC_AARCH64_MOVW_G1_S:
4605 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4606 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4607 shift = 16;
4608 break;
4609 case BFD_RELOC_AARCH64_MOVW_G2:
4610 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4611 case BFD_RELOC_AARCH64_MOVW_G2_S:
4612 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4613 if (is32)
4614 {
4615 set_fatal_syntax_error
4616 (_("the specified relocation type is not allowed for 32-bit "
4617 "register"));
4618 return FALSE;
4619 }
4620 shift = 32;
4621 break;
4622 case BFD_RELOC_AARCH64_MOVW_G3:
4623 if (is32)
4624 {
4625 set_fatal_syntax_error
4626 (_("the specified relocation type is not allowed for 32-bit "
4627 "register"));
4628 return FALSE;
4629 }
4630 shift = 48;
4631 break;
4632 default:
4633 /* More cases should be added when more MOVW-related relocation types
4634 are supported in GAS. */
4635 gas_assert (aarch64_gas_internal_fixup_p ());
4636 /* The shift amount should have already been set by the parser. */
4637 return TRUE;
4638 }
4639 inst.base.operands[1].shifter.amount = shift;
4640 return TRUE;
4641 }
4642
4643 /* A primitive log calculator.  */
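/* For example, get_logsz (4) returns 2 and get_logsz (16) returns 4; sizes
   that are not powers of two trip the assertion below.  */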
4644
4645 static inline unsigned int
4646 get_logsz (unsigned int size)
4647 {
4648 const unsigned char ls[16] =
4649 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4650 if (size > 16)
4651 {
4652 gas_assert (0);
4653 return -1;
4654 }
4655 gas_assert (ls[size - 1] != (unsigned char)-1);
4656 return ls[size - 1];
4657 }
4658
4659 /* Determine and return the real reloc type code for an instruction
4660 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
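/* For example, "ldr x0, [x1, #:lo12:sym]" transfers 8 bytes, so logsz is 3
   and BFD_RELOC_AARCH64_LDST64_LO12 is returned.  */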
4661
4662 static inline bfd_reloc_code_real_type
4663 ldst_lo12_determine_real_reloc_type (void)
4664 {
4665 int logsz;
4666 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4667 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4668
4669 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4670 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4671 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4672 BFD_RELOC_AARCH64_LDST128_LO12
4673 };
4674
4675 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4676 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4677
4678 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4679 opd1_qlf =
4680 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4681 1, opd0_qlf, 0);
4682 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4683
4684 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4685 gas_assert (logsz >= 0 && logsz <= 4);
4686
4687 return reloc_ldst_lo12[logsz];
4688 }
4689
4690 /* Check whether a register list REGINFO is valid. The registers must be
4691 numbered in increasing order (modulo 32), in increments of one or two.
4692
4693 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4694 increments of two.
4695
4696 Return FALSE if such a register list is invalid, otherwise return TRUE. */
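/* For example, { v30.8b, v31.8b, v0.8b } is accepted because register numbers
   are compared modulo 32, whereas { v0.8b, v2.8b } is only accepted when
   ACCEPT_ALTERNATE is non-zero.  */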
4697
4698 static bfd_boolean
4699 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4700 {
4701 uint32_t i, nb_regs, prev_regno, incr;
4702
4703 nb_regs = 1 + (reginfo & 0x3);
4704 reginfo >>= 2;
4705 prev_regno = reginfo & 0x1f;
4706 incr = accept_alternate ? 2 : 1;
4707
4708 for (i = 1; i < nb_regs; ++i)
4709 {
4710 uint32_t curr_regno;
4711 reginfo >>= 5;
4712 curr_regno = reginfo & 0x1f;
4713 if (curr_regno != ((prev_regno + incr) & 0x1f))
4714 return FALSE;
4715 prev_regno = curr_regno;
4716 }
4717
4718 return TRUE;
4719 }
4720
4721 /* Generic instruction operand parser. This does no encoding and no
4722 semantic validation; it merely squirrels values away in the inst
4723 structure. Returns TRUE or FALSE depending on whether the
4724 specified grammar matched. */
4725
4726 static bfd_boolean
4727 parse_operands (char *str, const aarch64_opcode *opcode)
4728 {
4729 int i;
4730 char *backtrack_pos = 0;
4731 const enum aarch64_opnd *operands = opcode->operands;
4732
4733 clear_error ();
4734 skip_whitespace (str);
4735
4736 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4737 {
4738 int64_t val;
4739 int isreg32, isregzero;
4740 int comma_skipped_p = 0;
4741 aarch64_reg_type rtype;
4742 struct neon_type_el vectype;
4743 aarch64_opnd_info *info = &inst.base.operands[i];
4744
4745 DEBUG_TRACE ("parse operand %d", i);
4746
4747 /* Assign the operand code. */
4748 info->type = operands[i];
4749
4750 if (optional_operand_p (opcode, i))
4751 {
4752 /* Remember where we are in case we need to backtrack. */
4753 gas_assert (!backtrack_pos);
4754 backtrack_pos = str;
4755 }
4756
4757       /* Expect comma between operands; the backtrack mechanism will take
4758 	 care of cases of omitted optional operands.  */
4759 if (i > 0 && ! skip_past_char (&str, ','))
4760 {
4761 set_syntax_error (_("comma expected between operands"));
4762 goto failure;
4763 }
4764 else
4765 comma_skipped_p = 1;
4766
4767 switch (operands[i])
4768 {
4769 case AARCH64_OPND_Rd:
4770 case AARCH64_OPND_Rn:
4771 case AARCH64_OPND_Rm:
4772 case AARCH64_OPND_Rt:
4773 case AARCH64_OPND_Rt2:
4774 case AARCH64_OPND_Rs:
4775 case AARCH64_OPND_Ra:
4776 case AARCH64_OPND_Rt_SYS:
4777 case AARCH64_OPND_PAIRREG:
4778 po_int_reg_or_fail (1, 0);
4779 break;
4780
4781 case AARCH64_OPND_Rd_SP:
4782 case AARCH64_OPND_Rn_SP:
4783 po_int_reg_or_fail (0, 1);
4784 break;
4785
4786 case AARCH64_OPND_Rm_EXT:
4787 case AARCH64_OPND_Rm_SFT:
4788 po_misc_or_fail (parse_shifter_operand
4789 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4790 ? SHIFTED_ARITH_IMM
4791 : SHIFTED_LOGIC_IMM)));
4792 if (!info->shifter.operator_present)
4793 {
4794 /* Default to LSL if not present. Libopcodes prefers shifter
4795 kind to be explicit. */
4796 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4797 info->shifter.kind = AARCH64_MOD_LSL;
4798 	      /* For Rm_EXT, libopcodes will carry out a further check on whether
4799 		 or not the stack pointer is used in the instruction (recall that
4800 		 "the extend operator is not optional unless at least one of
4801 		 "Rd" or "Rn" is '11111' (i.e. WSP)").  */
4802 }
4803 break;
4804
4805 case AARCH64_OPND_Fd:
4806 case AARCH64_OPND_Fn:
4807 case AARCH64_OPND_Fm:
4808 case AARCH64_OPND_Fa:
4809 case AARCH64_OPND_Ft:
4810 case AARCH64_OPND_Ft2:
4811 case AARCH64_OPND_Sd:
4812 case AARCH64_OPND_Sn:
4813 case AARCH64_OPND_Sm:
4814 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4815 if (val == PARSE_FAIL)
4816 {
4817 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4818 goto failure;
4819 }
4820 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4821
4822 info->reg.regno = val;
4823 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4824 break;
4825
4826 case AARCH64_OPND_Vd:
4827 case AARCH64_OPND_Vn:
4828 case AARCH64_OPND_Vm:
4829 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4830 if (val == PARSE_FAIL)
4831 {
4832 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4833 goto failure;
4834 }
4835 if (vectype.defined & NTA_HASINDEX)
4836 goto failure;
4837
4838 info->reg.regno = val;
4839 info->qualifier = vectype_to_qualifier (&vectype);
4840 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4841 goto failure;
4842 break;
4843
4844 case AARCH64_OPND_VdD1:
4845 case AARCH64_OPND_VnD1:
4846 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4847 if (val == PARSE_FAIL)
4848 {
4849 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4850 goto failure;
4851 }
4852 if (vectype.type != NT_d || vectype.index != 1)
4853 {
4854 set_fatal_syntax_error
4855 (_("the top half of a 128-bit FP/SIMD register is expected"));
4856 goto failure;
4857 }
4858 info->reg.regno = val;
4859 	  /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
4860 here; it is correct for the purpose of encoding/decoding since
4861 only the register number is explicitly encoded in the related
4862 instructions, although this appears a bit hacky. */
4863 info->qualifier = AARCH64_OPND_QLF_S_D;
4864 break;
4865
4866 case AARCH64_OPND_Ed:
4867 case AARCH64_OPND_En:
4868 case AARCH64_OPND_Em:
4869 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4870 if (val == PARSE_FAIL)
4871 {
4872 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4873 goto failure;
4874 }
4875 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4876 goto failure;
4877
4878 info->reglane.regno = val;
4879 info->reglane.index = vectype.index;
4880 info->qualifier = vectype_to_qualifier (&vectype);
4881 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4882 goto failure;
4883 break;
4884
4885 case AARCH64_OPND_LVn:
4886 case AARCH64_OPND_LVt:
4887 case AARCH64_OPND_LVt_AL:
4888 case AARCH64_OPND_LEt:
4889 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4890 goto failure;
4891 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4892 {
4893 set_fatal_syntax_error (_("invalid register list"));
4894 goto failure;
4895 }
4896 info->reglist.first_regno = (val >> 2) & 0x1f;
4897 info->reglist.num_regs = (val & 0x3) + 1;
4898 if (operands[i] == AARCH64_OPND_LEt)
4899 {
4900 if (!(vectype.defined & NTA_HASINDEX))
4901 goto failure;
4902 info->reglist.has_index = 1;
4903 info->reglist.index = vectype.index;
4904 }
4905 else if (!(vectype.defined & NTA_HASTYPE))
4906 goto failure;
4907 info->qualifier = vectype_to_qualifier (&vectype);
4908 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4909 goto failure;
4910 break;
4911
4912 case AARCH64_OPND_Cn:
4913 case AARCH64_OPND_Cm:
4914 po_reg_or_fail (REG_TYPE_CN);
4915 if (val > 15)
4916 {
4917 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4918 goto failure;
4919 }
4920 inst.base.operands[i].reg.regno = val;
4921 break;
4922
4923 case AARCH64_OPND_SHLL_IMM:
4924 case AARCH64_OPND_IMM_VLSR:
4925 po_imm_or_fail (1, 64);
4926 info->imm.value = val;
4927 break;
4928
4929 case AARCH64_OPND_CCMP_IMM:
4930 case AARCH64_OPND_FBITS:
4931 case AARCH64_OPND_UIMM4:
4932 case AARCH64_OPND_UIMM3_OP1:
4933 case AARCH64_OPND_UIMM3_OP2:
4934 case AARCH64_OPND_IMM_VLSL:
4935 case AARCH64_OPND_IMM:
4936 case AARCH64_OPND_WIDTH:
4937 po_imm_nc_or_fail ();
4938 info->imm.value = val;
4939 break;
4940
4941 case AARCH64_OPND_UIMM7:
4942 po_imm_or_fail (0, 127);
4943 info->imm.value = val;
4944 break;
4945
4946 case AARCH64_OPND_IDX:
4947 case AARCH64_OPND_BIT_NUM:
4948 case AARCH64_OPND_IMMR:
4949 case AARCH64_OPND_IMMS:
4950 po_imm_or_fail (0, 63);
4951 info->imm.value = val;
4952 break;
4953
4954 case AARCH64_OPND_IMM0:
4955 po_imm_nc_or_fail ();
4956 if (val != 0)
4957 {
4958 set_fatal_syntax_error (_("immediate zero expected"));
4959 goto failure;
4960 }
4961 info->imm.value = 0;
4962 break;
4963
4964 case AARCH64_OPND_FPIMM0:
4965 {
4966 int qfloat;
4967 bfd_boolean res1 = FALSE, res2 = FALSE;
4968 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4969 it is probably not worth the effort to support it. */
4970 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4971 && !(res2 = parse_constant_immediate (&str, &val)))
4972 goto failure;
4973 if ((res1 && qfloat == 0) || (res2 && val == 0))
4974 {
4975 info->imm.value = 0;
4976 info->imm.is_fp = 1;
4977 break;
4978 }
4979 set_fatal_syntax_error (_("immediate zero expected"));
4980 goto failure;
4981 }
4982
4983 case AARCH64_OPND_IMM_MOV:
4984 {
4985 char *saved = str;
4986 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
4987 reg_name_p (str, REG_TYPE_VN))
4988 goto failure;
4989 str = saved;
4990 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4991 GE_OPT_PREFIX, 1));
4992 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4993 later. fix_mov_imm_insn will try to determine a machine
4994 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4995 message if the immediate cannot be moved by a single
4996 instruction. */
4997 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4998 inst.base.operands[i].skip = 1;
4999 }
5000 break;
5001
5002 case AARCH64_OPND_SIMD_IMM:
5003 case AARCH64_OPND_SIMD_IMM_SFT:
5004 if (! parse_big_immediate (&str, &val))
5005 goto failure;
5006 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5007 /* addr_off_p */ 0,
5008 /* need_libopcodes_p */ 1,
5009 /* skip_p */ 1);
5010 /* Parse shift.
5011 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5012 shift, we don't check it here; we leave the checking to
5013 the libopcodes (operand_general_constraint_met_p). By
5014 doing this, we achieve better diagnostics. */
5015 if (skip_past_comma (&str)
5016 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5017 goto failure;
5018 if (!info->shifter.operator_present
5019 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5020 {
5021 /* Default to LSL if not present. Libopcodes prefers shifter
5022 kind to be explicit. */
5023 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5024 info->shifter.kind = AARCH64_MOD_LSL;
5025 }
5026 break;
5027
5028 case AARCH64_OPND_FPIMM:
5029 case AARCH64_OPND_SIMD_FPIMM:
5030 {
5031 int qfloat;
5032 bfd_boolean dp_p
5033 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
5034 == 8);
5035 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
5036 goto failure;
5037 if (qfloat == 0)
5038 {
5039 set_fatal_syntax_error (_("invalid floating-point constant"));
5040 goto failure;
5041 }
5042 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5043 inst.base.operands[i].imm.is_fp = 1;
5044 }
5045 break;
5046
5047 case AARCH64_OPND_LIMM:
5048 po_misc_or_fail (parse_shifter_operand (&str, info,
5049 SHIFTED_LOGIC_IMM));
5050 if (info->shifter.operator_present)
5051 {
5052 set_fatal_syntax_error
5053 (_("shift not allowed for bitmask immediate"));
5054 goto failure;
5055 }
5056 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5057 /* addr_off_p */ 0,
5058 /* need_libopcodes_p */ 1,
5059 /* skip_p */ 1);
5060 break;
5061
5062 case AARCH64_OPND_AIMM:
5063 if (opcode->op == OP_ADD)
5064 /* ADD may have relocation types. */
5065 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5066 SHIFTED_ARITH_IMM));
5067 else
5068 po_misc_or_fail (parse_shifter_operand (&str, info,
5069 SHIFTED_ARITH_IMM));
5070 switch (inst.reloc.type)
5071 {
5072 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5073 info->shifter.amount = 12;
5074 break;
5075 case BFD_RELOC_UNUSED:
5076 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5077 if (info->shifter.kind != AARCH64_MOD_NONE)
5078 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5079 inst.reloc.pc_rel = 0;
5080 break;
5081 default:
5082 break;
5083 }
5084 info->imm.value = 0;
5085 if (!info->shifter.operator_present)
5086 {
5087 /* Default to LSL if not present. Libopcodes prefers shifter
5088 kind to be explicit. */
5089 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5090 info->shifter.kind = AARCH64_MOD_LSL;
5091 }
5092 break;
5093
5094 case AARCH64_OPND_HALF:
5095 {
5096 /* #<imm16> or relocation. */
5097 int internal_fixup_p;
5098 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5099 if (internal_fixup_p)
5100 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5101 skip_whitespace (str);
5102 if (skip_past_comma (&str))
5103 {
5104 /* {, LSL #<shift>} */
5105 if (! aarch64_gas_internal_fixup_p ())
5106 {
5107 set_fatal_syntax_error (_("can't mix relocation modifier "
5108 "with explicit shift"));
5109 goto failure;
5110 }
5111 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5112 }
5113 else
5114 inst.base.operands[i].shifter.amount = 0;
5115 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5116 inst.base.operands[i].imm.value = 0;
5117 if (! process_movw_reloc_info ())
5118 goto failure;
5119 }
5120 break;
5121
5122 case AARCH64_OPND_EXCEPTION:
5123 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
5124 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5125 /* addr_off_p */ 0,
5126 /* need_libopcodes_p */ 0,
5127 /* skip_p */ 1);
5128 break;
5129
5130 case AARCH64_OPND_NZCV:
5131 {
5132 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5133 if (nzcv != NULL)
5134 {
5135 str += 4;
5136 info->imm.value = nzcv->value;
5137 break;
5138 }
5139 po_imm_or_fail (0, 15);
5140 info->imm.value = val;
5141 }
5142 break;
5143
5144 case AARCH64_OPND_COND:
5145 case AARCH64_OPND_COND1:
5146 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5147 str += 2;
5148 if (info->cond == NULL)
5149 {
5150 set_syntax_error (_("invalid condition"));
5151 goto failure;
5152 }
5153 else if (operands[i] == AARCH64_OPND_COND1
5154 && (info->cond->value & 0xe) == 0xe)
5155 {
5156 	      /* Do not allow AL or NV.  */
5157 set_default_error ();
5158 goto failure;
5159 }
5160 break;
5161
5162 case AARCH64_OPND_ADDR_ADRP:
5163 po_misc_or_fail (parse_adrp (&str));
5164 /* Clear the value as operand needs to be relocated. */
5165 info->imm.value = 0;
5166 break;
5167
5168 case AARCH64_OPND_ADDR_PCREL14:
5169 case AARCH64_OPND_ADDR_PCREL19:
5170 case AARCH64_OPND_ADDR_PCREL21:
5171 case AARCH64_OPND_ADDR_PCREL26:
5172 po_misc_or_fail (parse_address_reloc (&str, info));
5173 if (!info->addr.pcrel)
5174 {
5175 set_syntax_error (_("invalid pc-relative address"));
5176 goto failure;
5177 }
5178 if (inst.gen_lit_pool
5179 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5180 {
5181 /* Only permit "=value" in the literal load instructions.
5182 The literal will be generated by programmer_friendly_fixup. */
5183 set_syntax_error (_("invalid use of \"=immediate\""));
5184 goto failure;
5185 }
5186 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5187 {
5188 set_syntax_error (_("unrecognized relocation suffix"));
5189 goto failure;
5190 }
5191 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5192 {
5193 info->imm.value = inst.reloc.exp.X_add_number;
5194 inst.reloc.type = BFD_RELOC_UNUSED;
5195 }
5196 else
5197 {
5198 info->imm.value = 0;
5199 if (inst.reloc.type == BFD_RELOC_UNUSED)
5200 switch (opcode->iclass)
5201 {
5202 case compbranch:
5203 case condbranch:
5204 /* e.g. CBZ or B.COND */
5205 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5206 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5207 break;
5208 case testbranch:
5209 /* e.g. TBZ */
5210 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5211 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5212 break;
5213 case branch_imm:
5214 /* e.g. B or BL */
5215 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5216 inst.reloc.type =
5217 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5218 : BFD_RELOC_AARCH64_JUMP26;
5219 break;
5220 case loadlit:
5221 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5222 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5223 break;
5224 case pcreladdr:
5225 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5226 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5227 break;
5228 default:
5229 gas_assert (0);
5230 abort ();
5231 }
5232 inst.reloc.pc_rel = 1;
5233 }
5234 break;
5235
5236 case AARCH64_OPND_ADDR_SIMPLE:
5237 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5238 /* [<Xn|SP>{, #<simm>}] */
5239 po_char_or_fail ('[');
5240 po_reg_or_fail (REG_TYPE_R64_SP);
5241 /* Accept optional ", #0". */
5242 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5243 && skip_past_char (&str, ','))
5244 {
5245 skip_past_char (&str, '#');
5246 if (! skip_past_char (&str, '0'))
5247 {
5248 set_fatal_syntax_error
5249 (_("the optional immediate offset can only be 0"));
5250 goto failure;
5251 }
5252 }
5253 po_char_or_fail (']');
5254 info->addr.base_regno = val;
5255 break;
5256
5257 case AARCH64_OPND_ADDR_REGOFF:
5258 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5259 po_misc_or_fail (parse_address (&str, info, 0));
5260 if (info->addr.pcrel || !info->addr.offset.is_reg
5261 || !info->addr.preind || info->addr.postind
5262 || info->addr.writeback)
5263 {
5264 set_syntax_error (_("invalid addressing mode"));
5265 goto failure;
5266 }
5267 if (!info->shifter.operator_present)
5268 {
5269 /* Default to LSL if not present. Libopcodes prefers shifter
5270 kind to be explicit. */
5271 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5272 info->shifter.kind = AARCH64_MOD_LSL;
5273 }
5274 /* Qualifier to be deduced by libopcodes. */
5275 break;
5276
5277 case AARCH64_OPND_ADDR_SIMM7:
5278 po_misc_or_fail (parse_address (&str, info, 0));
5279 if (info->addr.pcrel || info->addr.offset.is_reg
5280 || (!info->addr.preind && !info->addr.postind))
5281 {
5282 set_syntax_error (_("invalid addressing mode"));
5283 goto failure;
5284 }
5285 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5286 /* addr_off_p */ 1,
5287 /* need_libopcodes_p */ 1,
5288 /* skip_p */ 0);
5289 break;
5290
5291 case AARCH64_OPND_ADDR_SIMM9:
5292 case AARCH64_OPND_ADDR_SIMM9_2:
5293 po_misc_or_fail (parse_address_reloc (&str, info));
5294 if (info->addr.pcrel || info->addr.offset.is_reg
5295 || (!info->addr.preind && !info->addr.postind)
5296 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5297 && info->addr.writeback))
5298 {
5299 set_syntax_error (_("invalid addressing mode"));
5300 goto failure;
5301 }
5302 if (inst.reloc.type != BFD_RELOC_UNUSED)
5303 {
5304 set_syntax_error (_("relocation not allowed"));
5305 goto failure;
5306 }
5307 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5308 /* addr_off_p */ 1,
5309 /* need_libopcodes_p */ 1,
5310 /* skip_p */ 0);
5311 break;
5312
5313 case AARCH64_OPND_ADDR_UIMM12:
5314 po_misc_or_fail (parse_address_reloc (&str, info));
5315 if (info->addr.pcrel || info->addr.offset.is_reg
5316 || !info->addr.preind || info->addr.writeback)
5317 {
5318 set_syntax_error (_("invalid addressing mode"));
5319 goto failure;
5320 }
5321 if (inst.reloc.type == BFD_RELOC_UNUSED)
5322 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5323 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5324 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5325 /* Leave qualifier to be determined by libopcodes. */
5326 break;
5327
5328 case AARCH64_OPND_SIMD_ADDR_POST:
5329 /* [<Xn|SP>], <Xm|#<amount>> */
5330 po_misc_or_fail (parse_address (&str, info, 1));
5331 if (!info->addr.postind || !info->addr.writeback)
5332 {
5333 set_syntax_error (_("invalid addressing mode"));
5334 goto failure;
5335 }
5336 if (!info->addr.offset.is_reg)
5337 {
5338 if (inst.reloc.exp.X_op == O_constant)
5339 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5340 else
5341 {
5342 set_fatal_syntax_error
5343 (_("writeback value should be an immediate constant"));
5344 goto failure;
5345 }
5346 }
5347 /* No qualifier. */
5348 break;
5349
5350 case AARCH64_OPND_SYSREG:
5351 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
5352 == PARSE_FAIL)
5353 {
5354 set_syntax_error (_("unknown or missing system register name"));
5355 goto failure;
5356 }
5357 inst.base.operands[i].sysreg = val;
5358 break;
5359
5360 case AARCH64_OPND_PSTATEFIELD:
5361 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
5362 == PARSE_FAIL)
5363 {
5364 set_syntax_error (_("unknown or missing PSTATE field name"));
5365 goto failure;
5366 }
5367 inst.base.operands[i].pstatefield = val;
5368 break;
5369
5370 case AARCH64_OPND_SYSREG_IC:
5371 inst.base.operands[i].sysins_op =
5372 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5373 goto sys_reg_ins;
5374 case AARCH64_OPND_SYSREG_DC:
5375 inst.base.operands[i].sysins_op =
5376 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5377 goto sys_reg_ins;
5378 case AARCH64_OPND_SYSREG_AT:
5379 inst.base.operands[i].sysins_op =
5380 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5381 goto sys_reg_ins;
5382 case AARCH64_OPND_SYSREG_TLBI:
5383 inst.base.operands[i].sysins_op =
5384 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5385 sys_reg_ins:
5386 if (inst.base.operands[i].sysins_op == NULL)
5387 {
5388 set_fatal_syntax_error ( _("unknown or missing operation name"));
5389 goto failure;
5390 }
5391 break;
5392
5393 case AARCH64_OPND_BARRIER:
5394 case AARCH64_OPND_BARRIER_ISB:
5395 val = parse_barrier (&str);
5396 if (val != PARSE_FAIL
5397 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5398 {
5399 	      /* ISB only accepts the option name 'sy'.  */
5400 set_syntax_error
5401 (_("the specified option is not accepted in ISB"));
5402 /* Turn off backtrack as this optional operand is present. */
5403 backtrack_pos = 0;
5404 goto failure;
5405 }
5406 /* This is an extension to accept a 0..15 immediate. */
5407 if (val == PARSE_FAIL)
5408 po_imm_or_fail (0, 15);
5409 info->barrier = aarch64_barrier_options + val;
5410 break;
5411
5412 case AARCH64_OPND_PRFOP:
5413 val = parse_pldop (&str);
5414 /* This is an extension to accept a 0..31 immediate. */
5415 if (val == PARSE_FAIL)
5416 po_imm_or_fail (0, 31);
5417 inst.base.operands[i].prfop = aarch64_prfops + val;
5418 break;
5419
5420 default:
5421 as_fatal (_("unhandled operand code %d"), operands[i]);
5422 }
5423
5424 /* If we get here, this operand was successfully parsed. */
5425 inst.base.operands[i].present = 1;
5426 continue;
5427
5428 failure:
5429 /* The parse routine should already have set the error, but in case
5430 not, set a default one here. */
5431 if (! error_p ())
5432 set_default_error ();
5433
5434 if (! backtrack_pos)
5435 goto parse_operands_return;
5436
5437 {
5438 /* We reach here because this operand is marked as optional, and
5439 either no operand was supplied or the operand was supplied but it
5440 was syntactically incorrect. In the latter case we report an
5441 error. In the former case we perform a few more checks before
5442 dropping through to the code to insert the default operand. */
5443
5444 char *tmp = backtrack_pos;
5445 char endchar = END_OF_INSN;
5446
5447 if (i != (aarch64_num_of_operands (opcode) - 1))
5448 endchar = ',';
5449 skip_past_char (&tmp, ',');
5450
5451 if (*tmp != endchar)
5452 /* The user has supplied an operand in the wrong format. */
5453 goto parse_operands_return;
5454
5455 /* Make sure there is not a comma before the optional operand.
5456 For example the fifth operand of 'sys' is optional:
5457
5458 sys #0,c0,c0,#0, <--- wrong
5459 sys #0,c0,c0,#0 <--- correct. */
5460 if (comma_skipped_p && i && endchar == END_OF_INSN)
5461 {
5462 set_fatal_syntax_error
5463 (_("unexpected comma before the omitted optional operand"));
5464 goto parse_operands_return;
5465 }
5466 }
5467
5468 /* Reaching here means we are dealing with an optional operand that is
5469 omitted from the assembly line. */
5470 gas_assert (optional_operand_p (opcode, i));
5471 info->present = 0;
5472 process_omitted_operand (operands[i], opcode, i, info);
5473
5474 /* Try again, skipping the optional operand at backtrack_pos. */
5475 str = backtrack_pos;
5476 backtrack_pos = 0;
5477
5478 /* Clear any error record after the omitted optional operand has been
5479 successfully handled. */
5480 clear_error ();
5481 }
5482
5483 /* Check if we have parsed all the operands. */
5484 if (*str != '\0' && ! error_p ())
5485 {
5486 /* Set I to the index of the last present operand; this is
5487 for the purpose of diagnostics. */
5488 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5489 ;
5490 set_fatal_syntax_error
5491 (_("unexpected characters following instruction"));
5492 }
5493
5494 parse_operands_return:
5495
5496 if (error_p ())
5497 {
5498 DEBUG_TRACE ("parsing FAIL: %s - %s",
5499 operand_mismatch_kind_names[get_error_kind ()],
5500 get_error_message ());
5501 /* Record the operand error properly; this is useful when there
5502 are multiple instruction templates for a mnemonic name, so that
5503 later on, we can select the error that most closely describes
5504 the problem. */
5505 record_operand_error (opcode, i, get_error_kind (),
5506 get_error_message ());
5507 return FALSE;
5508 }
5509 else
5510 {
5511 DEBUG_TRACE ("parsing SUCCESS");
5512 return TRUE;
5513 }
5514 }
5515
5516 /* Carry out some fix-ups to provide programmer-friendly features while
5517    keeping libopcodes happy, i.e. libopcodes only accepts
5518 the preferred architectural syntax.
5519 Return FALSE if there is any failure; otherwise return TRUE. */
5520
5521 static bfd_boolean
5522 programmer_friendly_fixup (aarch64_instruction *instr)
5523 {
5524 aarch64_inst *base = &instr->base;
5525 const aarch64_opcode *opcode = base->opcode;
5526 enum aarch64_op op = opcode->op;
5527 aarch64_opnd_info *operands = base->operands;
5528
5529 DEBUG_TRACE ("enter");
5530
5531 switch (opcode->iclass)
5532 {
5533 case testbranch:
5534 /* TBNZ Xn|Wn, #uimm6, label
5535 Test and Branch Not Zero: conditionally jumps to label if bit number
5536 uimm6 in register Xn is not zero. The bit number implies the width of
5537 the register, which may be written and should be disassembled as Wn if
5538 uimm is less than 32. */
5539 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5540 {
5541 if (operands[1].imm.value >= 32)
5542 {
5543 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5544 0, 31);
5545 return FALSE;
5546 }
5547 operands[0].qualifier = AARCH64_OPND_QLF_X;
5548 }
5549 break;
5550 case loadlit:
5551 /* LDR Wt, label | =value
5552 As a convenience assemblers will typically permit the notation
5553 "=value" in conjunction with the pc-relative literal load instructions
5554 to automatically place an immediate value or symbolic address in a
5555 nearby literal pool and generate a hidden label which references it.
5556 ISREG has been set to 0 in the case of =value. */
5557 if (instr->gen_lit_pool
5558 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5559 {
5560 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5561 if (op == OP_LDRSW_LIT)
5562 size = 4;
5563 if (instr->reloc.exp.X_op != O_constant
5564 && instr->reloc.exp.X_op != O_big
5565 && instr->reloc.exp.X_op != O_symbol)
5566 {
5567 record_operand_error (opcode, 1,
5568 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5569 _("constant expression expected"));
5570 return FALSE;
5571 }
5572 if (! add_to_lit_pool (&instr->reloc.exp, size))
5573 {
5574 record_operand_error (opcode, 1,
5575 AARCH64_OPDE_OTHER_ERROR,
5576 _("literal pool insertion failed"));
5577 return FALSE;
5578 }
5579 }
5580 break;
5581 case log_shift:
5582 case bitfield:
5583 /* UXT[BHW] Wd, Wn
5584 	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5585 	 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5586 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5587 A programmer-friendly assembler should accept a destination Xd in
5588 place of Wd, however that is not the preferred form for disassembly.
5589 */
5590 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5591 && operands[1].qualifier == AARCH64_OPND_QLF_W
5592 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5593 operands[0].qualifier = AARCH64_OPND_QLF_W;
5594 break;
5595
5596 case addsub_ext:
5597 {
5598 /* In the 64-bit form, the final register operand is written as Wm
5599 for all but the (possibly omitted) UXTX/LSL and SXTX
5600 operators.
5601 As a programmer-friendly assembler, we accept e.g.
5602 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5603 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5604 int idx = aarch64_operand_index (opcode->operands,
5605 AARCH64_OPND_Rm_EXT);
5606 gas_assert (idx == 1 || idx == 2);
5607 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5608 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5609 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5610 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5611 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5612 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5613 }
5614 break;
5615
5616 default:
5617 break;
5618 }
5619
5620 DEBUG_TRACE ("exit with SUCCESS");
5621 return TRUE;
5622 }
5623
5624 /* Check for loads and stores that will cause unpredictable behavior. */
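/* For example, "ldr x0, [x0], #8" and "ldp x0, x0, [x1]" both assemble but
   draw a warning from the checks below.  */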
5625
5626 static void
5627 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
5628 {
5629 aarch64_inst *base = &instr->base;
5630 const aarch64_opcode *opcode = base->opcode;
5631 const aarch64_opnd_info *opnds = base->operands;
5632 switch (opcode->iclass)
5633 {
5634 case ldst_pos:
5635 case ldst_imm9:
5636 case ldst_unscaled:
5637 case ldst_unpriv:
5638 /* Loading/storing the base register is unpredictable if writeback. */
5639 if ((aarch64_get_operand_class (opnds[0].type)
5640 == AARCH64_OPND_CLASS_INT_REG)
5641 && opnds[0].reg.regno == opnds[1].addr.base_regno
5642 && opnds[1].addr.base_regno != REG_SP
5643 && opnds[1].addr.writeback)
5644 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5645 break;
5646 case ldstpair_off:
5647 case ldstnapair_offs:
5648 case ldstpair_indexed:
5649 /* Loading/storing the base register is unpredictable if writeback. */
5650 if ((aarch64_get_operand_class (opnds[0].type)
5651 == AARCH64_OPND_CLASS_INT_REG)
5652 && (opnds[0].reg.regno == opnds[2].addr.base_regno
5653 || opnds[1].reg.regno == opnds[2].addr.base_regno)
5654 && opnds[2].addr.base_regno != REG_SP
5655 && opnds[2].addr.writeback)
5656 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
5657 /* Load operations must load different registers. */
5658 if ((opcode->opcode & (1 << 22))
5659 && opnds[0].reg.regno == opnds[1].reg.regno)
5660 as_warn (_("unpredictable load of register pair -- `%s'"), str);
5661 break;
5662 default:
5663 break;
5664 }
5665 }
5666
5667 /* A wrapper function to interface with libopcodes on encoding and
5668 record the error message if there is any.
5669
5670 Return TRUE on success; otherwise return FALSE. */
5671
5672 static bfd_boolean
5673 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5674 aarch64_insn *code)
5675 {
5676 aarch64_operand_error error_info;
5677 error_info.kind = AARCH64_OPDE_NIL;
5678 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5679 return TRUE;
5680 else
5681 {
5682 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5683 record_operand_error_info (opcode, &error_info);
5684 return FALSE;
5685 }
5686 }
5687
5688 #ifdef DEBUG_AARCH64
5689 static inline void
5690 dump_opcode_operands (const aarch64_opcode *opcode)
5691 {
5692 int i = 0;
5693 while (opcode->operands[i] != AARCH64_OPND_NIL)
5694 {
5695 aarch64_verbose ("\t\t opnd%d: %s", i,
5696 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5697 ? aarch64_get_operand_name (opcode->operands[i])
5698 : aarch64_get_operand_desc (opcode->operands[i]));
5699 ++i;
5700 }
5701 }
5702 #endif /* DEBUG_AARCH64 */
5703
5704 /* This is the guts of the machine-dependent assembler. STR points to a
5705    machine-dependent instruction.  This function is supposed to emit
5706 the frags/bytes it assembles to. */
5707
5708 void
5709 md_assemble (char *str)
5710 {
5711 char *p = str;
5712 templates *template;
5713 aarch64_opcode *opcode;
5714 aarch64_inst *inst_base;
5715 unsigned saved_cond;
5716
5717 /* Align the previous label if needed. */
5718 if (last_label_seen != NULL)
5719 {
5720 symbol_set_frag (last_label_seen, frag_now);
5721 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5722 S_SET_SEGMENT (last_label_seen, now_seg);
5723 }
5724
5725 inst.reloc.type = BFD_RELOC_UNUSED;
5726
5727 DEBUG_TRACE ("\n\n");
5728 DEBUG_TRACE ("==============================");
5729 DEBUG_TRACE ("Enter md_assemble with %s", str);
5730
5731 template = opcode_lookup (&p);
5732 if (!template)
5733 {
5734       /* It wasn't an instruction, but it might be a register alias created
5735 	 by an "alias .req reg" directive.  */
5736 if (!create_register_alias (str, p))
5737 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5738 str);
5739 return;
5740 }
5741
5742 skip_whitespace (p);
5743 if (*p == ',')
5744 {
5745 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5746 get_mnemonic_name (str), str);
5747 return;
5748 }
5749
5750 init_operand_error_report ();
5751
5752   /* Sections are assumed to start aligned.  In an executable section, there
5753      is no MAP_DATA symbol pending, so we only align the address during the
5754      MAP_DATA --> MAP_INSN transition.
5755 For other sections, this is not guaranteed. */
5756 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
5757 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
5758 frag_align_code (2, 0);
5759
5760 saved_cond = inst.cond;
5761 reset_aarch64_instruction (&inst);
5762 inst.cond = saved_cond;
5763
5764 /* Iterate through all opcode entries with the same mnemonic name. */
5765 do
5766 {
5767 opcode = template->opcode;
5768
5769 DEBUG_TRACE ("opcode %s found", opcode->name);
5770 #ifdef DEBUG_AARCH64
5771 if (debug_dump)
5772 dump_opcode_operands (opcode);
5773 #endif /* DEBUG_AARCH64 */
5774
5775 mapping_state (MAP_INSN);
5776
5777 inst_base = &inst.base;
5778 inst_base->opcode = opcode;
5779
5780 /* Truly conditionally executed instructions, e.g. b.cond. */
5781 if (opcode->flags & F_COND)
5782 {
5783 gas_assert (inst.cond != COND_ALWAYS);
5784 inst_base->cond = get_cond_from_value (inst.cond);
5785 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5786 }
5787 else if (inst.cond != COND_ALWAYS)
5788 {
5789 	  /* We shouldn't arrive here: the assembly looks like a
5790 	     conditional instruction but the opcode found is unconditional.  */
5791 gas_assert (0);
5792 continue;
5793 }
5794
5795 if (parse_operands (p, opcode)
5796 && programmer_friendly_fixup (&inst)
5797 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5798 {
5799 /* Check that this instruction is supported for this CPU. */
5800 if (!opcode->avariant
5801 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5802 {
5803 as_bad (_("selected processor does not support `%s'"), str);
5804 return;
5805 }
5806
5807 warn_unpredictable_ldst (&inst, str);
5808
5809 if (inst.reloc.type == BFD_RELOC_UNUSED
5810 || !inst.reloc.need_libopcodes_p)
5811 output_inst (NULL);
5812 else
5813 {
5814 /* If there is relocation generated for the instruction,
5815 store the instruction information for the future fix-up. */
5816 struct aarch64_inst *copy;
5817 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5818 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5819 abort ();
5820 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5821 output_inst (copy);
5822 }
5823 return;
5824 }
5825
5826 template = template->next;
5827 if (template != NULL)
5828 {
5829 reset_aarch64_instruction (&inst);
5830 inst.cond = saved_cond;
5831 }
5832 }
5833 while (template != NULL);
5834
5835 /* Issue the error messages if any. */
5836 output_operand_error_report (str);
5837 }
5838
5839 /* Various frobbings of labels and their addresses. */
5840
5841 void
5842 aarch64_start_line_hook (void)
5843 {
5844 last_label_seen = NULL;
5845 }
5846
5847 void
5848 aarch64_frob_label (symbolS * sym)
5849 {
5850 last_label_seen = sym;
5851
5852 dwarf2_emit_label (sym);
5853 }
5854
5855 int
5856 aarch64_data_in_code (void)
5857 {
5858 if (!strncmp (input_line_pointer + 1, "data:", 5))
5859 {
5860 *input_line_pointer = '/';
5861 input_line_pointer += 5;
5862 *input_line_pointer = 0;
5863 return 1;
5864 }
5865
5866 return 0;
5867 }
5868
5869 char *
5870 aarch64_canonicalize_symbol_name (char *name)
5871 {
5872 int len;
5873
5874 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5875 *(name + len - 5) = 0;
5876
5877 return name;
5878 }
5879 \f
5880 /* Table of all register names defined by default. The user can
5881 define additional names with .req. Note that all register names
5882 should appear in both upper and lowercase variants. Some registers
5883 also have mixed-case names. */
5884
5885 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5886 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5887 #define REGSET31(p,t) \
5888 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5889 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5890 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5891 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5892 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5893 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5894 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5895 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5896 #define REGSET(p,t) \
5897 REGSET31(p,t), REGNUM(p,31,t)
5898
5899 /* These go into aarch64_reg_hsh hash-table. */
5900 static const reg_entry reg_names[] = {
5901 /* Integer registers. */
5902 REGSET31 (x, R_64), REGSET31 (X, R_64),
5903 REGSET31 (w, R_32), REGSET31 (W, R_32),
5904
5905 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5906 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5907
5908 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5909 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5910
5911 /* Coprocessor register numbers. */
5912 REGSET (c, CN), REGSET (C, CN),
5913
5914 /* Floating-point single precision registers. */
5915 REGSET (s, FP_S), REGSET (S, FP_S),
5916
5917 /* Floating-point double precision registers. */
5918 REGSET (d, FP_D), REGSET (D, FP_D),
5919
5920 /* Floating-point half precision registers. */
5921 REGSET (h, FP_H), REGSET (H, FP_H),
5922
5923 /* Floating-point byte precision registers. */
5924 REGSET (b, FP_B), REGSET (B, FP_B),
5925
5926 /* Floating-point quad precision registers. */
5927 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5928
5929 /* FP/SIMD registers. */
5930 REGSET (v, VN), REGSET (V, VN),
5931 };
5932
5933 #undef REGDEF
5934 #undef REGNUM
5935 #undef REGSET
5936
5937 #define N 1
5938 #define n 0
5939 #define Z 1
5940 #define z 0
5941 #define C 1
5942 #define c 0
5943 #define V 1
5944 #define v 0
5945 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5946 static const asm_nzcv nzcv_names[] = {
5947 {"nzcv", B (n, z, c, v)},
5948 {"nzcV", B (n, z, c, V)},
5949 {"nzCv", B (n, z, C, v)},
5950 {"nzCV", B (n, z, C, V)},
5951 {"nZcv", B (n, Z, c, v)},
5952 {"nZcV", B (n, Z, c, V)},
5953 {"nZCv", B (n, Z, C, v)},
5954 {"nZCV", B (n, Z, C, V)},
5955 {"Nzcv", B (N, z, c, v)},
5956 {"NzcV", B (N, z, c, V)},
5957 {"NzCv", B (N, z, C, v)},
5958 {"NzCV", B (N, z, C, V)},
5959 {"NZcv", B (N, Z, c, v)},
5960 {"NZcV", B (N, Z, c, V)},
5961 {"NZCv", B (N, Z, C, v)},
5962 {"NZCV", B (N, Z, C, V)}
5963 };
5964
5965 #undef N
5966 #undef n
5967 #undef Z
5968 #undef z
5969 #undef C
5970 #undef c
5971 #undef V
5972 #undef v
5973 #undef B
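/* For example, the "NzCv" entry above maps to B (N, z, C, v)
   = (1 << 3) | (0 << 2) | (1 << 1) | 0 = 0xa, i.e. only the N and C flags
   are set in the 4-bit nzcv immediate used by CCMP/CCMN.  */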
5974 \f
5975 /* MD interface: bits in the object file. */
5976
5977 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5978 for use in the a.out file, and store them in the array pointed to by buf.
5979 This knows about the endian-ness of the target machine and does
5980 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
5981 2 (short) and 4 (long). Floating-point numbers are put out as a series of
5982 LITTLENUMS (shorts, here at least). */
5983
5984 void
5985 md_number_to_chars (char *buf, valueT val, int n)
5986 {
5987 if (target_big_endian)
5988 number_to_chars_bigendian (buf, val, n);
5989 else
5990 number_to_chars_littleendian (buf, val, n);
5991 }
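/* For example, on a little-endian target md_number_to_chars (buf, 0xd503201f, 4)
   stores the bytes 0x1f, 0x20, 0x03, 0xd5 in that order; this is the AArch64
   NOP encoding used by aarch64_handle_align below.  */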
5992
5993 /* MD interface: Sections. */
5994
5995 /* Estimate the size of a frag before relaxing. Assume everything fits in
5996 4 bytes. */
5997
5998 int
5999 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6000 {
6001 fragp->fr_var = 4;
6002 return 4;
6003 }
6004
6005 /* Round up a section size to the appropriate boundary. */
6006
6007 valueT
6008 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
6009 {
6010 return size;
6011 }
6012
6013 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
6014 of an rs_align_code fragment.
6015
6016 Here we fill the frag with the appropriate info for padding the
6017 output stream. The resulting frag will consist of a fixed (fr_fix)
6018 and of a repeating (fr_var) part.
6019
6020 The fixed content is always emitted before the repeating content and
6021 these two parts are used as follows in constructing the output:
6022 - the fixed part will be used to align to a valid instruction word
6023 boundary, in case that we start at a misaligned address; as no
6024 executable instruction can live at the misaligned location, we
6025 simply fill with zeros;
6026 - the variable part will be used to cover the remaining padding and
6027 we fill using the AArch64 NOP instruction.
6028
6029 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6030 enough storage space for up to 3 bytes for padding back to a valid
6031 instruction alignment and exactly 4 bytes to store the NOP pattern. */
6032
6033 void
6034 aarch64_handle_align (fragS * fragP)
6035 {
6036 /* NOP = d503201f */
6037 /* AArch64 instructions are always little-endian. */
6038 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
6039
6040 int bytes, fix, noop_size;
6041 char *p;
6042
6043 if (fragP->fr_type != rs_align_code)
6044 return;
6045
6046 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6047 p = fragP->fr_literal + fragP->fr_fix;
6048
6049 #ifdef OBJ_ELF
6050 gas_assert (fragP->tc_frag_data.recorded);
6051 #endif
6052
6053 noop_size = sizeof (aarch64_noop);
6054
6055 fix = bytes & (noop_size - 1);
6056 if (fix)
6057 {
6058 #ifdef OBJ_ELF
6059 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6060 #endif
6061 memset (p, 0, fix);
6062 p += fix;
6063 fragP->fr_fix += fix;
6064 }
6065
6066 if (noop_size)
6067 memcpy (p, aarch64_noop, noop_size);
6068 fragP->fr_var = noop_size;
6069 }
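/* Worked example of the scheme above: if an alignment request leaves 6 bytes
   of padding starting 2 bytes past an instruction boundary, the fixed part of
   the frag receives 2 zero bytes (6 & 3) to reach a 4-byte boundary and the
   variable part holds the single 4-byte NOP that write.c repeats as needed.  */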
6070
6071 /* Perform target specific initialisation of a frag.
6072 Note - despite the name this initialisation is not done when the frag
6073 is created, but only when its type is assigned. A frag can be created
6074 and used a long time before its type is set, so beware of assuming that
6075 this initialisation is performed first. */
6076
6077 #ifndef OBJ_ELF
6078 void
6079 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6080 int max_chars ATTRIBUTE_UNUSED)
6081 {
6082 }
6083
6084 #else /* OBJ_ELF is defined. */
6085 void
6086 aarch64_init_frag (fragS * fragP, int max_chars)
6087 {
6088 /* Record a mapping symbol for alignment frags. We will delete this
6089 later if the alignment ends up empty. */
6090 if (!fragP->tc_frag_data.recorded)
6091 fragP->tc_frag_data.recorded = 1;
6092
6093 switch (fragP->fr_type)
6094 {
6095 case rs_align:
6096 case rs_align_test:
6097 case rs_fill:
6098 mapping_state_2 (MAP_DATA, max_chars);
6099 break;
6100 case rs_align_code:
6101 mapping_state_2 (MAP_INSN, max_chars);
6102 break;
6103 default:
6104 break;
6105 }
6106 }
6107 \f
6108 /* Initialize the DWARF-2 unwind information for this procedure. */
6109
6110 void
6111 tc_aarch64_frame_initial_instructions (void)
6112 {
6113 cfi_add_CFA_def_cfa (REG_SP, 0);
6114 }
6115 #endif /* OBJ_ELF */
6116
6117 /* Convert REGNAME to a DWARF-2 register number. */
6118
6119 int
6120 tc_aarch64_regname_to_dw2regnum (char *regname)
6121 {
6122 const reg_entry *reg = parse_reg (&regname);
6123 if (reg == NULL)
6124 return -1;
6125
6126 switch (reg->type)
6127 {
6128 case REG_TYPE_SP_32:
6129 case REG_TYPE_SP_64:
6130 case REG_TYPE_R_32:
6131 case REG_TYPE_R_64:
6132 return reg->number;
6133
6134 case REG_TYPE_FP_B:
6135 case REG_TYPE_FP_H:
6136 case REG_TYPE_FP_S:
6137 case REG_TYPE_FP_D:
6138 case REG_TYPE_FP_Q:
6139 return reg->number + 64;
6140
6141 default:
6142 break;
6143 }
6144 return -1;
6145 }
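/* Per the mapping above, "x0".."x30" and "sp"/"wsp" use DWARF register
   numbers 0..31, while the FP/SIMD registers b/h/s/d/q 0..31 map to 64..95,
   which follows the AArch64 DWARF register numbering convention.  */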
6146
6147 /* Implement DWARF2_ADDR_SIZE. */
6148
6149 int
6150 aarch64_dwarf2_addr_size (void)
6151 {
6152 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6153 if (ilp32_p)
6154 return 4;
6155 #endif
6156 return bfd_arch_bits_per_address (stdoutput) / 8;
6157 }
6158
6159 /* MD interface: Symbol and relocation handling. */
6160
6161 /* Return the address within the segment that a PC-relative fixup is
6162 relative to. For AArch64, PC-relative fixups applied to instructions
6163 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
6164
6165 long
6166 md_pcrel_from_section (fixS * fixP, segT seg)
6167 {
6168 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6169
6170 /* If this is pc-relative and we are going to emit a relocation
6171 then we just want to put out any pipeline compensation that the linker
6172 will need. Otherwise we want to use the calculated base. */
6173 if (fixP->fx_pcrel
6174 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6175 || aarch64_force_relocation (fixP)))
6176 base = 0;
6177
6178 /* AArch64 should be consistent for all pc-relative relocations. */
6179 return base + AARCH64_PCREL_OFFSET;
6180 }
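/* Note that, unlike 32-bit ARM, AArch64 PC-relative instructions (ADR, ADRP,
   conditional branches, LDR literal) measure their offset from the address of
   the instruction itself, so AARCH64_PCREL_OFFSET is expected to be 0 and no
   pipeline compensation is added here.  */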
6181
6182 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
6183 Otherwise we have no need to default values of symbols. */
6184
6185 symbolS *
6186 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6187 {
6188 #ifdef OBJ_ELF
6189 if (name[0] == '_' && name[1] == 'G'
6190 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6191 {
6192 if (!GOT_symbol)
6193 {
6194 if (symbol_find (name))
6195 as_bad (_("GOT already in the symbol table"));
6196
6197 GOT_symbol = symbol_new (name, undefined_section,
6198 (valueT) 0, &zero_address_frag);
6199 }
6200
6201 return GOT_symbol;
6202 }
6203 #endif
6204
6205 return 0;
6206 }
6207
6208 /* Return non-zero if the indicated VALUE has overflowed the maximum
6209 range expressible by an unsigned number with the indicated number of
6210 BITS. */
6211
6212 static bfd_boolean
6213 unsigned_overflow (valueT value, unsigned bits)
6214 {
6215 valueT lim;
6216 if (bits >= sizeof (valueT) * 8)
6217 return FALSE;
6218 lim = (valueT) 1 << bits;
6219 return (value >= lim);
6220 }
6221
6222
6223 /* Return non-zero if the indicated VALUE has overflowed the maximum
6224 range expressible by a signed number with the indicated number of
6225 BITS. */
6226
6227 static bfd_boolean
6228 signed_overflow (offsetT value, unsigned bits)
6229 {
6230 offsetT lim;
6231 if (bits >= sizeof (offsetT) * 8)
6232 return FALSE;
6233 lim = (offsetT) 1 << (bits - 1);
6234 return (value < -lim || value >= lim);
6235 }
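/* For example, signed_overflow (value, 21) accepts -2^20 .. 2^20 - 1; this is
   the byte range covered by the 19-bit, word-scaled offsets of LDR (literal)
   and conditional branches handled in md_apply_fix below, i.e. +/-1MiB.  */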
6236
6237 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6238 unsigned immediate offset load/store instruction, try to encode it as
6239 an unscaled, 9-bit, signed immediate offset load/store instruction.
6240 Return TRUE if it is successful; otherwise return FALSE.
6241
6242 As a programmer-friendly assembler, GAS generates LDUR/STUR instructions
6243 in response to the standard LDR/STR mnemonics when the immediate offset is
6244 unambiguous, i.e. when it is negative or unaligned. */
6245
6246 static bfd_boolean
6247 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6248 {
6249 int idx;
6250 enum aarch64_op new_op;
6251 const aarch64_opcode *new_opcode;
6252
6253 gas_assert (instr->opcode->iclass == ldst_pos);
6254
6255 switch (instr->opcode->op)
6256 {
6257 case OP_LDRB_POS: new_op = OP_LDURB; break;
6258 case OP_STRB_POS: new_op = OP_STURB; break;
6259 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6260 case OP_LDRH_POS: new_op = OP_LDURH; break;
6261 case OP_STRH_POS: new_op = OP_STURH; break;
6262 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6263 case OP_LDR_POS: new_op = OP_LDUR; break;
6264 case OP_STR_POS: new_op = OP_STUR; break;
6265 case OP_LDRF_POS: new_op = OP_LDURV; break;
6266 case OP_STRF_POS: new_op = OP_STURV; break;
6267 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6268 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6269 default: new_op = OP_NIL; break;
6270 }
6271
6272 if (new_op == OP_NIL)
6273 return FALSE;
6274
6275 new_opcode = aarch64_get_opcode (new_op);
6276 gas_assert (new_opcode != NULL);
6277
6278 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6279 instr->opcode->op, new_opcode->op);
6280
6281 aarch64_replace_opcode (instr, new_opcode);
6282
6283 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6284 qualifier matching may fail because the out-of-date qualifier will
6285 prevent the operand being updated with a new and correct qualifier. */
6286 idx = aarch64_operand_index (instr->opcode->operands,
6287 AARCH64_OPND_ADDR_SIMM9);
6288 gas_assert (idx == 1);
6289 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6290
6291 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6292
6293 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6294 return FALSE;
6295
6296 return TRUE;
6297 }
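/* For example, "ldr x0, [x1, #-8]" cannot be encoded as LDR (unsigned
   immediate) because the offset is negative, so the fix-up path above
   re-encodes it as the equivalent "ldur x0, [x1, #-8]"; likewise
   "ldr x0, [x1, #3]" is re-encoded because the offset is not a multiple of
   the transfer size.  */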
6298
6299 /* Called by fix_insn to fix a MOV immediate alias instruction.
6300
6301 Operand for a generic move immediate instruction, which is an alias
6302 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6303 a 32-bit/64-bit immediate value into a general register. An assembler error
6304 shall result if the immediate cannot be created by a single one of these
6305 instructions. If there is a choice, then to ensure reversibility an
6306 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
6307
6308 static void
6309 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6310 {
6311 const aarch64_opcode *opcode;
6312
6313 /* Need to check if the destination is SP/ZR. The check has to be done
6314 before any aarch64_replace_opcode. */
6315 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6316 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6317
6318 instr->operands[1].imm.value = value;
6319 instr->operands[1].skip = 0;
6320
6321 if (try_mov_wide_p)
6322 {
6323 /* Try the MOVZ alias. */
6324 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6325 aarch64_replace_opcode (instr, opcode);
6326 if (aarch64_opcode_encode (instr->opcode, instr,
6327 &instr->value, NULL, NULL))
6328 {
6329 put_aarch64_insn (buf, instr->value);
6330 return;
6331 }
6332 /* Try the MOVN alias. */
6333 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6334 aarch64_replace_opcode (instr, opcode);
6335 if (aarch64_opcode_encode (instr->opcode, instr,
6336 &instr->value, NULL, NULL))
6337 {
6338 put_aarch64_insn (buf, instr->value);
6339 return;
6340 }
6341 }
6342
6343 if (try_mov_bitmask_p)
6344 {
6345 /* Try the ORR alias. */
6346 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6347 aarch64_replace_opcode (instr, opcode);
6348 if (aarch64_opcode_encode (instr->opcode, instr,
6349 &instr->value, NULL, NULL))
6350 {
6351 put_aarch64_insn (buf, instr->value);
6352 return;
6353 }
6354 }
6355
6356 as_bad_where (fixP->fx_file, fixP->fx_line,
6357 _("immediate cannot be moved by a single instruction"));
6358 }
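/* Illustrative examples of the preference order implemented above (values
   chosen for exposition): "mov x0, #0x12340000" is encodable as a single MOVZ
   (wide immediate, LSL #16); "mov x0, #-1" requires MOVN; and
   "mov x0, #0x00ff00ff00ff00ff" is only representable as ORR with a bitmask
   immediate.  Anything else, e.g. "mov x0, #0x12345", is rejected with the
   error above.  */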
6359
6360 /* An instruction operand which is immediate related may have a symbol used
6361 in the assembly, e.g.
6362
6363 mov w0, u32
6364 .set u32, 0x00ffff00
6365
6366 At the time the assembly instruction is parsed, a referenced symbol,
6367 like 'u32' in the above example, may not have been seen; a fixS is created
6368 in such a case and is handled here after symbols have been resolved.
6369 The instruction is fixed up with VALUE using the information in *FIXP plus
6370 extra information in FLAGS.
6371
6372 This function is called by md_apply_fix to fix up instructions that need
6373 a fix-up described above but does not involve any linker-time relocation. */
6374
6375 static void
6376 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6377 {
6378 int idx;
6379 uint32_t insn;
6380 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6381 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6382 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6383
6384 if (new_inst)
6385 {
6386 /* Now the instruction is about to be fixed-up, so the operand that
6387 was previously marked as 'ignored' needs to be unmarked in order
6388 to get the encoding done properly. */
6389 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6390 new_inst->operands[idx].skip = 0;
6391 }
6392
6393 gas_assert (opnd != AARCH64_OPND_NIL);
6394
6395 switch (opnd)
6396 {
6397 case AARCH64_OPND_EXCEPTION:
6398 if (unsigned_overflow (value, 16))
6399 as_bad_where (fixP->fx_file, fixP->fx_line,
6400 _("immediate out of range"));
6401 insn = get_aarch64_insn (buf);
6402 insn |= encode_svc_imm (value);
6403 put_aarch64_insn (buf, insn);
6404 break;
6405
6406 case AARCH64_OPND_AIMM:
6407 /* ADD or SUB with immediate.
6408 NOTE this assumes we come here with an add/sub shifted reg encoding
6409 3 322|2222|2 2 2 21111 111111
6410 1 098|7654|3 2 1 09876 543210 98765 43210
6411 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6412 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6413 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6414 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6415 ->
6416 3 322|2222|2 2 221111111111
6417 1 098|7654|3 2 109876543210 98765 43210
6418 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6419 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6420 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6421 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6422 Fields sf Rn Rd are already set. */
6423 insn = get_aarch64_insn (buf);
6424 if (value < 0)
6425 {
6426 /* Add <-> sub. */
6427 insn = reencode_addsub_switch_add_sub (insn);
6428 value = -value;
6429 }
6430
6431 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6432 && unsigned_overflow (value, 12))
6433 {
6434 /* Try to shift the value by 12 to make it fit. */
6435 if (((value >> 12) << 12) == value
6436 && ! unsigned_overflow (value, 12 + 12))
6437 {
6438 value >>= 12;
6439 insn |= encode_addsub_imm_shift_amount (1);
6440 }
6441 }
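/* For example, with "add x0, x1, #0x12000" the value 0x12000 does not
   fit in 12 bits, but 0x12000 >> 12 == 0x12 does, so the immediate is
   encoded as #0x12 with LSL #12.  */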
6442
6443 if (unsigned_overflow (value, 12))
6444 as_bad_where (fixP->fx_file, fixP->fx_line,
6445 _("immediate out of range"));
6446
6447 insn |= encode_addsub_imm (value);
6448
6449 put_aarch64_insn (buf, insn);
6450 break;
6451
6452 case AARCH64_OPND_SIMD_IMM:
6453 case AARCH64_OPND_SIMD_IMM_SFT:
6454 case AARCH64_OPND_LIMM:
6455 /* Bit mask immediate. */
6456 gas_assert (new_inst != NULL);
6457 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6458 new_inst->operands[idx].imm.value = value;
6459 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6460 &new_inst->value, NULL, NULL))
6461 put_aarch64_insn (buf, new_inst->value);
6462 else
6463 as_bad_where (fixP->fx_file, fixP->fx_line,
6464 _("invalid immediate"));
6465 break;
6466
6467 case AARCH64_OPND_HALF:
6468 /* 16-bit unsigned immediate. */
6469 if (unsigned_overflow (value, 16))
6470 as_bad_where (fixP->fx_file, fixP->fx_line,
6471 _("immediate out of range"));
6472 insn = get_aarch64_insn (buf);
6473 insn |= encode_movw_imm (value & 0xffff);
6474 put_aarch64_insn (buf, insn);
6475 break;
6476
6477 case AARCH64_OPND_IMM_MOV:
6478 /* Operand for a generic move immediate instruction, which is
6479 an alias instruction that generates a single MOVZ, MOVN or ORR
6480 instruction to load a 32-bit/64-bit immediate value into a general
6481 register. An assembler error shall result if the immediate cannot be
6482 created by a single one of these instructions. If there is a choice,
6483 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
6484 and MOVZ or MOVN to ORR. */
6485 gas_assert (new_inst != NULL);
6486 fix_mov_imm_insn (fixP, buf, new_inst, value);
6487 break;
6488
6489 case AARCH64_OPND_ADDR_SIMM7:
6490 case AARCH64_OPND_ADDR_SIMM9:
6491 case AARCH64_OPND_ADDR_SIMM9_2:
6492 case AARCH64_OPND_ADDR_UIMM12:
6493 /* Immediate offset in an address. */
6494 insn = get_aarch64_insn (buf);
6495
6496 gas_assert (new_inst != NULL && new_inst->value == insn);
6497 gas_assert (new_inst->opcode->operands[1] == opnd
6498 || new_inst->opcode->operands[2] == opnd);
6499
6500 /* Get the index of the address operand. */
6501 if (new_inst->opcode->operands[1] == opnd)
6502 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6503 idx = 1;
6504 else
6505 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6506 idx = 2;
6507
6508 /* Update the resolved offset value. */
6509 new_inst->operands[idx].addr.offset.imm = value;
6510
6511 /* Encode/fix-up. */
6512 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6513 &new_inst->value, NULL, NULL))
6514 {
6515 put_aarch64_insn (buf, new_inst->value);
6516 break;
6517 }
6518 else if (new_inst->opcode->iclass == ldst_pos
6519 && try_to_encode_as_unscaled_ldst (new_inst))
6520 {
6521 put_aarch64_insn (buf, new_inst->value);
6522 break;
6523 }
6524
6525 as_bad_where (fixP->fx_file, fixP->fx_line,
6526 _("immediate offset out of range"));
6527 break;
6528
6529 default:
6530 gas_assert (0);
6531 as_fatal (_("unhandled operand code %d"), opnd);
6532 }
6533 }
6534
6535 /* Apply a fixup (fixP) to segment data, once it has been determined
6536 by our caller that we have all the info we need to fix it up.
6537
6538 Parameter valP is the pointer to the value of the bits. */
6539
6540 void
6541 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6542 {
6543 offsetT value = *valP;
6544 uint32_t insn;
6545 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6546 int scale;
6547 unsigned flags = fixP->fx_addnumber;
6548
6549 DEBUG_TRACE ("\n\n");
6550 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6551 DEBUG_TRACE ("Enter md_apply_fix");
6552
6553 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6554
6555 /* Note whether this will delete the relocation. */
6556
6557 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6558 fixP->fx_done = 1;
6559
6560 /* Process the relocations. */
6561 switch (fixP->fx_r_type)
6562 {
6563 case BFD_RELOC_NONE:
6564 /* This will need to go in the object file. */
6565 fixP->fx_done = 0;
6566 break;
6567
6568 case BFD_RELOC_8:
6569 case BFD_RELOC_8_PCREL:
6570 if (fixP->fx_done || !seg->use_rela_p)
6571 md_number_to_chars (buf, value, 1);
6572 break;
6573
6574 case BFD_RELOC_16:
6575 case BFD_RELOC_16_PCREL:
6576 if (fixP->fx_done || !seg->use_rela_p)
6577 md_number_to_chars (buf, value, 2);
6578 break;
6579
6580 case BFD_RELOC_32:
6581 case BFD_RELOC_32_PCREL:
6582 if (fixP->fx_done || !seg->use_rela_p)
6583 md_number_to_chars (buf, value, 4);
6584 break;
6585
6586 case BFD_RELOC_64:
6587 case BFD_RELOC_64_PCREL:
6588 if (fixP->fx_done || !seg->use_rela_p)
6589 md_number_to_chars (buf, value, 8);
6590 break;
6591
6592 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6593 /* We claim that these fixups have been processed here, even if
6594 in fact we generate an error because we do not have a reloc
6595 for them, so tc_gen_reloc() will reject them. */
6596 fixP->fx_done = 1;
6597 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6598 {
6599 as_bad_where (fixP->fx_file, fixP->fx_line,
6600 _("undefined symbol %s used as an immediate value"),
6601 S_GET_NAME (fixP->fx_addsy));
6602 goto apply_fix_return;
6603 }
6604 fix_insn (fixP, flags, value);
6605 break;
6606
6607 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6608 if (fixP->fx_done || !seg->use_rela_p)
6609 {
6610 if (value & 3)
6611 as_bad_where (fixP->fx_file, fixP->fx_line,
6612 _("pc-relative load offset not word aligned"));
6613 if (signed_overflow (value, 21))
6614 as_bad_where (fixP->fx_file, fixP->fx_line,
6615 _("pc-relative load offset out of range"));
6616 insn = get_aarch64_insn (buf);
6617 insn |= encode_ld_lit_ofs_19 (value >> 2);
6618 put_aarch64_insn (buf, insn);
6619 }
6620 break;
6621
6622 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6623 if (fixP->fx_done || !seg->use_rela_p)
6624 {
6625 if (signed_overflow (value, 21))
6626 as_bad_where (fixP->fx_file, fixP->fx_line,
6627 _("pc-relative address offset out of range"));
6628 insn = get_aarch64_insn (buf);
6629 insn |= encode_adr_imm (value);
6630 put_aarch64_insn (buf, insn);
6631 }
6632 break;
6633
6634 case BFD_RELOC_AARCH64_BRANCH19:
6635 if (fixP->fx_done || !seg->use_rela_p)
6636 {
6637 if (value & 3)
6638 as_bad_where (fixP->fx_file, fixP->fx_line,
6639 _("conditional branch target not word aligned"));
6640 if (signed_overflow (value, 21))
6641 as_bad_where (fixP->fx_file, fixP->fx_line,
6642 _("conditional branch out of range"));
6643 insn = get_aarch64_insn (buf);
6644 insn |= encode_cond_branch_ofs_19 (value >> 2);
6645 put_aarch64_insn (buf, insn);
6646 }
6647 break;
6648
6649 case BFD_RELOC_AARCH64_TSTBR14:
6650 if (fixP->fx_done || !seg->use_rela_p)
6651 {
6652 if (value & 3)
6653 as_bad_where (fixP->fx_file, fixP->fx_line,
6654 _("conditional branch target not word aligned"));
6655 if (signed_overflow (value, 16))
6656 as_bad_where (fixP->fx_file, fixP->fx_line,
6657 _("conditional branch out of range"));
6658 insn = get_aarch64_insn (buf);
6659 insn |= encode_tst_branch_ofs_14 (value >> 2);
6660 put_aarch64_insn (buf, insn);
6661 }
6662 break;
6663
6664 case BFD_RELOC_AARCH64_CALL26:
6665 case BFD_RELOC_AARCH64_JUMP26:
6666 if (fixP->fx_done || !seg->use_rela_p)
6667 {
6668 if (value & 3)
6669 as_bad_where (fixP->fx_file, fixP->fx_line,
6670 _("branch target not word aligned"));
6671 if (signed_overflow (value, 28))
6672 as_bad_where (fixP->fx_file, fixP->fx_line,
6673 _("branch out of range"));
6674 insn = get_aarch64_insn (buf);
6675 insn |= encode_branch_ofs_26 (value >> 2);
6676 put_aarch64_insn (buf, insn);
6677 }
6678 break;
6679
6680 case BFD_RELOC_AARCH64_MOVW_G0:
6681 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6682 case BFD_RELOC_AARCH64_MOVW_G0_S:
6683 scale = 0;
6684 goto movw_common;
6685 case BFD_RELOC_AARCH64_MOVW_G1:
6686 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6687 case BFD_RELOC_AARCH64_MOVW_G1_S:
6688 scale = 16;
6689 goto movw_common;
6690 case BFD_RELOC_AARCH64_MOVW_G2:
6691 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6692 case BFD_RELOC_AARCH64_MOVW_G2_S:
6693 scale = 32;
6694 goto movw_common;
6695 case BFD_RELOC_AARCH64_MOVW_G3:
6696 scale = 48;
6697 movw_common:
6698 if (fixP->fx_done || !seg->use_rela_p)
6699 {
6700 insn = get_aarch64_insn (buf);
6701
6702 if (!fixP->fx_done)
6703 {
6704 /* REL signed addend must fit in 16 bits */
6705 if (signed_overflow (value, 16))
6706 as_bad_where (fixP->fx_file, fixP->fx_line,
6707 _("offset out of range"));
6708 }
6709 else
6710 {
6711 /* Check for overflow and scale. */
6712 switch (fixP->fx_r_type)
6713 {
6714 case BFD_RELOC_AARCH64_MOVW_G0:
6715 case BFD_RELOC_AARCH64_MOVW_G1:
6716 case BFD_RELOC_AARCH64_MOVW_G2:
6717 case BFD_RELOC_AARCH64_MOVW_G3:
6718 if (unsigned_overflow (value, scale + 16))
6719 as_bad_where (fixP->fx_file, fixP->fx_line,
6720 _("unsigned value out of range"));
6721 break;
6722 case BFD_RELOC_AARCH64_MOVW_G0_S:
6723 case BFD_RELOC_AARCH64_MOVW_G1_S:
6724 case BFD_RELOC_AARCH64_MOVW_G2_S:
6725 /* NOTE: We can only come here with movz or movn. */
6726 if (signed_overflow (value, scale + 16))
6727 as_bad_where (fixP->fx_file, fixP->fx_line,
6728 _("signed value out of range"));
6729 if (value < 0)
6730 {
6731 /* Force use of MOVN. */
6732 value = ~value;
6733 insn = reencode_movzn_to_movn (insn);
6734 }
6735 else
6736 {
6737 /* Force use of MOVZ. */
6738 insn = reencode_movzn_to_movz (insn);
6739 }
6740 break;
6741 default:
6742 /* Unchecked relocations. */
6743 break;
6744 }
6745 value >>= scale;
6746 }
6747
6748 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6749 insn |= encode_movw_imm (value & 0xffff);
6750
6751 put_aarch64_insn (buf, insn);
6752 }
6753 break;
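/* Worked example for the scaled MOVW relocations above: for a resolved
   BFD_RELOC_AARCH64_MOVW_G1 fixup with value 0x12345678, the overflow check
   uses scale + 16 = 32 bits, value >>= 16 leaves 0x1234, and that 16-bit
   chunk is inserted into the MOVN/MOVZ/MOVK immediate field.  */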
6754
6755 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6756 fixP->fx_r_type = (ilp32_p
6757 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6758 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6759 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6760 /* Should always be exported to object file, see
6761 aarch64_force_relocation(). */
6762 gas_assert (!fixP->fx_done);
6763 gas_assert (seg->use_rela_p);
6764 break;
6765
6766 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6767 fixP->fx_r_type = (ilp32_p
6768 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6769 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6770 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6771 /* Should always be exported to object file, see
6772 aarch64_force_relocation(). */
6773 gas_assert (!fixP->fx_done);
6774 gas_assert (seg->use_rela_p);
6775 break;
6776
6777 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6778 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6779 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6780 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6781 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6782 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6783 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6784 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6785 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6786 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6787 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6788 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6789 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6790 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6791 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6792 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6793 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6794 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6795 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6796 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6797 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6798 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6799 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6800 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6801 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6802 /* Should always be exported to object file, see
6803 aarch64_force_relocation(). */
6804 gas_assert (!fixP->fx_done);
6805 gas_assert (seg->use_rela_p);
6806 break;
6807
6808 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6809 /* Should always be exported to object file, see
6810 aarch64_force_relocation(). */
6811 fixP->fx_r_type = (ilp32_p
6812 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6813 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6814 gas_assert (!fixP->fx_done);
6815 gas_assert (seg->use_rela_p);
6816 break;
6817
6818 case BFD_RELOC_AARCH64_ADD_LO12:
6819 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6820 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6821 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6822 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6823 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6824 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6825 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
6826 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6827 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6828 case BFD_RELOC_AARCH64_LDST128_LO12:
6829 case BFD_RELOC_AARCH64_LDST16_LO12:
6830 case BFD_RELOC_AARCH64_LDST32_LO12:
6831 case BFD_RELOC_AARCH64_LDST64_LO12:
6832 case BFD_RELOC_AARCH64_LDST8_LO12:
6833 /* Should always be exported to object file, see
6834 aarch64_force_relocation(). */
6835 gas_assert (!fixP->fx_done);
6836 gas_assert (seg->use_rela_p);
6837 break;
6838
6839 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6840 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6841 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6842 break;
6843
6844 case BFD_RELOC_UNUSED:
6845 /* An error will already have been reported. */
6846 break;
6847
6848 default:
6849 as_bad_where (fixP->fx_file, fixP->fx_line,
6850 _("unexpected %s fixup"),
6851 bfd_get_reloc_code_name (fixP->fx_r_type));
6852 break;
6853 }
6854
6855 apply_fix_return:
6856 /* Free the allocated struct aarch64_inst.
6857 N.B. currently only a very limited number of fix-up types actually use
6858 this field, so the impact on performance should be minimal. */
6859 if (fixP->tc_fix_data.inst != NULL)
6860 free (fixP->tc_fix_data.inst);
6861
6862 return;
6863 }
6864
6865 /* Translate internal representation of relocation info to BFD target
6866 format. */
6867
6868 arelent *
6869 tc_gen_reloc (asection * section, fixS * fixp)
6870 {
6871 arelent *reloc;
6872 bfd_reloc_code_real_type code;
6873
6874 reloc = xmalloc (sizeof (arelent));
6875
6876 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6877 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6878 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6879
6880 if (fixp->fx_pcrel)
6881 {
6882 if (section->use_rela_p)
6883 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6884 else
6885 fixp->fx_offset = reloc->address;
6886 }
6887 reloc->addend = fixp->fx_offset;
6888
6889 code = fixp->fx_r_type;
6890 switch (code)
6891 {
6892 case BFD_RELOC_16:
6893 if (fixp->fx_pcrel)
6894 code = BFD_RELOC_16_PCREL;
6895 break;
6896
6897 case BFD_RELOC_32:
6898 if (fixp->fx_pcrel)
6899 code = BFD_RELOC_32_PCREL;
6900 break;
6901
6902 case BFD_RELOC_64:
6903 if (fixp->fx_pcrel)
6904 code = BFD_RELOC_64_PCREL;
6905 break;
6906
6907 default:
6908 break;
6909 }
6910
6911 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6912 if (reloc->howto == NULL)
6913 {
6914 as_bad_where (fixp->fx_file, fixp->fx_line,
6915 _
6916 ("cannot represent %s relocation in this object file format"),
6917 bfd_get_reloc_code_name (code));
6918 return NULL;
6919 }
6920
6921 return reloc;
6922 }
6923
6924 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6925
6926 void
6927 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6928 {
6929 bfd_reloc_code_real_type type;
6930 int pcrel = 0;
6931
6932 /* Pick a reloc.
6933 FIXME: @@ Should look at CPU word size. */
6934 switch (size)
6935 {
6936 case 1:
6937 type = BFD_RELOC_8;
6938 break;
6939 case 2:
6940 type = BFD_RELOC_16;
6941 break;
6942 case 4:
6943 type = BFD_RELOC_32;
6944 break;
6945 case 8:
6946 type = BFD_RELOC_64;
6947 break;
6948 default:
6949 as_bad (_("cannot do %u-byte relocation"), size);
6950 type = BFD_RELOC_UNUSED;
6951 break;
6952 }
6953
6954 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6955 }
6956
6957 int
6958 aarch64_force_relocation (struct fix *fixp)
6959 {
6960 switch (fixp->fx_r_type)
6961 {
6962 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6963 /* Perform these "immediate" internal relocations
6964 even if the symbol is extern or weak. */
6965 return 0;
6966
6967 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6968 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6969 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6970 /* Pseudo relocs that need to be fixed up according to
6971 ilp32_p. */
6972 return 0;
6973
6974 case BFD_RELOC_AARCH64_ADD_LO12:
6975 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6976 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6977 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6978 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6979 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6980 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6981 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
6982 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6983 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6984 case BFD_RELOC_AARCH64_LDST128_LO12:
6985 case BFD_RELOC_AARCH64_LDST16_LO12:
6986 case BFD_RELOC_AARCH64_LDST32_LO12:
6987 case BFD_RELOC_AARCH64_LDST64_LO12:
6988 case BFD_RELOC_AARCH64_LDST8_LO12:
6989 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6990 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6991 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6992 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6993 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6994 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6995 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6996 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6997 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6998 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6999 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7000 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7001 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7002 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7003 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7004 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7005 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7006 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7007 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7008 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7009 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7010 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7011 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7012 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7013 /* Always leave these relocations for the linker. */
7014 return 1;
7015
7016 default:
7017 break;
7018 }
7019
7020 return generic_force_reloc (fixp);
7021 }
7022
7023 #ifdef OBJ_ELF
7024
7025 const char *
7026 elf64_aarch64_target_format (void)
7027 {
7028 if (target_big_endian)
7029 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7030 else
7031 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7032 }
7033
7034 void
7035 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
7036 {
7037 elf_frob_symbol (symp, puntp);
7038 }
7039 #endif
7040
7041 /* MD interface: Finalization. */
7042
7043 /* A good place to do this, although this was probably not intended
7044 for this kind of use. We need to dump the literal pool before
7045 references are made to a null symbol pointer. */
7046
7047 void
7048 aarch64_cleanup (void)
7049 {
7050 literal_pool *pool;
7051
7052 for (pool = list_of_pools; pool; pool = pool->next)
7053 {
7054 /* Put it at the end of the relevant section. */
7055 subseg_set (pool->section, pool->sub_section);
7056 s_ltorg (0);
7057 }
7058 }
7059
7060 #ifdef OBJ_ELF
7061 /* Remove any excess mapping symbols generated for alignment frags in
7062 SEC. We may have created a mapping symbol before a zero byte
7063 alignment; remove it if there's a mapping symbol after the
7064 alignment. */
7065 static void
7066 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
7067 void *dummy ATTRIBUTE_UNUSED)
7068 {
7069 segment_info_type *seginfo = seg_info (sec);
7070 fragS *fragp;
7071
7072 if (seginfo == NULL || seginfo->frchainP == NULL)
7073 return;
7074
7075 for (fragp = seginfo->frchainP->frch_root;
7076 fragp != NULL; fragp = fragp->fr_next)
7077 {
7078 symbolS *sym = fragp->tc_frag_data.last_map;
7079 fragS *next = fragp->fr_next;
7080
7081 /* Variable-sized frags have been converted to fixed size by
7082 this point. But if this was variable-sized to start with,
7083 there will be a fixed-size frag after it. So don't handle
7084 next == NULL. */
7085 if (sym == NULL || next == NULL)
7086 continue;
7087
7088 if (S_GET_VALUE (sym) < next->fr_address)
7089 /* Not at the end of this frag. */
7090 continue;
7091 know (S_GET_VALUE (sym) == next->fr_address);
7092
7093 do
7094 {
7095 if (next->tc_frag_data.first_map != NULL)
7096 {
7097 /* Next frag starts with a mapping symbol. Discard this
7098 one. */
7099 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7100 break;
7101 }
7102
7103 if (next->fr_next == NULL)
7104 {
7105 /* This mapping symbol is at the end of the section. Discard
7106 it. */
7107 know (next->fr_fix == 0 && next->fr_var == 0);
7108 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
7109 break;
7110 }
7111
7112 /* As long as we have empty frags without any mapping symbols,
7113 keep looking. */
7114 /* If the next frag is non-empty and does not start with a
7115 mapping symbol, then this mapping symbol is required. */
7116 if (next->fr_address != next->fr_next->fr_address)
7117 break;
7118
7119 next = next->fr_next;
7120 }
7121 while (next != NULL);
7122 }
7123 }
7124 #endif
7125
7126 /* Adjust the symbol table. */
7127
7128 void
7129 aarch64_adjust_symtab (void)
7130 {
7131 #ifdef OBJ_ELF
7132 /* Remove any overlapping mapping symbols generated by alignment frags. */
7133 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
7134 /* Now do generic ELF adjustments. */
7135 elf_adjust_symtab ();
7136 #endif
7137 }
7138
7139 static void
7140 checked_hash_insert (struct hash_control *table, const char *key, void *value)
7141 {
7142 const char *hash_err;
7143
7144 hash_err = hash_insert (table, key, value);
7145 if (hash_err)
7146 printf ("Internal Error: Can't hash %s\n", key);
7147 }
7148
7149 static void
7150 fill_instruction_hash_table (void)
7151 {
7152 aarch64_opcode *opcode = aarch64_opcode_table;
7153
7154 while (opcode->name != NULL)
7155 {
7156 templates *templ, *new_templ;
7157 templ = hash_find (aarch64_ops_hsh, opcode->name);
7158
7159 new_templ = (templates *) xmalloc (sizeof (templates));
7160 new_templ->opcode = opcode;
7161 new_templ->next = NULL;
7162
7163 if (!templ)
7164 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
7165 else
7166 {
7167 new_templ->next = templ->next;
7168 templ->next = new_templ;
7169 }
7170 ++opcode;
7171 }
7172 }
7173
7174 static inline void
7175 convert_to_upper (char *dst, const char *src, size_t num)
7176 {
7177 unsigned int i;
7178 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
7179 *dst = TOUPPER (*src);
7180 *dst = '\0';
7181 }
7182
7183 /* Assume STR points to a lower-case string; allocate, convert and return
7184 the corresponding upper-case string. */
7185 static inline const char*
7186 get_upper_str (const char *str)
7187 {
7188 char *ret;
7189 size_t len = strlen (str);
7190 if ((ret = xmalloc (len + 1)) == NULL)
7191 abort ();
7192 convert_to_upper (ret, str, len);
7193 return ret;
7194 }
7195
7196 /* MD interface: Initialization. */
7197
7198 void
7199 md_begin (void)
7200 {
7201 unsigned mach;
7202 unsigned int i;
7203
7204 if ((aarch64_ops_hsh = hash_new ()) == NULL
7205 || (aarch64_cond_hsh = hash_new ()) == NULL
7206 || (aarch64_shift_hsh = hash_new ()) == NULL
7207 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7208 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7209 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7210 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7211 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7212 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7213 || (aarch64_reg_hsh = hash_new ()) == NULL
7214 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7215 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7216 || (aarch64_pldop_hsh = hash_new ()) == NULL)
7217 as_fatal (_("virtual memory exhausted"));
7218
7219 fill_instruction_hash_table ();
7220
7221 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7222 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7223 (void *) (aarch64_sys_regs + i));
7224
7225 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7226 checked_hash_insert (aarch64_pstatefield_hsh,
7227 aarch64_pstatefields[i].name,
7228 (void *) (aarch64_pstatefields + i));
7229
7230 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
7231 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7232 aarch64_sys_regs_ic[i].template,
7233 (void *) (aarch64_sys_regs_ic + i));
7234
7235 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
7236 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7237 aarch64_sys_regs_dc[i].template,
7238 (void *) (aarch64_sys_regs_dc + i));
7239
7240 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
7241 checked_hash_insert (aarch64_sys_regs_at_hsh,
7242 aarch64_sys_regs_at[i].template,
7243 (void *) (aarch64_sys_regs_at + i));
7244
7245 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
7246 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7247 aarch64_sys_regs_tlbi[i].template,
7248 (void *) (aarch64_sys_regs_tlbi + i));
7249
7250 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7251 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7252 (void *) (reg_names + i));
7253
7254 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7255 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7256 (void *) (nzcv_names + i));
7257
7258 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7259 {
7260 const char *name = aarch64_operand_modifiers[i].name;
7261 checked_hash_insert (aarch64_shift_hsh, name,
7262 (void *) (aarch64_operand_modifiers + i));
7263 /* Also hash the name in the upper case. */
7264 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7265 (void *) (aarch64_operand_modifiers + i));
7266 }
7267
7268 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7269 {
7270 unsigned int j;
7271 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7272 the same condition code. */
7273 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7274 {
7275 const char *name = aarch64_conds[i].names[j];
7276 if (name == NULL)
7277 break;
7278 checked_hash_insert (aarch64_cond_hsh, name,
7279 (void *) (aarch64_conds + i));
7280 /* Also hash the name in the upper case. */
7281 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7282 (void *) (aarch64_conds + i));
7283 }
7284 }
7285
7286 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7287 {
7288 const char *name = aarch64_barrier_options[i].name;
7289 /* Skip xx00 - the unallocated option values. */
7290 if ((i & 0x3) == 0)
7291 continue;
7292 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7293 (void *) (aarch64_barrier_options + i));
7294 /* Also hash the name in the upper case. */
7295 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7296 (void *) (aarch64_barrier_options + i));
7297 }
7298
7299 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7300 {
7301 const char* name = aarch64_prfops[i].name;
7302 /* Skip the unallocated hint encodings. */
7303 if (name == NULL)
7304 continue;
7305 checked_hash_insert (aarch64_pldop_hsh, name,
7306 (void *) (aarch64_prfops + i));
7307 /* Also hash the name in the upper case. */
7308 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7309 (void *) (aarch64_prfops + i));
7310 }
7311
7312 /* Set the cpu variant based on the command-line options. */
7313 if (!mcpu_cpu_opt)
7314 mcpu_cpu_opt = march_cpu_opt;
7315
7316 if (!mcpu_cpu_opt)
7317 mcpu_cpu_opt = &cpu_default;
7318
7319 cpu_variant = *mcpu_cpu_opt;
7320
7321 /* Record the CPU type. */
7322 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7323
7324 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7325 }
7326
7327 /* Command line processing. */
7328
7329 const char *md_shortopts = "m:";
7330
7331 #ifdef AARCH64_BI_ENDIAN
7332 #define OPTION_EB (OPTION_MD_BASE + 0)
7333 #define OPTION_EL (OPTION_MD_BASE + 1)
7334 #else
7335 #if TARGET_BYTES_BIG_ENDIAN
7336 #define OPTION_EB (OPTION_MD_BASE + 0)
7337 #else
7338 #define OPTION_EL (OPTION_MD_BASE + 1)
7339 #endif
7340 #endif
7341
7342 struct option md_longopts[] = {
7343 #ifdef OPTION_EB
7344 {"EB", no_argument, NULL, OPTION_EB},
7345 #endif
7346 #ifdef OPTION_EL
7347 {"EL", no_argument, NULL, OPTION_EL},
7348 #endif
7349 {NULL, no_argument, NULL, 0}
7350 };
7351
7352 size_t md_longopts_size = sizeof (md_longopts);
7353
7354 struct aarch64_option_table
7355 {
7356 char *option; /* Option name to match. */
7357 char *help; /* Help information. */
7358 int *var; /* Variable to change. */
7359 int value; /* What to change it to. */
7360 char *deprecated; /* If non-null, print this message. */
7361 };
7362
7363 static struct aarch64_option_table aarch64_opts[] = {
7364 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7365 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7366 NULL},
7367 #ifdef DEBUG_AARCH64
7368 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7369 #endif /* DEBUG_AARCH64 */
7370 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7371 NULL},
7372 {"mno-verbose-error", N_("do not output verbose error messages"),
7373 &verbose_error_p, 0, NULL},
7374 {NULL, NULL, NULL, 0, NULL}
7375 };
7376
7377 struct aarch64_cpu_option_table
7378 {
7379 char *name;
7380 const aarch64_feature_set value;
7381 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7382 case. */
7383 const char *canonical_name;
7384 };
7385
7386 /* This list should, at a minimum, contain all the cpu names
7387 recognized by GCC. */
7388 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7389 {"all", AARCH64_ANY, NULL},
7390 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
7391 AARCH64_FEATURE_CRC), "Cortex-A53"},
7392 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
7393 AARCH64_FEATURE_CRC), "Cortex-A57"},
7394 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
7395 AARCH64_FEATURE_CRC), "Cortex-A72"},
7396 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
7397 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7398 "Samsung Exynos M1"},
7399 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
7400 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
7401 "Cavium ThunderX"},
7402 /* The 'xgene-1' name is an older spelling of 'xgene1'; it was used
7403 in earlier releases and is superseded by 'xgene1' in all
7404 tools. */
7405 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7406 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7407 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
7408 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
7409 {"generic", AARCH64_ARCH_V8, NULL},
7410
7411 {NULL, AARCH64_ARCH_NONE, NULL}
7412 };
7413
7414 struct aarch64_arch_option_table
7415 {
7416 char *name;
7417 const aarch64_feature_set value;
7418 };
7419
7420 /* This list should, at a minimum, contain all the architecture names
7421 recognized by GCC. */
7422 static const struct aarch64_arch_option_table aarch64_archs[] = {
7423 {"all", AARCH64_ANY},
7424 {"armv8-a", AARCH64_ARCH_V8},
7425 {"armv8.1-a", AARCH64_ARCH_V8_1},
7426 {NULL, AARCH64_ARCH_NONE}
7427 };
7428
7429 /* ISA extensions. */
7430 struct aarch64_option_cpu_value_table
7431 {
7432 char *name;
7433 const aarch64_feature_set value;
7434 };
7435
7436 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7437 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7438 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7439 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7440 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0)},
7441 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7442 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0)},
7443 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0)},
7444 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_SIMD
7445 | AARCH64_FEATURE_RDMA, 0)},
7446 {NULL, AARCH64_ARCH_NONE}
7447 };
7448
7449 struct aarch64_long_option_table
7450 {
7451 char *option; /* Substring to match. */
7452 char *help; /* Help information. */
7453 int (*func) (char *subopt); /* Function to decode sub-option. */
7454 char *deprecated; /* If non-null, print this message. */
7455 };
7456
7457 static int
7458 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p,
7459 bfd_boolean ext_only)
7460 {
7461 /* We insist on extensions being added before being removed. We achieve
7462 this by using the ADDING_VALUE variable to indicate whether we are
7463 adding an extension (1) or removing it (0) and only allowing it to
7464 change in the order -1 -> 1 -> 0. */
7465 int adding_value = -1;
7466 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7467
7468 /* Copy the feature set, so that we can modify it. */
7469 *ext_set = **opt_p;
7470 *opt_p = ext_set;
7471
7472 while (str != NULL && *str != 0)
7473 {
7474 const struct aarch64_option_cpu_value_table *opt;
7475 char *ext = NULL;
7476 int optlen;
7477
7478 if (!ext_only)
7479 {
7480 if (*str != '+')
7481 {
7482 as_bad (_("invalid architectural extension"));
7483 return 0;
7484 }
7485
7486 ext = strchr (++str, '+');
7487 }
7488
7489 if (ext != NULL)
7490 optlen = ext - str;
7491 else
7492 optlen = strlen (str);
7493
7494 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7495 {
7496 if (adding_value != 0)
7497 adding_value = 0;
7498 optlen -= 2;
7499 str += 2;
7500 }
7501 else if (optlen > 0)
7502 {
7503 if (adding_value == -1)
7504 adding_value = 1;
7505 else if (adding_value != 1)
7506 {
7507 as_bad (_("must specify extensions to add before specifying "
7508 "those to remove"));
7509 return FALSE;
7510 }
7511 }
7512
7513 if (optlen == 0)
7514 {
7515 as_bad (_("missing architectural extension"));
7516 return 0;
7517 }
7518
7519 gas_assert (adding_value != -1);
7520
7521 for (opt = aarch64_features; opt->name != NULL; opt++)
7522 if (strncmp (opt->name, str, optlen) == 0)
7523 {
7524 /* Add or remove the extension. */
7525 if (adding_value)
7526 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7527 else
7528 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7529 break;
7530 }
7531
7532 if (opt->name == NULL)
7533 {
7534 as_bad (_("unknown architectural extension `%s'"), str);
7535 return 0;
7536 }
7537
7538 str = ext;
7539 }
7540
7541 return 1;
7542 }
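/* For example, ".arch armv8-a+crc+nocrypto" first adds the CRC feature bits
   and then clears the crypto bits, whereas "+nocrypto+crc" is rejected by the
   ordering check above because extensions must be added before any are
   removed.  */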
7543
7544 static int
7545 aarch64_parse_cpu (char *str)
7546 {
7547 const struct aarch64_cpu_option_table *opt;
7548 char *ext = strchr (str, '+');
7549 size_t optlen;
7550
7551 if (ext != NULL)
7552 optlen = ext - str;
7553 else
7554 optlen = strlen (str);
7555
7556 if (optlen == 0)
7557 {
7558 as_bad (_("missing cpu name `%s'"), str);
7559 return 0;
7560 }
7561
7562 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7563 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7564 {
7565 mcpu_cpu_opt = &opt->value;
7566 if (ext != NULL)
7567 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
7568
7569 return 1;
7570 }
7571
7572 as_bad (_("unknown cpu `%s'"), str);
7573 return 0;
7574 }
7575
7576 static int
7577 aarch64_parse_arch (char *str)
7578 {
7579 const struct aarch64_arch_option_table *opt;
7580 char *ext = strchr (str, '+');
7581 size_t optlen;
7582
7583 if (ext != NULL)
7584 optlen = ext - str;
7585 else
7586 optlen = strlen (str);
7587
7588 if (optlen == 0)
7589 {
7590 as_bad (_("missing architecture name `%s'"), str);
7591 return 0;
7592 }
7593
7594 for (opt = aarch64_archs; opt->name != NULL; opt++)
7595 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7596 {
7597 march_cpu_opt = &opt->value;
7598 if (ext != NULL)
7599 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
7600
7601 return 1;
7602 }
7603
7604 as_bad (_("unknown architecture `%s'\n"), str);
7605 return 0;
7606 }
7607
7608 /* ABIs. */
7609 struct aarch64_option_abi_value_table
7610 {
7611 char *name;
7612 enum aarch64_abi_type value;
7613 };
7614
7615 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7616 {"ilp32", AARCH64_ABI_ILP32},
7617 {"lp64", AARCH64_ABI_LP64},
7618 {NULL, 0}
7619 };
7620
7621 static int
7622 aarch64_parse_abi (char *str)
7623 {
7624 const struct aarch64_option_abi_value_table *opt;
7625 size_t optlen = strlen (str);
7626
7627 if (optlen == 0)
7628 {
7629 as_bad (_("missing abi name `%s'"), str);
7630 return 0;
7631 }
7632
7633 for (opt = aarch64_abis; opt->name != NULL; opt++)
7634 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7635 {
7636 aarch64_abi = opt->value;
7637 return 1;
7638 }
7639
7640 as_bad (_("unknown abi `%s'\n"), str);
7641 return 0;
7642 }
7643
7644 static struct aarch64_long_option_table aarch64_long_opts[] = {
7645 #ifdef OBJ_ELF
7646 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
7647 aarch64_parse_abi, NULL},
7648 #endif /* OBJ_ELF */
7649 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7650 aarch64_parse_cpu, NULL},
7651 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7652 aarch64_parse_arch, NULL},
7653 {NULL, NULL, 0, NULL}
7654 };
7655
7656 int
7657 md_parse_option (int c, char *arg)
7658 {
7659 struct aarch64_option_table *opt;
7660 struct aarch64_long_option_table *lopt;
7661
7662 switch (c)
7663 {
7664 #ifdef OPTION_EB
7665 case OPTION_EB:
7666 target_big_endian = 1;
7667 break;
7668 #endif
7669
7670 #ifdef OPTION_EL
7671 case OPTION_EL:
7672 target_big_endian = 0;
7673 break;
7674 #endif
7675
7676 case 'a':
7677 /* Listing option. Just ignore these, we don't support additional
7678 ones. */
7679 return 0;
7680
7681 default:
7682 for (opt = aarch64_opts; opt->option != NULL; opt++)
7683 {
7684 if (c == opt->option[0]
7685 && ((arg == NULL && opt->option[1] == 0)
7686 || streq (arg, opt->option + 1)))
7687 {
7688 /* If the option is deprecated, tell the user. */
7689 if (opt->deprecated != NULL)
7690 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7691 arg ? arg : "", _(opt->deprecated));
7692
7693 if (opt->var != NULL)
7694 *opt->var = opt->value;
7695
7696 return 1;
7697 }
7698 }
7699
7700 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7701 {
7702 /* These options are expected to have an argument. */
7703 if (c == lopt->option[0]
7704 && arg != NULL
7705 && strncmp (arg, lopt->option + 1,
7706 strlen (lopt->option + 1)) == 0)
7707 {
7708 /* If the option is deprecated, tell the user. */
7709 if (lopt->deprecated != NULL)
7710 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7711 _(lopt->deprecated));
7712
7713 /* Call the sub-option parser. */
7714 return lopt->func (arg + strlen (lopt->option) - 1);
7715 }
7716 }
7717
7718 return 0;
7719 }
7720
7721 return 1;
7722 }
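/* For example, "-mcpu=cortex-a57" arrives here with c == 'm' and
   arg == "cpu=cortex-a57"; it matches the "mcpu=" entry in aarch64_long_opts
   and aarch64_parse_cpu is then called with the sub-option string
   "cortex-a57".  */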
7723
7724 void
7725 md_show_usage (FILE * fp)
7726 {
7727 struct aarch64_option_table *opt;
7728 struct aarch64_long_option_table *lopt;
7729
7730 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7731
7732 for (opt = aarch64_opts; opt->option != NULL; opt++)
7733 if (opt->help != NULL)
7734 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7735
7736 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7737 if (lopt->help != NULL)
7738 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7739
7740 #ifdef OPTION_EB
7741 fprintf (fp, _("\
7742 -EB assemble code for a big-endian cpu\n"));
7743 #endif
7744
7745 #ifdef OPTION_EL
7746 fprintf (fp, _("\
7747 -EL assemble code for a little-endian cpu\n"));
7748 #endif
7749 }
7750
7751 /* Parse a .cpu directive. */
7752
7753 static void
7754 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7755 {
7756 const struct aarch64_cpu_option_table *opt;
7757 char saved_char;
7758 char *name;
7759 char *ext;
7760 size_t optlen;
7761
7762 name = input_line_pointer;
7763 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7764 input_line_pointer++;
7765 saved_char = *input_line_pointer;
7766 *input_line_pointer = 0;
7767
7768 ext = strchr (name, '+');
7769
7770 if (ext != NULL)
7771 optlen = ext - name;
7772 else
7773 optlen = strlen (name);
7774
7775 /* Skip the first "all" entry. */
7776 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7777 if (strlen (opt->name) == optlen
7778 && strncmp (name, opt->name, optlen) == 0)
7779 {
7780 mcpu_cpu_opt = &opt->value;
7781 if (ext != NULL)
7782 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7783 return;
7784
7785 cpu_variant = *mcpu_cpu_opt;
7786
7787 *input_line_pointer = saved_char;
7788 demand_empty_rest_of_line ();
7789 return;
7790 }
7791 as_bad (_("unknown cpu `%s'"), name);
7792 *input_line_pointer = saved_char;
7793 ignore_rest_of_line ();
7794 }
7795
7796
7797 /* Parse a .arch directive. */
7798
7799 static void
7800 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7801 {
7802 const struct aarch64_arch_option_table *opt;
7803 char saved_char;
7804 char *name;
7805 char *ext;
7806 size_t optlen;
7807
7808 name = input_line_pointer;
7809 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7810 input_line_pointer++;
7811 saved_char = *input_line_pointer;
7812 *input_line_pointer = 0;
7813
7814 ext = strchr (name, '+');
7815
7816 if (ext != NULL)
7817 optlen = ext - name;
7818 else
7819 optlen = strlen (name);
7820
7821 /* Skip the first "all" entry. */
7822 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7823 if (strlen (opt->name) == optlen
7824 && strncmp (name, opt->name, optlen) == 0)
7825 {
7826 mcpu_cpu_opt = &opt->value;
7827 if (ext != NULL)
7828 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
7829 return;
7830
7831 cpu_variant = *mcpu_cpu_opt;
7832
7833 *input_line_pointer = saved_char;
7834 demand_empty_rest_of_line ();
7835 return;
7836 }
7837
7838 as_bad (_("unknown architecture `%s'\n"), name);
7839 *input_line_pointer = saved_char;
7840 ignore_rest_of_line ();
7841 }
7842
7843 /* Parse a .arch_extension directive. */
7844
7845 static void
7846 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
7847 {
7848 char saved_char;
7849 char *ext = input_line_pointer;
7850
7851 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7852 input_line_pointer++;
7853 saved_char = *input_line_pointer;
7854 *input_line_pointer = 0;
7855
7856 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
7857 return;
7858
7859 cpu_variant = *mcpu_cpu_opt;
7860
7861 *input_line_pointer = saved_char;
7862 demand_empty_rest_of_line ();
7863 }
7864
7865 /* Copy symbol information. */
7866
7867 void
7868 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7869 {
7870 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7871 }