gas/config/tc-aarch64.c — GAS assembler back end for the AArch64 ISA
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 /* Types of processor to assemble for. */
42 #ifndef CPU_DEFAULT
43 #define CPU_DEFAULT AARCH64_ARCH_V8
44 #endif
45
46 #define streq(a, b) (strcmp (a, b) == 0)
47
48 #define END_OF_INSN '\0'
49
50 static aarch64_feature_set cpu_variant;
51
52 /* Variables that we set while parsing command-line options. Once all
53 options have been read we re-process these values to set the real
54 assembly flags. */
55 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
56 static const aarch64_feature_set *march_cpu_opt = NULL;
57
58 /* Constants for known architecture features. */
59 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
60
61 /* Currently active instruction sequence. */
62 static aarch64_instr_sequence *insn_sequence = NULL;
63
64 #ifdef OBJ_ELF
65 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
66 static symbolS *GOT_symbol;
67 #endif
68
69 /* Which ABI to use. */
70 enum aarch64_abi_type
71 {
72 AARCH64_ABI_NONE = 0,
73 AARCH64_ABI_LP64 = 1,
74 AARCH64_ABI_ILP32 = 2,
75 AARCH64_ABI_LLP64 = 3
76 };
77
78 unsigned int aarch64_sframe_cfa_sp_reg;
79 /* The other CFA base register for SFrame stack trace info. */
80 unsigned int aarch64_sframe_cfa_fp_reg;
81 unsigned int aarch64_sframe_cfa_ra_reg;
82
83 #ifndef DEFAULT_ARCH
84 #define DEFAULT_ARCH "aarch64"
85 #endif
86
87 #ifdef OBJ_ELF
88 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
89 static const char *default_arch = DEFAULT_ARCH;
90 #endif
91
92 /* AArch64 ABI for the output file. */
93 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
94
95 /* When non-zero, program to a 32-bit model, in which the C data types
96 int, long and all pointer types are 32-bit objects (ILP32); or to a
97 64-bit model, in which the C int type is 32-bits but the C long type
98 and all pointer types are 64-bit objects (LP64). */
99 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
100
101 /* When non zero, C types int and long are 32 bit,
102 pointers, however are 64 bit */
103 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
104
105 enum vector_el_type
106 {
107 NT_invtype = -1,
108 NT_b,
109 NT_h,
110 NT_s,
111 NT_d,
112 NT_q,
113 NT_zero,
114 NT_merge
115 };
116
117 /* Bits for DEFINED field in vector_type_el. */
118 #define NTA_HASTYPE 1
119 #define NTA_HASINDEX 2
120 #define NTA_HASVARWIDTH 4
121
122 struct vector_type_el
123 {
124 enum vector_el_type type;
125 unsigned char defined;
126 unsigned element_size;
127 unsigned width;
128 int64_t index;
129 };
130
131 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
132
133 struct reloc
134 {
135 bfd_reloc_code_real_type type;
136 expressionS exp;
137 int pc_rel;
138 enum aarch64_opnd opnd;
139 uint32_t flags;
140 unsigned need_libopcodes_p : 1;
141 };
142
143 struct aarch64_instruction
144 {
145 /* libopcodes structure for instruction intermediate representation. */
146 aarch64_inst base;
147 /* Record assembly errors found during the parsing. */
148 aarch64_operand_error parsing_error;
149 /* The condition that appears in the assembly line. */
150 int cond;
151 /* Relocation information (including the GAS internal fixup). */
152 struct reloc reloc;
153 /* Need to generate an immediate in the literal pool. */
154 unsigned gen_lit_pool : 1;
155 };
156
157 typedef struct aarch64_instruction aarch64_instruction;
158
159 static aarch64_instruction inst;
160
161 static bool parse_operands (char *, const aarch64_opcode *);
162 static bool programmer_friendly_fixup (aarch64_instruction *);
163
164 /* If an AARCH64_OPDE_SYNTAX_ERROR has no error string, its first three
165 data fields contain the following information:
166
167 data[0].i:
168 A mask of register types that would have been acceptable as bare
169 operands, outside of a register list. In addition, SEF_DEFAULT_ERROR
170 is set if a general parsing error occurred for an operand (that is,
171 an error not related to registers, and having no error string).
172
173 data[1].i:
174 A mask of register types that would have been acceptable inside
175 a register list. In addition, SEF_IN_REGLIST is set if the
176 operand contained a '{' and if we got to the point of trying
177 to parse a register inside a list.
178
179 data[2].i:
180 The mask associated with the register that was actually seen, or 0
181 if none. A nonzero value describes a register inside a register
182 list if data[1].i & SEF_IN_REGLIST, otherwise it describes a bare
183 register.
184
185 The idea is that stringless errors from multiple opcode templates can
186 be ORed together to give a summary of the available alternatives. */
187 #define SEF_DEFAULT_ERROR (1U << 31)
188 #define SEF_IN_REGLIST (1U << 31)
189
190 /* Diagnostics inline function utilities.
191
192 These are lightweight utilities which should only be called by parse_operands
193 and other parsers. GAS processes each assembly line by parsing it against
194 instruction template(s), in the case of multiple templates (for the same
195 mnemonic name), those templates are tried one by one until one succeeds or
196 all fail. An assembly line may fail a few templates before being
197 successfully parsed; an error saved here in most cases is not a user error
198 but an error indicating the current template is not the right template.
199 Therefore it is very important that errors can be saved at a low cost during
200 the parsing; we don't want to slow down the whole parsing by recording
201 non-user errors in detail.
202
203 Remember that the objective is to help GAS pick up the most appropriate
204 error message in the case of multiple templates, e.g. FMOV which has 8
205 templates. */
206
207 static inline void
208 clear_error (void)
209 {
210 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
211 inst.parsing_error.kind = AARCH64_OPDE_NIL;
212 }
213
214 static inline bool
215 error_p (void)
216 {
217 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
218 }
219
220 static inline void
221 set_error (enum aarch64_operand_error_kind kind, const char *error)
222 {
223 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
224 inst.parsing_error.index = -1;
225 inst.parsing_error.kind = kind;
226 inst.parsing_error.error = error;
227 }
228
229 static inline void
230 set_recoverable_error (const char *error)
231 {
232 set_error (AARCH64_OPDE_RECOVERABLE, error);
233 }
234
235 /* Use the DESC field of the corresponding aarch64_operand entry to compose
236 the error message. */
237 static inline void
238 set_default_error (void)
239 {
240 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
241 inst.parsing_error.data[0].i = SEF_DEFAULT_ERROR;
242 }
243
244 static inline void
245 set_expected_error (unsigned int flags)
246 {
247 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
248 inst.parsing_error.data[0].i = flags;
249 }
250
251 static inline void
252 set_syntax_error (const char *error)
253 {
254 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
255 }
256
257 static inline void
258 set_first_syntax_error (const char *error)
259 {
260 if (! error_p ())
261 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
262 }
263
264 static inline void
265 set_fatal_syntax_error (const char *error)
266 {
267 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
268 }
269 \f
270 /* Return value for certain parsers when the parsing fails; those parsers
271 return the information of the parsed result, e.g. register number, on
272 success. */
273 #define PARSE_FAIL -1
274
275 /* This is an invalid condition code that means no conditional field is
276 present. */
277 #define COND_ALWAYS 0x10
278
279 typedef struct
280 {
281 const char *template;
282 uint32_t value;
283 } asm_nzcv;
284
285 struct reloc_entry
286 {
287 char *name;
288 bfd_reloc_code_real_type reloc;
289 };
290
291 /* Macros to define the register types and masks for the purpose
292 of parsing. */
293
294 #undef AARCH64_REG_TYPES
295 #define AARCH64_REG_TYPES \
296 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
297 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
298 BASIC_REG_TYPE(SP_32) /* wsp */ \
299 BASIC_REG_TYPE(SP_64) /* sp */ \
300 BASIC_REG_TYPE(ZR_32) /* wzr */ \
301 BASIC_REG_TYPE(ZR_64) /* xzr */ \
302 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
303 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
304 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
305 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
306 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
307 BASIC_REG_TYPE(V) /* v[0-31] */ \
308 BASIC_REG_TYPE(Z) /* z[0-31] */ \
309 BASIC_REG_TYPE(P) /* p[0-15] */ \
310 BASIC_REG_TYPE(PN) /* pn[0-15] */ \
311 BASIC_REG_TYPE(ZA) /* za */ \
312 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
313 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
314 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
315 BASIC_REG_TYPE(ZT0) /* zt0 */ \
316 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
317 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
318 /* Typecheck: same, plus SVE registers. */ \
319 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
320 | REG_TYPE(Z)) \
321 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
322 MULTI_REG_TYPE(R_ZR, REG_TYPE(R_32) | REG_TYPE(R_64) \
323 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
324 /* Typecheck: same, plus SVE registers. */ \
325 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
326 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) \
327 | REG_TYPE(Z)) \
328 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
329 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
330 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
331 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
332 MULTI_REG_TYPE(R_ZR_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
333 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
334 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
335 /* Typecheck: any [BHSDQ]P FP. */ \
336 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
337 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
338 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
339 MULTI_REG_TYPE(R_ZR_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
340 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
341 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
342 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
343 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
344 be used for SVE instructions, since Zn and Pn are valid symbols \
345 in other contexts. */ \
346 MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
347 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
348 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
349 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
350 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
351 | REG_TYPE(Z) | REG_TYPE(P)) \
352 /* Likewise, but with predicate-as-counter registers added. */ \
353 MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP_PN, REG_TYPE(R_32) | REG_TYPE(R_64) \
354 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
355 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
356 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
357 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
358 | REG_TYPE(Z) | REG_TYPE(P) | REG_TYPE(PN)) \
359 /* Any integer register; used for error messages only. */ \
360 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
361 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
362 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
363 /* Any vector register. */ \
364 MULTI_REG_TYPE(VZ, REG_TYPE(V) | REG_TYPE(Z)) \
365 /* An SVE vector or predicate register. */ \
366 MULTI_REG_TYPE(ZP, REG_TYPE(Z) | REG_TYPE(P)) \
367 /* Any vector or predicate register. */ \
368 MULTI_REG_TYPE(VZP, REG_TYPE(V) | REG_TYPE(Z) | REG_TYPE(P)) \
369 /* The whole of ZA or a single tile. */ \
370 MULTI_REG_TYPE(ZA_ZAT, REG_TYPE(ZA) | REG_TYPE(ZAT)) \
371 /* A horizontal or vertical slice of a ZA tile. */ \
372 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
373 /* Pseudo type to mark the end of the enumerator sequence. */ \
374 END_REG_TYPE(MAX)
375
376 #undef BASIC_REG_TYPE
377 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
378 #undef MULTI_REG_TYPE
379 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
380 #undef END_REG_TYPE
381 #define END_REG_TYPE(T) BASIC_REG_TYPE(T)
382
383 /* Register type enumerators. */
384 typedef enum aarch64_reg_type_
385 {
386 /* A list of REG_TYPE_*. */
387 AARCH64_REG_TYPES
388 } aarch64_reg_type;
389
390 #undef BASIC_REG_TYPE
391 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
392 #undef REG_TYPE
393 #define REG_TYPE(T) (1 << REG_TYPE_##T)
394 #undef MULTI_REG_TYPE
395 #define MULTI_REG_TYPE(T,V) V,
396 #undef END_REG_TYPE
397 #define END_REG_TYPE(T) 0
398
399 /* Structure for a hash table entry for a register. */
400 typedef struct
401 {
402 const char *name;
403 unsigned char number;
404 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
405 unsigned char builtin;
406 } reg_entry;
407
408 /* Values indexed by aarch64_reg_type to assist the type checking. */
409 static const unsigned reg_type_masks[] =
410 {
411 AARCH64_REG_TYPES
412 };
413
414 #undef BASIC_REG_TYPE
415 #undef REG_TYPE
416 #undef MULTI_REG_TYPE
417 #undef END_REG_TYPE
418 #undef AARCH64_REG_TYPES
419
420 /* We expected one of the registers in MASK to be specified. If a register
421 of some kind was specified, SEEN is a mask that contains that register,
422 otherwise it is zero.
423
424 If it is possible to provide a relatively pithy message that describes
425 the error exactly, return a string that does so, reporting the error
426 against "operand %d". Return null otherwise.
427
428 From a QoI perspective, any REG_TYPE_* that is passed as the first
429 argument to set_expected_reg_error should generally have its own message.
430 Providing messages for combinations of such REG_TYPE_*s can be useful if
431 it is possible to summarize the combination in a relatively natural way.
432 On the other hand, it seems better to avoid long lists of unrelated
433 things. */
434
435 static const char *
436 get_reg_expected_msg (unsigned int mask, unsigned int seen)
437 {
438 /* First handle messages that use SEEN. */
439 if ((mask & reg_type_masks[REG_TYPE_ZAT])
440 && (seen & reg_type_masks[REG_TYPE_ZATHV]))
441 return N_("expected an unsuffixed ZA tile at operand %d");
442
443 if ((mask & reg_type_masks[REG_TYPE_ZATHV])
444 && (seen & reg_type_masks[REG_TYPE_ZAT]))
445 return N_("missing horizontal or vertical suffix at operand %d");
446
447 if ((mask & reg_type_masks[REG_TYPE_ZA])
448 && (seen & (reg_type_masks[REG_TYPE_ZAT]
449 | reg_type_masks[REG_TYPE_ZATHV])))
450 return N_("expected 'za' rather than a ZA tile at operand %d");
451
452 if ((mask & reg_type_masks[REG_TYPE_PN])
453 && (seen & reg_type_masks[REG_TYPE_P]))
454 return N_("expected a predicate-as-counter rather than predicate-as-mask"
455 " register at operand %d");
456
457 if ((mask & reg_type_masks[REG_TYPE_P])
458 && (seen & reg_type_masks[REG_TYPE_PN]))
459 return N_("expected a predicate-as-mask rather than predicate-as-counter"
460 " register at operand %d");
461
462 /* Integer, zero and stack registers. */
463 if (mask == reg_type_masks[REG_TYPE_R_64])
464 return N_("expected a 64-bit integer register at operand %d");
465 if (mask == reg_type_masks[REG_TYPE_R_ZR])
466 return N_("expected an integer or zero register at operand %d");
467 if (mask == reg_type_masks[REG_TYPE_R_SP])
468 return N_("expected an integer or stack pointer register at operand %d");
469
470 /* Floating-point and SIMD registers. */
471 if (mask == reg_type_masks[REG_TYPE_BHSDQ])
472 return N_("expected a scalar SIMD or floating-point register"
473 " at operand %d");
474 if (mask == reg_type_masks[REG_TYPE_V])
475 return N_("expected an Advanced SIMD vector register at operand %d");
476 if (mask == reg_type_masks[REG_TYPE_Z])
477 return N_("expected an SVE vector register at operand %d");
478 if (mask == reg_type_masks[REG_TYPE_P]
479 || mask == (reg_type_masks[REG_TYPE_P] | reg_type_masks[REG_TYPE_PN]))
480 /* Use this error for "predicate-as-mask only" and "either kind of
481 predicate". We report a more specific error if P is used where
482 PN is expected, and vice versa, so the issue at this point is
483 "predicate-like" vs. "not predicate-like". */
484 return N_("expected an SVE predicate register at operand %d");
485 if (mask == reg_type_masks[REG_TYPE_PN])
486 return N_("expected an SVE predicate-as-counter register at operand %d");
487 if (mask == reg_type_masks[REG_TYPE_VZ])
488 return N_("expected a vector register at operand %d");
489 if (mask == reg_type_masks[REG_TYPE_ZP])
490 return N_("expected an SVE vector or predicate register at operand %d");
491 if (mask == reg_type_masks[REG_TYPE_VZP])
492 return N_("expected a vector or predicate register at operand %d");
493
494 /* SME-related registers. */
495 if (mask == reg_type_masks[REG_TYPE_ZA])
496 return N_("expected a ZA array vector at operand %d");
497 if (mask == (reg_type_masks[REG_TYPE_ZA_ZAT] | reg_type_masks[REG_TYPE_ZT0]))
498 return N_("expected ZT0 or a ZA mask at operand %d");
499 if (mask == reg_type_masks[REG_TYPE_ZAT])
500 return N_("expected a ZA tile at operand %d");
501 if (mask == reg_type_masks[REG_TYPE_ZATHV])
502 return N_("expected a ZA tile slice at operand %d");
503
504 /* Integer and vector combos. */
505 if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_V]))
506 return N_("expected an integer register or Advanced SIMD vector register"
507 " at operand %d");
508 if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_Z]))
509 return N_("expected an integer register or SVE vector register"
510 " at operand %d");
511 if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZ]))
512 return N_("expected an integer or vector register at operand %d");
513 if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_P]))
514 return N_("expected an integer or predicate register at operand %d");
515 if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZP]))
516 return N_("expected an integer, vector or predicate register"
517 " at operand %d");
518
519 /* SVE and SME combos. */
520 if (mask == (reg_type_masks[REG_TYPE_Z] | reg_type_masks[REG_TYPE_ZATHV]))
521 return N_("expected an SVE vector register or ZA tile slice"
522 " at operand %d");
523
524 return NULL;
525 }
526
527 /* Record that we expected a register of type TYPE but didn't see one.
528 REG is the register that we actually saw, or null if we didn't see a
529 recognized register. FLAGS is SEF_IN_REGLIST if we are parsing the
530 contents of a register list, otherwise it is zero. */
531
532 static inline void
533 set_expected_reg_error (aarch64_reg_type type, const reg_entry *reg,
534 unsigned int flags)
535 {
536 assert (flags == 0 || flags == SEF_IN_REGLIST);
537 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
538 if (flags & SEF_IN_REGLIST)
539 inst.parsing_error.data[1].i = reg_type_masks[type] | flags;
540 else
541 inst.parsing_error.data[0].i = reg_type_masks[type];
542 if (reg)
543 inst.parsing_error.data[2].i = reg_type_masks[reg->type];
544 }
545
546 /* Record that we expected a register list containing registers of type TYPE,
547 but didn't see the opening '{'. If we saw a register instead, REG is the
548 register that we saw, otherwise it is null. */
549
550 static inline void
551 set_expected_reglist_error (aarch64_reg_type type, const reg_entry *reg)
552 {
553 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
554 inst.parsing_error.data[1].i = reg_type_masks[type];
555 if (reg)
556 inst.parsing_error.data[2].i = reg_type_masks[reg->type];
557 }
558
559 /* Some well known registers that we refer to directly elsewhere. */
560 #define REG_SP 31
561 #define REG_ZR 31
562
563 /* Instructions take 4 bytes in the object file. */
564 #define INSN_SIZE 4
565
566 static htab_t aarch64_ops_hsh;
567 static htab_t aarch64_cond_hsh;
568 static htab_t aarch64_shift_hsh;
569 static htab_t aarch64_sys_regs_hsh;
570 static htab_t aarch64_pstatefield_hsh;
571 static htab_t aarch64_sys_regs_ic_hsh;
572 static htab_t aarch64_sys_regs_dc_hsh;
573 static htab_t aarch64_sys_regs_at_hsh;
574 static htab_t aarch64_sys_regs_tlbi_hsh;
575 static htab_t aarch64_sys_regs_sr_hsh;
576 static htab_t aarch64_reg_hsh;
577 static htab_t aarch64_barrier_opt_hsh;
578 static htab_t aarch64_nzcv_hsh;
579 static htab_t aarch64_pldop_hsh;
580 static htab_t aarch64_hint_opt_hsh;
581
582 /* Stuff needed to resolve the label ambiguity
583 As:
584 ...
585 label: <insn>
586 may differ from:
587 ...
588 label:
589 <insn> */
590
591 static symbolS *last_label_seen;
592
593 /* Literal pool structure. Held on a per-section
594 and per-sub-section basis. */
595
596 #define MAX_LITERAL_POOL_SIZE 1024
597 typedef struct literal_expression
598 {
599 expressionS exp;
600 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
601 LITTLENUM_TYPE * bignum;
602 } literal_expression;
603
604 typedef struct literal_pool
605 {
606 literal_expression literals[MAX_LITERAL_POOL_SIZE];
607 unsigned int next_free_entry;
608 unsigned int id;
609 symbolS *symbol;
610 segT section;
611 subsegT sub_section;
612 int size;
613 struct literal_pool *next;
614 } literal_pool;
615
616 /* Pointer to a linked list of literal pools. */
617 static literal_pool *list_of_pools = NULL;
618 \f
619 /* Pure syntax. */
620
621 /* This array holds the chars that always start a comment. If the
622 pre-processor is disabled, these aren't very useful. */
623 const char comment_chars[] = "";
624
625 /* This array holds the chars that only start a comment at the beginning of
626 a line. If the line seems to have the form '# 123 filename'
627 .line and .file directives will appear in the pre-processed output. */
628 /* Note that input_file.c hand checks for '#' at the beginning of the
629 first line of the input file. This is because the compiler outputs
630 #NO_APP at the beginning of its output. */
631 /* Also note that comments like this one will always work. */
632 const char line_comment_chars[] = "#";
633
634 const char line_separator_chars[] = ";";
635
636 /* Chars that can be used to separate mant
637 from exp in floating point numbers. */
638 const char EXP_CHARS[] = "eE";
639
640 /* Chars that mean this number is a floating point constant. */
641 /* As in 0f12.456 */
642 /* or 0d1.2345e12 */
643
644 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
645
646 /* Prefix character that indicates the start of an immediate value. */
647 #define is_immediate_prefix(C) ((C) == '#')
648
649 /* Separator character handling. */
650
651 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
652
653 static inline bool
654 skip_past_char (char **str, char c)
655 {
656 if (**str == c)
657 {
658 (*str)++;
659 return true;
660 }
661 else
662 return false;
663 }
664
665 #define skip_past_comma(str) skip_past_char (str, ',')
666
667 /* Arithmetic expressions (possibly involving symbols). */
668
669 static bool in_aarch64_get_expression = false;
670
671 /* Third argument to aarch64_get_expression. */
672 #define GE_NO_PREFIX false
673 #define GE_OPT_PREFIX true
674
675 /* Fourth argument to aarch64_get_expression. */
676 #define ALLOW_ABSENT false
677 #define REJECT_ABSENT true
678
679 /* Return TRUE if the string pointed to by *STR is successfully parsed
680 as a valid expression; *EP will be filled with the information of
681 such an expression. Otherwise return FALSE.
682
683 If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
684 If REJECT_ABSENT is true then treat missing expressions as an error. */
685
686 static bool
687 aarch64_get_expression (expressionS * ep,
688 char ** str,
689 bool allow_immediate_prefix,
690 bool reject_absent)
691 {
692 char *save_in;
693 segT seg;
694 bool prefix_present = false;
695
696 if (allow_immediate_prefix)
697 {
698 if (is_immediate_prefix (**str))
699 {
700 (*str)++;
701 prefix_present = true;
702 }
703 }
704
705 memset (ep, 0, sizeof (expressionS));
706
707 save_in = input_line_pointer;
708 input_line_pointer = *str;
709 in_aarch64_get_expression = true;
710 seg = expression (ep);
711 in_aarch64_get_expression = false;
712
713 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
714 {
715 /* We found a bad expression in md_operand(). */
716 *str = input_line_pointer;
717 input_line_pointer = save_in;
718 if (prefix_present && ! error_p ())
719 set_fatal_syntax_error (_("bad expression"));
720 else
721 set_first_syntax_error (_("bad expression"));
722 return false;
723 }
724
725 #ifdef OBJ_AOUT
726 if (seg != absolute_section
727 && seg != text_section
728 && seg != data_section
729 && seg != bss_section
730 && seg != undefined_section)
731 {
732 set_syntax_error (_("bad segment"));
733 *str = input_line_pointer;
734 input_line_pointer = save_in;
735 return false;
736 }
737 #else
738 (void) seg;
739 #endif
740
741 *str = input_line_pointer;
742 input_line_pointer = save_in;
743 return true;
744 }
745
746 /* Turn a string in input_line_pointer into a floating point constant
747 of type TYPE, and store the appropriate bytes in *LITP. The number
748 of LITTLENUMS emitted is stored in *SIZEP. An error message is
749 returned, or NULL on OK. */
750
751 const char *
752 md_atof (int type, char *litP, int *sizeP)
753 {
754 return ieee_md_atof (type, litP, sizeP, target_big_endian);
755 }
756
757 /* We handle all bad expressions here, so that we can report the faulty
758 instruction in the error message. */
759 void
760 md_operand (expressionS * exp)
761 {
762 if (in_aarch64_get_expression)
763 exp->X_op = O_illegal;
764 }
765
766 /* Immediate values. */
767
768 /* Errors may be set multiple times during parsing or bit encoding
769 (particularly in the Neon bits), but usually the earliest error which is set
770 will be the most meaningful. Avoid overwriting it with later (cascading)
771 errors by calling this function. */
772
773 static void
774 first_error (const char *error)
775 {
776 if (! error_p ())
777 set_syntax_error (error);
778 }
779
780 /* Similar to first_error, but this function accepts formatted error
781 message. */
782 static void
783 first_error_fmt (const char *format, ...)
784 {
785 va_list args;
786 enum
787 { size = 100 };
788 /* N.B. this single buffer will not cause error messages for different
789 instructions to pollute each other; this is because at the end of
790 processing of each assembly line, error message if any will be
791 collected by as_bad. */
792 static char buffer[size];
793
794 if (! error_p ())
795 {
796 int ret ATTRIBUTE_UNUSED;
797 va_start (args, format);
798 ret = vsnprintf (buffer, size, format, args);
799 know (ret <= size - 1 && ret >= 0);
800 va_end (args);
801 set_syntax_error (buffer);
802 }
803 }
804
805 /* Internal helper routine converting a vector_type_el structure *VECTYPE
806 to a corresponding operand qualifier. */
807
808 static inline aarch64_opnd_qualifier_t
809 vectype_to_qualifier (const struct vector_type_el *vectype)
810 {
811 /* Element size in bytes indexed by vector_el_type. */
812 const unsigned char ele_size[5]
813 = {1, 2, 4, 8, 16};
814 const unsigned int ele_base [5] =
815 {
816 AARCH64_OPND_QLF_V_4B,
817 AARCH64_OPND_QLF_V_2H,
818 AARCH64_OPND_QLF_V_2S,
819 AARCH64_OPND_QLF_V_1D,
820 AARCH64_OPND_QLF_V_1Q
821 };
822
823 if (!vectype->defined || vectype->type == NT_invtype)
824 goto vectype_conversion_fail;
825
826 if (vectype->type == NT_zero)
827 return AARCH64_OPND_QLF_P_Z;
828 if (vectype->type == NT_merge)
829 return AARCH64_OPND_QLF_P_M;
830
831 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
832
833 if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
834 {
835 /* Special case S_4B. */
836 if (vectype->type == NT_b && vectype->width == 4)
837 return AARCH64_OPND_QLF_S_4B;
838
839 /* Special case S_2H. */
840 if (vectype->type == NT_h && vectype->width == 2)
841 return AARCH64_OPND_QLF_S_2H;
842
843 /* Vector element register. */
844 return AARCH64_OPND_QLF_S_B + vectype->type;
845 }
846 else
847 {
848 /* Vector register. */
849 int reg_size = ele_size[vectype->type] * vectype->width;
850 unsigned offset;
851 unsigned shift;
852 if (reg_size != 16 && reg_size != 8 && reg_size != 4)
853 goto vectype_conversion_fail;
854
855 /* The conversion is by calculating the offset from the base operand
856 qualifier for the vector type. The operand qualifiers are regular
857 enough that the offset can established by shifting the vector width by
858 a vector-type dependent amount. */
859 shift = 0;
860 if (vectype->type == NT_b)
861 shift = 3;
862 else if (vectype->type == NT_h || vectype->type == NT_s)
863 shift = 2;
864 else if (vectype->type >= NT_d)
865 shift = 1;
866 else
867 gas_assert (0);
868
869 offset = ele_base [vectype->type] + (vectype->width >> shift);
870 gas_assert (AARCH64_OPND_QLF_V_4B <= offset
871 && offset <= AARCH64_OPND_QLF_V_1Q);
872 return offset;
873 }
874
875 vectype_conversion_fail:
876 first_error (_("bad vector arrangement type"));
877 return AARCH64_OPND_QLF_NIL;
878 }
879
880 /* Register parsing. */
881
882 /* Generic register parser which is called by other specialized
883 register parsers.
884 CCP points to what should be the beginning of a register name.
885 If it is indeed a valid register name, advance CCP over it and
886 return the reg_entry structure; otherwise return NULL.
887 It does not issue diagnostics. */
888
889 static reg_entry *
890 parse_reg (char **ccp)
891 {
892 char *start = *ccp;
893 char *p;
894 reg_entry *reg;
895
896 #ifdef REGISTER_PREFIX
897 if (*start != REGISTER_PREFIX)
898 return NULL;
899 start++;
900 #endif
901
902 p = start;
903 if (!ISALPHA (*p) || !is_name_beginner (*p))
904 return NULL;
905
906 do
907 p++;
908 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
909
910 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
911
912 if (!reg)
913 return NULL;
914
915 *ccp = p;
916 return reg;
917 }
918
919 /* Return the operand qualifier associated with all uses of REG, or
920 AARCH64_OPND_QLF_NIL if none. AARCH64_OPND_QLF_NIL means either
921 that qualifiers don't apply to REG or that qualifiers are added
922 using suffixes. */
923
924 static aarch64_opnd_qualifier_t
925 inherent_reg_qualifier (const reg_entry *reg)
926 {
927 switch (reg->type)
928 {
929 case REG_TYPE_R_32:
930 case REG_TYPE_SP_32:
931 case REG_TYPE_ZR_32:
932 return AARCH64_OPND_QLF_W;
933
934 case REG_TYPE_R_64:
935 case REG_TYPE_SP_64:
936 case REG_TYPE_ZR_64:
937 return AARCH64_OPND_QLF_X;
938
939 case REG_TYPE_FP_B:
940 case REG_TYPE_FP_H:
941 case REG_TYPE_FP_S:
942 case REG_TYPE_FP_D:
943 case REG_TYPE_FP_Q:
944 return AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);
945
946 default:
947 return AARCH64_OPND_QLF_NIL;
948 }
949 }
950
951 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
952 return FALSE. */
953 static bool
954 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
955 {
956 return (reg_type_masks[type] & (1 << reg->type)) != 0;
957 }
958
959 /* Try to parse a base or offset register. Allow SVE base and offset
960 registers if REG_TYPE includes SVE registers. Return the register
961 entry on success, setting *QUALIFIER to the register qualifier.
962 Return null otherwise.
963
964 Note that this function does not issue any diagnostics. */
965
966 static const reg_entry *
967 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
968 aarch64_opnd_qualifier_t *qualifier)
969 {
970 char *str = *ccp;
971 const reg_entry *reg = parse_reg (&str);
972
973 if (reg == NULL)
974 return NULL;
975
976 switch (reg->type)
977 {
978 case REG_TYPE_Z:
979 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_Z)) == 0
980 || str[0] != '.')
981 return NULL;
982 switch (TOLOWER (str[1]))
983 {
984 case 's':
985 *qualifier = AARCH64_OPND_QLF_S_S;
986 break;
987 case 'd':
988 *qualifier = AARCH64_OPND_QLF_S_D;
989 break;
990 default:
991 return NULL;
992 }
993 str += 2;
994 break;
995
996 default:
997 if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR_SP))
998 return NULL;
999 *qualifier = inherent_reg_qualifier (reg);
1000 break;
1001 }
1002
1003 *ccp = str;
1004
1005 return reg;
1006 }
1007
1008 /* Try to parse a base or offset register. Return the register entry
1009 on success, setting *QUALIFIER to the register qualifier. Return null
1010 otherwise.
1011
1012 Note that this function does not issue any diagnostics. */
1013
1014 static const reg_entry *
1015 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
1016 {
1017 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_ZR_SP, qualifier);
1018 }
1019
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* Non-V register types take a bare element-size letter with no lane
     count; a suffix that does not start with a digit is treated the
     same way (width 0 == "no count given").  */
  if (reg_type != REG_TYPE_V || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is accepted only for non-V registers or for the "1q" form.  */
      if (reg_type != REG_TYPE_V || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* A counted vector must describe a full 64-bit or 128-bit register,
     or one of the shorter 2h/4b half-width forms.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      /* NOTE(review): "%d" here prints the lane count WIDTH (not the
	 element size) and "%c" prints the character after the size
	 letter — confirm the intended message wording.  */
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;
  parsed_type->element_size = element_size;

  *str = ptr;

  return true;
}
1106
1107 /* *STR contains an SVE zero/merge predication suffix. Parse it into
1108 *PARSED_TYPE and point *STR at the end of the suffix. */
1109
1110 static bool
1111 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
1112 {
1113 char *ptr = *str;
1114
1115 /* Skip '/'. */
1116 gas_assert (*ptr == '/');
1117 ptr++;
1118 switch (TOLOWER (*ptr))
1119 {
1120 case 'z':
1121 parsed_type->type = NT_zero;
1122 break;
1123 case 'm':
1124 parsed_type->type = NT_merge;
1125 break;
1126 default:
1127 if (*ptr != '\0' && *ptr != ',')
1128 first_error_fmt (_("unexpected character `%c' in predication type"),
1129 *ptr);
1130 else
1131 first_error (_("missing predication type"));
1132 return false;
1133 }
1134 parsed_type->width = 0;
1135 *str = ptr + 1;
1136 return true;
1137 }
1138
1139 /* Return true if CH is a valid suffix character for registers of
1140 type TYPE. */
1141
1142 static bool
1143 aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
1144 {
1145 switch (type)
1146 {
1147 case REG_TYPE_V:
1148 case REG_TYPE_Z:
1149 case REG_TYPE_ZA:
1150 case REG_TYPE_ZAT:
1151 case REG_TYPE_ZATH:
1152 case REG_TYPE_ZATV:
1153 return ch == '.';
1154
1155 case REG_TYPE_P:
1156 case REG_TYPE_PN:
1157 return ch == '.' || ch == '/';
1158
1159 default:
1160 return false;
1161 }
1162 }
1163
1164 /* Parse an index expression at *STR, storing it in *IMM on success. */
1165
1166 static bool
1167 parse_index_expression (char **str, int64_t *imm)
1168 {
1169 expressionS exp;
1170
1171 aarch64_get_expression (&exp, str, GE_NO_PREFIX, REJECT_ABSENT);
1172 if (exp.X_op != O_constant)
1173 {
1174 first_error (_("constant expression required"));
1175 return false;
1176 }
1177 *imm = exp.X_add_number;
1178 return true;
1179 }
1180
/* Parse a register of the type TYPE.

   Return null if the string pointed to by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register, and optionally return the register
   shape and element index information in *TYPEINFO.

   FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.

   FLAGS includes PTR_FULL_REG if the function should ignore any potential
   register index.

   FLAGS includes PTR_GOOD_MATCH if we are sufficiently far into parsing
   an operand that we can be confident that it is a good match.  */

#define PTR_IN_REGLIST (1U << 0)
#define PTR_FULL_REG (1U << 1)
#define PTR_GOOD_MATCH (1U << 2)

static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  /* Whether the first character could even start a register name; used
     below to pick the right diagnostic inside register lists.  */
  bool isalpha = ISALPHA (*str);
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;
  unsigned int err_flags = (flags & PTR_IN_REGLIST) ? SEF_IN_REGLIST : 0;

  /* Start from an empty shape/index description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.element_size = 0;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      if (!isalpha && (flags & PTR_IN_REGLIST))
	set_fatal_syntax_error (_("syntax error in register list"));
      else if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }
  /* From here on use the register's own, more specific, type.  */
  type = reg->type;

  if (aarch64_valid_suffix_char_p (reg->type, *str))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	  /* For ZA tiles the tile number must satisfy
	     number * 8 < element size.  */
	  if ((reg->type == REG_TYPE_ZAT
	       || reg->type == REG_TYPE_ZATH
	       || reg->type == REG_TYPE_ZATV)
	      && reg->number * 8 >= parsetype.element_size)
	    {
	      set_syntax_error (_("ZA tile number out of range"));
	      return NULL;
	    }
	}
      else
	{
	  /* '/' introduces an SVE zeroing/merging predication suffix.  */
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type != REG_TYPE_V)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
    {
      /* Reject Sn[index] syntax.  */
      if (reg->type != REG_TYPE_Z
	  && reg->type != REG_TYPE_PN
	  && reg->type != REG_TYPE_ZT0
	  && !is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      if (!parse_index_expression (&str, &atype.index))
	return NULL;

      if (! skip_past_char (&str, ']'))
	return NULL;
    }
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_V && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  *ccp = str;

  return reg;
}
1333
1334 /* Parse register.
1335
1336 Return the register on success; return null otherwise.
1337
1338 If this is a NEON vector register with additional type information, fill
1339 in the struct pointed to by VECTYPE (if non-NULL).
1340
1341 This parser does not handle register lists. */
1342
1343 static const reg_entry *
1344 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1345 struct vector_type_el *vectype)
1346 {
1347 return parse_typed_reg (ccp, type, vectype, 0);
1348 }
1349
1350 static inline bool
1351 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1352 {
1353 return (e1.type == e2.type
1354 && e1.defined == e2.defined
1355 && e1.width == e2.width
1356 && e1.element_size == e2.element_size
1357 && e1.index == e2.index);
1358 }
1359
1360 /* Return the register number mask for registers of type REG_TYPE. */
1361
1362 static inline int
1363 reg_type_mask (aarch64_reg_type reg_type)
1364 {
1365 return reg_type == REG_TYPE_P ? 15 : 31;
1366 }
1367
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  uint32_t val, val_range, mask;
  int in_range;
  int ret_val;
  bool error = false;
  bool expect_index = false;
  unsigned int ptr_flags = PTR_IN_REGLIST;

  if (*str != '{')
    {
      set_expected_reglist_error (type, parse_reg (&str));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.element_size = 0;
  typeinfo_first.index = 0;
  ret_val = 0;
  /* -1u is a sentinel: no register has been parsed yet.  */
  val = -1u;
  val_range = -1u;
  in_range = 0;
  mask = reg_type_mask (type);
  do
    {
      /* IN_RANGE is set when the previous iteration saw a '-': the
	 current register closes a range started at VAL.  */
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      ptr_flags);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_V && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* A range must span at least two registers; register numbers
	     may wrap around modulo the register-file size (MASK).  */
	  if (val == val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range = (val_range + 1) & mask;
	}
      else
	{
	  val_range = val;
	  /* Every element must have the same shape/index as the first.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      if (! error)
	/* Fold the register (or each register of the range) into the
	   encoded result, 5 bits per register number.  */
	for (;;)
	  {
	    ret_val |= val_range << ((5 * nb_regs) & 31);
	    nb_regs++;
	    if (val_range == val)
	      break;
	    val_range = (val_range + 1) & mask;
	  }
      in_range = 0;
      ptr_flags |= PTR_GOOD_MATCH;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* If any element carried an index marker, a single index applying to
     the whole list must follow the closing brace.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  if (!parse_index_expression (&str, &typeinfo_first.index))
	    error = true;
	  if (! skip_past_char (&str, ']'))
	    error = true;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1527
1528 /* Directives: register aliases. */
1529
1530 static reg_entry *
1531 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1532 {
1533 reg_entry *new;
1534 const char *name;
1535
1536 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1537 {
1538 if (new->builtin)
1539 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1540 str);
1541
1542 /* Only warn about a redefinition if it's not defined as the
1543 same register. */
1544 else if (new->number != number || new->type != type)
1545 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1546
1547 return NULL;
1548 }
1549
1550 name = xstrdup (str);
1551 new = XNEW (reg_entry);
1552
1553 new->name = name;
1554 new->number = number;
1555 new->type = type;
1556 new->builtin = false;
1557
1558 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1559
1560 return new;
1561 }
1562
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      /* Reuse NBUF for the upper-case variant; only insert it if it
	 differs from the name as written.  */
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	     foo .req r0
	     Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      /* Likewise for the lower-case variant.  */
      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1642
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reached only when ".req" appears in statement position.  */
  as_bad (_("invalid syntax for .req directive"));
}
1650
1651 /* The .unreq directive deletes an alias which was previously defined
1652 by .req. For example:
1653
1654 my_alias .req r11
1655 .unreq my_alias */
1656
1657 static void
1658 s_unreq (int a ATTRIBUTE_UNUSED)
1659 {
1660 char *name;
1661 char saved_char;
1662
1663 name = input_line_pointer;
1664 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1665 saved_char = *input_line_pointer;
1666 *input_line_pointer = 0;
1667
1668 if (!*name)
1669 as_bad (_("invalid syntax for .unreq directive"));
1670 else
1671 {
1672 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1673
1674 if (!reg)
1675 as_bad (_("unknown register alias '%s'"), name);
1676 else if (reg->builtin)
1677 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1678 name);
1679 else
1680 {
1681 char *p;
1682 char *nbuf;
1683
1684 str_hash_delete (aarch64_reg_hsh, name);
1685 free ((char *) reg->name);
1686 free (reg);
1687
1688 /* Also locate the all upper case and all lower case versions.
1689 Do not complain if we cannot find one or the other as it
1690 was probably deleted above. */
1691
1692 nbuf = strdup (name);
1693 for (p = nbuf; *p; p++)
1694 *p = TOUPPER (*p);
1695 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1696 if (reg)
1697 {
1698 str_hash_delete (aarch64_reg_hsh, nbuf);
1699 free ((char *) reg->name);
1700 free (reg);
1701 }
1702
1703 for (p = nbuf; *p; p++)
1704 *p = TOLOWER (*p);
1705 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1706 if (reg)
1707 {
1708 str_hash_delete (aarch64_reg_hsh, nbuf);
1709 free ((char *) reg->name);
1710 free (reg);
1711 }
1712
1713 free (nbuf);
1714 }
1715 }
1716
1717 *input_line_pointer = saved_char;
1718 demand_empty_rest_of_line ();
1719 }
1720
1721 /* Directives: Instruction set selection. */
1722
1723 #if defined OBJ_ELF || defined OBJ_COFF
1724 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1725 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1726 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1727 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1728
/* Create a new mapping symbol for the transition to STATE, placed at
   offset VALUE within FRAG.  STATE selects the symbol name: "$d" for
   MAP_DATA and "$x" for MAP_INSN.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  /* Mapping symbols within a frag are created in increasing offset
     order; an equal offset means the previous symbol is superseded.  */
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1784
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Emit a $d mapping symbol at offset VALUE in FRAG covering BYTES
   padding bytes, then a STATE mapping symbol just past them.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* A symbol at offset 0 must also be the frag's first map.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1812
1813 static void mapping_state_2 (enum mstate state, int max_chars);
1814
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Bytes emitted before the first instruction must be marked as
	 data: place a $d at the very start of the section.

	 Only add the symbol if the offset is > 0:
         if we're at the first frag, check it's size > 0;
         if we're not at the first frag, then for sure
         the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1856
1857 /* Same as mapping_state, but MAX_CHARS bytes have already been
1858 allocated. Put the mapping symbol that far back. */
1859
1860 static void
1861 mapping_state_2 (enum mstate state, int max_chars)
1862 {
1863 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1864
1865 if (!SEG_NORMAL (now_seg))
1866 return;
1867
1868 if (mapstate == state)
1869 /* The mapping symbol has already been emitted.
1870 There is nothing else to do. */
1871 return;
1872
1873 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1874 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1875 }
1876 #else
1877 #define mapping_state(x) /* nothing */
1878 #define mapping_state_2(x, y) /* nothing */
1879 #endif
1880
1881 /* Directives: sectioning and alignment. */
1882
/* Implements the .bss directive: switch to the BSS section and mark
   the following output as data.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1892
/* Implements the .even directive: align to a 2-byte boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  /* Record 2^1-byte alignment for the current section.  */
  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1904
1905 /* Directives: Literal pools. */
1906
1907 static literal_pool *
1908 find_literal_pool (int size)
1909 {
1910 literal_pool *pool;
1911
1912 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1913 {
1914 if (pool->section == now_seg
1915 && pool->sub_section == now_subseg && pool->size == size)
1916 break;
1917 }
1918
1919 return pool;
1920 }
1921
1922 static literal_pool *
1923 find_or_make_literal_pool (int size)
1924 {
1925 /* Next literal pool ID number. */
1926 static unsigned int latest_pool_num = 1;
1927 literal_pool *pool;
1928
1929 pool = find_literal_pool (size);
1930
1931 if (pool == NULL)
1932 {
1933 /* Create a new pool. */
1934 pool = XNEW (literal_pool);
1935 if (!pool)
1936 return NULL;
1937
1938 /* Currently we always put the literal pool in the current text
1939 section. If we were generating "small" model code where we
1940 knew that all code and initialised data was within 1MB then
1941 we could output literals to mergeable, read-only data
1942 sections. */
1943
1944 pool->next_free_entry = 0;
1945 pool->section = now_seg;
1946 pool->sub_section = now_subseg;
1947 pool->size = size;
1948 pool->next = list_of_pools;
1949 pool->symbol = NULL;
1950
1951 /* Add it to the list. */
1952 list_of_pools = pool;
1953 }
1954
1955 /* New pools, and emptied pools, will have a NULL symbol. */
1956 if (pool->symbol == NULL)
1957 {
1958 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1959 &zero_address_frag, 0);
1960 pool->id = latest_pool_num++;
1961 }
1962
1963 /* Done. */
1964 return pool;
1965 }
1966
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  On success, *EXP is
   rewritten to refer to the pool entry: the pool's anchor symbol plus
   the entry's byte offset.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match on symbols and addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Redirect *EXP at the pool entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
2026
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Fill in the pre-allocated symbol SYMBOLP with NAME, SEGMENT, value
   VALU and owning fragment FRAG, then append it to the global symbol
   chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns stable
     storage that outlives the caller's buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* It is an error to add symbols after the table is frozen.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
2077
2078
/* Implements the .ltorg directive: output every non-empty literal pool
   of the current (sub)section at the current location and mark the
   pools as emptied.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Walk the supported entry sizes: 4, 8 and 16 bytes (1 << align).  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* NOTE(review): the embedded \002 byte presumably keeps the pool
	 symbol name out of the user namespace — confirm.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Bind the pool's anchor symbol to the current location.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
2137
2138 #if defined(OBJ_ELF) || defined(OBJ_COFF)
2139 /* Forward declarations for functions below, in the MD interface
2140 section. */
2141 static struct reloc_table_entry * find_reloc_table_entry (char **);
2142
2143 /* Directives: Data. */
2144 /* N.B. the support for relocation suffix in this directive needs to be
2145 implemented properly. */
2146
2147 static void
2148 s_aarch64_cons (int nbytes)
2149 {
2150 expressionS exp;
2151
2152 #ifdef md_flush_pending_output
2153 md_flush_pending_output ();
2154 #endif
2155
2156 if (is_it_end_of_statement ())
2157 {
2158 demand_empty_rest_of_line ();
2159 return;
2160 }
2161
2162 #ifdef md_cons_align
2163 md_cons_align (nbytes);
2164 #endif
2165
2166 mapping_state (MAP_DATA);
2167 do
2168 {
2169 struct reloc_table_entry *reloc;
2170
2171 expression (&exp);
2172
2173 if (exp.X_op != O_symbol)
2174 emit_expr (&exp, (unsigned int) nbytes);
2175 else
2176 {
2177 skip_past_char (&input_line_pointer, '#');
2178 if (skip_past_char (&input_line_pointer, ':'))
2179 {
2180 reloc = find_reloc_table_entry (&input_line_pointer);
2181 if (reloc == NULL)
2182 as_bad (_("unrecognized relocation suffix"));
2183 else
2184 as_bad (_("unimplemented relocation suffix"));
2185 ignore_rest_of_line ();
2186 return;
2187 }
2188 else
2189 emit_expr (&exp, (unsigned int) nbytes);
2190 }
2191 }
2192 while (*input_line_pointer++ == ',');
2193
2194 /* Put terminator back into stream. */
2195 input_line_pointer--;
2196 demand_empty_rest_of_line ();
2197 }
2198 #endif
2199
2200 #ifdef OBJ_ELF
2201 /* Forward declarations for functions below, in the MD interface
2202 section. */
2203 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2204
2205 /* Mark symbol that it follows a variant PCS convention. */
2206
2207 static void
2208 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
2209 {
2210 char *name;
2211 char c;
2212 symbolS *sym;
2213 asymbol *bfdsym;
2214 elf_symbol_type *elfsym;
2215
2216 c = get_symbol_name (&name);
2217 if (!*name)
2218 as_bad (_("Missing symbol name in directive"));
2219 sym = symbol_find_or_make (name);
2220 restore_line_pointer (c);
2221 demand_empty_rest_of_line ();
2222 bfdsym = symbol_get_bfdsym (sym);
2223 elfsym = elf_symbol_from (bfdsym);
2224 gas_assert (elfsym);
2225 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2226 }
2227 #endif /* OBJ_ELF */
2228
2229 /* Output a 32-bit word, but mark as an instruction. */
2230
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;		/* Number of 32-bit words emitted.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* A bare ".inst" with no operands is accepted and emits nothing.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed. */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  /* Mark the bytes that follow as instructions, not data.  */
  mapping_state (MAP_INSN);
#endif

  /* Emit each comma-separated constant as one instruction word.  */
  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instruction words are always little-endian; byte-swap on a
	 big-endian target before emitting.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream. */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2285
2286 static void
2287 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2288 {
2289 demand_empty_rest_of_line ();
2290 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2291 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2292 }
2293
2294 #ifdef OBJ_ELF
2295 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2296
2297 static void
2298 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2299 {
2300 expressionS exp;
2301
2302 expression (&exp);
2303 frag_grow (4);
2304 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2305 BFD_RELOC_AARCH64_TLSDESC_ADD);
2306
2307 demand_empty_rest_of_line ();
2308 }
2309
2310 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2311
2312 static void
2313 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2314 {
2315 expressionS exp;
2316
2317 /* Since we're just labelling the code, there's no need to define a
2318 mapping symbol. */
2319 expression (&exp);
2320 /* Make sure there is enough room in this frag for the following
2321 blr. This trick only works if the blr follows immediately after
2322 the .tlsdesc directive. */
2323 frag_grow (4);
2324 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2325 BFD_RELOC_AARCH64_TLSDESC_CALL);
2326
2327 demand_empty_rest_of_line ();
2328 }
2329
2330 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2331
2332 static void
2333 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2334 {
2335 expressionS exp;
2336
2337 expression (&exp);
2338 frag_grow (4);
2339 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2340 BFD_RELOC_AARCH64_TLSDESC_LDR);
2341
2342 demand_empty_rest_of_line ();
2343 }
2344 #endif /* OBJ_ELF */
2345
2346 #ifdef TE_PE
2347 static void
2348 s_secrel (int dummy ATTRIBUTE_UNUSED)
2349 {
2350 expressionS exp;
2351
2352 do
2353 {
2354 expression (&exp);
2355 if (exp.X_op == O_symbol)
2356 exp.X_op = O_secrel;
2357
2358 emit_expr (&exp, 4);
2359 }
2360 while (*input_line_pointer++ == ',');
2361
2362 input_line_pointer--;
2363 demand_empty_rest_of_line ();
2364 }
2365
2366 void
2367 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
2368 {
2369 expressionS exp;
2370
2371 exp.X_op = O_secrel;
2372 exp.X_add_symbol = symbol;
2373 exp.X_add_number = 0;
2374 emit_expr (&exp, size);
2375 }
2376
2377 static void
2378 s_secidx (int dummy ATTRIBUTE_UNUSED)
2379 {
2380 expressionS exp;
2381
2382 do
2383 {
2384 expression (&exp);
2385 if (exp.X_op == O_symbol)
2386 exp.X_op = O_secidx;
2387
2388 emit_expr (&exp, 2);
2389 }
2390 while (*input_line_pointer++ == ',');
2391
2392 input_line_pointer--;
2393 demand_empty_rest_of_line ();
2394 }
2395 #endif /* TE_PE */
2396
2397 static void s_aarch64_arch (int);
2398 static void s_aarch64_cpu (int);
2399 static void s_aarch64_arch_extension (int);
2400
2401 /* This table describes all the machine specific pseudo-ops the assembler
2402 has to support. The fields are:
2403 pseudo-op name without dot
2404 function to call to execute this pseudo-op
2405 Integer arg to pass to the function. */
2406
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line. */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* Literal-pool directives; .pool is an alias for .ltorg. */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Target CPU / architecture / extension selection. */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  /* Raw instruction words and pointer-auth CFI annotation. */
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS descriptor relocation markers and variant-PCS marking. */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* Data directives; the integer argument is the element size in
     bytes (.word/.long emit 4, .xword/.dword emit 8). */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* Half-precision and bfloat16 constants; the argument selects the
     float_cons format letter. */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2440 \f
2441
2442 /* Check whether STR points to a register name followed by a comma or the
2443 end of line; REG_TYPE indicates which register types are checked
2444 against. Return TRUE if STR is such a register name; otherwise return
2445 FALSE. The function does not intend to produce any diagnostics, but since
2446 the register parser aarch64_reg_parse, which is called by this function,
2447 does produce diagnostics, we call clear_error to clear any diagnostics
2448 that may be generated by aarch64_reg_parse.
2449 Also, the function returns FALSE directly if there is any user error
2450 present at the function entry. This prevents the existing diagnostics
2451 state from being spoiled.
2452 The function currently serves parse_constant_immediate and
2453 parse_big_immediate only. */
2454 static bool
2455 reg_name_p (char *str, aarch64_reg_type reg_type)
2456 {
2457 const reg_entry *reg;
2458
2459 /* Prevent the diagnostics state from being spoiled. */
2460 if (error_p ())
2461 return false;
2462
2463 reg = aarch64_reg_parse (&str, reg_type, NULL);
2464
2465 /* Clear the parsing error that may be set by the reg parser. */
2466 clear_error ();
2467
2468 if (!reg)
2469 return false;
2470
2471 skip_whitespace (str);
2472 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2473 return true;
2474
2475 return false;
2476 }
2477
2478 /* Parser functions used exclusively in instruction operands. */
2479
2480 /* Parse an immediate expression which may not be constant.
2481
2482 To prevent the expression parser from pushing a register name
2483 into the symbol table as an undefined symbol, firstly a check is
2484 done to find out whether STR is a register of type REG_TYPE followed
2485 by a comma or the end of line. Return FALSE if STR is such a string. */
2486
2487 static bool
2488 parse_immediate_expression (char **str, expressionS *exp,
2489 aarch64_reg_type reg_type)
2490 {
2491 if (reg_name_p (*str, reg_type))
2492 {
2493 set_recoverable_error (_("immediate operand required"));
2494 return false;
2495 }
2496
2497 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2498
2499 if (exp->X_op == O_absent)
2500 {
2501 set_fatal_syntax_error (_("missing immediate expression"));
2502 return false;
2503 }
2504
2505 return true;
2506 }
2507
2508 /* Constant immediate-value read function for use in insn parsing.
2509 STR points to the beginning of the immediate (with the optional
2510 leading #); *VAL receives the value. REG_TYPE says which register
2511 names should be treated as registers rather than as symbolic immediates.
2512
2513 Return TRUE on success; otherwise return FALSE. */
2514
2515 static bool
2516 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2517 {
2518 expressionS exp;
2519
2520 if (! parse_immediate_expression (str, &exp, reg_type))
2521 return false;
2522
2523 if (exp.X_op != O_constant)
2524 {
2525 set_syntax_error (_("constant expression required"));
2526 return false;
2527 }
2528
2529 *val = exp.X_add_number;
2530 return true;
2531 }
2532
/* Compress the IEEE754 single-precision word IMM into the 8-bit
   AArch64 floating-point immediate encoding: the sign bit plus the
   low exponent and high fraction bits.  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t exp_and_frac = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0] */
  uint32_t sign = (imm >> 24) & 0x80;		/* b[31]    -> b[7]   */

  return sign | exp_and_frac;
}
2539
2540 /* Return TRUE if the single-precision floating-point value encoded in IMM
2541 can be expressed in the AArch64 8-bit signed floating-point format with
2542 3-bit exponent and normalized 4 bits of precision; in other words, the
2543 floating-point value must be expressable as
2544 (+/-) n / 16 * power (2, r)
2545 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2546
static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* A single-precision value is representable in the AArch64 8-bit
     floating-point format exactly when it has the bit pattern:

       3 32222222 2221111111111
       1 09876543 21098765432109876543210
       n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~ e.  */

  /* The low 19 fraction bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 25-29 must all equal the complement of bit 30.  */
  uint32_t expected = ((imm >> 30) & 0x1) == 0 ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected;
}
2572
2573 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2574 as an IEEE float without any loss of precision. Store the value in
2575 *FPWORD if so. */
2576
static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* A double-precision bit pattern converts losslessly to a float when:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;
  uint32_t expected;

  /* The bottom 29 mantissa bits are discarded by the conversion, so
     they must already be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The ~~~ bits must be the complement of E (bit 62).  */
  expected = ((high32 >> 30) & 0x1) == 0 ? 0x38000000 : 0x40000000;
  if ((high32 & 0x78000000) != expected)
    return false;

  /* Reject exponents outside the float range
     (Eeee_eeee == 1111_1111).  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (high32 & 0xc0000000)	/* 1 n bit and 1 E bit.  */
    | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
    | (low32 >> 29);			/* 3 S bits.  */
  return true;
}
2620
2621 /* Return true if we should treat OPERAND as a double-precision
2622 floating-point operand rather than a single-precision one. */
2623 static bool
2624 double_precision_operand_p (const aarch64_opnd_info *operand)
2625 {
2626 /* Check for unsuffixed SVE registers, which are allowed
2627 for LDR and STR but not in instructions that require an
2628 immediate. We get better error messages if we arbitrarily
2629 pick one size, parse the immediate normally, and then
2630 report the match failure in the normal way. */
2631 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2632 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2633 }
2634
2635 /* Parse a floating-point immediate. Return TRUE on success and return the
2636 value in *IMMED in the format of IEEE754 single-precision encoding.
2637 *CCP points to the start of the string; DP_P is TRUE when the immediate
2638 is expected to be in double-precision (N.B. this only matters when
2639 hexadecimal representation is involved). REG_TYPE says which register
2640 names should be treated as registers rather than as symbolic immediates.
2641
2642 This routine accepts any IEEE float; it is up to the callers to reject
2643 invalid ones. */
2644
static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;		/* True once a 0x form has been consumed.  */

  /* The leading '#' is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision. */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Convert the 64-bit pattern to its 32-bit equivalent; fail
	     if any precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* Too wide to be a single-precision bit pattern.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A bare register name is not an immediate.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal notation: let atof_ieee produce the single-precision
	 littlenums and reassemble them into one 32-bit word.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP). */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2710
2711 /* Less-generic immediate-value read function with the possibility of loading
2712 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2713 instructions.
2714
2715 To prevent the expression parser from pushing a register name into the
2716 symbol table as an undefined symbol, a check is firstly done to find
2717 out whether STR is a register of type REG_TYPE followed by a comma or
2718 the end of line. Return FALSE if STR is such a register. */
2719
2720 static bool
2721 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2722 {
2723 char *ptr = *str;
2724
2725 if (reg_name_p (ptr, reg_type))
2726 {
2727 set_syntax_error (_("immediate operand required"));
2728 return false;
2729 }
2730
2731 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2732
2733 if (inst.reloc.exp.X_op == O_constant)
2734 *imm = inst.reloc.exp.X_add_number;
2735
2736 *str = ptr;
2737
2738 return true;
2739 }
2740
2741 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2742 if NEED_LIBOPCODES is non-zero, the fixup will need
2743 assistance from the libopcodes. */
2744
2745 static inline void
2746 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2747 const aarch64_opnd_info *operand,
2748 int need_libopcodes_p)
2749 {
2750 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2751 reloc->opnd = operand->type;
2752 if (need_libopcodes_p)
2753 reloc->need_libopcodes_p = 1;
2754 };
2755
2756 /* Return TRUE if the instruction needs to be fixed up later internally by
2757 the GAS; otherwise return FALSE. */
2758
2759 static inline bool
2760 aarch64_gas_internal_fixup_p (void)
2761 {
2762 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2763 }
2764
2765 /* Assign the immediate value to the relevant field in *OPERAND if
2766 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2767 needs an internal fixup in a later stage.
2768 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2769 IMM.VALUE that may get assigned with the constant. */
2770 static inline void
2771 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2772 aarch64_opnd_info *operand,
2773 int addr_off_p,
2774 int need_libopcodes_p,
2775 int skip_p)
2776 {
2777 if (reloc->exp.X_op == O_constant)
2778 {
2779 if (addr_off_p)
2780 operand->addr.offset.imm = reloc->exp.X_add_number;
2781 else
2782 operand->imm.value = reloc->exp.X_add_number;
2783 reloc->type = BFD_RELOC_UNUSED;
2784 }
2785 else
2786 {
2787 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2788 /* Tell libopcodes to ignore this operand or not. This is helpful
2789 when one of the operands needs to be fixed up later but we need
2790 libopcodes to check the other operands. */
2791 operand->skip = skip_p;
2792 }
2793 }
2794
2795 /* Relocation modifiers. Each entry in the table contains the textual
2796 name for the relocation which may be placed before a symbol used as
2797 a load/store offset, or add immediate. It must be surrounded by a
2798 leading and trailing colon, for example:
2799
2800 ldr x0, [x1, #:rello:varsym]
2801 add x0, x1, #:rello:varsym */
2802
struct reloc_table_entry
{
  const char *name;		/* Modifier name, without the colons.  */
  int pc_rel;			/* Non-zero for PC-relative relocations.  */
  bfd_reloc_code_real_type adr_type;	    /* Reloc used with ADR.  */
  bfd_reloc_code_real_type adrp_type;	    /* Reloc used with ADRP.  */
  bfd_reloc_code_real_type movw_type;	    /* Reloc used with MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;	    /* Reloc used with ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	    /* Reloc used with LDR/STR offset.  */
  bfd_reloc_code_real_type ld_literal_type; /* Reloc used with LDR literal.  */
};
2814
2815 static struct reloc_table_entry reloc_table[] =
2816 {
2817 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2818 {"lo12", 0,
2819 0, /* adr_type */
2820 0,
2821 0,
2822 BFD_RELOC_AARCH64_ADD_LO12,
2823 BFD_RELOC_AARCH64_LDST_LO12,
2824 0},
2825
2826 /* Higher 21 bits of pc-relative page offset: ADRP */
2827 {"pg_hi21", 1,
2828 0, /* adr_type */
2829 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2830 0,
2831 0,
2832 0,
2833 0},
2834
2835 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2836 {"pg_hi21_nc", 1,
2837 0, /* adr_type */
2838 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2839 0,
2840 0,
2841 0,
2842 0},
2843
2844 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2845 {"abs_g0", 0,
2846 0, /* adr_type */
2847 0,
2848 BFD_RELOC_AARCH64_MOVW_G0,
2849 0,
2850 0,
2851 0},
2852
2853 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2854 {"abs_g0_s", 0,
2855 0, /* adr_type */
2856 0,
2857 BFD_RELOC_AARCH64_MOVW_G0_S,
2858 0,
2859 0,
2860 0},
2861
2862 /* Less significant bits 0-15 of address/value: MOVK, no check */
2863 {"abs_g0_nc", 0,
2864 0, /* adr_type */
2865 0,
2866 BFD_RELOC_AARCH64_MOVW_G0_NC,
2867 0,
2868 0,
2869 0},
2870
2871 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2872 {"abs_g1", 0,
2873 0, /* adr_type */
2874 0,
2875 BFD_RELOC_AARCH64_MOVW_G1,
2876 0,
2877 0,
2878 0},
2879
2880 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2881 {"abs_g1_s", 0,
2882 0, /* adr_type */
2883 0,
2884 BFD_RELOC_AARCH64_MOVW_G1_S,
2885 0,
2886 0,
2887 0},
2888
2889 /* Less significant bits 16-31 of address/value: MOVK, no check */
2890 {"abs_g1_nc", 0,
2891 0, /* adr_type */
2892 0,
2893 BFD_RELOC_AARCH64_MOVW_G1_NC,
2894 0,
2895 0,
2896 0},
2897
2898 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2899 {"abs_g2", 0,
2900 0, /* adr_type */
2901 0,
2902 BFD_RELOC_AARCH64_MOVW_G2,
2903 0,
2904 0,
2905 0},
2906
2907 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2908 {"abs_g2_s", 0,
2909 0, /* adr_type */
2910 0,
2911 BFD_RELOC_AARCH64_MOVW_G2_S,
2912 0,
2913 0,
2914 0},
2915
2916 /* Less significant bits 32-47 of address/value: MOVK, no check */
2917 {"abs_g2_nc", 0,
2918 0, /* adr_type */
2919 0,
2920 BFD_RELOC_AARCH64_MOVW_G2_NC,
2921 0,
2922 0,
2923 0},
2924
2925 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2926 {"abs_g3", 0,
2927 0, /* adr_type */
2928 0,
2929 BFD_RELOC_AARCH64_MOVW_G3,
2930 0,
2931 0,
2932 0},
2933
2934 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2935 {"prel_g0", 1,
2936 0, /* adr_type */
2937 0,
2938 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2939 0,
2940 0,
2941 0},
2942
2943 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2944 {"prel_g0_nc", 1,
2945 0, /* adr_type */
2946 0,
2947 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2948 0,
2949 0,
2950 0},
2951
2952 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2953 {"prel_g1", 1,
2954 0, /* adr_type */
2955 0,
2956 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2957 0,
2958 0,
2959 0},
2960
2961 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2962 {"prel_g1_nc", 1,
2963 0, /* adr_type */
2964 0,
2965 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2966 0,
2967 0,
2968 0},
2969
2970 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2971 {"prel_g2", 1,
2972 0, /* adr_type */
2973 0,
2974 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2975 0,
2976 0,
2977 0},
2978
2979 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2980 {"prel_g2_nc", 1,
2981 0, /* adr_type */
2982 0,
2983 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2984 0,
2985 0,
2986 0},
2987
2988 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2989 {"prel_g3", 1,
2990 0, /* adr_type */
2991 0,
2992 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2993 0,
2994 0,
2995 0},
2996
2997 /* Get to the page containing GOT entry for a symbol. */
2998 {"got", 1,
2999 0, /* adr_type */
3000 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
3001 0,
3002 0,
3003 0,
3004 BFD_RELOC_AARCH64_GOT_LD_PREL19},
3005
3006 /* 12 bit offset into the page containing GOT entry for that symbol. */
3007 {"got_lo12", 0,
3008 0, /* adr_type */
3009 0,
3010 0,
3011 0,
3012 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
3013 0},
3014
3015 /* 0-15 bits of address/value: MOVk, no check. */
3016 {"gotoff_g0_nc", 0,
3017 0, /* adr_type */
3018 0,
3019 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
3020 0,
3021 0,
3022 0},
3023
3024 /* Most significant bits 16-31 of address/value: MOVZ. */
3025 {"gotoff_g1", 0,
3026 0, /* adr_type */
3027 0,
3028 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
3029 0,
3030 0,
3031 0},
3032
3033 /* 15 bit offset into the page containing GOT entry for that symbol. */
3034 {"gotoff_lo15", 0,
3035 0, /* adr_type */
3036 0,
3037 0,
3038 0,
3039 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
3040 0},
3041
3042 /* Get to the page containing GOT TLS entry for a symbol */
3043 {"gottprel_g0_nc", 0,
3044 0, /* adr_type */
3045 0,
3046 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
3047 0,
3048 0,
3049 0},
3050
3051 /* Get to the page containing GOT TLS entry for a symbol */
3052 {"gottprel_g1", 0,
3053 0, /* adr_type */
3054 0,
3055 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
3056 0,
3057 0,
3058 0},
3059
3060 /* Get to the page containing GOT TLS entry for a symbol */
3061 {"tlsgd", 0,
3062 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
3063 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
3064 0,
3065 0,
3066 0,
3067 0},
3068
3069 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3070 {"tlsgd_lo12", 0,
3071 0, /* adr_type */
3072 0,
3073 0,
3074 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
3075 0,
3076 0},
3077
3078 /* Lower 16 bits address/value: MOVk. */
3079 {"tlsgd_g0_nc", 0,
3080 0, /* adr_type */
3081 0,
3082 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
3083 0,
3084 0,
3085 0},
3086
3087 /* Most significant bits 16-31 of address/value: MOVZ. */
3088 {"tlsgd_g1", 0,
3089 0, /* adr_type */
3090 0,
3091 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
3092 0,
3093 0,
3094 0},
3095
3096 /* Get to the page containing GOT TLS entry for a symbol */
3097 {"tlsdesc", 0,
3098 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
3099 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
3100 0,
3101 0,
3102 0,
3103 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
3104
3105 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3106 {"tlsdesc_lo12", 0,
3107 0, /* adr_type */
3108 0,
3109 0,
3110 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
3111 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
3112 0},
3113
3114 /* Get to the page containing GOT TLS entry for a symbol.
3115 The same as GD, we allocate two consecutive GOT slots
3116 for module index and module offset, the only difference
3117 with GD is the module offset should be initialized to
3118 zero without any outstanding runtime relocation. */
3119 {"tlsldm", 0,
3120 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
3121 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
3122 0,
3123 0,
3124 0,
3125 0},
3126
3127 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3128 {"tlsldm_lo12_nc", 0,
3129 0, /* adr_type */
3130 0,
3131 0,
3132 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
3133 0,
3134 0},
3135
3136 /* 12 bit offset into the module TLS base address. */
3137 {"dtprel_lo12", 0,
3138 0, /* adr_type */
3139 0,
3140 0,
3141 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
3142 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
3143 0},
3144
3145 /* Same as dtprel_lo12, no overflow check. */
3146 {"dtprel_lo12_nc", 0,
3147 0, /* adr_type */
3148 0,
3149 0,
3150 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
3151 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
3152 0},
3153
3154 /* bits[23:12] of offset to the module TLS base address. */
3155 {"dtprel_hi12", 0,
3156 0, /* adr_type */
3157 0,
3158 0,
3159 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
3160 0,
3161 0},
3162
3163 /* bits[15:0] of offset to the module TLS base address. */
3164 {"dtprel_g0", 0,
3165 0, /* adr_type */
3166 0,
3167 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
3168 0,
3169 0,
3170 0},
3171
3172 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
3173 {"dtprel_g0_nc", 0,
3174 0, /* adr_type */
3175 0,
3176 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
3177 0,
3178 0,
3179 0},
3180
3181 /* bits[31:16] of offset to the module TLS base address. */
3182 {"dtprel_g1", 0,
3183 0, /* adr_type */
3184 0,
3185 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
3186 0,
3187 0,
3188 0},
3189
3190 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
3191 {"dtprel_g1_nc", 0,
3192 0, /* adr_type */
3193 0,
3194 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
3195 0,
3196 0,
3197 0},
3198
3199 /* bits[47:32] of offset to the module TLS base address. */
3200 {"dtprel_g2", 0,
3201 0, /* adr_type */
3202 0,
3203 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
3204 0,
3205 0,
3206 0},
3207
3208 /* Lower 16 bit offset into GOT entry for a symbol */
3209 {"tlsdesc_off_g0_nc", 0,
3210 0, /* adr_type */
3211 0,
3212 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
3213 0,
3214 0,
3215 0},
3216
3217 /* Higher 16 bit offset into GOT entry for a symbol */
3218 {"tlsdesc_off_g1", 0,
3219 0, /* adr_type */
3220 0,
3221 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
3222 0,
3223 0,
3224 0},
3225
3226 /* Get to the page containing GOT TLS entry for a symbol */
3227 {"gottprel", 0,
3228 0, /* adr_type */
3229 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
3230 0,
3231 0,
3232 0,
3233 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
3234
3235 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3236 {"gottprel_lo12", 0,
3237 0, /* adr_type */
3238 0,
3239 0,
3240 0,
3241 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
3242 0},
3243
3244 /* Get tp offset for a symbol. */
3245 {"tprel", 0,
3246 0, /* adr_type */
3247 0,
3248 0,
3249 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3250 0,
3251 0},
3252
3253 /* Get tp offset for a symbol. */
3254 {"tprel_lo12", 0,
3255 0, /* adr_type */
3256 0,
3257 0,
3258 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3259 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
3260 0},
3261
3262 /* Get tp offset for a symbol. */
3263 {"tprel_hi12", 0,
3264 0, /* adr_type */
3265 0,
3266 0,
3267 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
3268 0,
3269 0},
3270
3271 /* Get tp offset for a symbol. */
3272 {"tprel_lo12_nc", 0,
3273 0, /* adr_type */
3274 0,
3275 0,
3276 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
3277 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
3278 0},
3279
3280 /* Most significant bits 32-47 of address/value: MOVZ. */
3281 {"tprel_g2", 0,
3282 0, /* adr_type */
3283 0,
3284 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
3285 0,
3286 0,
3287 0},
3288
3289 /* Most significant bits 16-31 of address/value: MOVZ. */
3290 {"tprel_g1", 0,
3291 0, /* adr_type */
3292 0,
3293 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3294 0,
3295 0,
3296 0},
3297
3298 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3299 {"tprel_g1_nc", 0,
3300 0, /* adr_type */
3301 0,
3302 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3303 0,
3304 0,
3305 0},
3306
3307 /* Most significant bits 0-15 of address/value: MOVZ. */
3308 {"tprel_g0", 0,
3309 0, /* adr_type */
3310 0,
3311 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3312 0,
3313 0,
3314 0},
3315
3316 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3317 {"tprel_g0_nc", 0,
3318 0, /* adr_type */
3319 0,
3320 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3321 0,
3322 0,
3323 0},
3324
3325 /* 15bit offset from got entry to base address of GOT table. */
3326 {"gotpage_lo15", 0,
3327 0,
3328 0,
3329 0,
3330 0,
3331 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3332 0},
3333
3334 /* 14bit offset from got entry to base address of GOT table. */
3335 {"gotpage_lo14", 0,
3336 0,
3337 0,
3338 0,
3339 0,
3340 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3341 0},
3342 };
3343
3344 /* Given the address of a pointer pointing to the textual name of a
3345 relocation as may appear in assembler source, attempt to find its
3346 details in reloc_table. The pointer will be updated to the character
3347 after the trailing colon. On failure, NULL will be returned;
3348 otherwise return the reloc_table_entry. */
3349
3350 static struct reloc_table_entry *
3351 find_reloc_table_entry (char **str)
3352 {
3353 unsigned int i;
3354 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3355 {
3356 int length = strlen (reloc_table[i].name);
3357
3358 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3359 && (*str)[length] == ':')
3360 {
3361 *str += (length + 1);
3362 return &reloc_table[i];
3363 }
3364 }
3365
3366 return NULL;
3367 }
3368
3369 /* Returns 0 if the relocation should never be forced,
3370 1 if the relocation must be forced, and -1 if either
3371 result is OK. */
3372
static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    /* GOT-, TLSDESC-, TLSGD-, TLSIE-, TLSLD- and TLSLE-related relocations
       (and the plain LO12/page-relative forms): their final values depend
       on decisions only the linker can make, so force them through.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* No fixed policy: the caller falls back to the generic rule.  */
      return -1;
    }
}
3471
3472 int
3473 aarch64_force_relocation (struct fix *fixp)
3474 {
3475 int res = aarch64_force_reloc (fixp->fx_r_type);
3476
3477 if (res == -1)
3478 return generic_force_reloc (fixp);
3479 return res;
3480 }
3481
/* Mode argument to parse_shift and parse_shifter_operand: selects which
   shift/extend operators are acceptable in the current context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3496
3497 /* Parse a <shift> operator on an AArch64 data processing instruction.
3498 Return TRUE on success; otherwise return FALSE. */
3499 static bool
3500 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3501 {
3502 const struct aarch64_name_value_pair *shift_op;
3503 enum aarch64_modifier_kind kind;
3504 expressionS exp;
3505 int exp_has_prefix;
3506 char *s = *str;
3507 char *p = s;
3508
3509 for (p = *str; ISALPHA (*p); p++)
3510 ;
3511
3512 if (p == *str)
3513 {
3514 set_syntax_error (_("shift expression expected"));
3515 return false;
3516 }
3517
3518 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3519
3520 if (shift_op == NULL)
3521 {
3522 set_syntax_error (_("shift operator expected"));
3523 return false;
3524 }
3525
3526 kind = aarch64_get_operand_modifier (shift_op);
3527
3528 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3529 {
3530 set_syntax_error (_("invalid use of 'MSL'"));
3531 return false;
3532 }
3533
3534 if (kind == AARCH64_MOD_MUL
3535 && mode != SHIFTED_MUL
3536 && mode != SHIFTED_MUL_VL)
3537 {
3538 set_syntax_error (_("invalid use of 'MUL'"));
3539 return false;
3540 }
3541
3542 switch (mode)
3543 {
3544 case SHIFTED_LOGIC_IMM:
3545 if (aarch64_extend_operator_p (kind))
3546 {
3547 set_syntax_error (_("extending shift is not permitted"));
3548 return false;
3549 }
3550 break;
3551
3552 case SHIFTED_ARITH_IMM:
3553 if (kind == AARCH64_MOD_ROR)
3554 {
3555 set_syntax_error (_("'ROR' shift is not permitted"));
3556 return false;
3557 }
3558 break;
3559
3560 case SHIFTED_LSL:
3561 if (kind != AARCH64_MOD_LSL)
3562 {
3563 set_syntax_error (_("only 'LSL' shift is permitted"));
3564 return false;
3565 }
3566 break;
3567
3568 case SHIFTED_MUL:
3569 if (kind != AARCH64_MOD_MUL)
3570 {
3571 set_syntax_error (_("only 'MUL' is permitted"));
3572 return false;
3573 }
3574 break;
3575
3576 case SHIFTED_MUL_VL:
3577 /* "MUL VL" consists of two separate tokens. Require the first
3578 token to be "MUL" and look for a following "VL". */
3579 if (kind == AARCH64_MOD_MUL)
3580 {
3581 skip_whitespace (p);
3582 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3583 {
3584 p += 2;
3585 kind = AARCH64_MOD_MUL_VL;
3586 break;
3587 }
3588 }
3589 set_syntax_error (_("only 'MUL VL' is permitted"));
3590 return false;
3591
3592 case SHIFTED_REG_OFFSET:
3593 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3594 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3595 {
3596 set_fatal_syntax_error
3597 (_("invalid shift for the register offset addressing mode"));
3598 return false;
3599 }
3600 break;
3601
3602 case SHIFTED_LSL_MSL:
3603 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3604 {
3605 set_syntax_error (_("invalid shift operator"));
3606 return false;
3607 }
3608 break;
3609
3610 default:
3611 abort ();
3612 }
3613
3614 /* Whitespace can appear here if the next thing is a bare digit. */
3615 skip_whitespace (p);
3616
3617 /* Parse shift amount. */
3618 exp_has_prefix = 0;
3619 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3620 exp.X_op = O_absent;
3621 else
3622 {
3623 if (is_immediate_prefix (*p))
3624 {
3625 p++;
3626 exp_has_prefix = 1;
3627 }
3628 aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
3629 }
3630 if (kind == AARCH64_MOD_MUL_VL)
3631 /* For consistency, give MUL VL the same shift amount as an implicit
3632 MUL #1. */
3633 operand->shifter.amount = 1;
3634 else if (exp.X_op == O_absent)
3635 {
3636 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3637 {
3638 set_syntax_error (_("missing shift amount"));
3639 return false;
3640 }
3641 operand->shifter.amount = 0;
3642 }
3643 else if (exp.X_op != O_constant)
3644 {
3645 set_syntax_error (_("constant shift amount required"));
3646 return false;
3647 }
3648 /* For parsing purposes, MUL #n has no inherent range. The range
3649 depends on the operand and will be checked by operand-specific
3650 routines. */
3651 else if (kind != AARCH64_MOD_MUL
3652 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3653 {
3654 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3655 return false;
3656 }
3657 else
3658 {
3659 operand->shifter.amount = exp.X_add_number;
3660 operand->shifter.amount_present = 1;
3661 }
3662
3663 operand->shifter.operator_present = 1;
3664 operand->shifter.kind = kind;
3665
3666 *str = p;
3667 return true;
3668 }
3669
3670 /* Parse a <shifter_operand> for a data processing instruction:
3671
3672 #<immediate>
3673 #<immediate>, LSL #imm
3674
3675 Validation of immediate operands is deferred to md_apply_fix.
3676
3677 Return TRUE on success; otherwise return FALSE. */
3678
static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only arithmetic and logical immediates are handled here.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Do not accept any shifter for logical immediate values.
     NOTE(review): if a comma follows the immediate but parse_shift then
     fails, the condition below is false and we fall through to the
     success return with P already past the comma -- presumably relying on
     the error set by parse_shift being reported later; confirm.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3711
3712 /* Parse a <shifter_operand> for a data processing instruction:
3713
3714 <Rm>
3715 <Rm>, <shift>
3716 #<immediate>
3717 #<immediate>, LSL #imm
3718
3719 where <shift> is handled by parse_shift above, and the last two
3720 cases are handled by the function above.
3721
3722 Validation of immediate operands is deferred to md_apply_fix.
3723
3724 Return TRUE on success; otherwise return FALSE. */
3725
3726 static bool
3727 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3728 enum parse_shift_mode mode)
3729 {
3730 const reg_entry *reg;
3731 aarch64_opnd_qualifier_t qualifier;
3732 enum aarch64_operand_class opd_class
3733 = aarch64_get_operand_class (operand->type);
3734
3735 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3736 if (reg)
3737 {
3738 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3739 {
3740 set_syntax_error (_("unexpected register in the immediate operand"));
3741 return false;
3742 }
3743
3744 if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR))
3745 {
3746 set_expected_reg_error (REG_TYPE_R_ZR, reg, 0);
3747 return false;
3748 }
3749
3750 operand->reg.regno = reg->number;
3751 operand->qualifier = qualifier;
3752
3753 /* Accept optional shift operation on register. */
3754 if (! skip_past_comma (str))
3755 return true;
3756
3757 if (! parse_shift (str, operand, mode))
3758 return false;
3759
3760 return true;
3761 }
3762 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3763 {
3764 set_syntax_error
3765 (_("integer register expected in the extended/shifted operand "
3766 "register"));
3767 return false;
3768 }
3769
3770 /* We have a shifted immediate variable. */
3771 return parse_shifter_operand_imm (str, operand, mode);
3772 }
3773
3774 /* Return TRUE on success; return FALSE otherwise. */
3775
3776 static bool
3777 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3778 enum parse_shift_mode mode)
3779 {
3780 char *p = *str;
3781
3782 /* Determine if we have the sequence of characters #: or just :
3783 coming next. If we do, then we check for a :rello: relocation
3784 modifier. If we don't, punt the whole lot to
3785 parse_shifter_operand. */
3786
3787 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3788 {
3789 struct reloc_table_entry *entry;
3790
3791 if (p[0] == '#')
3792 p += 2;
3793 else
3794 p++;
3795 *str = p;
3796
3797 /* Try to parse a relocation. Anything else is an error. */
3798 if (!(entry = find_reloc_table_entry (str)))
3799 {
3800 set_syntax_error (_("unknown relocation modifier"));
3801 return false;
3802 }
3803
3804 if (entry->add_type == 0)
3805 {
3806 set_syntax_error
3807 (_("this relocation modifier is not allowed on this instruction"));
3808 return false;
3809 }
3810
3811 /* Save str before we decompose it. */
3812 p = *str;
3813
3814 /* Next, we parse the expression. */
3815 if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
3816 REJECT_ABSENT))
3817 return false;
3818
3819 /* Record the relocation type (use the ADD variant here). */
3820 inst.reloc.type = entry->add_type;
3821 inst.reloc.pc_rel = entry->pc_rel;
3822
3823 /* If str is empty, we've reached the end, stop here. */
3824 if (**str == '\0')
3825 return true;
3826
3827 /* Otherwise, we have a shifted reloc modifier, so rewind to
3828 recover the variable name and continue parsing for the shifter. */
3829 *str = p;
3830 return parse_shifter_operand_imm (str, operand, mode);
3831 }
3832
3833 return parse_shifter_operand (str, operand, mode);
3834 }
3835
3836 /* Parse all forms of an address expression. Information is written
3837 to *OPERAND and/or inst.reloc.
3838
3839 The A64 instruction set has the following addressing modes:
3840
3841 Offset
3842 [base] // in SIMD ld/st structure
3843 [base{,#0}] // in ld/st exclusive
3844 [base{,#imm}]
3845 [base,Xm{,LSL #imm}]
3846 [base,Xm,SXTX {#imm}]
3847 [base,Wm,(S|U)XTW {#imm}]
3848 Pre-indexed
3849 [base]! // in ldraa/ldrab exclusive
3850 [base,#imm]!
3851 Post-indexed
3852 [base],#imm
3853 [base],Xm // in SIMD ld/st structure
3854 PC-relative (literal)
3855 label
3856 SVE:
3857 [base,#imm,MUL VL]
3858 [base,Zm.D{,LSL #imm}]
3859 [base,Zm.S,(S|U)XTW {#imm}]
3860 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3861 [Zn.S,#imm]
3862 [Zn.D,#imm]
3863 [Zn.S{, Xm}]
3864 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3865 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3866 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3867
3868 (As a convenience, the notation "=immediate" is permitted in conjunction
3869 with the pc-relative literal load instructions to automatically place an
3870 immediate value or symbolic address in a nearby literal pool and generate
3871 a hidden label which references it.)
3872
3873 Upon a successful parsing, the address structure in *OPERAND will be
3874 filled in the following way:
3875
3876 .base_regno = <base>
3877 .offset.is_reg // 1 if the offset is a register
3878 .offset.imm = <imm>
3879 .offset.regno = <Rm>
3880
3881 For different addressing modes defined in the A64 ISA:
3882
3883 Offset
3884 .pcrel=0; .preind=1; .postind=0; .writeback=0
3885 Pre-indexed
3886 .pcrel=0; .preind=1; .postind=0; .writeback=1
3887 Post-indexed
3888 .pcrel=0; .preind=0; .postind=1; .writeback=1
3889 PC-relative (literal)
3890 .pcrel=1; .preind=1; .postind=0; .writeback=0
3891
3892 The shift/extension information, if any, will be stored in .shifter.
3893 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3894 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3895 corresponding register.
3896
3897 BASE_TYPE says which types of base register should be accepted and
3898 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3899 is the type of shifter that is allowed for immediate offsets,
3900 or SHIFTED_NONE if none.
3901
3902 In all other respects, it is the caller's responsibility to check
3903 for addressing modes not supported by the instruction, and to set
3904 inst.reloc.type. */
3905
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  /* No '[' means a PC-relative form: "=immediate" or a label.  */
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* ADR uses the adr variant of the reloc; everything else here
	     is a load-literal.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Remember whether the base started with a letter so we can pick the
     more helpful of the two error messages below.  */
  bool alpha_base_p = ISALPHA (*p);
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      if (reg
	  && aarch64_check_reg_type (reg, REG_TYPE_R_SP)
	  && *base_qualifier == AARCH64_OPND_QLF_W)
	set_syntax_error (_("expected a 64-bit base register"));
      else if (alpha_base_p)
	set_syntax_error (_("invalid base register"));
      else
	set_syntax_error (_("expected a base register"));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifts require a 64-bit offset register, and the
		 offset must match the base's element size (with the SVE2
		 vector-plus-scalar [Zn.S, Xm] combination excepted).  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW requires a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      /* Plain immediate offset, possibly followed by a shifter
		 when the caller's IMM_SHIFT_MODE allows one.  */
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      /* Post-index by register ("[Xn],Xm") or by immediate expression.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* Treat bare [Rn] as [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4212
4213 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4214 on success. */
4215 static bool
4216 parse_address (char **str, aarch64_opnd_info *operand)
4217 {
4218 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4219 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4220 REG_TYPE_R64_SP, REG_TYPE_R_ZR, SHIFTED_NONE);
4221 }
4222
4223 /* Parse an address in which SVE vector registers and MUL VL are allowed.
4224 The arguments have the same meaning as for parse_address_main.
4225 Return TRUE on success. */
static bool
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  /* Same as parse_address but with SVE base/offset register types and the
     "MUL VL" immediate shifter allowed.  */
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
4235
4236 /* Parse a register X0-X30. The register must be 64-bit and register 31
4237 is unallocated. */
4238 static bool
4239 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4240 {
4241 const reg_entry *reg = parse_reg (str);
4242 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4243 {
4244 set_expected_reg_error (REG_TYPE_R_64, reg, 0);
4245 return false;
4246 }
4247 operand->reg.regno = reg->number;
4248 operand->qualifier = AARCH64_OPND_QLF_X;
4249 return true;
4250 }
4251
4252 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
4253 Return TRUE on success; otherwise return FALSE. */
4254 static bool
4255 parse_half (char **str, int *internal_fixup_p)
4256 {
4257 char *p = *str;
4258
4259 skip_past_char (&p, '#');
4260
4261 gas_assert (internal_fixup_p);
4262 *internal_fixup_p = 0;
4263
4264 if (*p == ':')
4265 {
4266 struct reloc_table_entry *entry;
4267
4268 /* Try to parse a relocation. Anything else is an error. */
4269 ++p;
4270
4271 if (!(entry = find_reloc_table_entry (&p)))
4272 {
4273 set_syntax_error (_("unknown relocation modifier"));
4274 return false;
4275 }
4276
4277 if (entry->movw_type == 0)
4278 {
4279 set_syntax_error
4280 (_("this relocation modifier is not allowed on this instruction"));
4281 return false;
4282 }
4283
4284 inst.reloc.type = entry->movw_type;
4285 }
4286 else
4287 *internal_fixup_p = 1;
4288
4289 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4290 return false;
4291
4292 *str = p;
4293 return true;
4294 }
4295
4296 /* Parse an operand for an ADRP instruction:
4297 ADRP <Xd>, <label>
4298 Return TRUE on success; otherwise return FALSE. */
4299
4300 static bool
4301 parse_adrp (char **str)
4302 {
4303 char *p;
4304
4305 p = *str;
4306 if (*p == ':')
4307 {
4308 struct reloc_table_entry *entry;
4309
4310 /* Try to parse a relocation. Anything else is an error. */
4311 ++p;
4312 if (!(entry = find_reloc_table_entry (&p)))
4313 {
4314 set_syntax_error (_("unknown relocation modifier"));
4315 return false;
4316 }
4317
4318 if (entry->adrp_type == 0)
4319 {
4320 set_syntax_error
4321 (_("this relocation modifier is not allowed on this instruction"));
4322 return false;
4323 }
4324
4325 inst.reloc.type = entry->adrp_type;
4326 }
4327 else
4328 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4329
4330 inst.reloc.pc_rel = 1;
4331 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4332 return false;
4333 *str = p;
4334 return true;
4335 }
4336
4337 /* Miscellaneous. */
4338
4339 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4340 of SIZE tokens in which index I gives the token for field value I,
4341 or is null if field value I is invalid. If the symbolic operand
4342 can also be given as a 0-based integer, REG_TYPE says which register
4343 names should be treated as registers rather than as symbolic immediates
4344 while parsing that integer. REG_TYPE is REG_TYPE_MAX otherwise.
4345
4346 Return true on success, moving *STR past the operand and storing the
4347 field value in *VAL. */
4348
4349 static int
4350 parse_enum_string (char **str, int64_t *val, const char *const *array,
4351 size_t size, aarch64_reg_type reg_type)
4352 {
4353 expressionS exp;
4354 char *p, *q;
4355 size_t i;
4356
4357 /* Match C-like tokens. */
4358 p = q = *str;
4359 while (ISALNUM (*q))
4360 q++;
4361
4362 for (i = 0; i < size; ++i)
4363 if (array[i]
4364 && strncasecmp (array[i], p, q - p) == 0
4365 && array[i][q - p] == 0)
4366 {
4367 *val = i;
4368 *str = q;
4369 return true;
4370 }
4371
4372 if (reg_type == REG_TYPE_MAX)
4373 return false;
4374
4375 if (!parse_immediate_expression (&p, &exp, reg_type))
4376 return false;
4377
4378 if (exp.X_op == O_constant
4379 && (uint64_t) exp.X_add_number < size)
4380 {
4381 *val = exp.X_add_number;
4382 *str = p;
4383 return true;
4384 }
4385
4386 /* Use the default error for this operand. */
4387 return false;
4388 }
4389
4390 /* Parse an option for a preload instruction. Returns the encoding for the
4391 option, or PARSE_FAIL. */
4392
4393 static int
4394 parse_pldop (char **str)
4395 {
4396 char *p, *q;
4397 const struct aarch64_name_value_pair *o;
4398
4399 p = q = *str;
4400 while (ISALNUM (*q))
4401 q++;
4402
4403 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4404 if (!o)
4405 return PARSE_FAIL;
4406
4407 *str = q;
4408 return o->value;
4409 }
4410
4411 /* Parse an option for a barrier instruction. Returns the encoding for the
4412 option, or PARSE_FAIL. */
4413
4414 static int
4415 parse_barrier (char **str)
4416 {
4417 char *p, *q;
4418 const struct aarch64_name_value_pair *o;
4419
4420 p = q = *str;
4421 while (ISALPHA (*q))
4422 q++;
4423
4424 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4425 if (!o)
4426 return PARSE_FAIL;
4427
4428 *str = q;
4429 return o->value;
4430 }
4431
4432 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4433 return 0 if successful. Otherwise return PARSE_FAIL. */
4434
static int
parse_barrier_psb (char **str,
		   const struct aarch64_name_value_pair ** hint_opt)
{
  char *p, *q;
  const struct aarch64_name_value_pair *o;

  /* Scan the alphabetic option name.  */
  p = q = *str;
  while (ISALPHA (*q))
    q++;

  o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
  if (!o)
    {
      set_fatal_syntax_error
	( _("unknown or missing option to PSB/TSB"));
      return PARSE_FAIL;
    }

  /* NOTE(review): 0x11 appears to be the hint-operand encoding of
     CSYNC in the hint-option table — confirm against the table in
     opcodes/aarch64-opc.c.  */
  if (o->value != 0x11)
    {
      /* PSB only accepts option name 'CSYNC'.  */
      set_syntax_error
	(_("the specified option is not accepted for PSB/TSB"));
      return PARSE_FAIL;
    }

  /* Success: consume the option and hand back the table entry.  */
  *str = q;
  *hint_opt = o;
  return 0;
}
4466
4467 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4468 return 0 if successful. Otherwise return PARSE_FAIL. */
4469
4470 static int
4471 parse_bti_operand (char **str,
4472 const struct aarch64_name_value_pair ** hint_opt)
4473 {
4474 char *p, *q;
4475 const struct aarch64_name_value_pair *o;
4476
4477 p = q = *str;
4478 while (ISALPHA (*q))
4479 q++;
4480
4481 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4482 if (!o)
4483 {
4484 set_fatal_syntax_error
4485 ( _("unknown option to BTI"));
4486 return PARSE_FAIL;
4487 }
4488
4489 switch (o->value)
4490 {
4491 /* Valid BTI operands. */
4492 case HINT_OPD_C:
4493 case HINT_OPD_J:
4494 case HINT_OPD_JC:
4495 break;
4496
4497 default:
4498 set_syntax_error
4499 (_("unknown option to BTI"));
4500 return PARSE_FAIL;
4501 }
4502
4503 *str = q;
4504 *hint_opt = o;
4505 return 0;
4506 }
4507
4508 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4509 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4510 on failure. Format:
4511
4512 REG_TYPE.QUALIFIER
4513
4514 Side effect: Update STR with current parse position of success.
4515
4516 FLAGS is as for parse_typed_reg. */
4517
4518 static const reg_entry *
4519 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4520 aarch64_opnd_qualifier_t *qualifier, unsigned int flags)
4521 {
4522 struct vector_type_el vectype;
4523 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4524 PTR_FULL_REG | flags);
4525 if (!reg)
4526 return NULL;
4527
4528 if (vectype.type == NT_invtype)
4529 *qualifier = AARCH64_OPND_QLF_NIL;
4530 else
4531 {
4532 *qualifier = vectype_to_qualifier (&vectype);
4533 if (*qualifier == AARCH64_OPND_QLF_NIL)
4534 return NULL;
4535 }
4536
4537 return reg;
4538 }
4539
4540 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4541
4542 #<imm>
4543 <imm>
4544
4545 Function return TRUE if immediate was found, or FALSE.
4546 */
4547 static bool
4548 parse_sme_immediate (char **str, int64_t *imm)
4549 {
4550 int64_t val;
4551 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4552 return false;
4553
4554 *imm = val;
4555 return true;
4556 }
4557
4558 /* Parse index with selection register and immediate offset:
4559
4560 [<Wv>, <imm>]
4561 [<Wv>, #<imm>]
4562
4563 Return true on success, populating OPND with the parsed index. */
4564
static bool
parse_sme_za_index (char **str, struct aarch64_indexed_za *opnd)
{
  const reg_entry *reg;

  if (!skip_past_char (str, '['))
    {
      set_syntax_error (_("expected '['"));
      return false;
    }

  /* The selection register, encoded in the 2-bit Rv field.  */
  reg = parse_reg (str);
  if (reg == NULL || reg->type != REG_TYPE_R_32)
    {
      set_syntax_error (_("expected a 32-bit selection register"));
      return false;
    }
  opnd->index.regno = reg->number;

  if (!skip_past_char (str, ','))
    {
      set_syntax_error (_("missing immediate offset"));
      return false;
    }

  if (!parse_sme_immediate (str, &opnd->index.imm))
    {
      set_syntax_error (_("expected a constant immediate offset"));
      return false;
    }

  /* Optional "first:last" range; the encoded count is the distance
     between the two offsets (COUNTM1 == last - first), which must be
     strictly positive.  */
  if (skip_past_char (str, ':'))
    {
      int64_t end;
      if (!parse_sme_immediate (str, &end))
	{
	  set_syntax_error (_("expected a constant immediate offset"));
	  return false;
	}
      if (end < opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is less than the"
			      " first offset"));
	  return false;
	}
      if (end == opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is equal to the"
			      " first offset"));
	  return false;
	}
      opnd->index.countm1 = (uint64_t) end - opnd->index.imm;
    }

  /* Optional trailing vector group size, VGx2 or VGx4.  */
  opnd->group_size = 0;
  if (skip_past_char (str, ','))
    {
      if (strncasecmp (*str, "vgx2", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 2;
	}
      else if (strncasecmp (*str, "vgx4", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 4;
	}
      else
	{
	  set_syntax_error (_("invalid vector group size"));
	  return false;
	}
    }

  if (!skip_past_char (str, ']'))
    {
      set_syntax_error (_("expected ']'"));
      return false;
    }

  return true;
}
4648
4649 /* Parse a register of type REG_TYPE that might have an element type
4650 qualifier and that is indexed by two values: a 32-bit register,
4651 followed by an immediate. The ranges of the register and the
4652 immediate vary by opcode and are checked in libopcodes.
4653
4654 Return true on success, populating OPND with information about
4655 the operand and setting QUALIFIER to the register qualifier.
4656
4657 Field format examples:
4658
     <Pm>.<T>[<Wv>, #<imm>]
4660 ZA[<Wv>, #<imm>]
4661 <ZAn><HV>.<T>[<Wv>, #<imm>]
4662
4663 FLAGS is as for parse_typed_reg. */
4664
4665 static bool
4666 parse_dual_indexed_reg (char **str, aarch64_reg_type reg_type,
4667 struct aarch64_indexed_za *opnd,
4668 aarch64_opnd_qualifier_t *qualifier,
4669 unsigned int flags)
4670 {
4671 const reg_entry *reg = parse_reg_with_qual (str, reg_type, qualifier, flags);
4672 if (!reg)
4673 return false;
4674
4675 opnd->v = aarch64_check_reg_type (reg, REG_TYPE_ZATV);
4676 opnd->regno = reg->number;
4677
4678 return parse_sme_za_index (str, opnd);
4679 }
4680
4681 /* Like parse_sme_za_hv_tiles_operand, but expect braces around the
4682 operand. */
4683
static bool
parse_sme_za_hv_tiles_operand_with_braces (char **str,
					   struct aarch64_indexed_za *opnd,
					   aarch64_opnd_qualifier_t *qualifier)
{
  if (!skip_past_char (str, '{'))
    {
      /* Parse whatever register is there so the error message can name
	 it; the consumed text does not matter on this failure path.  */
      set_expected_reglist_error (REG_TYPE_ZATHV, parse_reg (str));
      return false;
    }

  /* Exactly one ZA horizontal/vertical tile slice inside the braces.  */
  if (!parse_dual_indexed_reg (str, REG_TYPE_ZATHV, opnd, qualifier,
			       PTR_IN_REGLIST))
    return false;

  if (!skip_past_char (str, '}'))
    {
      set_syntax_error (_("expected '}'"));
      return false;
    }

  return true;
}
4707
4708 /* Parse list of up to eight 64-bit element tile names separated by commas in
4709 SME's ZERO instruction:
4710
4711 ZERO { <mask> }
4712
4713 Function returns <mask>:
4714
4715 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4716 */
4717 static int
4718 parse_sme_zero_mask(char **str)
4719 {
4720 char *q;
4721 int mask;
4722 aarch64_opnd_qualifier_t qualifier;
4723 unsigned int ptr_flags = PTR_IN_REGLIST;
4724
4725 mask = 0x00;
4726 q = *str;
4727 do
4728 {
4729 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA_ZAT,
4730 &qualifier, ptr_flags);
4731 if (!reg)
4732 return PARSE_FAIL;
4733
4734 if (reg->type == REG_TYPE_ZA)
4735 {
4736 if (qualifier != AARCH64_OPND_QLF_NIL)
4737 {
4738 set_syntax_error ("ZA should not have a size suffix");
4739 return PARSE_FAIL;
4740 }
4741 /* { ZA } is assembled as all-ones immediate. */
4742 mask = 0xff;
4743 }
4744 else
4745 {
4746 int regno = reg->number;
4747 if (qualifier == AARCH64_OPND_QLF_S_B)
4748 {
4749 /* { ZA0.B } is assembled as all-ones immediate. */
4750 mask = 0xff;
4751 }
4752 else if (qualifier == AARCH64_OPND_QLF_S_H)
4753 mask |= 0x55 << regno;
4754 else if (qualifier == AARCH64_OPND_QLF_S_S)
4755 mask |= 0x11 << regno;
4756 else if (qualifier == AARCH64_OPND_QLF_S_D)
4757 mask |= 0x01 << regno;
4758 else if (qualifier == AARCH64_OPND_QLF_S_Q)
4759 {
4760 set_syntax_error (_("ZA tile masks do not operate at .Q"
4761 " granularity"));
4762 return PARSE_FAIL;
4763 }
4764 else if (qualifier == AARCH64_OPND_QLF_NIL)
4765 {
4766 set_syntax_error (_("missing ZA tile size"));
4767 return PARSE_FAIL;
4768 }
4769 else
4770 {
4771 set_syntax_error (_("invalid ZA tile"));
4772 return PARSE_FAIL;
4773 }
4774 }
4775 ptr_flags |= PTR_GOOD_MATCH;
4776 }
4777 while (skip_past_char (&q, ','));
4778
4779 *str = q;
4780 return mask;
4781 }
4782
4783 /* Wraps in curly braces <mask> operand ZERO instruction:
4784
4785 ZERO { <mask> }
4786
4787 Function returns value of <mask> bit-field.
4788 */
4789 static int
4790 parse_sme_list_of_64bit_tiles (char **str)
4791 {
4792 int regno;
4793
4794 if (!skip_past_char (str, '{'))
4795 {
4796 set_syntax_error (_("expected '{'"));
4797 return PARSE_FAIL;
4798 }
4799
4800 /* Empty <mask> list is an all-zeros immediate. */
4801 if (!skip_past_char (str, '}'))
4802 {
4803 regno = parse_sme_zero_mask (str);
4804 if (regno == PARSE_FAIL)
4805 return PARSE_FAIL;
4806
4807 if (!skip_past_char (str, '}'))
4808 {
4809 set_syntax_error (_("expected '}'"));
4810 return PARSE_FAIL;
4811 }
4812 }
4813 else
4814 regno = 0x00;
4815
4816 return regno;
4817 }
4818
4819 /* Parse streaming mode operand for SMSTART and SMSTOP.
4820
4821 {SM | ZA}
4822
   Function returns 's' if SM or 'z' if ZA is parsed. Otherwise PARSE_FAIL.
4824 */
4825 static int
4826 parse_sme_sm_za (char **str)
4827 {
4828 char *p, *q;
4829
4830 p = q = *str;
4831 while (ISALPHA (*q))
4832 q++;
4833
4834 if ((q - p != 2)
4835 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4836 {
4837 set_syntax_error (_("expected SM or ZA operand"));
4838 return PARSE_FAIL;
4839 }
4840
4841 *str = q;
4842 return TOLOWER (p[0]);
4843 }
4844
4845 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4846 Returns the encoding for the option, or PARSE_FAIL.
4847
4848 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4849 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4850
4851 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4852 field, otherwise as a system register.
4853 */
4854
static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, truncating at the
     buffer size; hash-table keys are stored in lower case.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Field range checks: op0 is 2 bits, op1/op2 3 bits, Cn/Cm
	     4 bits each.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the 16-bit encoding used for system
	     registers: op0[15:14] op1[13:11] Cn[10:7] Cm[6:3] op2[2:0].  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* A known name: diagnose uses not supported by the selected
	 processor or deprecated names, but still return the value so
	 assembly can continue.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4918
4919 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4920 for the option, or NULL. */
4921
static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_ins_reg *o;

  /* Copy a lower-cased version of the name into BUF; the hash-table
     keys are stored in lower case.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return NULL;

  o = str_hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  /* Diagnose unsupported or deprecated names but still return the
     entry so assembly can continue.  */
  if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
					o->name, o->value, o->flags, 0))
    as_bad (_("selected processor does not support system register "
	      "name '%s'"), buf);
  if (aarch64_sys_reg_deprecated_p (o->flags))
    as_warn (_("system register name '%s' is deprecated and may be "
	       "removed in a future release"), buf);

  *str = q;
  return o;
}
4956 \f
/* Helper macros for parse_operands.  Each consumes input through the
   enclosing function's local STR and jumps to its "failure" label when
   the expected operand is not found.  */

#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of REGTYPE into the local variable REG.  */
#define po_reg_or_fail(regtype) do {				\
    reg = aarch64_reg_parse (&str, regtype, NULL);		\
    if (!reg)							\
      goto failure;						\
  } while (0)

/* Parse an integer or FP register of REG_TYPE, recording its number
   and inherent qualifier in INFO.  */
#define po_int_fp_reg_or_fail(reg_type) do {			\
    reg = parse_reg (&str);					\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_expected_reg_error (reg_type, reg, 0);		\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = inherent_reg_qualifier (reg);		\
  } while (0)

/* Parse a constant immediate into VAL with no range check ("nc").  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse an enumeration name from ARRAY into VAL, also accepting an
   equivalent in-range immediate.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* As po_enum_or_fail, but accept only the symbolic names.  */
#define po_strict_enum_or_fail(array) do {			\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), REG_TYPE_MAX))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR (typically a parse call) and fail if it is false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
5011 \f
5012 /* A primitive log calculator. */
5013
static inline unsigned int
get_log2 (unsigned int n)
{
  /* Number of right shifts needed to reduce N to 1, i.e. floor(log2(N))
     for N >= 1; returns 0 for N == 0 as well.  */
  unsigned int bits;

  for (bits = 0; n > 1; n >>= 1)
    bits++;
  return bits;
}
5025
/* Encode the 12-bit immediate of an add/sub-immediate instruction;
   imm12 occupies instruction bits [21:10].  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned int imm12_lsb = 10;

  return imm << imm12_lsb;
}
5032
/* Encode the shift-amount field of an add/sub-immediate instruction;
   the field occupies instruction bits starting at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned int shift_lsb = 22;

  return cnt << shift_lsb;
}
5039
5040
/* Encode the immediate of an ADR-class instruction: bits [1:0] of IMM
   form immlo (instruction bits [30:29]) and bits [20:2] form immhi
   (instruction bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;
  uint32_t immhi = (imm >> 2) & 0x7ffff;

  return (immlo << 29) | (immhi << 5);
}
5048
/* Encode the 16-bit immediate of a move-wide instruction; imm16
   occupies instruction bits [20:5].  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned int imm16_lsb = 5;

  return imm << imm16_lsb;
}
5055
/* Encode the 26-bit offset of an unconditional branch; imm26 occupies
   instruction bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x03ffffff;
}
5062
/* Encode the 19-bit offset of a conditional branch or compare-and-
   branch; imm19 occupies instruction bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
5069
/* Encode the 19-bit offset of a load-literal instruction; imm19
   occupies instruction bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
5076
/* Encode the 14-bit offset of a test-and-branch instruction; imm14
   occupies instruction bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fff) << 5;
}
5083
/* Encode the 16-bit immediate of SVC/HVC/SMC; imm16 occupies
   instruction bits [20:5].  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned int imm16_lsb = 5;

  return imm << imm16_lsb;
}
5090
/* Reencode add(s) to sub(s), or sub(s) to add(s), by flipping bit 30,
   which distinguishes the two in the add/sub encoding group.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ 0x40000000;
}
5097
/* Force a MOVZ/MOVN-group opcode to the MOVZ form by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000;
}
5103
/* Force a MOVZ/MOVN-group opcode to the MOVN form by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(uint32_t) 0x40000000;
}
5109
5110 /* Overall per-instruction processing. */
5111
5112 /* We need to be able to fix up arbitrary expressions in some statements.
5113 This is so that we can handle symbols that are an arbitrary distance from
5114 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5115 which returns part of an address in a form which will be valid for
5116 a data instruction. We do this by pushing the expression into a symbol
5117 in the expr_section, and creating a fix for that. */
5118
static fixS *
fix_new_aarch64 (fragS * frag,
		 int where,
		 short int size,
		 expressionS * exp,
		 int pc_rel,
		 int reloc)
{
  fixS *new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      /* These expression forms can be represented by a fixup directly.  */
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      /* Anything else is pushed into a symbol in the expr_section and
	 the fixup is created against that symbol instead.  */
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
			 pc_rel, reloc);
      break;
    }
  return new_fix;
}
5145 \f
5146 /* Diagnostics on operands errors. */
5147
/* By default, output the verbose operand error message; disabled by
   the -mno-verbose-error command-line option.  */
static int verbose_error_p = 1;
5151
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Indexed by
   enum aarch64_operand_error_kind, so the order here must match the
   enum definition.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_INVALID_VG_SIZE",
  "AARCH64_OPDE_REG_LIST_LENGTH",
  "AARCH64_OPDE_REG_LIST_STRIDE",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_OTHER_ERROR",
  "AARCH64_OPDE_INVALID_REGNO",
};
#endif /* DEBUG_AARCH64 */
5174
5175 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
5176
5177 When multiple errors of different kinds are found in the same assembly
5178 line, only the error of the highest severity will be picked up for
5179 issuing the diagnostics. */
5180
static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* Severity is simply the numeric order of the enum values; the
     asserts document the expected ranking so that a reordering of the
     enum in the opcode header is caught here.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VG_SIZE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_REG_LIST_LENGTH > AARCH64_OPDE_INVALID_VG_SIZE);
  gas_assert (AARCH64_OPDE_REG_LIST_STRIDE > AARCH64_OPDE_REG_LIST_LENGTH);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_INVALID_REGNO > AARCH64_OPDE_OTHER_ERROR);
  return lhs > rhs;
}
5201
5202 /* Helper routine to get the mnemonic name from the assembly instruction
5203 line; should only be called for the diagnosis purpose, as there is
5204 string copy operation involved, which may affect the runtime
5205 performance if used in elsewhere. */
5206
static const char*
get_mnemonic_name (const char *str)
{
  /* Static buffer: the returned pointer is only valid until the next
     call, which is acceptable for one-shot diagnostics.  */
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5230
5231 static void
5232 reset_aarch64_instruction (aarch64_instruction *instruction)
5233 {
5234 memset (instruction, '\0', sizeof (aarch64_instruction));
5235 instruction->reloc.type = BFD_RELOC_UNUSED;
5236 }
5237
5238 /* Data structures storing one user error in the assembly code related to
5239 operands. */
5240
/* One operand error recorded against a single opcode template.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Template the error relates to.  */
  aarch64_operand_error detail;		/* Kind, operand index, message.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of error records; TAIL is kept so the whole list
   can be spliced onto the free list in one step.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
5257
5258 /* Top-level data structure reporting user errors for the current line of
5259 the assembly code.
5260 The way md_assemble works is that all opcodes sharing the same mnemonic
5261 name are iterated to find a match to the assembly line. In this data
5262 structure, each of the such opcodes will have one operand_error_record
5263 allocated and inserted. In other words, excessive errors related with
5264 a single opcode are disregarded. */
operand_errors operand_error_report;

/* Free record nodes, recycled across assembly lines to avoid repeated
   allocation; threaded through their NEXT fields.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5269
5270 /* Initialize the data structure that stores the operand mismatch
5271 information on assembling one line of the assembly code. */
static void
init_operand_error_report (void)
{
  /* If the previous line left records behind, move the whole list onto
     the free list for reuse instead of freeing the nodes.  */
  if (operand_error_report.head != NULL)
    {
      gas_assert (operand_error_report.tail != NULL);
      /* Splice the old report in front of the existing free nodes.  */
      operand_error_report.tail->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = operand_error_report.head;
      operand_error_report.head = NULL;
      operand_error_report.tail = NULL;
      return;
    }
  gas_assert (operand_error_report.tail == NULL);
}
5286
5287 /* Return TRUE if some operand error has been recorded during the
5288 parsing of the current assembly line using the opcode *OPCODE;
5289 otherwise return FALSE. */
5290 static inline bool
5291 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5292 {
5293 operand_error_record *record = operand_error_report.head;
5294 return record && record->opcode == opcode;
5295 }
5296
5297 /* Add the error record *NEW_RECORD to operand_error_report. The record's
5298 OPCODE field is initialized with OPCODE.
5299 N.B. only one record for each opcode, i.e. the maximum of one error is
5300 recorded for each instruction template. */
5301
static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Reuse a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* RECORD is either the freshly inserted node or the existing head
     matching OPCODE; overwrite its details with the new error.  */
  record->detail = new_record->detail;
}
5348
5349 static inline void
5350 record_operand_error_info (const aarch64_opcode *opcode,
5351 aarch64_operand_error *error_info)
5352 {
5353 operand_error_record record;
5354 record.opcode = opcode;
5355 record.detail = *error_info;
5356 add_operand_error_record (&record);
5357 }
5358
5359 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5360 error message *ERROR, for operand IDX (count from 0). */
5361
static void
record_operand_error (const aarch64_opcode *opcode, int idx,
		      enum aarch64_operand_error_kind kind,
		      const char* error)
{
  aarch64_operand_error info;
  /* Zero the record first so that fields not set below (e.g. the DATA
     array) have a well-defined value.  */
  memset(&info, 0, sizeof (info));
  info.index = idx;
  info.kind = kind;
  info.error = error;
  info.non_fatal = false;
  record_operand_error_info (opcode, &info);
}
5375
5376 static void
5377 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5378 enum aarch64_operand_error_kind kind,
5379 const char* error, const int *extra_data)
5380 {
5381 aarch64_operand_error info;
5382 info.index = idx;
5383 info.kind = kind;
5384 info.error = error;
5385 info.data[0].i = extra_data[0];
5386 info.data[1].i = extra_data[1];
5387 info.data[2].i = extra_data[2];
5388 info.non_fatal = false;
5389 record_operand_error_info (opcode, &info);
5390 }
5391
5392 static void
5393 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5394 const char* error, int lower_bound,
5395 int upper_bound)
5396 {
5397 int data[3] = {lower_bound, upper_bound, 0};
5398 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5399 error, data);
5400 }
5401
5402 /* Remove the operand error record for *OPCODE. */
static void ATTRIBUTE_UNUSED
remove_operand_error_record (const aarch64_opcode *opcode)
{
  if (opcode_has_operand_error_p (opcode))
    {
      /* OPCODE's record, if present, is always the list head.  */
      operand_error_record* record = operand_error_report.head;
      gas_assert (record != NULL && operand_error_report.tail != NULL);
      operand_error_report.head = record->next;
      /* Return the node to the free list for reuse.  */
      record->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = record;
      if (operand_error_report.head == NULL)
	{
	  gas_assert (operand_error_report.tail == record);
	  operand_error_report.tail = NULL;
	}
    }
}
5420
5421 /* Given the instruction in *INSTR, return the index of the best matched
5422 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5423
5424 Return -1 if there is no qualifier sequence; return the first match
5425 if there is multiple matches found. */
5426
static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers in this sequence match the
	 instruction's.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Keep the first sequence with the highest match count.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5470
5471 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5472 corresponding operands in *INSTR. */
5473
5474 static inline void
5475 assign_qualifier_sequence (aarch64_inst *instr,
5476 const aarch64_opnd_qualifier_t *qualifiers)
5477 {
5478 int i = 0;
5479 int num_opnds = aarch64_num_of_operands (instr->opcode);
5480 gas_assert (num_opnds);
5481 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5482 instr->operands[i].qualifier = *qualifiers;
5483 }
5484
5485 /* Callback used by aarch64_print_operand to apply STYLE to the
5486 disassembler output created from FMT and ARGS. The STYLER object holds
5487 any required state. Must return a pointer to a string (created from FMT
5488 and ARGS) that will continue to be valid until the complete disassembled
5489 instruction has been printed.
5490
5491 We don't currently add any styling to the output of the disassembler as
5492 used within assembler error messages, and so STYLE is ignored here. A
5493 new string is allocated on the obstack help within STYLER and returned
5494 to the caller. */
5495
static const char *aarch64_apply_style
	(struct aarch64_styler *styler,
	 enum disassembler_style style ATTRIBUTE_UNUSED,
	 const char *fmt, va_list args)
{
  int res;
  char *ptr;
  struct obstack *stack = (struct obstack *) styler->state;
  va_list ap;

  /* Calculate the required space.  A copy of ARGS is needed because a
     va_list may only be traversed once.  */
  va_copy (ap, args);
  res = vsnprintf (NULL, 0, fmt, ap);
  va_end (ap);
  gas_assert (res >= 0);

  /* Allocate space on the obstack and format the result.  The obstack
     keeps the string alive until the full instruction is printed.  */
  ptr = (char *) obstack_alloc (stack, res + 1);
  res = vsnprintf (ptr, (res + 1), fmt, args);
  gas_assert (res >= 0);

  return ptr;
}
5519
5520 /* Print operands for the diagnosis purpose. */
5521
static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Styled fragments are allocated on CONTENT and freed at the end.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  obstack_free (&content, NULL);
}
5571
/* Send to stderr a string as information.  Prefixes the message with
   the current "file:line: " location (when known) and an "Info: " tag,
   then terminates it with a newline.  */

static void
output_info (const char *format, ...)
{
  unsigned int lineno;
  const char *fname;
  va_list ap;

  fname = as_where (&lineno);
  if (fname != NULL)
    {
      if (lineno != 0)
	fprintf (stderr, "%s:%u: ", fname, lineno);
      else
	fprintf (stderr, "%s: ", fname);
    }
  fprintf (stderr, _("Info: "));

  va_start (ap, format);
  vfprintf (stderr, format, ap);
  va_end (ap);

  (void) putc ('\n', stderr);
}
5595
/* See if the AARCH64_OPDE_SYNTAX_ERROR error described by DETAIL
   relates to registers or register lists.  If so, return a string that
   reports the error against "operand %d", otherwise return null.

   NOTE(review): the data[] slots appear to be flag words --
   data[0].i: kinds of token that would have been acceptable (possibly
   with SEF_DEFAULT_ERROR mixed in); data[1].i: parsing-context flags
   such as SEF_IN_REGLIST; data[2].i: the register type actually seen,
   if any.  Confirm against the code that records these errors.  */

static const char *
get_reg_error_message (const aarch64_operand_error *detail)
{
  /* Handle the case where we found a register that was expected
     to be in a register list outside of a register list.  */
  if ((detail->data[1].i & detail->data[2].i) != 0
      && (detail->data[1].i & SEF_IN_REGLIST) == 0)
    return _("missing braces at operand %d");

  /* If some opcodes expected a register, and we found a register,
     complain about the difference.  */
  if (detail->data[2].i)
    {
      unsigned int expected = (detail->data[1].i & SEF_IN_REGLIST
			       ? detail->data[1].i & ~SEF_IN_REGLIST
			       : detail->data[0].i & ~SEF_DEFAULT_ERROR);
      const char *msg = get_reg_expected_msg (expected, detail->data[2].i);
      if (!msg)
	msg = N_("unexpected register type at operand %d");
      return msg;
    }

  /* Handle the case where we got to the point of trying to parse a
     register within a register list, but didn't find a known register.  */
  if (detail->data[1].i & SEF_IN_REGLIST)
    {
      unsigned int expected = detail->data[1].i & ~SEF_IN_REGLIST;
      const char *msg = get_reg_expected_msg (expected, 0);
      if (!msg)
	msg = _("invalid register list at operand %d");
      return msg;
    }

  /* Punt if register-related problems weren't the only errors.  */
  if (detail->data[0].i & SEF_DEFAULT_ERROR)
    return NULL;

  /* Handle the case where the only acceptable things are registers.  */
  if (detail->data[1].i == 0)
    {
      const char *msg = get_reg_expected_msg (detail->data[0].i, 0);
      if (!msg)
	msg = _("expected a register at operand %d");
      return msg;
    }

  /* Handle the case where the only acceptable things are register lists,
     and there was no opening '{'.  */
  if (detail->data[0].i == 0)
    return _("expected '{' at operand %d");

  return _("expected a register or register list at operand %d");
}
5653
/* Output one operand error record.

   RECORD describes one failed match of the assembly line STR against
   an opcode template.  Report it through as_bad, or as_warn when the
   record is flagged non-fatal.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* Operand type the error refers to; NIL when no operand index was
     recorded.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;
  const char *msg = detail->error;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
      /* Prefer a register-specific diagnosis when one applies.  */
      if (!msg && idx >= 0)
	{
	  msg = get_reg_error_message (detail);
	  if (msg)
	    {
	      char *full_msg = xasprintf (msg, idx + 1);
	      handler (_("%s -- `%s'"), full_msg, str);
	      free (full_msg);
	      break;
	    }
	}
      /* Fall through.  */

    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (msg != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), msg, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     msg, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail again here; we only need the
	     intermediate representation filled in above.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_INVALID_REGNO:
      handler (_("%s%d-%s%d expected at operand %d -- `%s'"),
	       detail->data[0].s, detail->data[1].i,
	       detail->data[0].s, detail->data[2].i, idx + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0].i/data[1].i hold the inclusive lower/upper bounds; a
	 degenerate range means exactly one value is acceptable.  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 msg ? msg : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_INVALID_VG_SIZE:
      if (detail->data[0].i == 0)
	handler (_("unexpected vector group size at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("operand %d must have a vector group size of %d -- `%s'"),
		 idx + 1, detail->data[0].i, str);
      break;

    case AARCH64_OPDE_REG_LIST_LENGTH:
      /* data[0].i is a bitmask of acceptable list lengths: bit N set
	 means a list of N registers is valid (so 0x14 = 2 or 4).  */
      if (detail->data[0].i == (1 << 1))
	handler (_("expected a single-register list at operand %d -- `%s'"),
		 idx + 1, str);
      else if ((detail->data[0].i & -detail->data[0].i) == detail->data[0].i)
	/* Exactly one bit set: a single specific length is allowed.  */
	handler (_("expected a list of %d registers at operand %d -- `%s'"),
		 get_log2 (detail->data[0].i), idx + 1, str);
      else if (detail->data[0].i == 0x14)
	handler (_("expected a list of %d or %d registers at"
		   " operand %d -- `%s'"),
		 2, 4, idx + 1, str);
      else
	handler (_("invalid number of registers in the list"
		   " at operand %d -- `%s'"), idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST_STRIDE:
      /* Likewise, data[0].i is a bitmask of acceptable strides
	 (0x12 = 1 or 4, 0x102 = 1 or 8).  */
      if (detail->data[0].i == (1 << 1))
	handler (_("the register list must have a stride of %d"
		   " at operand %d -- `%s'"), 1, idx + 1, str);
      else if (detail->data[0].i == 0x12 || detail->data[0].i == 0x102)
	handler (_("the register list must have a stride of %d or %d"
		   " at operand %d -- `%s`"), 1,
		 detail->data[0].i == 0x12 ? 4 : 8, idx + 1, str);
      else
	handler (_("invalid register stride at operand %d -- `%s'"),
		 idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5896
5897 /* Return true if the presence of error A against an instruction means
5898 that error B should not be reported. This is only used as a first pass,
5899 to pick the kind of error that we should report. */
5900
5901 static bool
5902 better_error_p (operand_error_record *a, operand_error_record *b)
5903 {
5904 /* For errors reported during parsing, prefer errors that relate to
5905 later operands, since that implies that the earlier operands were
5906 syntactically valid.
5907
5908 For example, if we see a register R instead of an immediate in
5909 operand N, we'll report that as a recoverable "immediate operand
5910 required" error. This is because there is often another opcode
5911 entry that accepts a register operand N, and any errors about R
5912 should be reported against the register forms of the instruction.
5913 But if no such register form exists, the recoverable error should
5914 still win over a syntax error against operand N-1.
5915
5916 For these purposes, count an error reported at the end of the
5917 assembly string as equivalent to an error reported against the
5918 final operand. This means that opcode entries that expect more
5919 operands win over "unexpected characters following instruction". */
5920 if (a->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR
5921 && b->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR)
5922 {
5923 int a_index = (a->detail.index < 0
5924 ? aarch64_num_of_operands (a->opcode) - 1
5925 : a->detail.index);
5926 int b_index = (b->detail.index < 0
5927 ? aarch64_num_of_operands (b->opcode) - 1
5928 : b->detail.index);
5929 if (a_index != b_index)
5930 return a_index > b_index;
5931 }
5932 return operand_error_higher_severity_p (a->detail.kind, b->detail.kind);
5933 }
5934
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      if (curr->detail.kind == AARCH64_OPDE_SYNTAX_ERROR)
	{
	  DEBUG_TRACE ("\t%s [%x, %x, %x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i, curr->detail.data[1].i,
		       curr->detail.data[2].i);
	}
      else if (curr->detail.kind == AARCH64_OPDE_REG_LIST_LENGTH
	       || curr->detail.kind == AARCH64_OPDE_REG_LIST_STRIDE)
	{
	  DEBUG_TRACE ("\t%s [%x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i);
	}
      else
	{
	  DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
	}
      /* Keep the record that better_error_p judges best so far.  */
      if ((!non_fatal_only || curr->detail.non_fatal)
	  && (!record || better_error_p (curr, record)))
	record = curr;
    }

  kind = (record ? record->detail.kind : AARCH64_OPDE_NIL);
  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (!record || curr->detail.index > record->detail.index)
	record = curr;
      else if (curr->detail.index == record->detail.index
	       && !record->detail.error)
	{
	  if (curr->detail.error)
	    record = curr;
	  else if (kind == AARCH64_OPDE_SYNTAX_ERROR)
	    {
	      /* Merge the flag words from equivalent syntax errors so the
		 combined record describes every acceptable alternative.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      record->detail.data[1].i |= curr->detail.data[1].i;
	      record->detail.data[2].i |= curr->detail.data[2].i;
	      DEBUG_TRACE ("\t--> %s [%x, %x, %x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i, curr->detail.data[1].i,
			   curr->detail.data[2].i);
	    }
	  else if (kind == AARCH64_OPDE_REG_LIST_LENGTH
		   || kind == AARCH64_OPDE_REG_LIST_STRIDE)
	    {
	      /* Likewise merge the bitmask of acceptable lengths/strides.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      DEBUG_TRACE ("\t--> %s [%x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i);
	    }
	  /* Pick the variant with the closest match.  */
	  else if (kind == AARCH64_OPDE_INVALID_VARIANT
		   && record->detail.data[0].i > curr->detail.data[0].i)
	    record = curr;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (record);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
6070 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Store the four bytes lowest-first, independent of host byte order.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
6081
/* Read a 32-bit AArch64 instruction back from BUF, which holds it in
   little-endian order (the inverse of put_aarch64_insn).  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Assemble from the most significant byte down.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
6091
/* Emit the instruction currently held in the global INST into the
   output frag, creating a fix-up for its relocation if one is needed.
   NEW_INST, when non-null, is attached to the fix-up so the
   instruction can be re-encoded once the final value is known.  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room for one 4-byte instruction.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand and flags so md_apply_fix
	     can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Record the instruction for DWARF line-number information.  */
  dwarf2_emit_insn (INSN_SIZE);
}
6125
/* Link together opcodes of the same name.  */

/* One node in a singly-linked list of opcode templates that share a
   mnemonic, as stored in the aarch64_ops_hsh hash table.  */
struct templates
{
  const aarch64_opcode *opcode;	/* One candidate opcode entry.  */
  struct templates *next;	/* Next entry with the same mnemonic.  */
};

typedef struct templates templates;
6135
6136 static templates *
6137 lookup_mnemonic (const char *start, int len)
6138 {
6139 templates *templ = NULL;
6140
6141 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
6142 return templ;
6143 }
6144
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  BASE points to the beginning
   of the mnemonic, DOT points to the first '.' within the mnemonic
   (if any) and END points to the end of the mnemonic.

   As a side effect, sets inst.cond to the parsed condition code, or to
   COND_ALWAYS when the mnemonic carries no condition suffix.  */

static templates *
opcode_lookup (char *base, char *dot, char *end)
{
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* A trailing '.' with nothing after it can never match.  */
  if (dot == end)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (!cond)
	return 0;
      inst.cond = cond->value;
      len = dot - base;
    }
  else
    len = end - base;

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* 13 is the longest base mnemonic for which BASE plus the
	 two-character ".c" suffix still fits in CONDNAME (13 + 2 <= 15;
	 no NUL is needed since lookup_mnemonic takes a length).  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
6191
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.

   The default value encoded in OPCODE is stored into whichever field of
   *OPERAND is appropriate for the operand's type; unknown types are
   silently left untouched.  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element operands: the default selects the register.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      /* An omitted pattern implies "MUL #1" scaling.  */
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      /* No immediate given, so no relocation applies.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* The default indexes into the barrier-option table.  */
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      /* The default indexes into the hint-option table.  */
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6290
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   Validates that the relocation specified on a MOVZ/MOVN/MOVK is
   compatible with the instruction and its register width, and sets the
   implied shift amount (G0/G1/G2/G3 select bits 0-15/16-31/32-47/48-63,
   i.e. shifts of 0/16/32/48) on operand 1.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not sign-extend or use PC-relative/TLS-GD groups: those
     relocations only make sense for instructions that set the whole
     register.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  switch (inst.reloc.type)
    {
    /* G0 group: bits 0-15, no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1 group: bits 16-31.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2 group: bits 32-47, only meaningful for X registers.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* G3 group: bits 48-63, likewise 64-bit only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6392
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The concrete relocation is chosen by the access size implied by the
   qualifier of operand 1 (LDST8/16/32/64/128 for 1/2/4/8/16-byte
   accesses).  Returns BFD_RELOC_AARCH64_NONE, with a fatal syntax
   error recorded, when the qualifier's size exceeds what the
   relocation family supports.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows are the pseudo-reloc families in enum order (see below);
     columns are log2 of the access size in bytes (0..4).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1 carries no qualifier yet, deduce it from operand 0's.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_log2 (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS families have no 128-bit variant (see the NONE entries in
     the table above).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6480
6481 /* Check whether a register list REGINFO is valid. The registers have type
6482 REG_TYPE and must be numbered in increasing order (modulo the register
6483 bank size). They must have a consistent stride.
6484
6485 Return true if the list is valid, describing it in LIST if so. */
6486
6487 static bool
6488 reg_list_valid_p (uint32_t reginfo, struct aarch64_reglist *list,
6489 aarch64_reg_type reg_type)
6490 {
6491 uint32_t i, nb_regs, prev_regno, incr, mask;
6492 mask = reg_type_mask (reg_type);
6493
6494 nb_regs = 1 + (reginfo & 0x3);
6495 reginfo >>= 2;
6496 prev_regno = reginfo & 0x1f;
6497 incr = 1;
6498
6499 list->first_regno = prev_regno;
6500 list->num_regs = nb_regs;
6501
6502 for (i = 1; i < nb_regs; ++i)
6503 {
6504 uint32_t curr_regno, curr_incr;
6505 reginfo >>= 5;
6506 curr_regno = reginfo & 0x1f;
6507 curr_incr = (curr_regno - prev_regno) & mask;
6508 if (curr_incr == 0)
6509 return false;
6510 else if (i == 1)
6511 incr = curr_incr;
6512 else if (curr_incr != incr)
6513 return false;
6514 prev_regno = curr_regno;
6515 }
6516
6517 list->stride = incr;
6518 return true;
6519 }
6520
6521 /* Generic instruction operand parser. This does no encoding and no
6522 semantic validation; it merely squirrels values away in the inst
6523 structure. Returns TRUE or FALSE depending on whether the
6524 specified grammar matched. */
6525
6526 static bool
6527 parse_operands (char *str, const aarch64_opcode *opcode)
6528 {
6529 int i;
6530 char *backtrack_pos = 0;
6531 const enum aarch64_opnd *operands = opcode->operands;
6532 aarch64_reg_type imm_reg_type;
6533
6534 clear_error ();
6535 skip_whitespace (str);
6536
6537 if (AARCH64_CPU_HAS_FEATURE (*opcode->avariant, AARCH64_FEATURE_SME2))
6538 imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP_PN;
6539 else if (AARCH64_CPU_HAS_ANY_FEATURES (*opcode->avariant,
6540 AARCH64_FEATURE_SVE
6541 | AARCH64_FEATURE_SVE2))
6542 imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP;
6543 else
6544 imm_reg_type = REG_TYPE_R_ZR_BHSDQ_V;
6545
6546 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6547 {
6548 int64_t val;
6549 const reg_entry *reg;
6550 int comma_skipped_p = 0;
6551 struct vector_type_el vectype;
6552 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6553 aarch64_opnd_info *info = &inst.base.operands[i];
6554 aarch64_reg_type reg_type;
6555
6556 DEBUG_TRACE ("parse operand %d", i);
6557
6558 /* Assign the operand code. */
6559 info->type = operands[i];
6560
6561 if (optional_operand_p (opcode, i))
6562 {
6563 /* Remember where we are in case we need to backtrack. */
6564 gas_assert (!backtrack_pos);
6565 backtrack_pos = str;
6566 }
6567
6568 /* Expect comma between operands; the backtrack mechanism will take
6569 care of cases of omitted optional operand. */
6570 if (i > 0 && ! skip_past_char (&str, ','))
6571 {
6572 set_syntax_error (_("comma expected between operands"));
6573 goto failure;
6574 }
6575 else
6576 comma_skipped_p = 1;
6577
6578 switch (operands[i])
6579 {
6580 case AARCH64_OPND_Rd:
6581 case AARCH64_OPND_Rn:
6582 case AARCH64_OPND_Rm:
6583 case AARCH64_OPND_Rt:
6584 case AARCH64_OPND_Rt2:
6585 case AARCH64_OPND_Rs:
6586 case AARCH64_OPND_Ra:
6587 case AARCH64_OPND_Rt_LS64:
6588 case AARCH64_OPND_Rt_SYS:
6589 case AARCH64_OPND_PAIRREG:
6590 case AARCH64_OPND_SVE_Rm:
6591 po_int_fp_reg_or_fail (REG_TYPE_R_ZR);
6592
6593 /* In LS64 load/store instructions Rt register number must be even
6594 and <=22. */
6595 if (operands[i] == AARCH64_OPND_Rt_LS64)
6596 {
6597 /* We've already checked if this is valid register.
6598 This will check if register number (Rt) is not undefined for LS64
6599 instructions:
6600 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6601 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6602 {
6603 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6604 goto failure;
6605 }
6606 }
6607 break;
6608
6609 case AARCH64_OPND_Rd_SP:
6610 case AARCH64_OPND_Rn_SP:
6611 case AARCH64_OPND_Rt_SP:
6612 case AARCH64_OPND_SVE_Rn_SP:
6613 case AARCH64_OPND_Rm_SP:
6614 po_int_fp_reg_or_fail (REG_TYPE_R_SP);
6615 break;
6616
6617 case AARCH64_OPND_Rm_EXT:
6618 case AARCH64_OPND_Rm_SFT:
6619 po_misc_or_fail (parse_shifter_operand
6620 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6621 ? SHIFTED_ARITH_IMM
6622 : SHIFTED_LOGIC_IMM)));
6623 if (!info->shifter.operator_present)
6624 {
6625 /* Default to LSL if not present. Libopcodes prefers shifter
6626 kind to be explicit. */
6627 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6628 info->shifter.kind = AARCH64_MOD_LSL;
6629 /* For Rm_EXT, libopcodes will carry out further check on whether
6630 or not stack pointer is used in the instruction (Recall that
6631 "the extend operator is not optional unless at least one of
6632 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6633 }
6634 break;
6635
6636 case AARCH64_OPND_Fd:
6637 case AARCH64_OPND_Fn:
6638 case AARCH64_OPND_Fm:
6639 case AARCH64_OPND_Fa:
6640 case AARCH64_OPND_Ft:
6641 case AARCH64_OPND_Ft2:
6642 case AARCH64_OPND_Sd:
6643 case AARCH64_OPND_Sn:
6644 case AARCH64_OPND_Sm:
6645 case AARCH64_OPND_SVE_VZn:
6646 case AARCH64_OPND_SVE_Vd:
6647 case AARCH64_OPND_SVE_Vm:
6648 case AARCH64_OPND_SVE_Vn:
6649 po_int_fp_reg_or_fail (REG_TYPE_BHSDQ);
6650 break;
6651
6652 case AARCH64_OPND_SVE_Pd:
6653 case AARCH64_OPND_SVE_Pg3:
6654 case AARCH64_OPND_SVE_Pg4_5:
6655 case AARCH64_OPND_SVE_Pg4_10:
6656 case AARCH64_OPND_SVE_Pg4_16:
6657 case AARCH64_OPND_SVE_Pm:
6658 case AARCH64_OPND_SVE_Pn:
6659 case AARCH64_OPND_SVE_Pt:
6660 case AARCH64_OPND_SME_Pm:
6661 reg_type = REG_TYPE_P;
6662 goto vector_reg;
6663
6664 case AARCH64_OPND_SVE_Za_5:
6665 case AARCH64_OPND_SVE_Za_16:
6666 case AARCH64_OPND_SVE_Zd:
6667 case AARCH64_OPND_SVE_Zm_5:
6668 case AARCH64_OPND_SVE_Zm_16:
6669 case AARCH64_OPND_SVE_Zn:
6670 case AARCH64_OPND_SVE_Zt:
6671 case AARCH64_OPND_SME_Zm:
6672 reg_type = REG_TYPE_Z;
6673 goto vector_reg;
6674
6675 case AARCH64_OPND_SVE_PNd:
6676 case AARCH64_OPND_SVE_PNg4_10:
6677 case AARCH64_OPND_SVE_PNn:
6678 case AARCH64_OPND_SVE_PNt:
6679 case AARCH64_OPND_SME_PNd3:
6680 case AARCH64_OPND_SME_PNg3:
6681 case AARCH64_OPND_SME_PNn:
6682 reg_type = REG_TYPE_PN;
6683 goto vector_reg;
6684
6685 case AARCH64_OPND_Va:
6686 case AARCH64_OPND_Vd:
6687 case AARCH64_OPND_Vn:
6688 case AARCH64_OPND_Vm:
6689 reg_type = REG_TYPE_V;
6690 vector_reg:
6691 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6692 if (!reg)
6693 goto failure;
6694 if (vectype.defined & NTA_HASINDEX)
6695 goto failure;
6696
6697 info->reg.regno = reg->number;
6698 if ((reg_type == REG_TYPE_P
6699 || reg_type == REG_TYPE_PN
6700 || reg_type == REG_TYPE_Z)
6701 && vectype.type == NT_invtype)
6702 /* Unqualified P and Z registers are allowed in certain
6703 contexts. Rely on F_STRICT qualifier checking to catch
6704 invalid uses. */
6705 info->qualifier = AARCH64_OPND_QLF_NIL;
6706 else
6707 {
6708 info->qualifier = vectype_to_qualifier (&vectype);
6709 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6710 goto failure;
6711 }
6712 break;
6713
6714 case AARCH64_OPND_VdD1:
6715 case AARCH64_OPND_VnD1:
6716 reg = aarch64_reg_parse (&str, REG_TYPE_V, &vectype);
6717 if (!reg)
6718 goto failure;
6719 if (vectype.type != NT_d || vectype.index != 1)
6720 {
6721 set_fatal_syntax_error
6722 (_("the top half of a 128-bit FP/SIMD register is expected"));
6723 goto failure;
6724 }
6725 info->reg.regno = reg->number;
6726 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6727 here; it is correct for the purpose of encoding/decoding since
6728 only the register number is explicitly encoded in the related
6729 instructions, although this appears a bit hacky. */
6730 info->qualifier = AARCH64_OPND_QLF_S_D;
6731 break;
6732
6733 case AARCH64_OPND_SVE_Zm3_INDEX:
6734 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6735 case AARCH64_OPND_SVE_Zm3_19_INDEX:
6736 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6737 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6738 case AARCH64_OPND_SVE_Zm4_INDEX:
6739 case AARCH64_OPND_SVE_Zn_INDEX:
6740 case AARCH64_OPND_SME_Zm_INDEX1:
6741 case AARCH64_OPND_SME_Zm_INDEX2:
6742 case AARCH64_OPND_SME_Zm_INDEX3_1:
6743 case AARCH64_OPND_SME_Zm_INDEX3_2:
6744 case AARCH64_OPND_SME_Zm_INDEX3_10:
6745 case AARCH64_OPND_SME_Zm_INDEX4_1:
6746 case AARCH64_OPND_SME_Zm_INDEX4_10:
6747 case AARCH64_OPND_SME_Zn_INDEX1_16:
6748 case AARCH64_OPND_SME_Zn_INDEX2_15:
6749 case AARCH64_OPND_SME_Zn_INDEX2_16:
6750 case AARCH64_OPND_SME_Zn_INDEX3_14:
6751 case AARCH64_OPND_SME_Zn_INDEX3_15:
6752 case AARCH64_OPND_SME_Zn_INDEX4_14:
6753 reg_type = REG_TYPE_Z;
6754 goto vector_reg_index;
6755
6756 case AARCH64_OPND_Ed:
6757 case AARCH64_OPND_En:
6758 case AARCH64_OPND_Em:
6759 case AARCH64_OPND_Em16:
6760 case AARCH64_OPND_SM3_IMM2:
6761 reg_type = REG_TYPE_V;
6762 vector_reg_index:
6763 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6764 if (!reg)
6765 goto failure;
6766 if (!(vectype.defined & NTA_HASINDEX))
6767 goto failure;
6768
6769 if (reg->type == REG_TYPE_Z && vectype.type == NT_invtype)
6770 /* Unqualified Zn[index] is allowed in LUTI2 instructions. */
6771 info->qualifier = AARCH64_OPND_QLF_NIL;
6772 else
6773 {
6774 if (vectype.type == NT_invtype)
6775 goto failure;
6776 info->qualifier = vectype_to_qualifier (&vectype);
6777 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6778 goto failure;
6779 }
6780
6781 info->reglane.regno = reg->number;
6782 info->reglane.index = vectype.index;
6783 break;
6784
6785 case AARCH64_OPND_SVE_ZnxN:
6786 case AARCH64_OPND_SVE_ZtxN:
6787 case AARCH64_OPND_SME_Zdnx2:
6788 case AARCH64_OPND_SME_Zdnx4:
6789 case AARCH64_OPND_SME_Zmx2:
6790 case AARCH64_OPND_SME_Zmx4:
6791 case AARCH64_OPND_SME_Znx2:
6792 case AARCH64_OPND_SME_Znx4:
6793 case AARCH64_OPND_SME_Ztx2_STRIDED:
6794 case AARCH64_OPND_SME_Ztx4_STRIDED:
6795 reg_type = REG_TYPE_Z;
6796 goto vector_reg_list;
6797
6798 case AARCH64_OPND_SME_Pdx2:
6799 case AARCH64_OPND_SME_PdxN:
6800 reg_type = REG_TYPE_P;
6801 goto vector_reg_list;
6802
6803 case AARCH64_OPND_LVn:
6804 case AARCH64_OPND_LVt:
6805 case AARCH64_OPND_LVt_AL:
6806 case AARCH64_OPND_LEt:
6807 reg_type = REG_TYPE_V;
6808 vector_reg_list:
6809 if (reg_type == REG_TYPE_Z
6810 && get_opcode_dependent_value (opcode) == 1
6811 && *str != '{')
6812 {
6813 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6814 if (!reg)
6815 goto failure;
6816 info->reglist.first_regno = reg->number;
6817 info->reglist.num_regs = 1;
6818 info->reglist.stride = 1;
6819 }
6820 else
6821 {
6822 val = parse_vector_reg_list (&str, reg_type, &vectype);
6823 if (val == PARSE_FAIL)
6824 goto failure;
6825
6826 if (! reg_list_valid_p (val, &info->reglist, reg_type))
6827 {
6828 set_fatal_syntax_error (_("invalid register list"));
6829 goto failure;
6830 }
6831
6832 if ((int) vectype.width > 0 && *str != ',')
6833 {
6834 set_fatal_syntax_error
6835 (_("expected element type rather than vector type"));
6836 goto failure;
6837 }
6838 }
6839 if (operands[i] == AARCH64_OPND_LEt)
6840 {
6841 if (!(vectype.defined & NTA_HASINDEX))
6842 goto failure;
6843 info->reglist.has_index = 1;
6844 info->reglist.index = vectype.index;
6845 }
6846 else
6847 {
6848 if (vectype.defined & NTA_HASINDEX)
6849 goto failure;
6850 if (!(vectype.defined & NTA_HASTYPE))
6851 {
6852 if (reg_type == REG_TYPE_Z || reg_type == REG_TYPE_P)
6853 set_fatal_syntax_error (_("missing type suffix"));
6854 goto failure;
6855 }
6856 }
6857 info->qualifier = vectype_to_qualifier (&vectype);
6858 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6859 goto failure;
6860 break;
6861
6862 case AARCH64_OPND_CRn:
6863 case AARCH64_OPND_CRm:
6864 {
6865 char prefix = *(str++);
6866 if (prefix != 'c' && prefix != 'C')
6867 goto failure;
6868
6869 po_imm_nc_or_fail ();
6870 if (val > 15)
6871 {
6872 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6873 goto failure;
6874 }
6875 info->qualifier = AARCH64_OPND_QLF_CR;
6876 info->imm.value = val;
6877 break;
6878 }
6879
6880 case AARCH64_OPND_SHLL_IMM:
6881 case AARCH64_OPND_IMM_VLSR:
6882 po_imm_or_fail (1, 64);
6883 info->imm.value = val;
6884 break;
6885
6886 case AARCH64_OPND_CCMP_IMM:
6887 case AARCH64_OPND_SIMM5:
6888 case AARCH64_OPND_FBITS:
6889 case AARCH64_OPND_TME_UIMM16:
6890 case AARCH64_OPND_UIMM4:
6891 case AARCH64_OPND_UIMM4_ADDG:
6892 case AARCH64_OPND_UIMM10:
6893 case AARCH64_OPND_UIMM3_OP1:
6894 case AARCH64_OPND_UIMM3_OP2:
6895 case AARCH64_OPND_IMM_VLSL:
6896 case AARCH64_OPND_IMM:
6897 case AARCH64_OPND_IMM_2:
6898 case AARCH64_OPND_WIDTH:
6899 case AARCH64_OPND_SVE_INV_LIMM:
6900 case AARCH64_OPND_SVE_LIMM:
6901 case AARCH64_OPND_SVE_LIMM_MOV:
6902 case AARCH64_OPND_SVE_SHLIMM_PRED:
6903 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6904 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6905 case AARCH64_OPND_SME_SHRIMM4:
6906 case AARCH64_OPND_SME_SHRIMM5:
6907 case AARCH64_OPND_SVE_SHRIMM_PRED:
6908 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6909 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6910 case AARCH64_OPND_SVE_SIMM5:
6911 case AARCH64_OPND_SVE_SIMM5B:
6912 case AARCH64_OPND_SVE_SIMM6:
6913 case AARCH64_OPND_SVE_SIMM8:
6914 case AARCH64_OPND_SVE_UIMM3:
6915 case AARCH64_OPND_SVE_UIMM7:
6916 case AARCH64_OPND_SVE_UIMM8:
6917 case AARCH64_OPND_SVE_UIMM8_53:
6918 case AARCH64_OPND_IMM_ROT1:
6919 case AARCH64_OPND_IMM_ROT2:
6920 case AARCH64_OPND_IMM_ROT3:
6921 case AARCH64_OPND_SVE_IMM_ROT1:
6922 case AARCH64_OPND_SVE_IMM_ROT2:
6923 case AARCH64_OPND_SVE_IMM_ROT3:
6924 case AARCH64_OPND_CSSC_SIMM8:
6925 case AARCH64_OPND_CSSC_UIMM8:
6926 po_imm_nc_or_fail ();
6927 info->imm.value = val;
6928 break;
6929
6930 case AARCH64_OPND_SVE_AIMM:
6931 case AARCH64_OPND_SVE_ASIMM:
6932 po_imm_nc_or_fail ();
6933 info->imm.value = val;
6934 skip_whitespace (str);
6935 if (skip_past_comma (&str))
6936 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6937 else
6938 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6939 break;
6940
6941 case AARCH64_OPND_SVE_PATTERN:
6942 po_enum_or_fail (aarch64_sve_pattern_array);
6943 info->imm.value = val;
6944 break;
6945
6946 case AARCH64_OPND_SVE_PATTERN_SCALED:
6947 po_enum_or_fail (aarch64_sve_pattern_array);
6948 info->imm.value = val;
6949 if (skip_past_comma (&str)
6950 && !parse_shift (&str, info, SHIFTED_MUL))
6951 goto failure;
6952 if (!info->shifter.operator_present)
6953 {
6954 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6955 info->shifter.kind = AARCH64_MOD_MUL;
6956 info->shifter.amount = 1;
6957 }
6958 break;
6959
6960 case AARCH64_OPND_SVE_PRFOP:
6961 po_enum_or_fail (aarch64_sve_prfop_array);
6962 info->imm.value = val;
6963 break;
6964
6965 case AARCH64_OPND_UIMM7:
6966 po_imm_or_fail (0, 127);
6967 info->imm.value = val;
6968 break;
6969
6970 case AARCH64_OPND_IDX:
6971 case AARCH64_OPND_MASK:
6972 case AARCH64_OPND_BIT_NUM:
6973 case AARCH64_OPND_IMMR:
6974 case AARCH64_OPND_IMMS:
6975 po_imm_or_fail (0, 63);
6976 info->imm.value = val;
6977 break;
6978
6979 case AARCH64_OPND_IMM0:
6980 po_imm_nc_or_fail ();
6981 if (val != 0)
6982 {
6983 set_fatal_syntax_error (_("immediate zero expected"));
6984 goto failure;
6985 }
6986 info->imm.value = 0;
6987 break;
6988
6989 case AARCH64_OPND_FPIMM0:
6990 {
6991 int qfloat;
6992 bool res1 = false, res2 = false;
6993 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6994 it is probably not worth the effort to support it. */
6995 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6996 imm_reg_type))
6997 && (error_p ()
6998 || !(res2 = parse_constant_immediate (&str, &val,
6999 imm_reg_type))))
7000 goto failure;
7001 if ((res1 && qfloat == 0) || (res2 && val == 0))
7002 {
7003 info->imm.value = 0;
7004 info->imm.is_fp = 1;
7005 break;
7006 }
7007 set_fatal_syntax_error (_("immediate zero expected"));
7008 goto failure;
7009 }
7010
7011 case AARCH64_OPND_IMM_MOV:
7012 {
7013 char *saved = str;
7014 if (reg_name_p (str, REG_TYPE_R_ZR_SP)
7015 || reg_name_p (str, REG_TYPE_V))
7016 goto failure;
7017 str = saved;
7018 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
7019 GE_OPT_PREFIX, REJECT_ABSENT));
7020 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
7021 later. fix_mov_imm_insn will try to determine a machine
7022 instruction (MOVZ, MOVN or ORR) for it and will issue an error
7023 message if the immediate cannot be moved by a single
7024 instruction. */
7025 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7026 inst.base.operands[i].skip = 1;
7027 }
7028 break;
7029
7030 case AARCH64_OPND_SIMD_IMM:
7031 case AARCH64_OPND_SIMD_IMM_SFT:
7032 if (! parse_big_immediate (&str, &val, imm_reg_type))
7033 goto failure;
7034 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7035 /* addr_off_p */ 0,
7036 /* need_libopcodes_p */ 1,
7037 /* skip_p */ 1);
7038 /* Parse shift.
7039 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
7040 shift, we don't check it here; we leave the checking to
7041 the libopcodes (operand_general_constraint_met_p). By
7042 doing this, we achieve better diagnostics. */
7043 if (skip_past_comma (&str)
7044 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
7045 goto failure;
7046 if (!info->shifter.operator_present
7047 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
7048 {
7049 /* Default to LSL if not present. Libopcodes prefers shifter
7050 kind to be explicit. */
7051 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7052 info->shifter.kind = AARCH64_MOD_LSL;
7053 }
7054 break;
7055
7056 case AARCH64_OPND_FPIMM:
7057 case AARCH64_OPND_SIMD_FPIMM:
7058 case AARCH64_OPND_SVE_FPIMM8:
7059 {
7060 int qfloat;
7061 bool dp_p;
7062
7063 dp_p = double_precision_operand_p (&inst.base.operands[0]);
7064 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
7065 || !aarch64_imm_float_p (qfloat))
7066 {
7067 if (!error_p ())
7068 set_fatal_syntax_error (_("invalid floating-point"
7069 " constant"));
7070 goto failure;
7071 }
7072 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
7073 inst.base.operands[i].imm.is_fp = 1;
7074 }
7075 break;
7076
7077 case AARCH64_OPND_SVE_I1_HALF_ONE:
7078 case AARCH64_OPND_SVE_I1_HALF_TWO:
7079 case AARCH64_OPND_SVE_I1_ZERO_ONE:
7080 {
7081 int qfloat;
7082 bool dp_p;
7083
7084 dp_p = double_precision_operand_p (&inst.base.operands[0]);
7085 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
7086 {
7087 if (!error_p ())
7088 set_fatal_syntax_error (_("invalid floating-point"
7089 " constant"));
7090 goto failure;
7091 }
7092 inst.base.operands[i].imm.value = qfloat;
7093 inst.base.operands[i].imm.is_fp = 1;
7094 }
7095 break;
7096
7097 case AARCH64_OPND_LIMM:
7098 po_misc_or_fail (parse_shifter_operand (&str, info,
7099 SHIFTED_LOGIC_IMM));
7100 if (info->shifter.operator_present)
7101 {
7102 set_fatal_syntax_error
7103 (_("shift not allowed for bitmask immediate"));
7104 goto failure;
7105 }
7106 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7107 /* addr_off_p */ 0,
7108 /* need_libopcodes_p */ 1,
7109 /* skip_p */ 1);
7110 break;
7111
7112 case AARCH64_OPND_AIMM:
7113 if (opcode->op == OP_ADD)
7114 /* ADD may have relocation types. */
7115 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
7116 SHIFTED_ARITH_IMM));
7117 else
7118 po_misc_or_fail (parse_shifter_operand (&str, info,
7119 SHIFTED_ARITH_IMM));
7120 switch (inst.reloc.type)
7121 {
7122 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7123 info->shifter.amount = 12;
7124 break;
7125 case BFD_RELOC_UNUSED:
7126 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7127 if (info->shifter.kind != AARCH64_MOD_NONE)
7128 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
7129 inst.reloc.pc_rel = 0;
7130 break;
7131 default:
7132 break;
7133 }
7134 info->imm.value = 0;
7135 if (!info->shifter.operator_present)
7136 {
7137 /* Default to LSL if not present. Libopcodes prefers shifter
7138 kind to be explicit. */
7139 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7140 info->shifter.kind = AARCH64_MOD_LSL;
7141 }
7142 break;
7143
7144 case AARCH64_OPND_HALF:
7145 {
7146 /* #<imm16> or relocation. */
7147 int internal_fixup_p;
7148 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
7149 if (internal_fixup_p)
7150 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7151 skip_whitespace (str);
7152 if (skip_past_comma (&str))
7153 {
7154 /* {, LSL #<shift>} */
7155 if (! aarch64_gas_internal_fixup_p ())
7156 {
7157 set_fatal_syntax_error (_("can't mix relocation modifier "
7158 "with explicit shift"));
7159 goto failure;
7160 }
7161 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
7162 }
7163 else
7164 inst.base.operands[i].shifter.amount = 0;
7165 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
7166 inst.base.operands[i].imm.value = 0;
7167 if (! process_movw_reloc_info ())
7168 goto failure;
7169 }
7170 break;
7171
7172 case AARCH64_OPND_EXCEPTION:
7173 case AARCH64_OPND_UNDEFINED:
7174 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
7175 imm_reg_type));
7176 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7177 /* addr_off_p */ 0,
7178 /* need_libopcodes_p */ 0,
7179 /* skip_p */ 1);
7180 break;
7181
7182 case AARCH64_OPND_NZCV:
7183 {
7184 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
7185 if (nzcv != NULL)
7186 {
7187 str += 4;
7188 info->imm.value = nzcv->value;
7189 break;
7190 }
7191 po_imm_or_fail (0, 15);
7192 info->imm.value = val;
7193 }
7194 break;
7195
7196 case AARCH64_OPND_COND:
7197 case AARCH64_OPND_COND1:
7198 {
7199 char *start = str;
7200 do
7201 str++;
7202 while (ISALPHA (*str));
7203 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
7204 if (info->cond == NULL)
7205 {
7206 set_syntax_error (_("invalid condition"));
7207 goto failure;
7208 }
7209 else if (operands[i] == AARCH64_OPND_COND1
7210 && (info->cond->value & 0xe) == 0xe)
7211 {
7212 /* Do not allow AL or NV. */
7213 set_default_error ();
7214 goto failure;
7215 }
7216 }
7217 break;
7218
7219 case AARCH64_OPND_ADDR_ADRP:
7220 po_misc_or_fail (parse_adrp (&str));
7221 /* Clear the value as operand needs to be relocated. */
7222 info->imm.value = 0;
7223 break;
7224
7225 case AARCH64_OPND_ADDR_PCREL14:
7226 case AARCH64_OPND_ADDR_PCREL19:
7227 case AARCH64_OPND_ADDR_PCREL21:
7228 case AARCH64_OPND_ADDR_PCREL26:
7229 po_misc_or_fail (parse_address (&str, info));
7230 if (!info->addr.pcrel)
7231 {
7232 set_syntax_error (_("invalid pc-relative address"));
7233 goto failure;
7234 }
7235 if (inst.gen_lit_pool
7236 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
7237 {
7238 /* Only permit "=value" in the literal load instructions.
7239 The literal will be generated by programmer_friendly_fixup. */
7240 set_syntax_error (_("invalid use of \"=immediate\""));
7241 goto failure;
7242 }
7243 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
7244 {
7245 set_syntax_error (_("unrecognized relocation suffix"));
7246 goto failure;
7247 }
7248 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
7249 {
7250 info->imm.value = inst.reloc.exp.X_add_number;
7251 inst.reloc.type = BFD_RELOC_UNUSED;
7252 }
7253 else
7254 {
7255 info->imm.value = 0;
7256 if (inst.reloc.type == BFD_RELOC_UNUSED)
7257 switch (opcode->iclass)
7258 {
7259 case compbranch:
7260 case condbranch:
7261 /* e.g. CBZ or B.COND */
7262 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7263 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
7264 break;
7265 case testbranch:
7266 /* e.g. TBZ */
7267 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
7268 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
7269 break;
7270 case branch_imm:
7271 /* e.g. B or BL */
7272 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
7273 inst.reloc.type =
7274 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
7275 : BFD_RELOC_AARCH64_JUMP26;
7276 break;
7277 case loadlit:
7278 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7279 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
7280 break;
7281 case pcreladdr:
7282 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
7283 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
7284 break;
7285 default:
7286 gas_assert (0);
7287 abort ();
7288 }
7289 inst.reloc.pc_rel = 1;
7290 }
7291 break;
7292
7293 case AARCH64_OPND_ADDR_SIMPLE:
7294 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7295 {
7296 /* [<Xn|SP>{, #<simm>}] */
7297 char *start = str;
7298 /* First use the normal address-parsing routines, to get
7299 the usual syntax errors. */
7300 po_misc_or_fail (parse_address (&str, info));
7301 if (info->addr.pcrel || info->addr.offset.is_reg
7302 || !info->addr.preind || info->addr.postind
7303 || info->addr.writeback)
7304 {
7305 set_syntax_error (_("invalid addressing mode"));
7306 goto failure;
7307 }
7308
7309 /* Then retry, matching the specific syntax of these addresses. */
7310 str = start;
7311 po_char_or_fail ('[');
7312 po_reg_or_fail (REG_TYPE_R64_SP);
7313 /* Accept optional ", #0". */
7314 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7315 && skip_past_char (&str, ','))
7316 {
7317 skip_past_char (&str, '#');
7318 if (! skip_past_char (&str, '0'))
7319 {
7320 set_fatal_syntax_error
7321 (_("the optional immediate offset can only be 0"));
7322 goto failure;
7323 }
7324 }
7325 po_char_or_fail (']');
7326 break;
7327 }
7328
7329 case AARCH64_OPND_ADDR_REGOFF:
7330 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7331 po_misc_or_fail (parse_address (&str, info));
7332 regoff_addr:
7333 if (info->addr.pcrel || !info->addr.offset.is_reg
7334 || !info->addr.preind || info->addr.postind
7335 || info->addr.writeback)
7336 {
7337 set_syntax_error (_("invalid addressing mode"));
7338 goto failure;
7339 }
7340 if (!info->shifter.operator_present)
7341 {
7342 /* Default to LSL if not present. Libopcodes prefers shifter
7343 kind to be explicit. */
7344 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7345 info->shifter.kind = AARCH64_MOD_LSL;
7346 }
7347 /* Qualifier to be deduced by libopcodes. */
7348 break;
7349
7350 case AARCH64_OPND_ADDR_SIMM7:
7351 po_misc_or_fail (parse_address (&str, info));
7352 if (info->addr.pcrel || info->addr.offset.is_reg
7353 || (!info->addr.preind && !info->addr.postind))
7354 {
7355 set_syntax_error (_("invalid addressing mode"));
7356 goto failure;
7357 }
7358 if (inst.reloc.type != BFD_RELOC_UNUSED)
7359 {
7360 set_syntax_error (_("relocation not allowed"));
7361 goto failure;
7362 }
7363 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7364 /* addr_off_p */ 1,
7365 /* need_libopcodes_p */ 1,
7366 /* skip_p */ 0);
7367 break;
7368
7369 case AARCH64_OPND_ADDR_SIMM9:
7370 case AARCH64_OPND_ADDR_SIMM9_2:
7371 case AARCH64_OPND_ADDR_SIMM11:
7372 case AARCH64_OPND_ADDR_SIMM13:
7373 po_misc_or_fail (parse_address (&str, info));
7374 if (info->addr.pcrel || info->addr.offset.is_reg
7375 || (!info->addr.preind && !info->addr.postind)
7376 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7377 && info->addr.writeback))
7378 {
7379 set_syntax_error (_("invalid addressing mode"));
7380 goto failure;
7381 }
7382 if (inst.reloc.type != BFD_RELOC_UNUSED)
7383 {
7384 set_syntax_error (_("relocation not allowed"));
7385 goto failure;
7386 }
7387 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7388 /* addr_off_p */ 1,
7389 /* need_libopcodes_p */ 1,
7390 /* skip_p */ 0);
7391 break;
7392
7393 case AARCH64_OPND_ADDR_SIMM10:
7394 case AARCH64_OPND_ADDR_OFFSET:
7395 po_misc_or_fail (parse_address (&str, info));
7396 if (info->addr.pcrel || info->addr.offset.is_reg
7397 || !info->addr.preind || info->addr.postind)
7398 {
7399 set_syntax_error (_("invalid addressing mode"));
7400 goto failure;
7401 }
7402 if (inst.reloc.type != BFD_RELOC_UNUSED)
7403 {
7404 set_syntax_error (_("relocation not allowed"));
7405 goto failure;
7406 }
7407 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7408 /* addr_off_p */ 1,
7409 /* need_libopcodes_p */ 1,
7410 /* skip_p */ 0);
7411 break;
7412
7413 case AARCH64_OPND_ADDR_UIMM12:
7414 po_misc_or_fail (parse_address (&str, info));
7415 if (info->addr.pcrel || info->addr.offset.is_reg
7416 || !info->addr.preind || info->addr.writeback)
7417 {
7418 set_syntax_error (_("invalid addressing mode"));
7419 goto failure;
7420 }
7421 if (inst.reloc.type == BFD_RELOC_UNUSED)
7422 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7423 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7424 || (inst.reloc.type
7425 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7426 || (inst.reloc.type
7427 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7428 || (inst.reloc.type
7429 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7430 || (inst.reloc.type
7431 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7432 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7433 /* Leave qualifier to be determined by libopcodes. */
7434 break;
7435
7436 case AARCH64_OPND_SIMD_ADDR_POST:
7437 /* [<Xn|SP>], <Xm|#<amount>> */
7438 po_misc_or_fail (parse_address (&str, info));
7439 if (!info->addr.postind || !info->addr.writeback)
7440 {
7441 set_syntax_error (_("invalid addressing mode"));
7442 goto failure;
7443 }
7444 if (!info->addr.offset.is_reg)
7445 {
7446 if (inst.reloc.exp.X_op == O_constant)
7447 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7448 else
7449 {
7450 set_fatal_syntax_error
7451 (_("writeback value must be an immediate constant"));
7452 goto failure;
7453 }
7454 }
7455 /* No qualifier. */
7456 break;
7457
7458 case AARCH64_OPND_SME_SM_ZA:
7459 /* { SM | ZA } */
7460 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7461 {
7462 set_syntax_error (_("unknown or missing PSTATE field name"));
7463 goto failure;
7464 }
7465 info->reg.regno = val;
7466 break;
7467
7468 case AARCH64_OPND_SME_PnT_Wm_imm:
7469 if (!parse_dual_indexed_reg (&str, REG_TYPE_P,
7470 &info->indexed_za, &qualifier, 0))
7471 goto failure;
7472 info->qualifier = qualifier;
7473 break;
7474
7475 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7476 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7477 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7478 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7479 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7480 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7481 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7482 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7483 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7484 case AARCH64_OPND_SVE_ADDR_RI_U6:
7485 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7486 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7487 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7488 /* [X<n>{, #imm, MUL VL}]
7489 [X<n>{, #imm}]
7490 but recognizing SVE registers. */
7491 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7492 &offset_qualifier));
7493 if (base_qualifier != AARCH64_OPND_QLF_X)
7494 {
7495 set_syntax_error (_("invalid addressing mode"));
7496 goto failure;
7497 }
7498 sve_regimm:
7499 if (info->addr.pcrel || info->addr.offset.is_reg
7500 || !info->addr.preind || info->addr.writeback)
7501 {
7502 set_syntax_error (_("invalid addressing mode"));
7503 goto failure;
7504 }
7505 if (inst.reloc.type != BFD_RELOC_UNUSED
7506 || inst.reloc.exp.X_op != O_constant)
7507 {
7508 /* Make sure this has priority over
7509 "invalid addressing mode". */
7510 set_fatal_syntax_error (_("constant offset required"));
7511 goto failure;
7512 }
7513 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7514 break;
7515
7516 case AARCH64_OPND_SVE_ADDR_R:
7517 /* [<Xn|SP>{, <R><m>}]
7518 but recognizing SVE registers. */
7519 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7520 &offset_qualifier));
7521 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7522 {
7523 offset_qualifier = AARCH64_OPND_QLF_X;
7524 info->addr.offset.is_reg = 1;
7525 info->addr.offset.regno = 31;
7526 }
7527 else if (base_qualifier != AARCH64_OPND_QLF_X
7528 || offset_qualifier != AARCH64_OPND_QLF_X)
7529 {
7530 set_syntax_error (_("invalid addressing mode"));
7531 goto failure;
7532 }
7533 goto regoff_addr;
7534
7535 case AARCH64_OPND_SVE_ADDR_RR:
7536 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7537 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7538 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7539 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7540 case AARCH64_OPND_SVE_ADDR_RX:
7541 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7542 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7543 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7544 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7545 but recognizing SVE registers. */
7546 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7547 &offset_qualifier));
7548 if (base_qualifier != AARCH64_OPND_QLF_X
7549 || offset_qualifier != AARCH64_OPND_QLF_X)
7550 {
7551 set_syntax_error (_("invalid addressing mode"));
7552 goto failure;
7553 }
7554 goto regoff_addr;
7555
7556 case AARCH64_OPND_SVE_ADDR_RZ:
7557 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7558 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7559 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7560 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7561 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7562 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7563 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7564 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7565 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7566 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7567 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7568 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7569 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7570 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7571 &offset_qualifier));
7572 if (base_qualifier != AARCH64_OPND_QLF_X
7573 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7574 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7575 {
7576 set_syntax_error (_("invalid addressing mode"));
7577 goto failure;
7578 }
7579 info->qualifier = offset_qualifier;
7580 goto regoff_addr;
7581
7582 case AARCH64_OPND_SVE_ADDR_ZX:
7583 /* [Zn.<T>{, <Xm>}]. */
7584 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7585 &offset_qualifier));
7586 /* Things to check:
7587 base_qualifier either S_S or S_D
7588 offset_qualifier must be X
7589 */
7590 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7591 && base_qualifier != AARCH64_OPND_QLF_S_D)
7592 || offset_qualifier != AARCH64_OPND_QLF_X)
7593 {
7594 set_syntax_error (_("invalid addressing mode"));
7595 goto failure;
7596 }
7597 info->qualifier = base_qualifier;
7598 if (!info->addr.offset.is_reg || info->addr.pcrel
7599 || !info->addr.preind || info->addr.writeback
7600 || info->shifter.operator_present != 0)
7601 {
7602 set_syntax_error (_("invalid addressing mode"));
7603 goto failure;
7604 }
7605 info->shifter.kind = AARCH64_MOD_LSL;
7606 break;
7607
7608
7609 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7610 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7611 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7612 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7613 /* [Z<n>.<T>{, #imm}] */
7614 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7615 &offset_qualifier));
7616 if (base_qualifier != AARCH64_OPND_QLF_S_S
7617 && base_qualifier != AARCH64_OPND_QLF_S_D)
7618 {
7619 set_syntax_error (_("invalid addressing mode"));
7620 goto failure;
7621 }
7622 info->qualifier = base_qualifier;
7623 goto sve_regimm;
7624
7625 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7626 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7627 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7628 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7629 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7630
7631 We don't reject:
7632
7633 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7634
7635 here since we get better error messages by leaving it to
7636 the qualifier checking routines. */
7637 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7638 &offset_qualifier));
7639 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7640 && base_qualifier != AARCH64_OPND_QLF_S_D)
7641 || offset_qualifier != base_qualifier)
7642 {
7643 set_syntax_error (_("invalid addressing mode"));
7644 goto failure;
7645 }
7646 info->qualifier = base_qualifier;
7647 goto regoff_addr;
7648
7649 case AARCH64_OPND_SYSREG:
7650 {
7651 uint32_t sysreg_flags;
7652 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7653 &sysreg_flags)) == PARSE_FAIL)
7654 {
7655 set_syntax_error (_("unknown or missing system register name"));
7656 goto failure;
7657 }
7658 inst.base.operands[i].sysreg.value = val;
7659 inst.base.operands[i].sysreg.flags = sysreg_flags;
7660 break;
7661 }
7662
7663 case AARCH64_OPND_PSTATEFIELD:
7664 {
7665 uint32_t sysreg_flags;
7666 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7667 &sysreg_flags)) == PARSE_FAIL)
7668 {
7669 set_syntax_error (_("unknown or missing PSTATE field name"));
7670 goto failure;
7671 }
7672 inst.base.operands[i].pstatefield = val;
7673 inst.base.operands[i].sysreg.flags = sysreg_flags;
7674 break;
7675 }
7676
7677 case AARCH64_OPND_SYSREG_IC:
7678 inst.base.operands[i].sysins_op =
7679 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7680 goto sys_reg_ins;
7681
7682 case AARCH64_OPND_SYSREG_DC:
7683 inst.base.operands[i].sysins_op =
7684 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7685 goto sys_reg_ins;
7686
7687 case AARCH64_OPND_SYSREG_AT:
7688 inst.base.operands[i].sysins_op =
7689 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7690 goto sys_reg_ins;
7691
7692 case AARCH64_OPND_SYSREG_SR:
7693 inst.base.operands[i].sysins_op =
7694 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7695 goto sys_reg_ins;
7696
7697 case AARCH64_OPND_SYSREG_TLBI:
7698 inst.base.operands[i].sysins_op =
7699 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7700 sys_reg_ins:
7701 if (inst.base.operands[i].sysins_op == NULL)
7702 {
7703 set_fatal_syntax_error ( _("unknown or missing operation name"));
7704 goto failure;
7705 }
7706 break;
7707
7708 case AARCH64_OPND_BARRIER:
7709 case AARCH64_OPND_BARRIER_ISB:
7710 val = parse_barrier (&str);
7711 if (val != PARSE_FAIL
7712 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7713 {
7714 /* ISB only accepts options name 'sy'. */
7715 set_syntax_error
7716 (_("the specified option is not accepted in ISB"));
7717 /* Turn off backtrack as this optional operand is present. */
7718 backtrack_pos = 0;
7719 goto failure;
7720 }
7721 if (val != PARSE_FAIL
7722 && operands[i] == AARCH64_OPND_BARRIER)
7723 {
7724 /* Regular barriers accept options CRm (C0-C15).
7725 DSB nXS barrier variant accepts values > 15. */
7726 if (val < 0 || val > 15)
7727 {
7728 set_syntax_error (_("the specified option is not accepted in DSB"));
7729 goto failure;
7730 }
7731 }
7732 /* This is an extension to accept a 0..15 immediate. */
7733 if (val == PARSE_FAIL)
7734 po_imm_or_fail (0, 15);
7735 info->barrier = aarch64_barrier_options + val;
7736 break;
7737
7738 case AARCH64_OPND_BARRIER_DSB_NXS:
7739 val = parse_barrier (&str);
7740 if (val != PARSE_FAIL)
7741 {
7742 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7743 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7744 {
7745 set_syntax_error (_("the specified option is not accepted in DSB"));
7746 /* Turn off backtrack as this optional operand is present. */
7747 backtrack_pos = 0;
7748 goto failure;
7749 }
7750 }
7751 else
7752 {
7753 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7754 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7755 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7756 goto failure;
7757 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7758 {
7759 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7760 goto failure;
7761 }
7762 }
7763 /* Option index is encoded as 2-bit value in val<3:2>. */
7764 val = (val >> 2) - 4;
7765 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7766 break;
7767
7768 case AARCH64_OPND_PRFOP:
7769 val = parse_pldop (&str);
7770 /* This is an extension to accept a 0..31 immediate. */
7771 if (val == PARSE_FAIL)
7772 po_imm_or_fail (0, 31);
7773 inst.base.operands[i].prfop = aarch64_prfops + val;
7774 break;
7775
7776 case AARCH64_OPND_RPRFMOP:
7777 po_enum_or_fail (aarch64_rprfmop_array);
7778 info->imm.value = val;
7779 break;
7780
7781 case AARCH64_OPND_BARRIER_PSB:
7782 val = parse_barrier_psb (&str, &(info->hint_option));
7783 if (val == PARSE_FAIL)
7784 goto failure;
7785 break;
7786
7787 case AARCH64_OPND_SME_ZT0:
7788 po_reg_or_fail (REG_TYPE_ZT0);
7789 break;
7790
7791 case AARCH64_OPND_SME_ZT0_INDEX:
7792 reg = aarch64_reg_parse (&str, REG_TYPE_ZT0, &vectype);
7793 if (!reg || vectype.type != NT_invtype)
7794 goto failure;
7795 if (!(vectype.defined & NTA_HASINDEX))
7796 {
7797 set_syntax_error (_("missing register index"));
7798 goto failure;
7799 }
7800 info->imm.value = vectype.index;
7801 break;
7802
7803 case AARCH64_OPND_SME_ZT0_LIST:
7804 if (*str != '{')
7805 {
7806 set_expected_reglist_error (REG_TYPE_ZT0, parse_reg (&str));
7807 goto failure;
7808 }
7809 str++;
7810 if (!parse_typed_reg (&str, REG_TYPE_ZT0, &vectype, PTR_IN_REGLIST))
7811 goto failure;
7812 if (*str != '}')
7813 {
7814 set_syntax_error (_("expected '}' after ZT0"));
7815 goto failure;
7816 }
7817 str++;
7818 break;
7819
7820 case AARCH64_OPND_SME_PNn3_INDEX1:
7821 case AARCH64_OPND_SME_PNn3_INDEX2:
7822 reg = aarch64_reg_parse (&str, REG_TYPE_PN, &vectype);
7823 if (!reg)
7824 goto failure;
7825 if (!(vectype.defined & NTA_HASINDEX))
7826 {
7827 set_syntax_error (_("missing register index"));
7828 goto failure;
7829 }
7830 info->reglane.regno = reg->number;
7831 info->reglane.index = vectype.index;
7832 if (vectype.type == NT_invtype)
7833 info->qualifier = AARCH64_OPND_QLF_NIL;
7834 else
7835 info->qualifier = vectype_to_qualifier (&vectype);
7836 break;
7837
7838 case AARCH64_OPND_BTI_TARGET:
7839 val = parse_bti_operand (&str, &(info->hint_option));
7840 if (val == PARSE_FAIL)
7841 goto failure;
7842 break;
7843
7844 case AARCH64_OPND_SME_ZAda_2b:
7845 case AARCH64_OPND_SME_ZAda_3b:
7846 reg = parse_reg_with_qual (&str, REG_TYPE_ZAT, &qualifier, 0);
7847 if (!reg)
7848 goto failure;
7849 info->reg.regno = reg->number;
7850 info->qualifier = qualifier;
7851 break;
7852
7853 case AARCH64_OPND_SME_ZA_HV_idx_src:
7854 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
7855 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7856 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
7857 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7858 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr
7859 ? !parse_sme_za_hv_tiles_operand_with_braces (&str,
7860 &info->indexed_za,
7861 &qualifier)
7862 : !parse_dual_indexed_reg (&str, REG_TYPE_ZATHV,
7863 &info->indexed_za, &qualifier, 0))
7864 goto failure;
7865 info->qualifier = qualifier;
7866 break;
7867
7868 case AARCH64_OPND_SME_list_of_64bit_tiles:
7869 val = parse_sme_list_of_64bit_tiles (&str);
7870 if (val == PARSE_FAIL)
7871 goto failure;
7872 info->imm.value = val;
7873 break;
7874
7875 case AARCH64_OPND_SME_ZA_array_off1x4:
7876 case AARCH64_OPND_SME_ZA_array_off2x2:
7877 case AARCH64_OPND_SME_ZA_array_off2x4:
7878 case AARCH64_OPND_SME_ZA_array_off3_0:
7879 case AARCH64_OPND_SME_ZA_array_off3_5:
7880 case AARCH64_OPND_SME_ZA_array_off3x2:
7881 case AARCH64_OPND_SME_ZA_array_off4:
7882 if (!parse_dual_indexed_reg (&str, REG_TYPE_ZA,
7883 &info->indexed_za, &qualifier, 0))
7884 goto failure;
7885 info->qualifier = qualifier;
7886 break;
7887
7888 case AARCH64_OPND_SME_VLxN_10:
7889 case AARCH64_OPND_SME_VLxN_13:
7890 po_strict_enum_or_fail (aarch64_sme_vlxn_array);
7891 info->imm.value = val;
7892 break;
7893
7894 case AARCH64_OPND_MOPS_ADDR_Rd:
7895 case AARCH64_OPND_MOPS_ADDR_Rs:
7896 po_char_or_fail ('[');
7897 if (!parse_x0_to_x30 (&str, info))
7898 goto failure;
7899 po_char_or_fail (']');
7900 po_char_or_fail ('!');
7901 break;
7902
7903 case AARCH64_OPND_MOPS_WB_Rn:
7904 if (!parse_x0_to_x30 (&str, info))
7905 goto failure;
7906 po_char_or_fail ('!');
7907 break;
7908
7909 default:
7910 as_fatal (_("unhandled operand code %d"), operands[i]);
7911 }
7912
7913 /* If we get here, this operand was successfully parsed. */
7914 inst.base.operands[i].present = 1;
7915 continue;
7916
7917 failure:
7918 /* The parse routine should already have set the error, but in case
7919 not, set a default one here. */
7920 if (! error_p ())
7921 set_default_error ();
7922
7923 if (! backtrack_pos)
7924 goto parse_operands_return;
7925
7926 {
7927 /* We reach here because this operand is marked as optional, and
7928 either no operand was supplied or the operand was supplied but it
7929 was syntactically incorrect. In the latter case we report an
7930 error. In the former case we perform a few more checks before
7931 dropping through to the code to insert the default operand. */
7932
7933 char *tmp = backtrack_pos;
7934 char endchar = END_OF_INSN;
7935
7936 if (i != (aarch64_num_of_operands (opcode) - 1))
7937 endchar = ',';
7938 skip_past_char (&tmp, ',');
7939
7940 if (*tmp != endchar)
7941 /* The user has supplied an operand in the wrong format. */
7942 goto parse_operands_return;
7943
7944 /* Make sure there is not a comma before the optional operand.
7945 For example the fifth operand of 'sys' is optional:
7946
7947 sys #0,c0,c0,#0, <--- wrong
7948 sys #0,c0,c0,#0 <--- correct. */
7949 if (comma_skipped_p && i && endchar == END_OF_INSN)
7950 {
7951 set_fatal_syntax_error
7952 (_("unexpected comma before the omitted optional operand"));
7953 goto parse_operands_return;
7954 }
7955 }
7956
7957 /* Reaching here means we are dealing with an optional operand that is
7958 omitted from the assembly line. */
7959 gas_assert (optional_operand_p (opcode, i));
7960 info->present = 0;
7961 process_omitted_operand (operands[i], opcode, i, info);
7962
7963 /* Try again, skipping the optional operand at backtrack_pos. */
7964 str = backtrack_pos;
7965 backtrack_pos = 0;
7966
7967 /* Clear any error record after the omitted optional operand has been
7968 successfully handled. */
7969 clear_error ();
7970 }
7971
7972 /* Check if we have parsed all the operands. */
7973 if (*str != '\0' && ! error_p ())
7974 {
7975 /* Set I to the index of the last present operand; this is
7976 for the purpose of diagnostics. */
7977 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7978 ;
7979 set_fatal_syntax_error
7980 (_("unexpected characters following instruction"));
7981 }
7982
7983 parse_operands_return:
7984
7985 if (error_p ())
7986 {
7987 inst.parsing_error.index = i;
7988 DEBUG_TRACE ("parsing FAIL: %s - %s",
7989 operand_mismatch_kind_names[inst.parsing_error.kind],
7990 inst.parsing_error.error);
7991 /* Record the operand error properly; this is useful when there
7992 are multiple instruction templates for a mnemonic name, so that
7993 later on, we can select the error that most closely describes
7994 the problem. */
7995 record_operand_error_info (opcode, &inst.parsing_error);
7996 return false;
7997 }
7998 else
7999 {
8000 DEBUG_TRACE ("parsing SUCCESS");
8001 return true;
8002 }
8003 }
8004
/* Perform some programmer-friendly fix-ups on the instruction while
   keeping libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */
8009
static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  /* Dispatch on the instruction class; only a few classes need any
     fixing up.  */
  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register limits the bit number to 0-31.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  /* Accept the Wn spelling by rewriting the qualifier to the X
	     form used for encoding.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  /* Literal pool entry size follows the destination register,
	     except LDRSW always loads a 4-byte literal.  */
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
8112
8113 /* Check for loads and stores that will cause unpredictable behavior. */
8114
static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 Bit 22 of the encoding distinguishes the load form here.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  /* Bit 22 set: the load-exclusive forms.  */
          if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.
		 Bit 21 set marks the pair form.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
	            /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
	            /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
8208
8209 static void
8210 force_automatic_sequence_close (void)
8211 {
8212 struct aarch64_segment_info_type *tc_seg_info;
8213
8214 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
8215 if (tc_seg_info->insn_sequence.instr)
8216 {
8217 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
8218 _("previous `%s' sequence has not been closed"),
8219 tc_seg_info->insn_sequence.instr->opcode->name);
8220 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
8221 }
8222 }
8223
8224 /* A wrapper function to interface with libopcodes on encoding and
8225 record the error message if there is any.
8226
8227 Return TRUE on success; otherwise return FALSE. */
8228
8229 static bool
8230 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
8231 aarch64_insn *code)
8232 {
8233 aarch64_operand_error error_info;
8234 memset (&error_info, '\0', sizeof (error_info));
8235 error_info.kind = AARCH64_OPDE_NIL;
8236 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
8237 && !error_info.non_fatal)
8238 return true;
8239
8240 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
8241 record_operand_error_info (opcode, &error_info);
8242 return error_info.non_fatal;
8243 }
8244
8245 #ifdef DEBUG_AARCH64
8246 static inline void
8247 dump_opcode_operands (const aarch64_opcode *opcode)
8248 {
8249 int i = 0;
8250 while (opcode->operands[i] != AARCH64_OPND_NIL)
8251 {
8252 aarch64_verbose ("\t\t opnd%d: %s", i,
8253 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
8254 ? aarch64_get_operand_name (opcode->operands[i])
8255 : aarch64_get_operand_desc (opcode->operands[i]));
8256 ++i;
8257 }
8258 }
8259 #endif /* DEBUG_AARCH64 */
8260
8261 /* This is the guts of the machine-dependent assembler. STR points to a
8262 machine dependent instruction. This function is supposed to emit
8263 the frags/bytes it assembles to. */
8264
void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  /* Start with no relocation pending for this instruction.  */
  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  Remember the first '.' for condition
     suffix handling in opcode_lookup.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* A dotless mnemonic may actually be a register alias definition.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the parsed condition across the instruction reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; on success the instruction (or a copy
	 carrying relocation info) is emitted and we are done.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!aarch64_cpu_supports_inst_p (cpu_variant, inst_base))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset state before trying the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8415
8416 /* Various frobbings of labels and their addresses. */
8417
void
aarch64_start_line_hook (void)
{
  /* Forget the most recently seen label at the start of each new
     input line.  */
  last_label_seen = NULL;
}
8423
void
aarch64_frob_label (symbolS * sym)
{
  /* Record SYM as the most recently defined label (consulted by
     md_assemble) and emit DWARF line information for it.  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8431
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have an automatic instruction sequence still
     open; if so, warn and close it.  */
  force_automatic_sequence_close ();
}
8438
8439 int
8440 aarch64_data_in_code (void)
8441 {
8442 if (startswith (input_line_pointer + 1, "data:"))
8443 {
8444 *input_line_pointer = '/';
8445 input_line_pointer += 5;
8446 *input_line_pointer = 0;
8447 return 1;
8448 }
8449
8450 return 0;
8451 }
8452
/* Canonicalize NAME in place by stripping a trailing "/data" marker,
   then return NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  /* Only strip when something precedes the "/data" suffix.  */
  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8463 \f
8464 /* Table of all register names defined by default. The user can
8465 define additional names with .req. Note that all register names
8466 should appear in both upper and lowercase variants. Some registers
8467 also have mixed-case names. */
8468
/* REGDEF defines a primary register name; REGDEF_ALIAS defines an
   alternative spelling (note the differing final initializer —
   presumably a "canonical name" flag; confirm against struct
   reg_entry).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
/* REGSET16 defines names for register numbers 0-15; REGSET16S is the
   same with a suffix S appended to each name.  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET16S(p,s,t) \
  REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
  REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
  REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
  REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
/* REGSET31 covers 0-30 only, so that the integer register sets can
   name number 31 separately (sp/wsp and xzr/wzr below); REGSET adds
   number 31 with the regular name.  */
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  Note: number 31 is context-dependent (SP or
     ZR) and is defined separately below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  /* Procedure-call-standard aliases for x16/x17/x29/x30.  */
  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, ZR_32), REGDEF (WZR, 31, ZR_32),
  REGDEF (xzr, 31, ZR_64), REGDEF (XZR, 31, ZR_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, V), REGSET (V, V),

  /* SVE vector registers.  */
  REGSET (z, Z), REGSET (Z, Z),

  /* SVE predicate(-as-mask) registers.  */
  REGSET16 (p, P), REGSET16 (P, P),

  /* SVE predicate-as-counter registers.  */
  REGSET16 (pn, PN), REGSET16 (PN, PN),

  /* SME ZA.  We model this as a register because it acts syntactically
     like ZA0H, supporting qualifier suffixes and indexing.  */
  REGDEF (za, 0, ZA), REGDEF (ZA, 0, ZA),

  /* SME ZA tile registers.  */
  REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),

  /* SME ZA tile registers (horizontal slice).  */
  REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),

  /* SME ZA tile registers (vertical slice).  */
  REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV),

  /* SME2 ZT0.  */
  REGDEF (zt0, 0, ZT0), REGDEF (ZT0, 0, ZT0)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
8558
/* Single-bit flag values used to build the NZCV immediates below.
   An upper-case letter means the flag is set, lower-case means clear.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four condition flags into a 4-bit value with N as the most
   significant bit, matching the nzcv immediate field layout.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All 16 spellings of the NZCV flag-set operand, one per combination
   of set/clear flags.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

/* The flag macros are only needed for the table above; drop them so the
   single-letter names do not pollute the rest of the file.  */
#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8596 \f
8597 /* MD interface: bits in the object file. */
8598
8599 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8600 for use in the a.out file, and stores them in the array pointed to by buf.
8601 This knows about the endian-ness of the target machine and does
8602 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8603 2 (short) and 4 (long) Floating numbers are put out as a series of
8604 LITTLENUMS (shorts, here at least). */
8605
8606 void
8607 md_number_to_chars (char *buf, valueT val, int n)
8608 {
8609 if (target_big_endian)
8610 number_to_chars_bigendian (buf, val, n);
8611 else
8612 number_to_chars_littleendian (buf, val, n);
8613 }
8614
8615 /* MD interface: Sections. */
8616
8617 /* Estimate the size of a frag before relaxing. Assume everything fits in
8618 4 bytes. */
8619
8620 int
8621 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8622 {
8623 fragp->fr_var = 4;
8624 return 4;
8625 }
8626
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No target-specific rounding is applied; the size is returned
     unchanged.  */
  return size;
}
8634
8635 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
8636 of an rs_align_code fragment.
8637
8638 Here we fill the frag with the appropriate info for padding the
8639 output stream. The resulting frag will consist of a fixed (fr_fix)
8640 and of a repeating (fr_var) part.
8641
8642 The fixed content is always emitted before the repeating content and
8643 these two parts are used as follows in constructing the output:
8644 - the fixed part will be used to align to a valid instruction word
8645 boundary, in case that we start at a misaligned address; as no
8646 executable instruction can live at the misaligned location, we
8647 simply fill with zeros;
8648 - the variable part will be used to cover the remaining padding and
8649 we fill using the AArch64 NOP instruction.
8650
8651 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding back to a valid
8653 instruction alignment and exactly 4 bytes to store the NOP pattern. */
8654
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding this frag must cover, and where the padding starts.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Zero bytes needed to reach a 4-byte instruction boundary; these go
     into the fixed part of the frag.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The remaining padding repeats the NOP pattern (the variable part).  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8692
8693 /* Perform target specific initialisation of a frag.
8694 Note - despite the name this initialisation is not done when the frag
8695 is created, but only when its type is assigned. A frag can be created
8696 and used a long time before its type is set, so beware of assuming that
8697 this initialisation is performed first. */
8698
8699 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* No frag-specific bookkeeping is needed for non-ELF targets.  */
}
8705
8706 #else /* OBJ_ELF is defined. */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state ($x/$d) appropriate for the frag type.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
8738
8739 /* Whether SFrame stack trace info is supported. */
8740
8741 bool
8742 aarch64_support_sframe_p (void)
8743 {
8744 /* At this time, SFrame is supported for aarch64 only. */
8745 return (aarch64_abi == AARCH64_ABI_LP64);
8746 }
8747
/* Specify if RA tracking is needed.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  /* The return address register is always tracked for SFrame on
     AArch64.  */
  return true;
}
8755
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  /* RA tracking is always on (see aarch64_sframe_ra_tracking_p), so no
     fixed offset exists; report the invalid sentinel.  */
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8764
8765 /* Get the abi/arch indentifier for SFrame. */
8766
8767 unsigned char
8768 aarch64_sframe_get_abi_arch (void)
8769 {
8770 unsigned char sframe_abi_arch = 0;
8771
8772 if (aarch64_support_sframe_p ())
8773 {
8774 sframe_abi_arch = target_big_endian
8775 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8776 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8777 }
8778
8779 return sframe_abi_arch;
8780 }
8781
8782 #endif /* OBJ_ELF */
8783 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* On function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8791
8792 /* Convert REGNAME to a DWARF-2 register number. */
8793
8794 int
8795 tc_aarch64_regname_to_dw2regnum (char *regname)
8796 {
8797 const reg_entry *reg = parse_reg (&regname);
8798 if (reg == NULL)
8799 return -1;
8800
8801 switch (reg->type)
8802 {
8803 case REG_TYPE_SP_32:
8804 case REG_TYPE_SP_64:
8805 case REG_TYPE_R_32:
8806 case REG_TYPE_R_64:
8807 return reg->number;
8808
8809 case REG_TYPE_FP_B:
8810 case REG_TYPE_FP_H:
8811 case REG_TYPE_FP_S:
8812 case REG_TYPE_FP_D:
8813 case REG_TYPE_FP_Q:
8814 return reg->number + 64;
8815
8816 default:
8817 break;
8818 }
8819 return -1;
8820 }
8821
8822 /* Implement DWARF2_ADDR_SIZE. */
8823
8824 int
8825 aarch64_dwarf2_addr_size (void)
8826 {
8827 if (ilp32_p)
8828 return 4;
8829 else if (llp64_p)
8830 return 8;
8831 return bfd_arch_bits_per_address (stdoutput) / 8;
8832 }
8833
8834 /* MD interface: Symbol and relocation handling. */
8835
8836 /* Return the address within the segment that a PC-relative fixup is
8837 relative to. For AArch64 PC-relative fixups applied to instructions
8838 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8839
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Address of the fixup within the output.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
8856
8857 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8858 Otherwise we have no need to default values of symbols. */
8859
symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap prefix test before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  /* Create the GOT symbol lazily, on first reference.  */
	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  /* All other symbols get no default value.  */
  return 0;
}
8882
8883 /* Return non-zero if the indicated VALUE has overflowed the maximum
8884 range expressible by a unsigned number with the indicated number of
8885 BITS. */
8886
8887 static bool
8888 unsigned_overflow (valueT value, unsigned bits)
8889 {
8890 valueT lim;
8891 if (bits >= sizeof (valueT) * 8)
8892 return false;
8893 lim = (valueT) 1 << bits;
8894 return (value >= lim);
8895 }
8896
8897
8898 /* Return non-zero if the indicated VALUE has overflowed the maximum
8899 range expressible by an signed number with the indicated number of
8900 BITS. */
8901
8902 static bool
8903 signed_overflow (offsetT value, unsigned bits)
8904 {
8905 offsetT lim;
8906 if (bits >= sizeof (offsetT) * 8)
8907 return false;
8908 lim = (offsetT) 1 << (bits - 1);
8909 return (value < -lim || value >= lim);
8910 }
8911
8912 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
8913 unsigned immediate offset load/store instruction, try to encode it as
8914 an unscaled, 9-bit, signed immediate offset load/store instruction.
8915 Return TRUE if it is successful; otherwise return FALSE.
8916
8917 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
8918 in response to the standard LDR/STR mnemonics when the immediate offset is
8919 unambiguous, i.e. when it is negative or unaligned. */
8920
static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled (LDR/STR-style) opcode to its unscaled (LDUR/STUR)
     counterpart; anything without a counterpart stays OP_NIL.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure means the offset does
     not fit the unscaled form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8974
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to load
   a 32-bit/64-bit immediate value into a general register.  An assembler
   error shall result if the immediate cannot be created by a single one of
   these instructions.  If there is a choice, then to ensure reversibility
   an assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */
8983
static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (move wide with NOT); OP_MOV_IMM_WIDEN is the
	 negated wide-move form.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias (logical immediate with bitmask).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* No single instruction can materialise VALUE in this destination.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
9035
9036 /* An instruction operand which is immediate related may have symbol used
9037 in the assembly, e.g.
9038
9039 mov w0, u32
9040 .set u32, 0x00ffff00
9041
9042 At the time when the assembly instruction is parsed, a referenced symbol,
9043 like 'u32' in the above example may not have been seen; a fixS is created
9044 in such a case and is handled here after symbols have been resolved.
9045 Instruction is fixed up with VALUE using the information in *FIXP plus
9046 extra information in FLAGS.
9047
9048 This function is called by md_apply_fix to fix up instructions that need
9049 a fix-up described above but does not involve any linker-time relocation. */
9050
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate; EXCEPTION goes through the SVC-style immediate
	 encoder while UNDEFINED is inserted directly.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with an add/sub shifted reg encoding
		   3  322|2222|2 2  2 21111 111111
		   1  098|7654|3 2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6  Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6  Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6  Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6  Rn    Rd    SUBS
	 ->
		   3  322|2222|2 2  221111111111
		   1  098|7654|3 2  109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction so the
	 opcode table validates the bitmask for us.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to load a 32-bit/64-bit immediate value into a general
	 register.  An assembler error shall result if the immediate cannot
	 be created by a single one of these instructions.  If there is a
	 choice, then to ensure reversibility an assembler must prefer a
	 MOVZ to MOVN, and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  If the scaled form cannot encode the offset,
	 fall back on the programmer-friendly unscaled (LDUR/STUR) form.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
9214
9215 /* Apply a fixup (fixP) to segment data, once it has been determined
9216 by our caller that we have all the info we need to fix it up.
9217
9218 Parameter valP is the pointer to the value of the bits. */
9219
9220 void
9221 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
9222 {
9223 offsetT value = *valP;
9224 uint32_t insn;
9225 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
9226 int scale;
9227 unsigned flags = fixP->fx_addnumber;
9228
9229 DEBUG_TRACE ("\n\n");
9230 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
9231 DEBUG_TRACE ("Enter md_apply_fix");
9232
9233 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
9234
9235 /* Note whether this will delete the relocation. */
9236
9237 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
9238 && aarch64_force_reloc (fixP->fx_r_type) <= 0)
9239 fixP->fx_done = 1;
9240
9241 /* Process the relocations. */
9242 switch (fixP->fx_r_type)
9243 {
9244 case BFD_RELOC_NONE:
9245 /* This will need to go in the object file. */
9246 fixP->fx_done = 0;
9247 break;
9248
9249 case BFD_RELOC_8:
9250 case BFD_RELOC_8_PCREL:
9251 if (fixP->fx_done || !seg->use_rela_p)
9252 md_number_to_chars (buf, value, 1);
9253 break;
9254
9255 case BFD_RELOC_16:
9256 case BFD_RELOC_16_PCREL:
9257 if (fixP->fx_done || !seg->use_rela_p)
9258 md_number_to_chars (buf, value, 2);
9259 break;
9260
9261 case BFD_RELOC_32:
9262 case BFD_RELOC_32_PCREL:
9263 if (fixP->fx_done || !seg->use_rela_p)
9264 md_number_to_chars (buf, value, 4);
9265 break;
9266
9267 case BFD_RELOC_64:
9268 case BFD_RELOC_64_PCREL:
9269 if (fixP->fx_done || !seg->use_rela_p)
9270 md_number_to_chars (buf, value, 8);
9271 break;
9272
9273 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
9274 /* We claim that these fixups have been processed here, even if
9275 in fact we generate an error because we do not have a reloc
9276 for them, so tc_gen_reloc() will reject them. */
9277 fixP->fx_done = 1;
9278 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
9279 {
9280 as_bad_where (fixP->fx_file, fixP->fx_line,
9281 _("undefined symbol %s used as an immediate value"),
9282 S_GET_NAME (fixP->fx_addsy));
9283 goto apply_fix_return;
9284 }
9285 fix_insn (fixP, flags, value);
9286 break;
9287
9288 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
9289 if (fixP->fx_done || !seg->use_rela_p)
9290 {
9291 if (value & 3)
9292 as_bad_where (fixP->fx_file, fixP->fx_line,
9293 _("pc-relative load offset not word aligned"));
9294 if (signed_overflow (value, 21))
9295 as_bad_where (fixP->fx_file, fixP->fx_line,
9296 _("pc-relative load offset out of range"));
9297 insn = get_aarch64_insn (buf);
9298 insn |= encode_ld_lit_ofs_19 (value >> 2);
9299 put_aarch64_insn (buf, insn);
9300 }
9301 break;
9302
9303 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
9304 if (fixP->fx_done || !seg->use_rela_p)
9305 {
9306 if (signed_overflow (value, 21))
9307 as_bad_where (fixP->fx_file, fixP->fx_line,
9308 _("pc-relative address offset out of range"));
9309 insn = get_aarch64_insn (buf);
9310 insn |= encode_adr_imm (value);
9311 put_aarch64_insn (buf, insn);
9312 }
9313 break;
9314
9315 case BFD_RELOC_AARCH64_BRANCH19:
9316 if (fixP->fx_done || !seg->use_rela_p)
9317 {
9318 if (value & 3)
9319 as_bad_where (fixP->fx_file, fixP->fx_line,
9320 _("conditional branch target not word aligned"));
9321 if (signed_overflow (value, 21))
9322 as_bad_where (fixP->fx_file, fixP->fx_line,
9323 _("conditional branch out of range"));
9324 insn = get_aarch64_insn (buf);
9325 insn |= encode_cond_branch_ofs_19 (value >> 2);
9326 put_aarch64_insn (buf, insn);
9327 }
9328 break;
9329
9330 case BFD_RELOC_AARCH64_TSTBR14:
9331 if (fixP->fx_done || !seg->use_rela_p)
9332 {
9333 if (value & 3)
9334 as_bad_where (fixP->fx_file, fixP->fx_line,
9335 _("conditional branch target not word aligned"));
9336 if (signed_overflow (value, 16))
9337 as_bad_where (fixP->fx_file, fixP->fx_line,
9338 _("conditional branch out of range"));
9339 insn = get_aarch64_insn (buf);
9340 insn |= encode_tst_branch_ofs_14 (value >> 2);
9341 put_aarch64_insn (buf, insn);
9342 }
9343 break;
9344
9345 case BFD_RELOC_AARCH64_CALL26:
9346 case BFD_RELOC_AARCH64_JUMP26:
9347 if (fixP->fx_done || !seg->use_rela_p)
9348 {
9349 if (value & 3)
9350 as_bad_where (fixP->fx_file, fixP->fx_line,
9351 _("branch target not word aligned"));
9352 if (signed_overflow (value, 28))
9353 as_bad_where (fixP->fx_file, fixP->fx_line,
9354 _("branch out of range"));
9355 insn = get_aarch64_insn (buf);
9356 insn |= encode_branch_ofs_26 (value >> 2);
9357 put_aarch64_insn (buf, insn);
9358 }
9359 break;
9360
9361 case BFD_RELOC_AARCH64_MOVW_G0:
9362 case BFD_RELOC_AARCH64_MOVW_G0_NC:
9363 case BFD_RELOC_AARCH64_MOVW_G0_S:
9364 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
9365 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
9366 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
9367 scale = 0;
9368 goto movw_common;
9369 case BFD_RELOC_AARCH64_MOVW_G1:
9370 case BFD_RELOC_AARCH64_MOVW_G1_NC:
9371 case BFD_RELOC_AARCH64_MOVW_G1_S:
9372 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9373 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
9374 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
9375 scale = 16;
9376 goto movw_common;
9377 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
9378 scale = 0;
9379 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9380 /* Should always be exported to object file, see
9381 aarch64_force_relocation(). */
9382 gas_assert (!fixP->fx_done);
9383 gas_assert (seg->use_rela_p);
9384 goto movw_common;
9385 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9386 scale = 16;
9387 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9388 /* Should always be exported to object file, see
9389 aarch64_force_relocation(). */
9390 gas_assert (!fixP->fx_done);
9391 gas_assert (seg->use_rela_p);
9392 goto movw_common;
9393 case BFD_RELOC_AARCH64_MOVW_G2:
9394 case BFD_RELOC_AARCH64_MOVW_G2_NC:
9395 case BFD_RELOC_AARCH64_MOVW_G2_S:
9396 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
9397 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
9398 scale = 32;
9399 goto movw_common;
9400 case BFD_RELOC_AARCH64_MOVW_G3:
9401 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
9402 scale = 48;
9403 movw_common:
9404 if (fixP->fx_done || !seg->use_rela_p)
9405 {
9406 insn = get_aarch64_insn (buf);
9407
9408 if (!fixP->fx_done)
9409 {
9410 /* REL signed addend must fit in 16 bits */
9411 if (signed_overflow (value, 16))
9412 as_bad_where (fixP->fx_file, fixP->fx_line,
9413 _("offset out of range"));
9414 }
9415 else
9416 {
9417 /* Check for overflow and scale. */
9418 switch (fixP->fx_r_type)
9419 {
9420 case BFD_RELOC_AARCH64_MOVW_G0:
9421 case BFD_RELOC_AARCH64_MOVW_G1:
9422 case BFD_RELOC_AARCH64_MOVW_G2:
9423 case BFD_RELOC_AARCH64_MOVW_G3:
9424 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
9425 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
9426 if (unsigned_overflow (value, scale + 16))
9427 as_bad_where (fixP->fx_file, fixP->fx_line,
9428 _("unsigned value out of range"));
9429 break;
9430 case BFD_RELOC_AARCH64_MOVW_G0_S:
9431 case BFD_RELOC_AARCH64_MOVW_G1_S:
9432 case BFD_RELOC_AARCH64_MOVW_G2_S:
9433 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
9434 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
9435 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
9436 /* NOTE: We can only come here with movz or movn. */
9437 if (signed_overflow (value, scale + 16))
9438 as_bad_where (fixP->fx_file, fixP->fx_line,
9439 _("signed value out of range"));
9440 if (value < 0)
9441 {
9442 /* Force use of MOVN. */
9443 value = ~value;
9444 insn = reencode_movzn_to_movn (insn);
9445 }
9446 else
9447 {
9448 /* Force use of MOVZ. */
9449 insn = reencode_movzn_to_movz (insn);
9450 }
9451 break;
9452 default:
9453 /* Unchecked relocations. */
9454 break;
9455 }
9456 value >>= scale;
9457 }
9458
9459 /* Insert value into MOVN/MOVZ/MOVK instruction. */
9460 insn |= encode_movw_imm (value & 0xffff);
9461
9462 put_aarch64_insn (buf, insn);
9463 }
9464 break;
9465
9466 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
9467 fixP->fx_r_type = (ilp32_p
9468 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
9469 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
9470 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9471 /* Should always be exported to object file, see
9472 aarch64_force_relocation(). */
9473 gas_assert (!fixP->fx_done);
9474 gas_assert (seg->use_rela_p);
9475 break;
9476
9477 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
9478 fixP->fx_r_type = (ilp32_p
9479 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
9480 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
9481 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9482 /* Should always be exported to object file, see
9483 aarch64_force_relocation(). */
9484 gas_assert (!fixP->fx_done);
9485 gas_assert (seg->use_rela_p);
9486 break;
9487
9488 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
9489 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
9490 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
9491 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
9492 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
9493 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
9494 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
9495 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
9496 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
9497 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
9498 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
9499 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
9500 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
9501 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
9502 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
9503 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
9504 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
9505 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
9506 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
9507 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
9508 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
9509 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
9510 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
9511 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
9512 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
9513 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
9514 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
9515 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
9516 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
9517 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
9518 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
9519 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
9520 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
9521 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
9522 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
9523 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
9524 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
9525 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
9526 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
9527 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
9528 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
9529 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
9530 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
9531 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
9532 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
9533 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
9534 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
9535 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
9536 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
9537 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
9538 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
9539 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
9540 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9541 /* Should always be exported to object file, see
9542 aarch64_force_relocation(). */
9543 gas_assert (!fixP->fx_done);
9544 gas_assert (seg->use_rela_p);
9545 break;
9546
9547 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
9548 /* Should always be exported to object file, see
9549 aarch64_force_relocation(). */
9550 fixP->fx_r_type = (ilp32_p
9551 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
9552 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
9553 gas_assert (!fixP->fx_done);
9554 gas_assert (seg->use_rela_p);
9555 break;
9556
9557 case BFD_RELOC_AARCH64_ADD_LO12:
9558 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
9559 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
9560 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
9561 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
9562 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
9563 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
9564 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
9565 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
9566 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
9567 case BFD_RELOC_AARCH64_LDST128_LO12:
9568 case BFD_RELOC_AARCH64_LDST16_LO12:
9569 case BFD_RELOC_AARCH64_LDST32_LO12:
9570 case BFD_RELOC_AARCH64_LDST64_LO12:
9571 case BFD_RELOC_AARCH64_LDST8_LO12:
9572 /* Should always be exported to object file, see
9573 aarch64_force_relocation(). */
9574 gas_assert (!fixP->fx_done);
9575 gas_assert (seg->use_rela_p);
9576 break;
9577
9578 case BFD_RELOC_AARCH64_TLSDESC_ADD:
9579 case BFD_RELOC_AARCH64_TLSDESC_CALL:
9580 case BFD_RELOC_AARCH64_TLSDESC_LDR:
9581 break;
9582
9583 case BFD_RELOC_UNUSED:
9584 /* An error will already have been reported. */
9585 break;
9586
9587 case BFD_RELOC_RVA:
9588 case BFD_RELOC_32_SECREL:
9589 case BFD_RELOC_16_SECIDX:
9590 break;
9591
9592 default:
9593 as_bad_where (fixP->fx_file, fixP->fx_line,
9594 _("unexpected %s fixup"),
9595 bfd_get_reloc_code_name (fixP->fx_r_type));
9596 break;
9597 }
9598
9599 apply_fix_return:
  /* Free the allocated struct aarch64_inst.
     N.B. currently only a very limited number of fix-up types actually use
     this field, so the impact on performance should be minimal.  */
9603 free (fixP->tc_fix_data.inst);
9604
9605 return;
9606 }
9607
9608 /* Translate internal representation of relocation info to BFD target
9609 format. */
9610
9611 arelent *
9612 tc_gen_reloc (asection * section, fixS * fixp)
9613 {
9614 arelent *reloc;
9615 bfd_reloc_code_real_type code;
9616
9617 reloc = XNEW (arelent);
9618
9619 reloc->sym_ptr_ptr = XNEW (asymbol *);
9620 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9621 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9622
9623 if (fixp->fx_pcrel)
9624 {
9625 if (section->use_rela_p)
9626 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9627 else
9628 fixp->fx_offset = reloc->address;
9629 }
9630 reloc->addend = fixp->fx_offset;
9631
9632 code = fixp->fx_r_type;
9633 switch (code)
9634 {
9635 case BFD_RELOC_16:
9636 if (fixp->fx_pcrel)
9637 code = BFD_RELOC_16_PCREL;
9638 break;
9639
9640 case BFD_RELOC_32:
9641 if (fixp->fx_pcrel)
9642 code = BFD_RELOC_32_PCREL;
9643 break;
9644
9645 case BFD_RELOC_64:
9646 if (fixp->fx_pcrel)
9647 code = BFD_RELOC_64_PCREL;
9648 break;
9649
9650 default:
9651 break;
9652 }
9653
9654 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9655 if (reloc->howto == NULL)
9656 {
9657 as_bad_where (fixp->fx_file, fixp->fx_line,
9658 _
9659 ("cannot represent %s relocation in this object file format"),
9660 bfd_get_reloc_code_name (code));
9661 return NULL;
9662 }
9663
9664 return reloc;
9665 }
9666
/* This fix_new is called by cons via TC_CONS_FIX_NEW.

   Create a fixup for a data directive (.byte/.hword/.word/.xword) of
   SIZE bytes at offset WHERE in FRAG, choosing the BFD relocation type
   from the directive size.  */

void
cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
{
  bfd_reloc_code_real_type type;
  /* Data relocations created here are always absolute.  */
  int pcrel = 0;

#ifdef TE_PE
  /* PE-specific pseudo-expressions: section-relative offset and
     section index.  */
  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_32_SECREL;
    }
  else if (exp->X_op == O_secidx)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_16_SECIDX;
    }
  else
    {
#endif
  /* Pick a reloc.
     FIXME: @@ Should look at CPU word size.  */
  switch (size)
    {
    case 1:
      type = BFD_RELOC_8;
      break;
    case 2:
      type = BFD_RELOC_16;
      break;
    case 4:
      type = BFD_RELOC_32;
      break;
    case 8:
      type = BFD_RELOC_64;
      break;
    default:
      as_bad (_("cannot do %u-byte relocation"), size);
      type = BFD_RELOC_UNUSED;
      break;
    }
#ifdef TE_PE
    }
#endif

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}
9716
9717 /* Implement md_after_parse_args. This is the earliest time we need to decide
9718 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9719
9720 void
9721 aarch64_after_parse_args (void)
9722 {
9723 if (aarch64_abi != AARCH64_ABI_NONE)
9724 return;
9725
9726 #ifdef OBJ_ELF
9727 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9728 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9729 aarch64_abi = AARCH64_ABI_ILP32;
9730 else
9731 aarch64_abi = AARCH64_ABI_LP64;
9732 #else
9733 aarch64_abi = AARCH64_ABI_LLP64;
9734 #endif
9735 }
9736
9737 #ifdef OBJ_ELF
9738 const char *
9739 elf64_aarch64_target_format (void)
9740 {
9741 #ifdef TE_CLOUDABI
9742 /* FIXME: What to do for ilp32_p ? */
9743 if (target_big_endian)
9744 return "elf64-bigaarch64-cloudabi";
9745 else
9746 return "elf64-littleaarch64-cloudabi";
9747 #else
9748 if (target_big_endian)
9749 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9750 else
9751 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9752 #endif
9753 }
9754
/* Frob SYMP before it is written out; no AArch64-specific handling is
   needed, so defer entirely to the generic ELF symbol frobbing.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9760 #elif defined OBJ_COFF
/* Return the BFD target name for COFF output; only little-endian PE
   is supported here.  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9766 #endif
9767
9768 /* MD interface: Finalization. */
9769
9770 /* A good place to do this, although this was probably not intended
9771 for this kind of use. We need to dump the literal pool before
9772 references are made to a null symbol pointer. */
9773
9774 void
9775 aarch64_cleanup (void)
9776 {
9777 literal_pool *pool;
9778
9779 for (pool = list_of_pools; pool; pool = pool->next)
9780 {
9781 /* Put it at the end of the relevant section. */
9782 subseg_set (pool->section, pool->sub_section);
9783 s_ltorg (0);
9784 }
9785 }
9786
9787 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with NEXT; decide whether it
	 is redundant by looking at the following frag(s).  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9851 #endif
9852
/* Adjust the symbol table.  Drop redundant mapping symbols, then let
   generic ELF code do its own adjustments.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9865
/* Insert KEY/VALUE into TABLE, keeping any existing entry for KEY
   (the final 0 argument to str_hash_insert means "do not replace").  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9871
/* Insert a system-register entry KEY/VALUE into TABLE, first checking
   that the name fits within AARCH64_MAX_SYSREG_NAME_LEN.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9878
9879 static void
9880 fill_instruction_hash_table (void)
9881 {
9882 const aarch64_opcode *opcode = aarch64_opcode_table;
9883
9884 while (opcode->name != NULL)
9885 {
9886 templates *templ, *new_templ;
9887 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9888
9889 new_templ = XNEW (templates);
9890 new_templ->opcode = opcode;
9891 new_templ->next = NULL;
9892
9893 if (!templ)
9894 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9895 else
9896 {
9897 new_templ->next = templ->next;
9898 templ->next = new_templ;
9899 }
9900 ++opcode;
9901 }
9902 }
9903
/* Copy at most NUM characters of the NUL-terminated string SRC into
   DST, converting each to upper case, and NUL-terminate DST.  DST must
   have room for NUM + 1 characters.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  /* Use size_t for the index so it matches the type of NUM; the old
     'unsigned int' index could truncate on LP64 hosts.  */
  size_t i;
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
9912
9913 /* Assume STR point to a lower-case string, allocate, convert and return
9914 the corresponding upper-case string. */
9915 static inline const char*
9916 get_upper_str (const char *str)
9917 {
9918 char *ret;
9919 size_t len = strlen (str);
9920 ret = XNEWVEC (char, len + 1);
9921 convert_to_upper (ret, str, len);
9922 return ret;
9923 }
9924
/* MD interface: Initialization.  */

/* Set up all the per-target hash tables (opcodes, condition codes,
   shift/extend modifiers, system registers, barrier options, prefetch
   and hint operands, register names), resolve the CPU variant from the
   command-line options, and record the BFD machine type.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Allocate all the lookup tables up front.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and PSTATE fields.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  /* IC/DC/AT/TLBI/SR system instruction operands.  */
  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* Register names and NZCV flag-combination names.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend operand modifiers, hashed in both cases.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.
     -mcpu takes precedence over -march; fall back to the default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* FIXME - is there a better way to do it ?  */
  /* Register numbers used by SFrame generation: SP, FP and RA.  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29; /* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
10096
/* Command line processing.  */

/* Short options; "m" takes an argument (e.g. -mcpu=..., -mabi=...).  */
const char *md_shortopts = "m:";

/* Long-option codes for endianness selection; only the variants the
   configured target supports are defined.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
10123
/* A simple boolean-style option: setting it stores VALUE into *VAR.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Options handled via the table above.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
10146
/* An entry mapping a -mcpu= name to its feature set.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
10155
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Each entry pairs the base architecture with the
   optional extensions the part implements.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
		  "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
		   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
		  "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
		  "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
		  "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
			    "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				 "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
			      "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
				   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
				   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
		  "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
	      "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
	      "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
	       "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
	     "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older spelling of 'xgene1'; it was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
		"Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
		"Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
10315
/* An entry mapping a -march= name to its feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
10342
/* ISA extensions.  */
/* An entry mapping a "+ext" name to the features it enables (VALUE)
   and the features it depends on (REQUIRE).  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};
10350
/* Architectural extension table: name, the features the extension
   enables, and the features it requires.  Used by the +ext/+noext
   parsing and by the transitive enable/disable closures below.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-f64f64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i16i64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme2",		AARCH64_FEATURE (AARCH64_FEATURE_SME2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {"mops",		AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
			AARCH64_ARCH_NONE},
  {"hbc",		AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
			AARCH64_ARCH_NONE},
  {"cssc",		AARCH64_FEATURE (AARCH64_FEATURE_CSSC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
10453
/* A long option whose sub-option text is decoded by FUNC
   (e.g. -mcpu=NAME+ext...).  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
10461
10462 /* Transitive closure of features depending on set. */
10463 static aarch64_feature_set
10464 aarch64_feature_disable_set (aarch64_feature_set set)
10465 {
10466 const struct aarch64_option_cpu_value_table *opt;
10467 aarch64_feature_set prev = 0;
10468
10469 while (prev != set) {
10470 prev = set;
10471 for (opt = aarch64_features; opt->name != NULL; opt++)
10472 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10473 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10474 }
10475 return set;
10476 }
10477
10478 /* Transitive closure of dependencies of set. */
10479 static aarch64_feature_set
10480 aarch64_feature_enable_set (aarch64_feature_set set)
10481 {
10482 const struct aarch64_option_cpu_value_table *opt;
10483 aarch64_feature_set prev = 0;
10484
10485 while (prev != set) {
10486 prev = set;
10487 for (opt = aarch64_features; opt->name != NULL; opt++)
10488 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10489 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10490 }
10491 return set;
10492 }
10493
10494 static int
10495 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10496 bool ext_only)
10497 {
10498 /* We insist on extensions being added before being removed. We achieve
10499 this by using the ADDING_VALUE variable to indicate whether we are
10500 adding an extension (1) or removing it (0) and only allowing it to
10501 change in the order -1 -> 1 -> 0. */
10502 int adding_value = -1;
10503 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10504
10505 /* Copy the feature set, so that we can modify it. */
10506 *ext_set = **opt_p;
10507 *opt_p = ext_set;
10508
10509 while (str != NULL && *str != 0)
10510 {
10511 const struct aarch64_option_cpu_value_table *opt;
10512 const char *ext = NULL;
10513 int optlen;
10514
10515 if (!ext_only)
10516 {
10517 if (*str != '+')
10518 {
10519 as_bad (_("invalid architectural extension"));
10520 return 0;
10521 }
10522
10523 ext = strchr (++str, '+');
10524 }
10525
10526 if (ext != NULL)
10527 optlen = ext - str;
10528 else
10529 optlen = strlen (str);
10530
10531 if (optlen >= 2 && startswith (str, "no"))
10532 {
10533 if (adding_value != 0)
10534 adding_value = 0;
10535 optlen -= 2;
10536 str += 2;
10537 }
10538 else if (optlen > 0)
10539 {
10540 if (adding_value == -1)
10541 adding_value = 1;
10542 else if (adding_value != 1)
10543 {
10544 as_bad (_("must specify extensions to add before specifying "
10545 "those to remove"));
10546 return false;
10547 }
10548 }
10549
10550 if (optlen == 0)
10551 {
10552 as_bad (_("missing architectural extension"));
10553 return 0;
10554 }
10555
10556 gas_assert (adding_value != -1);
10557
10558 for (opt = aarch64_features; opt->name != NULL; opt++)
10559 if (strncmp (opt->name, str, optlen) == 0)
10560 {
10561 aarch64_feature_set set;
10562
10563 /* Add or remove the extension. */
10564 if (adding_value)
10565 {
10566 set = aarch64_feature_enable_set (opt->value);
10567 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10568 }
10569 else
10570 {
10571 set = aarch64_feature_disable_set (opt->value);
10572 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10573 }
10574 break;
10575 }
10576
10577 if (opt->name == NULL)
10578 {
10579 as_bad (_("unknown architectural extension `%s'"), str);
10580 return 0;
10581 }
10582
10583 str = ext;
10584 };
10585
10586 return 1;
10587 }
10588
10589 static int
10590 aarch64_parse_cpu (const char *str)
10591 {
10592 const struct aarch64_cpu_option_table *opt;
10593 const char *ext = strchr (str, '+');
10594 size_t optlen;
10595
10596 if (ext != NULL)
10597 optlen = ext - str;
10598 else
10599 optlen = strlen (str);
10600
10601 if (optlen == 0)
10602 {
10603 as_bad (_("missing cpu name `%s'"), str);
10604 return 0;
10605 }
10606
10607 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10608 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10609 {
10610 mcpu_cpu_opt = &opt->value;
10611 if (ext != NULL)
10612 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10613
10614 return 1;
10615 }
10616
10617 as_bad (_("unknown cpu `%s'"), str);
10618 return 0;
10619 }
10620
10621 static int
10622 aarch64_parse_arch (const char *str)
10623 {
10624 const struct aarch64_arch_option_table *opt;
10625 const char *ext = strchr (str, '+');
10626 size_t optlen;
10627
10628 if (ext != NULL)
10629 optlen = ext - str;
10630 else
10631 optlen = strlen (str);
10632
10633 if (optlen == 0)
10634 {
10635 as_bad (_("missing architecture name `%s'"), str);
10636 return 0;
10637 }
10638
10639 for (opt = aarch64_archs; opt->name != NULL; opt++)
10640 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10641 {
10642 march_cpu_opt = &opt->value;
10643 if (ext != NULL)
10644 return aarch64_parse_features (ext, &march_cpu_opt, false);
10645
10646 return 1;
10647 }
10648
10649 as_bad (_("unknown architecture `%s'\n"), str);
10650 return 0;
10651 }
10652
/* ABIs.  Maps a command-line ABI name (for -mabi=) onto the internal
   aarch64_abi_type enumerator.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name as written on the command line.  */
  enum aarch64_abi_type value;	/* Corresponding internal ABI value.  */
};
10659
/* ABIs accepted by -mabi=: ILP32 and LP64 for ELF targets, LLP64
   otherwise (presumably COFF/PE — the object format is not visible
   here).  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10668
10669 static int
10670 aarch64_parse_abi (const char *str)
10671 {
10672 unsigned int i;
10673
10674 if (str[0] == '\0')
10675 {
10676 as_bad (_("missing abi name `%s'"), str);
10677 return 0;
10678 }
10679
10680 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10681 if (strcmp (str, aarch64_abis[i].name) == 0)
10682 {
10683 aarch64_abi = aarch64_abis[i].value;
10684 return 1;
10685 }
10686
10687 as_bad (_("unknown abi `%s'\n"), str);
10688 return 0;
10689 }
10690
10691 static struct aarch64_long_option_table aarch64_long_opts[] = {
10692 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
10693 aarch64_parse_abi, NULL},
10694 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
10695 aarch64_parse_cpu, NULL},
10696 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
10697 aarch64_parse_arch, NULL},
10698 {NULL, NULL, 0, NULL}
10699 };
10700
/* gas hook: handle a machine-dependent command-line option.  C is the
   option character and ARG its argument (or NULL).  Returns 1 if the
   option was recognized and consumed, 0 otherwise.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the table of simple flag options; the full option text
	 (first char plus ARG) must match the table entry exactly.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the long options ("-mabi=", "-mcpu=", "-march="), which are
	 matched as a prefix and parse the text after the '='.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG omits the leading option
		 character, hence the "- 1" when skipping the prefix.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10767
/* gas hook: print the AArch64-specific option summary for --help to FP,
   drawing the text from aarch64_opts and aarch64_long_opts.  */
void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  /* Entries with a NULL help string are deliberately undocumented.  */
  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL assemble code for a little-endian cpu\n"));
#endif
}
10794
10795 /* Parse a .cpu directive. */
10796
10797 static void
10798 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10799 {
10800 const struct aarch64_cpu_option_table *opt;
10801 char saved_char;
10802 char *name;
10803 char *ext;
10804 size_t optlen;
10805
10806 name = input_line_pointer;
10807 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10808 saved_char = *input_line_pointer;
10809 *input_line_pointer = 0;
10810
10811 ext = strchr (name, '+');
10812
10813 if (ext != NULL)
10814 optlen = ext - name;
10815 else
10816 optlen = strlen (name);
10817
10818 /* Skip the first "all" entry. */
10819 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10820 if (strlen (opt->name) == optlen
10821 && strncmp (name, opt->name, optlen) == 0)
10822 {
10823 mcpu_cpu_opt = &opt->value;
10824 if (ext != NULL)
10825 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10826 return;
10827
10828 cpu_variant = *mcpu_cpu_opt;
10829
10830 *input_line_pointer = saved_char;
10831 demand_empty_rest_of_line ();
10832 return;
10833 }
10834 as_bad (_("unknown cpu `%s'"), name);
10835 *input_line_pointer = saved_char;
10836 ignore_rest_of_line ();
10837 }
10838
10839
10840 /* Parse a .arch directive. */
10841
10842 static void
10843 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10844 {
10845 const struct aarch64_arch_option_table *opt;
10846 char saved_char;
10847 char *name;
10848 char *ext;
10849 size_t optlen;
10850
10851 name = input_line_pointer;
10852 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10853 saved_char = *input_line_pointer;
10854 *input_line_pointer = 0;
10855
10856 ext = strchr (name, '+');
10857
10858 if (ext != NULL)
10859 optlen = ext - name;
10860 else
10861 optlen = strlen (name);
10862
10863 /* Skip the first "all" entry. */
10864 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10865 if (strlen (opt->name) == optlen
10866 && strncmp (name, opt->name, optlen) == 0)
10867 {
10868 mcpu_cpu_opt = &opt->value;
10869 if (ext != NULL)
10870 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10871 return;
10872
10873 cpu_variant = *mcpu_cpu_opt;
10874
10875 *input_line_pointer = saved_char;
10876 demand_empty_rest_of_line ();
10877 return;
10878 }
10879
10880 as_bad (_("unknown architecture `%s'\n"), name);
10881 *input_line_pointer = saved_char;
10882 ignore_rest_of_line ();
10883 }
10884
10885 /* Parse a .arch_extension directive. */
10886
10887 static void
10888 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10889 {
10890 char saved_char;
10891 char *ext = input_line_pointer;
10892
10893 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10894 saved_char = *input_line_pointer;
10895 *input_line_pointer = 0;
10896
10897 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10898 return;
10899
10900 cpu_variant = *mcpu_cpu_opt;
10901
10902 *input_line_pointer = saved_char;
10903 demand_empty_rest_of_line ();
10904 }
10905
/* Copy symbol information.  Propagates the AArch64-specific per-symbol
   flag word from SRC to DEST (whatever AARCH64_GET_FLAG tracks — the
   flag's semantics are defined elsewhere in this file).  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10913
#ifdef OBJ_ELF
/* Same as elf_copy_symbol_attributes, but without copying st_other.
   This is needed so AArch64 specific st_other values can be independently
   specified for an IFUNC resolver (that is called by the dynamic linker)
   and the symbol it resolves (aliased to the resolver).  In particular,
   if a function symbol has special st_other value set via directives,
   then attaching an IFUNC resolver to that symbol should not override
   the st_other setting.  Requiring the directive on the IFUNC resolver
   symbol would be unexpected and problematic in C code, where the two
   symbols appear as two independent function declarations.  */

void
aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  struct elf_obj_sy *srcelf = symbol_get_obj (src);
  struct elf_obj_sy *destelf = symbol_get_obj (dest);

  /* Only copy size from SRC when DEST's size is unset.  Because we don't
     track whether .size has been used, we can't differentiate
     ".size dest, 0" from the case where dest's size is unset.  */
  if (destelf->size != NULL || S_GET_SIZE (dest) != 0)
    return;

  if (srcelf->size != NULL)
    {
      destelf->size = XNEW (expressionS);
      *destelf->size = *srcelf->size;
    }
  S_SET_SIZE (dest, S_GET_SIZE (src));
}
#endif