Remove path name from test case
[binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 #define streq(a, b) (strcmp (a, b) == 0)
42
43 #define END_OF_INSN '\0'
44
45 static aarch64_feature_set cpu_variant;
46
47 /* Variables that we set while parsing command-line options. Once all
48 options have been read we re-process these values to set the real
49 assembly flags. */
50 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
51 static const aarch64_feature_set *march_cpu_opt = NULL;
52
53 /* Constants for known architecture features. */
54 static const aarch64_feature_set cpu_default = AARCH64_ARCH_FEATURES (V8A);
55
56 /* Currently active instruction sequence. */
57 static aarch64_instr_sequence *insn_sequence = NULL;
58
59 #ifdef OBJ_ELF
60 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
61 static symbolS *GOT_symbol;
62 #endif
63
64 /* Which ABI to use. */
enum aarch64_abi_type
{
  AARCH64_ABI_NONE = 0,		/* No ABI selected yet.  */
  AARCH64_ABI_LP64 = 1,		/* 32-bit int; 64-bit long and pointers.  */
  AARCH64_ABI_ILP32 = 2,	/* 32-bit int, long and pointers.  */
  AARCH64_ABI_LLP64 = 3		/* 32-bit int and long; 64-bit pointers.  */
};
72
73 unsigned int aarch64_sframe_cfa_sp_reg;
74 /* The other CFA base register for SFrame stack trace info. */
75 unsigned int aarch64_sframe_cfa_fp_reg;
76 unsigned int aarch64_sframe_cfa_ra_reg;
77
78 #ifndef DEFAULT_ARCH
79 #define DEFAULT_ARCH "aarch64"
80 #endif
81
82 #ifdef OBJ_ELF
83 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
84 static const char *default_arch = DEFAULT_ARCH;
85 #endif
86
87 /* AArch64 ABI for the output file. */
88 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
89
90 /* When non-zero, program to a 32-bit model, in which the C data types
91 int, long and all pointer types are 32-bit objects (ILP32); or to a
92 64-bit model, in which the C int type is 32-bits but the C long type
93 and all pointer types are 64-bit objects (LP64). */
94 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
95
96 /* When non zero, C types int and long are 32 bit,
97 pointers, however are 64 bit */
98 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
99
/* Element kinds that can appear in a vector arrangement suffix
   (e.g. ".4s") or in an SVE predication qualifier.  */
enum vector_el_type
{
  NT_invtype = -1,	/* No valid type parsed.  */
  NT_b,			/* Byte.  */
  NT_h,			/* Halfword.  */
  NT_s,			/* Single word.  */
  NT_d,			/* Doubleword.  */
  NT_q,			/* Quadword.  */
  NT_zero,		/* Maps to AARCH64_OPND_QLF_P_Z.  */
  NT_merge		/* Maps to AARCH64_OPND_QLF_P_M.  */
};
111
112 /* Bits for DEFINED field in vector_type_el. */
113 #define NTA_HASTYPE 1
114 #define NTA_HASINDEX 2
115 #define NTA_HASVARWIDTH 4
116
/* A parsed vector type suffix or vector element reference.  The DEFINED
   field says which of the other fields are meaningful.  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type (NT_*).  */
  unsigned char defined;	/* Mask of NTA_HAS* bits.  */
  unsigned element_size;	/* Size of one element.  */
  unsigned width;		/* Number of elements in the arrangement.  */
  int64_t index;		/* Element index, if NTA_HASINDEX is set.  */
};
125
126 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
127
/* Relocation/fixup information gathered while parsing one instruction.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression the fixup applies to.  */
  int pc_rel;				/* PC-relative flag.  */
  enum aarch64_opnd opnd;		/* Operand the relocation is for.  */
  uint32_t flags;			/* FIXUP_F_* flags.  */
  unsigned need_libopcodes_p : 1;	/* Whether libopcodes is needed when
					   applying the fixup.  */
};
137
/* Everything GAS tracks for the instruction currently being assembled:
   the libopcodes IR plus GAS-side parsing and relocation state.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  aarch64_operand_error parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};
151
152 typedef struct aarch64_instruction aarch64_instruction;
153
154 static aarch64_instruction inst;
155
156 static bool parse_operands (char *, const aarch64_opcode *);
157 static bool programmer_friendly_fixup (aarch64_instruction *);
158
159 /* If an AARCH64_OPDE_SYNTAX_ERROR has no error string, its first three
160 data fields contain the following information:
161
162 data[0].i:
163 A mask of register types that would have been acceptable as bare
164 operands, outside of a register list. In addition, SEF_DEFAULT_ERROR
is set if a general parsing error occurred for an operand (that is,
166 an error not related to registers, and having no error string).
167
168 data[1].i:
169 A mask of register types that would have been acceptable inside
170 a register list. In addition, SEF_IN_REGLIST is set if the
171 operand contained a '{' and if we got to the point of trying
172 to parse a register inside a list.
173
174 data[2].i:
175 The mask associated with the register that was actually seen, or 0
176 if none. A nonzero value describes a register inside a register
177 list if data[1].i & SEF_IN_REGLIST, otherwise it describes a bare
178 register.
179
180 The idea is that stringless errors from multiple opcode templates can
181 be ORed together to give a summary of the available alternatives. */
182 #define SEF_DEFAULT_ERROR (1U << 31)
183 #define SEF_IN_REGLIST (1U << 31)
184
185 /* Diagnostics inline function utilities.
186
187 These are lightweight utilities which should only be called by parse_operands
188 and other parsers. GAS processes each assembly line by parsing it against
189 instruction template(s), in the case of multiple templates (for the same
190 mnemonic name), those templates are tried one by one until one succeeds or
191 all fail. An assembly line may fail a few templates before being
192 successfully parsed; an error saved here in most cases is not a user error
193 but an error indicating the current template is not the right template.
194 Therefore it is very important that errors can be saved at a low cost during
195 the parsing; we don't want to slow down the whole parsing by recording
196 non-user errors in detail.
197
198 Remember that the objective is to help GAS pick up the most appropriate
199 error message in the case of multiple templates, e.g. FMOV which has 8
200 templates. */
201
/* Reset the recorded parsing error to the "no error" state.  */
static inline void
clear_error (void)
{
  memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
  inst.parsing_error.kind = AARCH64_OPDE_NIL;
}
208
/* Return TRUE if an error has been recorded for the assembly line that
   is currently being parsed.  */
static inline bool
error_p (void)
{
  return inst.parsing_error.kind != AARCH64_OPDE_NIL;
}
214
/* Record an error of kind KIND with message ERROR (which may be NULL),
   discarding any previously recorded error.  ERROR is stored by pointer,
   not copied, so it must remain valid until the error is reported.  */
static inline void
set_error (enum aarch64_operand_error_kind kind, const char *error)
{
  memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
  inst.parsing_error.index = -1;	/* No specific operand identified.  */
  inst.parsing_error.kind = kind;
  inst.parsing_error.error = error;
}
223
/* Record ERROR as an AARCH64_OPDE_RECOVERABLE error.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
229
/* Record a stringless syntax error; the DESC field of the corresponding
   aarch64_operand entry is later used to compose the error message.
   SEF_DEFAULT_ERROR marks data[0] as carrying a general parsing error;
   see the comment above SEF_DEFAULT_ERROR.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  inst.parsing_error.data[0].i = SEF_DEFAULT_ERROR;
}
238
/* Record ERROR as a syntax error, overwriting any earlier error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
244
/* Record ERROR as a syntax error, but only if no error has been
   recorded yet (the earliest error is usually the most meaningful).  */
static inline void
set_first_syntax_error (const char *error)
{
  if (! error_p ())
    set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
251
/* Record ERROR as a fatal syntax error, overwriting any earlier error.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
257 \f
258 /* Return value for certain parsers when the parsing fails; those parsers
259 return the information of the parsed result, e.g. register number, on
260 success. */
261 #define PARSE_FAIL -1
262
263 /* This is an invalid condition code that means no conditional field is
264 present. */
265 #define COND_ALWAYS 0x10
266
/* Association between a textual NZCV flag-set name and its value.  */
typedef struct
{
  const char *template;		/* Name as written in assembly.  */
  uint32_t value;		/* Corresponding encoding.  */
} asm_nzcv;
272
/* Association between a relocation operator name and its BFD code.  */
struct reloc_entry
{
  char *name;				/* Operator name.  */
  bfd_reloc_code_real_type reloc;	/* Corresponding BFD reloc code.  */
};
278
279 /* Macros to define the register types and masks for the purpose
280 of parsing. */
281
282 #undef AARCH64_REG_TYPES
283 #define AARCH64_REG_TYPES \
284 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
285 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
286 BASIC_REG_TYPE(SP_32) /* wsp */ \
287 BASIC_REG_TYPE(SP_64) /* sp */ \
288 BASIC_REG_TYPE(ZR_32) /* wzr */ \
289 BASIC_REG_TYPE(ZR_64) /* xzr */ \
290 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
291 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
292 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
293 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
294 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
295 BASIC_REG_TYPE(V) /* v[0-31] */ \
296 BASIC_REG_TYPE(Z) /* z[0-31] */ \
297 BASIC_REG_TYPE(P) /* p[0-15] */ \
298 BASIC_REG_TYPE(PN) /* pn[0-15] */ \
299 BASIC_REG_TYPE(ZA) /* za */ \
300 BASIC_REG_TYPE(ZAT) /* za[0-15] (ZA tile) */ \
301 BASIC_REG_TYPE(ZATH) /* za[0-15]h (ZA tile horizontal slice) */ \
302 BASIC_REG_TYPE(ZATV) /* za[0-15]v (ZA tile vertical slice) */ \
303 BASIC_REG_TYPE(ZT0) /* zt0 */ \
304 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
305 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
306 /* Typecheck: same, plus SVE registers. */ \
307 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
308 | REG_TYPE(Z)) \
309 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
310 MULTI_REG_TYPE(R_ZR, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
312 /* Typecheck: same, plus SVE registers. */ \
313 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
314 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) \
315 | REG_TYPE(Z)) \
316 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
317 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
318 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
319 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
320 MULTI_REG_TYPE(R_ZR_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
321 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
322 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
323 /* Typecheck: any [BHSDQ]P FP. */ \
324 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
325 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
326 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
327 MULTI_REG_TYPE(R_ZR_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
328 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
329 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
330 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
331 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
332 be used for SVE instructions, since Zn and Pn are valid symbols \
333 in other contexts. */ \
334 MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
335 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
336 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
337 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
338 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
339 | REG_TYPE(Z) | REG_TYPE(P)) \
340 /* Likewise, but with predicate-as-counter registers added. */ \
341 MULTI_REG_TYPE(R_ZR_SP_BHSDQ_VZP_PN, REG_TYPE(R_32) | REG_TYPE(R_64) \
342 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
343 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64) | REG_TYPE(V) \
344 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
345 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
346 | REG_TYPE(Z) | REG_TYPE(P) | REG_TYPE(PN)) \
347 /* Any integer register; used for error messages only. */ \
348 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
349 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
350 | REG_TYPE(ZR_32) | REG_TYPE(ZR_64)) \
351 /* Any vector register. */ \
352 MULTI_REG_TYPE(VZ, REG_TYPE(V) | REG_TYPE(Z)) \
353 /* An SVE vector or predicate register. */ \
354 MULTI_REG_TYPE(ZP, REG_TYPE(Z) | REG_TYPE(P)) \
355 /* Any vector or predicate register. */ \
356 MULTI_REG_TYPE(VZP, REG_TYPE(V) | REG_TYPE(Z) | REG_TYPE(P)) \
357 /* The whole of ZA or a single tile. */ \
358 MULTI_REG_TYPE(ZA_ZAT, REG_TYPE(ZA) | REG_TYPE(ZAT)) \
359 /* A horizontal or vertical slice of a ZA tile. */ \
360 MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV)) \
361 /* Pseudo type to mark the end of the enumerator sequence. */ \
362 END_REG_TYPE(MAX)
363
364 #undef BASIC_REG_TYPE
365 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
366 #undef MULTI_REG_TYPE
367 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
368 #undef END_REG_TYPE
369 #define END_REG_TYPE(T) BASIC_REG_TYPE(T)
370
371 /* Register type enumerators. */
372 typedef enum aarch64_reg_type_
373 {
374 /* A list of REG_TYPE_*. */
375 AARCH64_REG_TYPES
376 } aarch64_reg_type;
377
378 #undef BASIC_REG_TYPE
379 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
380 #undef REG_TYPE
381 #define REG_TYPE(T) (1 << REG_TYPE_##T)
382 #undef MULTI_REG_TYPE
383 #define MULTI_REG_TYPE(T,V) V,
384 #undef END_REG_TYPE
385 #define END_REG_TYPE(T) 0
386
387 /* Structure for a hash table entry for a register. */
/* Structure for a hash table entry for a register.  */
typedef struct
{
  const char *name;	/* Name the register is looked up by.  */
  unsigned char number;	/* Register number.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;	/* One of REG_TYPE_*.  */
  unsigned char builtin;	/* NOTE(review): presumably nonzero for
				   predefined registers as opposed to
				   user-defined aliases -- confirm.  */
} reg_entry;
395
396 /* Values indexed by aarch64_reg_type to assist the type checking. */
397 static const unsigned reg_type_masks[] =
398 {
399 AARCH64_REG_TYPES
400 };
401
402 #undef BASIC_REG_TYPE
403 #undef REG_TYPE
404 #undef MULTI_REG_TYPE
405 #undef END_REG_TYPE
406 #undef AARCH64_REG_TYPES
407
408 /* We expected one of the registers in MASK to be specified. If a register
409 of some kind was specified, SEEN is a mask that contains that register,
410 otherwise it is zero.
411
412 If it is possible to provide a relatively pithy message that describes
413 the error exactly, return a string that does so, reporting the error
414 against "operand %d". Return null otherwise.
415
416 From a QoI perspective, any REG_TYPE_* that is passed as the first
417 argument to set_expected_reg_error should generally have its own message.
418 Providing messages for combinations of such REG_TYPE_*s can be useful if
419 it is possible to summarize the combination in a relatively natural way.
420 On the other hand, it seems better to avoid long lists of unrelated
421 things. */
422
static const char *
get_reg_expected_msg (unsigned int mask, unsigned int seen)
{
  /* Tests of the form (mask & X) && (seen & Y) diagnose a register of
     the wrong kind having been used; tests of the form mask == X fire
     only when the expected set matches X exactly, so their order within
     each group is not significant, but they must come after the SEEN
     checks, which are more specific.  */

  /* First handle messages that use SEEN.  */
  if ((mask & reg_type_masks[REG_TYPE_ZAT])
      && (seen & reg_type_masks[REG_TYPE_ZATHV]))
    return N_("expected an unsuffixed ZA tile at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_ZATHV])
      && (seen & reg_type_masks[REG_TYPE_ZAT]))
    return N_("missing horizontal or vertical suffix at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_ZA])
      && (seen & (reg_type_masks[REG_TYPE_ZAT]
		  | reg_type_masks[REG_TYPE_ZATHV])))
    return N_("expected 'za' rather than a ZA tile at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_PN])
      && (seen & reg_type_masks[REG_TYPE_P]))
    return N_("expected a predicate-as-counter rather than predicate-as-mask"
	      " register at operand %d");

  if ((mask & reg_type_masks[REG_TYPE_P])
      && (seen & reg_type_masks[REG_TYPE_PN]))
    return N_("expected a predicate-as-mask rather than predicate-as-counter"
	      " register at operand %d");

  /* Integer, zero and stack registers.  */
  if (mask == reg_type_masks[REG_TYPE_R_64])
    return N_("expected a 64-bit integer register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_R_ZR])
    return N_("expected an integer or zero register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_R_SP])
    return N_("expected an integer or stack pointer register at operand %d");

  /* Floating-point and SIMD registers.  */
  if (mask == reg_type_masks[REG_TYPE_BHSDQ])
    return N_("expected a scalar SIMD or floating-point register"
	      " at operand %d");
  if (mask == reg_type_masks[REG_TYPE_V])
    return N_("expected an Advanced SIMD vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_Z])
    return N_("expected an SVE vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_P]
      || mask == (reg_type_masks[REG_TYPE_P] | reg_type_masks[REG_TYPE_PN]))
    /* Use this error for "predicate-as-mask only" and "either kind of
       predicate".  We report a more specific error if P is used where
       PN is expected, and vice versa, so the issue at this point is
       "predicate-like" vs. "not predicate-like".  */
    return N_("expected an SVE predicate register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_PN])
    return N_("expected an SVE predicate-as-counter register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_VZ])
    return N_("expected a vector register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZP])
    return N_("expected an SVE vector or predicate register at operand %d");
  if (mask == reg_type_masks[REG_TYPE_VZP])
    return N_("expected a vector or predicate register at operand %d");

  /* SME-related registers.  */
  if (mask == reg_type_masks[REG_TYPE_ZA])
    return N_("expected a ZA array vector at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_ZA_ZAT] | reg_type_masks[REG_TYPE_ZT0]))
    return N_("expected ZT0 or a ZA mask at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZAT])
    return N_("expected a ZA tile at operand %d");
  if (mask == reg_type_masks[REG_TYPE_ZATHV])
    return N_("expected a ZA tile slice at operand %d");

  /* Integer and vector combos.  */
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_V]))
    return N_("expected an integer register or Advanced SIMD vector register"
	      " at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_Z]))
    return N_("expected an integer register or SVE vector register"
	      " at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZ]))
    return N_("expected an integer or vector register at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_P]))
    return N_("expected an integer or predicate register at operand %d");
  if (mask == (reg_type_masks[REG_TYPE_R_ZR] | reg_type_masks[REG_TYPE_VZP]))
    return N_("expected an integer, vector or predicate register"
	      " at operand %d");

  /* SVE and SME combos.  */
  if (mask == (reg_type_masks[REG_TYPE_Z] | reg_type_masks[REG_TYPE_ZATHV]))
    return N_("expected an SVE vector register or ZA tile slice"
	      " at operand %d");

  /* No pithy summary is available; the caller falls back to a generic
     message.  */
  return NULL;
}
514
515 /* Record that we expected a register of type TYPE but didn't see one.
516 REG is the register that we actually saw, or null if we didn't see a
517 recognized register. FLAGS is SEF_IN_REGLIST if we are parsing the
518 contents of a register list, otherwise it is zero. */
519
static inline void
set_expected_reg_error (aarch64_reg_type type, const reg_entry *reg,
			unsigned int flags)
{
  assert (flags == 0 || flags == SEF_IN_REGLIST);
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  /* data[1] holds the types acceptable inside a register list, data[0]
     those acceptable as bare operands; see the comment above
     SEF_DEFAULT_ERROR.  */
  if (flags & SEF_IN_REGLIST)
    inst.parsing_error.data[1].i = reg_type_masks[type] | flags;
  else
    inst.parsing_error.data[0].i = reg_type_masks[type];
  /* Record what was actually seen, for more specific diagnostics.  */
  if (reg)
    inst.parsing_error.data[2].i = reg_type_masks[reg->type];
}
533
534 /* Record that we expected a register list containing registers of type TYPE,
535 but didn't see the opening '{'. If we saw a register instead, REG is the
536 register that we saw, otherwise it is null. */
537
static inline void
set_expected_reglist_error (aarch64_reg_type type, const reg_entry *reg)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
  /* data[1] holds the types acceptable inside a register list; data[2]
     records the register actually seen, if any.  */
  inst.parsing_error.data[1].i = reg_type_masks[type];
  if (reg)
    inst.parsing_error.data[2].i = reg_type_masks[reg->type];
}
546
547 /* Some well known registers that we refer to directly elsewhere. */
548 #define REG_SP 31
549 #define REG_ZR 31
550
551 /* Instructions take 4 bytes in the object file. */
552 #define INSN_SIZE 4
553
554 static htab_t aarch64_ops_hsh;
555 static htab_t aarch64_cond_hsh;
556 static htab_t aarch64_shift_hsh;
557 static htab_t aarch64_sys_regs_hsh;
558 static htab_t aarch64_pstatefield_hsh;
559 static htab_t aarch64_sys_regs_ic_hsh;
560 static htab_t aarch64_sys_regs_dc_hsh;
561 static htab_t aarch64_sys_regs_at_hsh;
562 static htab_t aarch64_sys_regs_tlbi_hsh;
563 static htab_t aarch64_sys_regs_sr_hsh;
564 static htab_t aarch64_reg_hsh;
565 static htab_t aarch64_barrier_opt_hsh;
566 static htab_t aarch64_nzcv_hsh;
567 static htab_t aarch64_pldop_hsh;
568 static htab_t aarch64_hint_opt_hsh;
569
570 /* Stuff needed to resolve the label ambiguity
571 As:
572 ...
573 label: <insn>
574 may differ from:
575 ...
576 label:
577 <insn> */
578
579 static symbolS *last_label_seen;
580
581 /* Literal pool structure. Held on a per-section
582 and per-sub-section basis. */
583
584 #define MAX_LITERAL_POOL_SIZE 1024
/* One entry in a literal pool: an expression plus, for bignums, a saved
   copy of the payload that would otherwise be overwritten by later
   expression parsing.  */
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.X_op == O_big then this bignum holds a copy of the global
     bignum value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;
591
/* A pool of literals; one pool per (section, sub-section) pair.  */
typedef struct literal_pool
{
  literal_expression literals[MAX_LITERAL_POOL_SIZE];	/* The entries.  */
  unsigned int next_free_entry;	/* Index of the first unused entry.  */
  unsigned int id;		/* Pool identifier.  */
  symbolS *symbol;		/* Symbol marking the pool's location.  */
  segT section;			/* Owning section.  */
  subsegT sub_section;		/* Owning sub-section.  */
  int size;			/* NOTE(review): presumably the size in bytes
				   of each literal in this pool -- confirm.  */
  struct literal_pool *next;	/* Next pool in list_of_pools.  */
} literal_pool;
603
604 /* Pointer to a linked list of literal pools. */
605 static literal_pool *list_of_pools = NULL;
606 \f
607 /* Pure syntax. */
608
609 /* This array holds the chars that always start a comment. If the
610 pre-processor is disabled, these aren't very useful. */
611 const char comment_chars[] = "";
612
613 /* This array holds the chars that only start a comment at the beginning of
614 a line. If the line seems to have the form '# 123 filename'
615 .line and .file directives will appear in the pre-processed output. */
616 /* Note that input_file.c hand checks for '#' at the beginning of the
617 first line of the input file. This is because the compiler outputs
618 #NO_APP at the beginning of its output. */
619 /* Also note that comments like this one will always work. */
620 const char line_comment_chars[] = "#";
621
622 const char line_separator_chars[] = ";";
623
624 /* Chars that can be used to separate mant
625 from exp in floating point numbers. */
626 const char EXP_CHARS[] = "eE";
627
628 /* Chars that mean this number is a floating point constant. */
629 /* As in 0f12.456 */
630 /* or 0d1.2345e12 */
631
632 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
633
634 /* Prefix character that indicates the start of an immediate value. */
635 #define is_immediate_prefix(C) ((C) == '#')
636
637 /* Separator character handling. */
638
639 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
640
/* If *STR points at the character C, consume it and return TRUE;
   otherwise leave *STR unchanged and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;

  ++*str;
  return true;
}
652
653 #define skip_past_comma(str) skip_past_char (str, ',')
654
655 /* Arithmetic expressions (possibly involving symbols). */
656
657 static bool in_aarch64_get_expression = false;
658
659 /* Third argument to aarch64_get_expression. */
660 #define GE_NO_PREFIX false
661 #define GE_OPT_PREFIX true
662
663 /* Fourth argument to aarch64_get_expression. */
664 #define ALLOW_ABSENT false
665 #define REJECT_ABSENT true
666
/* Return TRUE if the string pointed to by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */
673
static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression () parser reads from input_line_pointer, so
     temporarily redirect it to our string; in_aarch64_get_expression
     lets md_operand know that failures should be flagged as O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#'-prefixed operand cannot be anything but an expression, so
	 the error is fatal; otherwise keep the earliest error.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  /* Advance the caller's pointer past the consumed expression and
     restore the global parser state.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
733
734 /* Turn a string in input_line_pointer into a floating point constant
735 of type TYPE, and store the appropriate bytes in *LITP. The number
736 of LITTLENUMS emitted is stored in *SIZEP. An error message is
737 returned, or NULL on OK. */
738
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Delegate to the generic IEEE encoder; target_big_endian selects the
     byte order of the emitted littlenums.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
744
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * exp)
{
  /* Only flag the expression as illegal when the parse was initiated by
     aarch64_get_expression; other callers of expression () keep the
     default behaviour.  */
  if (in_aarch64_get_expression)
    exp->X_op = O_illegal;
}
753
754 /* Immediate values. */
755
756 /* Errors may be set multiple times during parsing or bit encoding
757 (particularly in the Neon bits), but usually the earliest error which is set
758 will be the most meaningful. Avoid overwriting it with later (cascading)
759 errors by calling this function. */
760
static void
first_error (const char *error)
{
  /* Keep the earliest recorded error; later ones are usually cascades.  */
  if (! error_p ())
    set_syntax_error (error);
}
767
768 /* Similar to first_error, but this function accepts formatted error
769 message. */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message must fit in the buffer.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
792
793 /* Internal helper routine converting a vector_type_el structure *VECTYPE
794 to a corresponding operand qualifier. */
795
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type (NT_b..NT_q).  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Smallest vector qualifier for each element type, indexed likewise.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predication suffixes map directly.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register: S_B..S_Q parallel NT_b..NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  Only 32-, 64- and 128-bit arrangements exist.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
867
868 /* Register parsing. */
869
870 /* Generic register parser which is called by other specialized
871 register parsers.
872 CCP points to what should be the beginning of a register name.
873 If it is indeed a valid register name, advance CCP over it and
874 return the reg_entry structure; otherwise return NULL.
875 It does not issue diagnostics. */
876
877 static reg_entry *
878 parse_reg (char **ccp)
879 {
880 char *start = *ccp;
881 char *p;
882 reg_entry *reg;
883
884 #ifdef REGISTER_PREFIX
885 if (*start != REGISTER_PREFIX)
886 return NULL;
887 start++;
888 #endif
889
890 p = start;
891 if (!ISALPHA (*p) || !is_name_beginner (*p))
892 return NULL;
893
894 do
895 p++;
896 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
897
898 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
899
900 if (!reg)
901 return NULL;
902
903 *ccp = p;
904 return reg;
905 }
906
907 /* Return the operand qualifier associated with all uses of REG, or
908 AARCH64_OPND_QLF_NIL if none. AARCH64_OPND_QLF_NIL means either
909 that qualifiers don't apply to REG or that qualifiers are added
910 using suffixes. */
911
912 static aarch64_opnd_qualifier_t
913 inherent_reg_qualifier (const reg_entry *reg)
914 {
915 switch (reg->type)
916 {
917 case REG_TYPE_R_32:
918 case REG_TYPE_SP_32:
919 case REG_TYPE_ZR_32:
920 return AARCH64_OPND_QLF_W;
921
922 case REG_TYPE_R_64:
923 case REG_TYPE_SP_64:
924 case REG_TYPE_ZR_64:
925 return AARCH64_OPND_QLF_X;
926
927 case REG_TYPE_FP_B:
928 case REG_TYPE_FP_H:
929 case REG_TYPE_FP_S:
930 case REG_TYPE_FP_D:
931 case REG_TYPE_FP_Q:
932 return AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);
933
934 default:
935 return AARCH64_OPND_QLF_NIL;
936 }
937 }
938
939 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
940 return FALSE. */
941 static bool
942 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
943 {
944 return (reg_type_masks[type] & (1 << reg->type)) != 0;
945 }
946
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    case REG_TYPE_Z:
      /* An SVE Zn register is only accepted if REG_TYPE allows it, and
	 it must carry an explicit ".s" or ".d" element-size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_Z)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the two-character suffix.  */
      str += 2;
      break;

    default:
      /* Anything else must be a general-purpose register (ZR and SP
	 allowed); its qualifier is implied by the register itself.  */
      if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR_SP))
	return NULL;
      *qualifier = inherent_reg_qualifier (reg);
      break;
    }

  *ccp = str;

  return reg;
}
995
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept general-purpose registers only, with ZR and SP allowed.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_ZR_SP, qualifier);
}
1007
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* Only plain vector registers (REG_TYPE_V) take a leading element
     count; for everything else go straight to the size letter.  */
  if (reg_type != REG_TYPE_V || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is valid only without an explicit element count (or with
	 the degenerate count of 1), and never on plain Vn registers
	 with a multi-element width.  */
      if (reg_type != REG_TYPE_V || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* When an element count was given, the total vector size must be 64
     or 128 bits, or one of the 32-bit forms 2h and 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;
  parsed_type->element_size = element_size;

  *str = ptr;

  return true;
}
1094
1095 /* *STR contains an SVE zero/merge predication suffix. Parse it into
1096 *PARSED_TYPE and point *STR at the end of the suffix. */
1097
1098 static bool
1099 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
1100 {
1101 char *ptr = *str;
1102
1103 /* Skip '/'. */
1104 gas_assert (*ptr == '/');
1105 ptr++;
1106 switch (TOLOWER (*ptr))
1107 {
1108 case 'z':
1109 parsed_type->type = NT_zero;
1110 break;
1111 case 'm':
1112 parsed_type->type = NT_merge;
1113 break;
1114 default:
1115 if (*ptr != '\0' && *ptr != ',')
1116 first_error_fmt (_("unexpected character `%c' in predication type"),
1117 *ptr);
1118 else
1119 first_error (_("missing predication type"));
1120 return false;
1121 }
1122 parsed_type->width = 0;
1123 *str = ptr + 1;
1124 return true;
1125 }
1126
1127 /* Return true if CH is a valid suffix character for registers of
1128 type TYPE. */
1129
1130 static bool
1131 aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
1132 {
1133 switch (type)
1134 {
1135 case REG_TYPE_V:
1136 case REG_TYPE_Z:
1137 case REG_TYPE_ZA:
1138 case REG_TYPE_ZAT:
1139 case REG_TYPE_ZATH:
1140 case REG_TYPE_ZATV:
1141 return ch == '.';
1142
1143 case REG_TYPE_P:
1144 case REG_TYPE_PN:
1145 return ch == '.' || ch == '/';
1146
1147 default:
1148 return false;
1149 }
1150 }
1151
/* Parse an index expression at *STR, storing it in *IMM on success.
   The expression must fold to a compile-time constant; anything else
   is reported via first_error and rejected.  */

static bool
parse_index_expression (char **str, int64_t *imm)
{
  expressionS exp;

  aarch64_get_expression (&exp, str, GE_NO_PREFIX, REJECT_ABSENT);
  if (exp.X_op != O_constant)
    {
      first_error (_("constant expression required"));
      return false;
    }
  *imm = exp.X_add_number;
  return true;
}
1168
/* Parse a register of the type TYPE.

   Return null if the string pointed to by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register, and optionally return the register
   shape and element index information in *TYPEINFO.

   FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.

   FLAGS includes PTR_FULL_REG if the function should ignore any potential
   register index.

   FLAGS includes PTR_GOOD_MATCH if we are sufficiently far into parsing
   an operand that we can be confident that it is a good match.  */

#define PTR_IN_REGLIST (1U << 0)
#define PTR_FULL_REG (1U << 1)
#define PTR_GOOD_MATCH (1U << 2)

static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  /* Remember whether the first character looked like a register name at
     all; used below to pick the better diagnostic inside a reglist.  */
  bool isalpha = ISALPHA (*str);
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;
  unsigned int err_flags = (flags & PTR_IN_REGLIST) ? SEF_IN_REGLIST : 0;

  /* Start from an empty shape/index description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.element_size = 0;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      if (!isalpha && (flags & PTR_IN_REGLIST))
	set_fatal_syntax_error (_("syntax error in register list"));
      else if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      if (flags & PTR_GOOD_MATCH)
	set_fatal_syntax_error (NULL);
      else
	set_expected_reg_error (type, reg, err_flags);
      return NULL;
    }
  /* Narrow TYPE from the caller's acceptable set to the specific type
     of the register actually parsed.  */
  type = reg->type;

  if (aarch64_valid_suffix_char_p (reg->type, *str))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	  /* ZA tile registers are numbered per element size: .b has one
	     tile, .h two, etc., so NUMBER * 8 must stay below the
	     element size.  */
	  if ((reg->type == REG_TYPE_ZAT
	       || reg->type == REG_TYPE_ZATH
	       || reg->type == REG_TYPE_ZATV)
	      && reg->number * 8 >= parsetype.element_size)
	    {
	      set_syntax_error (_("ZA tile number out of range"));
	      return NULL;
	    }
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type != REG_TYPE_V)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
    {
      /* Reject Sn[index] syntax.  */
      if (reg->type != REG_TYPE_Z
	  && reg->type != REG_TYPE_PN
	  && reg->type != REG_TYPE_ZT0
	  && !is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      if (!parse_index_expression (&str, &atype.index))
	return NULL;

      if (! skip_past_char (&str, ']'))
	return NULL;
    }
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_V && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  *ccp = str;

  return reg;
}
1321
/* Parse register.

   Return the register on success; return null otherwise.

   If this is a NEON vector register with additional type information, fill
   in the struct pointed to by VECTYPE (if non-NULL).

   This parser does not handle register lists.  */

static const reg_entry *
aarch64_reg_parse (char **ccp, aarch64_reg_type type,
		   struct vector_type_el *vectype)
{
  /* Flags 0: not inside a register list, and indices are parsed.  */
  return parse_typed_reg (ccp, type, vectype, 0);
}
1337
1338 static inline bool
1339 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1340 {
1341 return (e1.type == e2.type
1342 && e1.defined == e2.defined
1343 && e1.width == e2.width
1344 && e1.element_size == e2.element_size
1345 && e1.index == e2.index);
1346 }
1347
1348 /* Return the register number mask for registers of type REG_TYPE. */
1349
1350 static inline int
1351 reg_type_mask (aarch64_reg_type reg_type)
1352 {
1353 return reg_type == REG_TYPE_P ? 15 : 31;
1354 }
1355
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
   4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  uint32_t val, val_range, mask;
  int in_range;
  int ret_val;
  bool error = false;
  bool expect_index = false;
  unsigned int ptr_flags = PTR_IN_REGLIST;

  if (*str != '{')
    {
      set_expected_reglist_error (type, parse_reg (&str));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.element_size = 0;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1u;
  val_range = -1u;
  in_range = 0;
  mask = reg_type_mask (type);
  /* Parse comma-separated entries; an entry may also be a range
     "Ra-Rb", flagged by IN_RANGE being set in the loop condition.  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      ptr_flags);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_V && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* A range must cover at least two registers; wrap-around
	     (e.g. v31-v2) is permitted and handled by MASK.  */
	  if (val == val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range = (val_range + 1) & mask;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Record every register from VAL_RANGE up to VAL, 5 bits per
	 register, above the 2-bit count field.  */
      if (! error)
	for (;;)
	  {
	    ret_val |= val_range << ((5 * nb_regs) & 31);
	    nb_regs++;
	    if (val_range == val)
	      break;
	    val_range = (val_range + 1) & mask;
	  }
      in_range = 0;
      ptr_flags |= PTR_GOOD_MATCH;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* If any entry carried an index, a single shared index must follow
     the closing brace.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  if (!parse_index_expression (&str, &typeinfo_first.index))
	    error = true;
	  if (! skip_past_char (&str, ']'))
	    error = true;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1515
1516 /* Directives: register aliases. */
1517
1518 static reg_entry *
1519 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1520 {
1521 reg_entry *new;
1522 const char *name;
1523
1524 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1525 {
1526 if (new->builtin)
1527 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1528 str);
1529
1530 /* Only warn about a redefinition if it's not defined as the
1531 same register. */
1532 else if (new->number != number || new->type != type)
1533 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1534
1535 return NULL;
1536 }
1537
1538 name = xstrdup (str);
1539 new = XNEW (reg_entry);
1540
1541 new->name = name;
1542 new->number = number;
1543 new->type = type;
1544 new->builtin = false;
1545
1546 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1547
1548 return new;
1549 }
1550
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the upper-case variant if it actually differs from
	 the name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1630
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  Reaching it means
   the directive appeared in statement position, which is an error.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1638
1639 /* The .unreq directive deletes an alias which was previously defined
1640 by .req. For example:
1641
1642 my_alias .req r11
1643 .unreq my_alias */
1644
1645 static void
1646 s_unreq (int a ATTRIBUTE_UNUSED)
1647 {
1648 char *name;
1649 char saved_char;
1650
1651 name = input_line_pointer;
1652 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1653 saved_char = *input_line_pointer;
1654 *input_line_pointer = 0;
1655
1656 if (!*name)
1657 as_bad (_("invalid syntax for .unreq directive"));
1658 else
1659 {
1660 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1661
1662 if (!reg)
1663 as_bad (_("unknown register alias '%s'"), name);
1664 else if (reg->builtin)
1665 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1666 name);
1667 else
1668 {
1669 char *p;
1670 char *nbuf;
1671
1672 str_hash_delete (aarch64_reg_hsh, name);
1673 free ((char *) reg->name);
1674 free (reg);
1675
1676 /* Also locate the all upper case and all lower case versions.
1677 Do not complain if we cannot find one or the other as it
1678 was probably deleted above. */
1679
1680 nbuf = strdup (name);
1681 for (p = nbuf; *p; p++)
1682 *p = TOUPPER (*p);
1683 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1684 if (reg)
1685 {
1686 str_hash_delete (aarch64_reg_hsh, nbuf);
1687 free ((char *) reg->name);
1688 free (reg);
1689 }
1690
1691 for (p = nbuf; *p; p++)
1692 *p = TOLOWER (*p);
1693 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1694 if (reg)
1695 {
1696 str_hash_delete (aarch64_reg_hsh, nbuf);
1697 free ((char *) reg->name);
1698 free (reg);
1699 }
1700
1701 free (nbuf);
1702 }
1703 }
1704
1705 *input_line_pointer = saved_char;
1706 demand_empty_rest_of_line ();
1707 }
1708
1709 /* Directives: Instruction set selection. */
1710
1711 #if defined OBJ_ELF || defined OBJ_COFF
1712 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1713 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1714 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1715 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1716
/* Create a new mapping symbol for the transition to STATE, at offset
   VALUE within FRAG.  MAP_DATA produces "$d", MAP_INSN "$x"; both are
   local, untyped symbols.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag are kept in ascending address
	 order; an equal-valued predecessor is superseded.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1772
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.  Emit a "$d"
   at VALUE and a STATE symbol BYTES further on, both within FRAG.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The removed symbol was also the first in the frag; clear
	     both cached pointers.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1800
1801 static void mapping_state_2 (enum mstate state, int max_chars);
1802
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1844
1845 /* Same as mapping_state, but MAX_CHARS bytes have already been
1846 allocated. Put the mapping symbol that far back. */
1847
1848 static void
1849 mapping_state_2 (enum mstate state, int max_chars)
1850 {
1851 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1852
1853 if (!SEG_NORMAL (now_seg))
1854 return;
1855
1856 if (mapstate == state)
1857 /* The mapping symbol has already been emitted.
1858 There is nothing else to do. */
1859 return;
1860
1861 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1862 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1863 }
1864 #else
1865 #define mapping_state(x) /* nothing */
1866 #define mapping_state_2(x, y) /* nothing */
1867 #endif
1868
1869 /* Directives: sectioning and alignment. */
1870
/* Implement the .bss directive: switch to the BSS section and record
   the data mapping state.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1880
/* Implement the .even directive: align the current location to a
   2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1892
1893 /* Directives: Literal pools. */
1894
1895 static literal_pool *
1896 find_literal_pool (int size)
1897 {
1898 literal_pool *pool;
1899
1900 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1901 {
1902 if (pool->section == now_seg
1903 && pool->sub_section == now_subseg && pool->size == size)
1904 break;
1905 }
1906
1907 return pool;
1908 }
1909
/* Return the literal pool of size SIZE for the current (sub)section,
   creating and registering a fresh one if none exists.  Also gives a
   new or emptied pool a fresh anchor symbol and id.  */

static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections. */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  The symbol
     is assigned its real location later, by s_ltorg.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    &zero_address_frag, 0);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}
1954
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  On success *EXP is
   rewritten to reference the pool symbol at the entry's offset.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match on symbol and addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Redirect the caller's expression at the pool entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
2014
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.
   Give the pre-created SYMBOLP its NAME, SEGMENT, value VALU and FRAG,
   then append it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
2065
2066
/* Implement the .ltorg directive: dump every non-empty literal pool
   for the current (sub)section at the current location, then mark the
   pools empty so they can be refilled.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Pools exist for 4-byte (.word) and 8-byte and 16-byte literals.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 in the name makes it impossible to collide with a
	 user-written symbol.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
2125
2126 #if defined(OBJ_ELF) || defined(OBJ_COFF)
2127 /* Forward declarations for functions below, in the MD interface
2128 section. */
2129 static struct reloc_table_entry * find_reloc_table_entry (char **);
2130
2131 /* Directives: Data. */
2132 /* N.B. the support for relocation suffix in this directive needs to be
2133 implemented properly. */
2134
2135 static void
2136 s_aarch64_cons (int nbytes)
2137 {
2138 expressionS exp;
2139
2140 #ifdef md_flush_pending_output
2141 md_flush_pending_output ();
2142 #endif
2143
2144 if (is_it_end_of_statement ())
2145 {
2146 demand_empty_rest_of_line ();
2147 return;
2148 }
2149
2150 #ifdef md_cons_align
2151 md_cons_align (nbytes);
2152 #endif
2153
2154 mapping_state (MAP_DATA);
2155 do
2156 {
2157 struct reloc_table_entry *reloc;
2158
2159 expression (&exp);
2160
2161 if (exp.X_op != O_symbol)
2162 emit_expr (&exp, (unsigned int) nbytes);
2163 else
2164 {
2165 skip_past_char (&input_line_pointer, '#');
2166 if (skip_past_char (&input_line_pointer, ':'))
2167 {
2168 reloc = find_reloc_table_entry (&input_line_pointer);
2169 if (reloc == NULL)
2170 as_bad (_("unrecognized relocation suffix"));
2171 else
2172 as_bad (_("unimplemented relocation suffix"));
2173 ignore_rest_of_line ();
2174 return;
2175 }
2176 else
2177 emit_expr (&exp, (unsigned int) nbytes);
2178 }
2179 }
2180 while (*input_line_pointer++ == ',');
2181
2182 /* Put terminator back into stream. */
2183 input_line_pointer--;
2184 demand_empty_rest_of_line ();
2185 }
2186 #endif
2187
2188 #ifdef OBJ_ELF
2189 /* Forward declarations for functions below, in the MD interface
2190 section. */
2191 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
2192
2193 /* Mark symbol that it follows a variant PCS convention. */
2194
2195 static void
2196 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
2197 {
2198 char *name;
2199 char c;
2200 symbolS *sym;
2201 asymbol *bfdsym;
2202 elf_symbol_type *elfsym;
2203
2204 c = get_symbol_name (&name);
2205 if (!*name)
2206 as_bad (_("Missing symbol name in directive"));
2207 sym = symbol_find_or_make (name);
2208 restore_line_pointer (c);
2209 demand_empty_rest_of_line ();
2210 bfdsym = symbol_get_bfdsym (sym);
2211 elfsym = elf_symbol_from (bfdsym);
2212 gas_assert (elfsym);
2213 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2214 }
2215 #endif /* OBJ_ELF */
2216
2217 /* Output a 32-bit word, but mark as an instruction. */
2218
/* Implement the .inst directive: emit each comma-separated constant
   expression as a 32-bit word marked as an instruction (MAP_INSN)
   rather than data, and record the emitted size for DWARF line info.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;	/* Count of 32-bit words emitted so far.  */

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* An operand-less .inst is accepted and emits nothing.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed. */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Pre-swap the value on big-endian targets; presumably so the
	 32-bit encoding lands in memory as a little-endian instruction
	 word regardless of data endianness — NOTE(review): inferred
	 from the SWAP_32 use, confirm against md_number_to_chars.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Tell the DWARF2 machinery how many instruction bytes were emitted.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream. */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2273
2274 static void
2275 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2276 {
2277 demand_empty_rest_of_line ();
2278 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2279 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2280 }
2281
2282 #ifdef OBJ_ELF
2283 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2284
2285 static void
2286 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2287 {
2288 expressionS exp;
2289
2290 expression (&exp);
2291 frag_grow (4);
2292 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2293 BFD_RELOC_AARCH64_TLSDESC_ADD);
2294
2295 demand_empty_rest_of_line ();
2296 }
2297
2298 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2299
2300 static void
2301 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2302 {
2303 expressionS exp;
2304
2305 /* Since we're just labelling the code, there's no need to define a
2306 mapping symbol. */
2307 expression (&exp);
2308 /* Make sure there is enough room in this frag for the following
2309 blr. This trick only works if the blr follows immediately after
2310 the .tlsdesc directive. */
2311 frag_grow (4);
2312 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2313 BFD_RELOC_AARCH64_TLSDESC_CALL);
2314
2315 demand_empty_rest_of_line ();
2316 }
2317
2318 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2319
2320 static void
2321 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2322 {
2323 expressionS exp;
2324
2325 expression (&exp);
2326 frag_grow (4);
2327 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2328 BFD_RELOC_AARCH64_TLSDESC_LDR);
2329
2330 demand_empty_rest_of_line ();
2331 }
2332 #endif /* OBJ_ELF */
2333
2334 #ifdef TE_PE
2335 static void
2336 s_secrel (int dummy ATTRIBUTE_UNUSED)
2337 {
2338 expressionS exp;
2339
2340 do
2341 {
2342 expression (&exp);
2343 if (exp.X_op == O_symbol)
2344 exp.X_op = O_secrel;
2345
2346 emit_expr (&exp, 4);
2347 }
2348 while (*input_line_pointer++ == ',');
2349
2350 input_line_pointer--;
2351 demand_empty_rest_of_line ();
2352 }
2353
2354 void
2355 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
2356 {
2357 expressionS exp;
2358
2359 exp.X_op = O_secrel;
2360 exp.X_add_symbol = symbol;
2361 exp.X_add_number = 0;
2362 emit_expr (&exp, size);
2363 }
2364
2365 static void
2366 s_secidx (int dummy ATTRIBUTE_UNUSED)
2367 {
2368 expressionS exp;
2369
2370 do
2371 {
2372 expression (&exp);
2373 if (exp.X_op == O_symbol)
2374 exp.X_op = O_secidx;
2375
2376 emit_expr (&exp, 2);
2377 }
2378 while (*input_line_pointer++ == ',');
2379
2380 input_line_pointer--;
2381 demand_empty_rest_of_line ();
2382 }
2383 #endif /* TE_PE */
2384
2385 static void s_aarch64_arch (int);
2386 static void s_aarch64_cpu (int);
2387 static void s_aarch64_arch_extension (int);
2388
2389 /* This table describes all the machine specific pseudo-ops the assembler
2390 has to support. The fields are:
2391 pseudo-op name without dot
2392 function to call to execute this pseudo-op
2393 Integer arg to pass to the function. */
2394
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line. */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* Literal pool directives; .pool is an alias for .ltorg.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Target-selection directives.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS descriptor annotations and variant-PCS marking.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* Data directives with reloc-suffix awareness; the third field is
     the emitted size in bytes.  .long aliases .word, .dword aliases
     .xword.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* Half-precision float literals; the third field selects the format
     for float_cons ('h' = IEEE half, 'b' = bfloat16).  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2428 \f
2429
2430 /* Check whether STR points to a register name followed by a comma or the
2431 end of line; REG_TYPE indicates which register types are checked
2432 against. Return TRUE if STR is such a register name; otherwise return
2433 FALSE. The function does not intend to produce any diagnostics, but since
2434 the register parser aarch64_reg_parse, which is called by this function,
2435 does produce diagnostics, we call clear_error to clear any diagnostics
2436 that may be generated by aarch64_reg_parse.
2437 Also, the function returns FALSE directly if there is any user error
2438 present at the function entry. This prevents the existing diagnostics
2439 state from being spoiled.
2440 The function currently serves parse_constant_immediate and
2441 parse_big_immediate only. */
2442 static bool
2443 reg_name_p (char *str, aarch64_reg_type reg_type)
2444 {
2445 const reg_entry *reg;
2446
2447 /* Prevent the diagnostics state from being spoiled. */
2448 if (error_p ())
2449 return false;
2450
2451 reg = aarch64_reg_parse (&str, reg_type, NULL);
2452
2453 /* Clear the parsing error that may be set by the reg parser. */
2454 clear_error ();
2455
2456 if (!reg)
2457 return false;
2458
2459 skip_whitespace (str);
2460 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2461 return true;
2462
2463 return false;
2464 }
2465
2466 /* Parser functions used exclusively in instruction operands. */
2467
2468 /* Parse an immediate expression which may not be constant.
2469
2470 To prevent the expression parser from pushing a register name
2471 into the symbol table as an undefined symbol, firstly a check is
2472 done to find out whether STR is a register of type REG_TYPE followed
2473 by a comma or the end of line. Return FALSE if STR is such a string. */
2474
2475 static bool
2476 parse_immediate_expression (char **str, expressionS *exp,
2477 aarch64_reg_type reg_type)
2478 {
2479 if (reg_name_p (*str, reg_type))
2480 {
2481 set_recoverable_error (_("immediate operand required"));
2482 return false;
2483 }
2484
2485 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2486
2487 if (exp->X_op == O_absent)
2488 {
2489 set_fatal_syntax_error (_("missing immediate expression"));
2490 return false;
2491 }
2492
2493 return true;
2494 }
2495
2496 /* Constant immediate-value read function for use in insn parsing.
2497 STR points to the beginning of the immediate (with the optional
2498 leading #); *VAL receives the value. REG_TYPE says which register
2499 names should be treated as registers rather than as symbolic immediates.
2500
2501 Return TRUE on success; otherwise return FALSE. */
2502
2503 static bool
2504 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2505 {
2506 expressionS exp;
2507
2508 if (! parse_immediate_expression (str, &exp, reg_type))
2509 return false;
2510
2511 if (exp.X_op != O_constant)
2512 {
2513 set_syntax_error (_("constant expression required"));
2514 return false;
2515 }
2516
2517 *val = exp.X_add_number;
2518 return true;
2519 }
2520
/* Compress the IEEE single-precision bit pattern IMM into the 8-bit
   FMOV-style immediate: bits [25:19] become bits [6:0] and the sign
   bit [31] becomes bit [7].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low_bits = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0] */
  uint32_t sign_bit = (imm >> 24) & 0x80;	/* b[31] -> b[7] */

  return low_bits | sign_bit;
}
2527
2528 /* Return TRUE if the single-precision floating-point value encoded in IMM
2529 can be expressed in the AArch64 8-bit signed floating-point format with
2530 3-bit exponent and normalized 4 bits of precision; in other words, the
2531 floating-point value must be expressable as
2532 (+/-) n / 16 * power (2, r)
2533 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2534
static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* A single-precision value fits the AArch64 8-bit FP format when it
     has the bit pattern

       3 32222222 2221111111111
       1 09876543 21098765432109876543210
       n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are 0 or 1 independently, and E == ~e.  */

  uint32_t expected;

  /* The low 19 bits must all be clear.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 25-29 must be the complement of bit 30, replicated.  */
  expected = ((imm >> 30) & 0x1) ? 0x40000000 : 0x3e000000;
  return (imm & 0x7e000000) == expected;
}
2560
2561 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2562 as an IEEE float without any loss of precision. Store the value in
2563 *FPWORD if so. */
2564
static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* A double converts exactly to a float when it looks like

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

     where n, e, s and S are 0 or 1 independently and ~ is the inverse
     of E.  */

  uint32_t hi = imm >> 32;
  uint32_t lo = (uint32_t) imm;
  uint32_t expected;

  /* The 29 low significand bits must be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* Bits 59-62 (E~~~) must be the complement of bit 62 (E),
     replicated.  */
  expected = ((hi >> 30) & 0x1) ? 0x40000000 : 0x38000000;
  if ((hi & 0x78000000) != expected)
    return false;

  /* The rebiased 8-bit exponent must not be all ones.  */
  if ((hi & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (hi & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	    | ((hi << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	    | (lo >> 29);		/* 3 S bits.  */
  return true;
}
2608
2609 /* Return true if we should treat OPERAND as a double-precision
2610 floating-point operand rather than a single-precision one. */
2611 static bool
2612 double_precision_operand_p (const aarch64_opnd_info *operand)
2613 {
2614 /* Check for unsuffixed SVE registers, which are allowed
2615 for LDR and STR but not in instructions that require an
2616 immediate. We get better error messages if we arbitrarily
2617 pick one size, parse the immediate normally, and then
2618 report the match failure in the normal way. */
2619 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2620 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2621 }
2622
2623 /* Parse a floating-point immediate. Return TRUE on success and return the
2624 value in *IMMED in the format of IEEE754 single-precision encoding.
2625 *CCP points to the start of the string; DP_P is TRUE when the immediate
2626 is expected to be in double-precision (N.B. this only matters when
2627 hexadecimal representation is involved). REG_TYPE says which register
2628 names should be treated as registers rather than as symbolic immediates.
2629
2630 This routine accepts any IEEE float; it is up to the callers to reject
2631 invalid ones. */
2632
static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;		/* Resulting IEEE754 single bit pattern.  */
  bool hex_p = false;		/* Set when the 0x form was consumed.  */

  /* The leading '#' is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision. */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit pattern; reject values that cannot be
	     represented exactly as a float.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* Too wide to be a single-precision bit pattern.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A bare register name is not an immediate.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Parse a decimal floating-point literal into littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP). */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2698
2699 /* Less-generic immediate-value read function with the possibility of loading
2700 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2701 instructions.
2702
2703 To prevent the expression parser from pushing a register name into the
2704 symbol table as an undefined symbol, a check is firstly done to find
2705 out whether STR is a register of type REG_TYPE followed by a comma or
2706 the end of line. Return FALSE if STR is such a register. */
2707
2708 static bool
2709 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2710 {
2711 char *ptr = *str;
2712
2713 if (reg_name_p (ptr, reg_type))
2714 {
2715 set_syntax_error (_("immediate operand required"));
2716 return false;
2717 }
2718
2719 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2720
2721 if (inst.reloc.exp.X_op == O_constant)
2722 *imm = inst.reloc.exp.X_add_number;
2723
2724 *str = ptr;
2725
2726 return true;
2727 }
2728
2729 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2730 if NEED_LIBOPCODES is non-zero, the fixup will need
2731 assistance from the libopcodes. */
2732
2733 static inline void
2734 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2735 const aarch64_opnd_info *operand,
2736 int need_libopcodes_p)
2737 {
2738 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2739 reloc->opnd = operand->type;
2740 if (need_libopcodes_p)
2741 reloc->need_libopcodes_p = 1;
2742 };
2743
2744 /* Return TRUE if the instruction needs to be fixed up later internally by
2745 the GAS; otherwise return FALSE. */
2746
2747 static inline bool
2748 aarch64_gas_internal_fixup_p (void)
2749 {
2750 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2751 }
2752
2753 /* Assign the immediate value to the relevant field in *OPERAND if
2754 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2755 needs an internal fixup in a later stage.
2756 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2757 IMM.VALUE that may get assigned with the constant. */
2758 static inline void
2759 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2760 aarch64_opnd_info *operand,
2761 int addr_off_p,
2762 int need_libopcodes_p,
2763 int skip_p)
2764 {
2765 if (reloc->exp.X_op == O_constant)
2766 {
2767 if (addr_off_p)
2768 operand->addr.offset.imm = reloc->exp.X_add_number;
2769 else
2770 operand->imm.value = reloc->exp.X_add_number;
2771 reloc->type = BFD_RELOC_UNUSED;
2772 }
2773 else
2774 {
2775 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2776 /* Tell libopcodes to ignore this operand or not. This is helpful
2777 when one of the operands needs to be fixed up later but we need
2778 libopcodes to check the other operands. */
2779 operand->skip = skip_p;
2780 }
2781 }
2782
2783 /* Relocation modifiers. Each entry in the table contains the textual
2784 name for the relocation which may be placed before a symbol used as
2785 a load/store offset, or add immediate. It must be surrounded by a
2786 leading and trailing colon, for example:
2787
2788 ldr x0, [x1, #:rello:varsym]
2789 add x0, x1, #:rello:varsym */
2790
struct reloc_table_entry
{
  /* Textual name of the modifier, without the surrounding colons.  */
  const char *name;
  /* Non-zero when the relocation is PC-relative.  */
  int pc_rel;
  /* Relocation to generate for each class of consuming instruction.
     A zero entry appears to mark the modifier as not applicable to
     that class — NOTE(review): sentinel use inferred from the table
     initializers; confirm against the operand parsers.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2802
/* Table of the relocation modifiers recognized after '#:'.  Field
   order per entry: name, pc_rel, adr_type, adrp_type, movw_type,
   add_type, ldst_type, ld_literal_type.  */
static struct reloc_table_entry reloc_table[] =
{
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation. */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3331
3332 /* Given the address of a pointer pointing to the textual name of a
3333 relocation as may appear in assembler source, attempt to find its
3334 details in reloc_table. The pointer will be updated to the character
3335 after the trailing colon. On failure, NULL will be returned;
3336 otherwise return the reloc_table_entry. */
3337
3338 static struct reloc_table_entry *
3339 find_reloc_table_entry (char **str)
3340 {
3341 unsigned int i;
3342 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3343 {
3344 int length = strlen (reloc_table[i].name);
3345
3346 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3347 && (*str)[length] == ':')
3348 {
3349 *str += (length + 1);
3350 return &reloc_table[i];
3351 }
3352 }
3353
3354 return NULL;
3355 }
3356
/* Classify relocation TYPE for the force-relocation decision.
   Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK (the generic heuristics decide).  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    /* GOT, TLS descriptor, and TLS GD/LD/IE/LE relocations: the final
       value depends on linker-allocated structures (GOT slots, TP/DTP
       offsets), so the assembler can never resolve them itself.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* Neither definitively forced nor suppressed; let the caller's
	 generic heuristics decide.  */
      return -1;
    }
}
3459
3460 int
3461 aarch64_force_relocation (struct fix *fixp)
3462 {
3463 int res = aarch64_force_reloc (fixp->fx_r_type);
3464
3465 if (res == -1)
3466 return generic_force_reloc (fixp);
3467 return res;
3468 }
3469
/* Mode argument to parse_shift and parse_shifter_operand, selecting
   which shift/extend operators are acceptable in the current
   parsing context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl" (SVE vector-length multiplier)  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3484
/* Parse a <shift> operator on an AArch64 data processing instruction,
   starting at *STR.  MODE restricts which operators and amounts are
   acceptable.  On success the shifter kind and amount are recorded in
   OPERAND->shifter and *STR is advanced past the parsed text.
   Return TRUE on success; otherwise return FALSE (with a syntax error
   recorded via set_syntax_error/set_fatal_syntax_error).  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* The operator name is a run of alphabetic characters.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the name up in the table of known shift/extend operators.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only valid where the caller explicitly allows it.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  /* Likewise MUL is restricted to the two MUL modes.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Now check the operator against the per-mode restrictions.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  An absent amount is allowed when the operand
     closes with ']' in register-offset mode, or for MUL VL.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* A missing amount is only acceptable for an extend operator
	 without an explicit '#' prefix.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3657
3658 /* Parse a <shifter_operand> for a data processing instruction:
3659
3660 #<immediate>
3661 #<immediate>, LSL #imm
3662
3663 Validation of immediate operands is deferred to md_apply_fix.
3664
3665 Return TRUE on success; otherwise return FALSE. */
3666
3667 static bool
3668 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3669 enum parse_shift_mode mode)
3670 {
3671 char *p;
3672
3673 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3674 return false;
3675
3676 p = *str;
3677
3678 /* Accept an immediate expression. */
3679 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
3680 REJECT_ABSENT))
3681 return false;
3682
3683 /* Accept optional LSL for arithmetic immediate values. */
3684 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3685 if (! parse_shift (&p, operand, SHIFTED_LSL))
3686 return false;
3687
3688 /* Not accept any shifter for logical immediate values. */
3689 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3690 && parse_shift (&p, operand, mode))
3691 {
3692 set_syntax_error (_("unexpected shift operator"));
3693 return false;
3694 }
3695
3696 *str = p;
3697 return true;
3698 }
3699
3700 /* Parse a <shifter_operand> for a data processing instruction:
3701
3702 <Rm>
3703 <Rm>, <shift>
3704 #<immediate>
3705 #<immediate>, LSL #imm
3706
3707 where <shift> is handled by parse_shift above, and the last two
3708 cases are handled by the function above.
3709
3710 Validation of immediate operands is deferred to md_apply_fix.
3711
3712 Return TRUE on success; otherwise return FALSE. */
3713
3714 static bool
3715 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3716 enum parse_shift_mode mode)
3717 {
3718 const reg_entry *reg;
3719 aarch64_opnd_qualifier_t qualifier;
3720 enum aarch64_operand_class opd_class
3721 = aarch64_get_operand_class (operand->type);
3722
3723 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3724 if (reg)
3725 {
3726 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3727 {
3728 set_syntax_error (_("unexpected register in the immediate operand"));
3729 return false;
3730 }
3731
3732 if (!aarch64_check_reg_type (reg, REG_TYPE_R_ZR))
3733 {
3734 set_expected_reg_error (REG_TYPE_R_ZR, reg, 0);
3735 return false;
3736 }
3737
3738 operand->reg.regno = reg->number;
3739 operand->qualifier = qualifier;
3740
3741 /* Accept optional shift operation on register. */
3742 if (! skip_past_comma (str))
3743 return true;
3744
3745 if (! parse_shift (str, operand, mode))
3746 return false;
3747
3748 return true;
3749 }
3750 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3751 {
3752 set_syntax_error
3753 (_("integer register expected in the extended/shifted operand "
3754 "register"));
3755 return false;
3756 }
3757
3758 /* We have a shifted immediate variable. */
3759 return parse_shifter_operand_imm (str, operand, mode);
3760 }
3761
/* Parse a shifter operand that may begin with a relocation modifier
   such as ":lo12:symbol", in the forms "#:reloc:..." or ":reloc:...".
   Plain operands are delegated to parse_shifter_operand.
   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :reloc: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step over "#:" or ":".  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* The modifier must have an ADD-class variant to be legal on
	 this instruction.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3823
3824 /* Parse all forms of an address expression. Information is written
3825 to *OPERAND and/or inst.reloc.
3826
3827 The A64 instruction set has the following addressing modes:
3828
3829 Offset
3830 [base] // in SIMD ld/st structure
3831 [base{,#0}] // in ld/st exclusive
3832 [base{,#imm}]
3833 [base,Xm{,LSL #imm}]
3834 [base,Xm,SXTX {#imm}]
3835 [base,Wm,(S|U)XTW {#imm}]
3836 Pre-indexed
3837 [base]! // in ldraa/ldrab exclusive
3838 [base,#imm]!
3839 Post-indexed
3840 [base],#imm
3841 [base],Xm // in SIMD ld/st structure
3842 PC-relative (literal)
3843 label
3844 SVE:
3845 [base,#imm,MUL VL]
3846 [base,Zm.D{,LSL #imm}]
3847 [base,Zm.S,(S|U)XTW {#imm}]
3848 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3849 [Zn.S,#imm]
3850 [Zn.D,#imm]
3851 [Zn.S{, Xm}]
3852 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3853 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3854 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3855
3856 (As a convenience, the notation "=immediate" is permitted in conjunction
3857 with the pc-relative literal load instructions to automatically place an
3858 immediate value or symbolic address in a nearby literal pool and generate
3859 a hidden label which references it.)
3860
3861 Upon a successful parsing, the address structure in *OPERAND will be
3862 filled in the following way:
3863
3864 .base_regno = <base>
3865 .offset.is_reg // 1 if the offset is a register
3866 .offset.imm = <imm>
3867 .offset.regno = <Rm>
3868
3869 For different addressing modes defined in the A64 ISA:
3870
3871 Offset
3872 .pcrel=0; .preind=1; .postind=0; .writeback=0
3873 Pre-indexed
3874 .pcrel=0; .preind=1; .postind=0; .writeback=1
3875 Post-indexed
3876 .pcrel=0; .preind=0; .postind=1; .writeback=1
3877 PC-relative (literal)
3878 .pcrel=1; .preind=1; .postind=0; .writeback=0
3879
3880 The shift/extension information, if any, will be stored in .shifter.
3881 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3882 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3883 corresponding register.
3884
3885 BASE_TYPE says which types of base register should be accepted and
3886 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3887 is the type of shifter that is allowed for immediate offsets,
3888 or SHIFTED_NONE if none.
3889
3890 In all other respects, it is the caller's responsibility to check
3891 for addressing modes not supported by the instruction, and to set
3892 inst.reloc.type. */
3893
/* Worker for all address parsing; the accepted grammar and the way the
   result is recorded in *OPERAND are described in the block comment
   above.  */
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  /* No leading '[': the operand is a PC-relative label or an
     "=immediate" literal-pool request.  */
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* Pick the reloc variant matching the consuming operand.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Remember whether the base even looked like a register name, to
     pick the most helpful diagnostic below.  */
  bool alpha_base_p = ISALPHA (*p);
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      if (reg
	  && aarch64_check_reg_type (reg, REG_TYPE_R_SP)
	  && *base_qualifier == AARCH64_OPND_QLF_W)
	set_syntax_error (_("expected a 64-bit base register"))
;
      else if (alpha_base_p)
	set_syntax_error (_("invalid base register"));
      else
	set_syntax_error (_("expected a base register"));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifter kinds require a 64-bit offset whose
		 element size matches the base (with the SVE_ADDR_ZX
		 exception).  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_("invalid offset register"));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4200
4201 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4202 on success. */
4203 static bool
4204 parse_address (char **str, aarch64_opnd_info *operand)
4205 {
4206 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4207 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4208 REG_TYPE_R64_SP, REG_TYPE_R_ZR, SHIFTED_NONE);
4209 }
4210
4211 /* Parse an address in which SVE vector registers and MUL VL are allowed.
4212 The arguments have the same meaning as for parse_address_main.
4213 Return TRUE on success. */
4214 static bool
4215 parse_sve_address (char **str, aarch64_opnd_info *operand,
4216 aarch64_opnd_qualifier_t *base_qualifier,
4217 aarch64_opnd_qualifier_t *offset_qualifier)
4218 {
4219 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
4220 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
4221 SHIFTED_MUL_VL);
4222 }
4223
4224 /* Parse a register X0-X30. The register must be 64-bit and register 31
4225 is unallocated. */
4226 static bool
4227 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4228 {
4229 const reg_entry *reg = parse_reg (str);
4230 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4231 {
4232 set_expected_reg_error (REG_TYPE_R_64, reg, 0);
4233 return false;
4234 }
4235 operand->reg.regno = reg->number;
4236 operand->qualifier = AARCH64_OPND_QLF_X;
4237 return true;
4238 }
4239
4240 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
4241 Return TRUE on success; otherwise return FALSE. */
4242 static bool
4243 parse_half (char **str, int *internal_fixup_p)
4244 {
4245 char *p = *str;
4246
4247 skip_past_char (&p, '#');
4248
4249 gas_assert (internal_fixup_p);
4250 *internal_fixup_p = 0;
4251
4252 if (*p == ':')
4253 {
4254 struct reloc_table_entry *entry;
4255
4256 /* Try to parse a relocation. Anything else is an error. */
4257 ++p;
4258
4259 if (!(entry = find_reloc_table_entry (&p)))
4260 {
4261 set_syntax_error (_("unknown relocation modifier"));
4262 return false;
4263 }
4264
4265 if (entry->movw_type == 0)
4266 {
4267 set_syntax_error
4268 (_("this relocation modifier is not allowed on this instruction"));
4269 return false;
4270 }
4271
4272 inst.reloc.type = entry->movw_type;
4273 }
4274 else
4275 *internal_fixup_p = 1;
4276
4277 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4278 return false;
4279
4280 *str = p;
4281 return true;
4282 }
4283
4284 /* Parse an operand for an ADRP instruction:
4285 ADRP <Xd>, <label>
4286 Return TRUE on success; otherwise return FALSE. */
4287
4288 static bool
4289 parse_adrp (char **str)
4290 {
4291 char *p;
4292
4293 p = *str;
4294 if (*p == ':')
4295 {
4296 struct reloc_table_entry *entry;
4297
4298 /* Try to parse a relocation. Anything else is an error. */
4299 ++p;
4300 if (!(entry = find_reloc_table_entry (&p)))
4301 {
4302 set_syntax_error (_("unknown relocation modifier"));
4303 return false;
4304 }
4305
4306 if (entry->adrp_type == 0)
4307 {
4308 set_syntax_error
4309 (_("this relocation modifier is not allowed on this instruction"));
4310 return false;
4311 }
4312
4313 inst.reloc.type = entry->adrp_type;
4314 }
4315 else
4316 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4317
4318 inst.reloc.pc_rel = 1;
4319 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4320 return false;
4321 *str = p;
4322 return true;
4323 }
4324
4325 /* Miscellaneous. */
4326
4327 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4328 of SIZE tokens in which index I gives the token for field value I,
4329 or is null if field value I is invalid. If the symbolic operand
4330 can also be given as a 0-based integer, REG_TYPE says which register
4331 names should be treated as registers rather than as symbolic immediates
4332 while parsing that integer. REG_TYPE is REG_TYPE_MAX otherwise.
4333
4334 Return true on success, moving *STR past the operand and storing the
4335 field value in *VAL. */
4336
4337 static int
4338 parse_enum_string (char **str, int64_t *val, const char *const *array,
4339 size_t size, aarch64_reg_type reg_type)
4340 {
4341 expressionS exp;
4342 char *p, *q;
4343 size_t i;
4344
4345 /* Match C-like tokens. */
4346 p = q = *str;
4347 while (ISALNUM (*q))
4348 q++;
4349
4350 for (i = 0; i < size; ++i)
4351 if (array[i]
4352 && strncasecmp (array[i], p, q - p) == 0
4353 && array[i][q - p] == 0)
4354 {
4355 *val = i;
4356 *str = q;
4357 return true;
4358 }
4359
4360 if (reg_type == REG_TYPE_MAX)
4361 return false;
4362
4363 if (!parse_immediate_expression (&p, &exp, reg_type))
4364 return false;
4365
4366 if (exp.X_op == O_constant
4367 && (uint64_t) exp.X_add_number < size)
4368 {
4369 *val = exp.X_add_number;
4370 *str = p;
4371 return true;
4372 }
4373
4374 /* Use the default error for this operand. */
4375 return false;
4376 }
4377
4378 /* Parse an option for a preload instruction. Returns the encoding for the
4379 option, or PARSE_FAIL. */
4380
4381 static int
4382 parse_pldop (char **str)
4383 {
4384 char *p, *q;
4385 const struct aarch64_name_value_pair *o;
4386
4387 p = q = *str;
4388 while (ISALNUM (*q))
4389 q++;
4390
4391 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4392 if (!o)
4393 return PARSE_FAIL;
4394
4395 *str = q;
4396 return o->value;
4397 }
4398
4399 /* Parse an option for a barrier instruction. Returns the encoding for the
4400 option, or PARSE_FAIL. */
4401
4402 static int
4403 parse_barrier (char **str)
4404 {
4405 char *p, *q;
4406 const struct aarch64_name_value_pair *o;
4407
4408 p = q = *str;
4409 while (ISALPHA (*q))
4410 q++;
4411
4412 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4413 if (!o)
4414 return PARSE_FAIL;
4415
4416 *str = q;
4417 return o->value;
4418 }
4419
4420 /* Parse an option for barrier, bti and guarded control stack data
4421 synchronization instructions. Return true on matching the target
4422 options else return false. */
4423
4424 static bool
4425 parse_hint_opt (const char *name, char **str,
4426 const struct aarch64_name_value_pair ** hint_opt)
4427 {
4428 char *p, *q;
4429 const struct aarch64_name_value_pair *o;
4430
4431 p = q = *str;
4432 while (ISALPHA (*q))
4433 q++;
4434
4435 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4436 if (!o)
4437 return false;
4438
4439 if ((strcmp ("gcsb", name) == 0 && o->value != HINT_OPD_DSYNC)
4440 || ((strcmp ("psb", name) == 0 || strcmp ("tsb", name) == 0)
4441 && o->value != HINT_OPD_CSYNC)
4442 || ((strcmp ("bti", name) == 0)
4443 && (o->value != HINT_OPD_C && o->value != HINT_OPD_J
4444 && o->value != HINT_OPD_JC)))
4445 return false;
4446
4447 *str = q;
4448 *hint_opt = o;
4449 return true;
4450 }
4451
4452 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4453 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4454 on failure. Format:
4455
4456 REG_TYPE.QUALIFIER
4457
4458 Side effect: Update STR with current parse position of success.
4459
4460 FLAGS is as for parse_typed_reg. */
4461
4462 static const reg_entry *
4463 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4464 aarch64_opnd_qualifier_t *qualifier, unsigned int flags)
4465 {
4466 struct vector_type_el vectype;
4467 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4468 PTR_FULL_REG | flags);
4469 if (!reg)
4470 return NULL;
4471
4472 if (vectype.type == NT_invtype)
4473 *qualifier = AARCH64_OPND_QLF_NIL;
4474 else
4475 {
4476 *qualifier = vectype_to_qualifier (&vectype);
4477 if (*qualifier == AARCH64_OPND_QLF_NIL)
4478 return NULL;
4479 }
4480
4481 return reg;
4482 }
4483
4484 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4485
4486 #<imm>
4487 <imm>
4488
4489 Function return TRUE if immediate was found, or FALSE.
4490 */
4491 static bool
4492 parse_sme_immediate (char **str, int64_t *imm)
4493 {
4494 int64_t val;
4495 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4496 return false;
4497
4498 *imm = val;
4499 return true;
4500 }
4501
/* Parse index with selection register and immediate offset:

    [<Wv>, <imm>]
    [<Wv>, #<imm>]

   An offset range "<imm>:<imm>" and a trailing vector group specifier
   ", vgx2" / ", vgx4" are also accepted.

   Return true on success, populating OPND with the parsed index.  */

static bool
parse_sme_za_index (char **str, struct aarch64_indexed_za *opnd)
{
  const reg_entry *reg;

  if (!skip_past_char (str, '['))
    {
      set_syntax_error (_("expected '['"));
      return false;
    }

  /* The selection register, encoded in the 2-bit Rv field.  */
  reg = parse_reg (str);
  if (reg == NULL || reg->type != REG_TYPE_R_32)
    {
      set_syntax_error (_("expected a 32-bit selection register"));
      return false;
    }
  opnd->index.regno = reg->number;

  if (!skip_past_char (str, ','))
    {
      set_syntax_error (_("missing immediate offset"));
      return false;
    }

  if (!parse_sme_immediate (str, &opnd->index.imm))
    {
      set_syntax_error (_("expected a constant immediate offset"));
      return false;
    }

  /* An optional "<first>:<last>" offset range; the range must contain
     at least two offsets (last strictly greater than first).  */
  if (skip_past_char (str, ':'))
    {
      int64_t end;
      if (!parse_sme_immediate (str, &end))
	{
	  set_syntax_error (_("expected a constant immediate offset"));
	  return false;
	}
      if (end < opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is less than the"
			      " first offset"));
	  return false;
	}
      if (end == opnd->index.imm)
	{
	  set_syntax_error (_("the last offset is equal to the"
			      " first offset"));
	  return false;
	}
      /* Stored as count-minus-one of offsets covered by the range.  */
      opnd->index.countm1 = (uint64_t) end - opnd->index.imm;
    }

  /* An optional vector group size, "vgx2" or "vgx4" (case-insensitive,
     must not be followed by another letter).  Zero means "none".  */
  opnd->group_size = 0;
  if (skip_past_char (str, ','))
    {
      if (strncasecmp (*str, "vgx2", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 2;
	}
      else if (strncasecmp (*str, "vgx4", 4) == 0 && !ISALPHA ((*str)[4]))
	{
	  *str += 4;
	  opnd->group_size = 4;
	}
      else
	{
	  set_syntax_error (_("invalid vector group size"));
	  return false;
	}
    }

  if (!skip_past_char (str, ']'))
    {
      set_syntax_error (_("expected ']'"));
      return false;
    }

  return true;
}
4592
4593 /* Parse a register of type REG_TYPE that might have an element type
4594 qualifier and that is indexed by two values: a 32-bit register,
4595 followed by an immediate. The ranges of the register and the
4596 immediate vary by opcode and are checked in libopcodes.
4597
4598 Return true on success, populating OPND with information about
4599 the operand and setting QUALIFIER to the register qualifier.
4600
4601 Field format examples:
4602
4603 <Pm>.<T>[<Wv>< #<imm>]
4604 ZA[<Wv>, #<imm>]
4605 <ZAn><HV>.<T>[<Wv>, #<imm>]
4606
4607 FLAGS is as for parse_typed_reg. */
4608
4609 static bool
4610 parse_dual_indexed_reg (char **str, aarch64_reg_type reg_type,
4611 struct aarch64_indexed_za *opnd,
4612 aarch64_opnd_qualifier_t *qualifier,
4613 unsigned int flags)
4614 {
4615 const reg_entry *reg = parse_reg_with_qual (str, reg_type, qualifier, flags);
4616 if (!reg)
4617 return false;
4618
4619 opnd->v = aarch64_check_reg_type (reg, REG_TYPE_ZATV);
4620 opnd->regno = reg->number;
4621
4622 return parse_sme_za_index (str, opnd);
4623 }
4624
4625 /* Like parse_sme_za_hv_tiles_operand, but expect braces around the
4626 operand. */
4627
4628 static bool
4629 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4630 struct aarch64_indexed_za *opnd,
4631 aarch64_opnd_qualifier_t *qualifier)
4632 {
4633 if (!skip_past_char (str, '{'))
4634 {
4635 set_expected_reglist_error (REG_TYPE_ZATHV, parse_reg (str));
4636 return false;
4637 }
4638
4639 if (!parse_dual_indexed_reg (str, REG_TYPE_ZATHV, opnd, qualifier,
4640 PTR_IN_REGLIST))
4641 return false;
4642
4643 if (!skip_past_char (str, '}'))
4644 {
4645 set_syntax_error (_("expected '}'"));
4646 return false;
4647 }
4648
4649 return true;
4650 }
4651
4652 /* Parse list of up to eight 64-bit element tile names separated by commas in
4653 SME's ZERO instruction:
4654
4655 ZERO { <mask> }
4656
4657 Function returns <mask>:
4658
4659 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4660 */
4661 static int
4662 parse_sme_zero_mask(char **str)
4663 {
4664 char *q;
4665 int mask;
4666 aarch64_opnd_qualifier_t qualifier;
4667 unsigned int ptr_flags = PTR_IN_REGLIST;
4668
4669 mask = 0x00;
4670 q = *str;
4671 do
4672 {
4673 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA_ZAT,
4674 &qualifier, ptr_flags);
4675 if (!reg)
4676 return PARSE_FAIL;
4677
4678 if (reg->type == REG_TYPE_ZA)
4679 {
4680 if (qualifier != AARCH64_OPND_QLF_NIL)
4681 {
4682 set_syntax_error ("ZA should not have a size suffix");
4683 return PARSE_FAIL;
4684 }
4685 /* { ZA } is assembled as all-ones immediate. */
4686 mask = 0xff;
4687 }
4688 else
4689 {
4690 int regno = reg->number;
4691 if (qualifier == AARCH64_OPND_QLF_S_B)
4692 {
4693 /* { ZA0.B } is assembled as all-ones immediate. */
4694 mask = 0xff;
4695 }
4696 else if (qualifier == AARCH64_OPND_QLF_S_H)
4697 mask |= 0x55 << regno;
4698 else if (qualifier == AARCH64_OPND_QLF_S_S)
4699 mask |= 0x11 << regno;
4700 else if (qualifier == AARCH64_OPND_QLF_S_D)
4701 mask |= 0x01 << regno;
4702 else if (qualifier == AARCH64_OPND_QLF_S_Q)
4703 {
4704 set_syntax_error (_("ZA tile masks do not operate at .Q"
4705 " granularity"));
4706 return PARSE_FAIL;
4707 }
4708 else if (qualifier == AARCH64_OPND_QLF_NIL)
4709 {
4710 set_syntax_error (_("missing ZA tile size"));
4711 return PARSE_FAIL;
4712 }
4713 else
4714 {
4715 set_syntax_error (_("invalid ZA tile"));
4716 return PARSE_FAIL;
4717 }
4718 }
4719 ptr_flags |= PTR_GOOD_MATCH;
4720 }
4721 while (skip_past_char (&q, ','));
4722
4723 *str = q;
4724 return mask;
4725 }
4726
4727 /* Wraps in curly braces <mask> operand ZERO instruction:
4728
4729 ZERO { <mask> }
4730
4731 Function returns value of <mask> bit-field.
4732 */
4733 static int
4734 parse_sme_list_of_64bit_tiles (char **str)
4735 {
4736 int regno;
4737
4738 if (!skip_past_char (str, '{'))
4739 {
4740 set_syntax_error (_("expected '{'"));
4741 return PARSE_FAIL;
4742 }
4743
4744 /* Empty <mask> list is an all-zeros immediate. */
4745 if (!skip_past_char (str, '}'))
4746 {
4747 regno = parse_sme_zero_mask (str);
4748 if (regno == PARSE_FAIL)
4749 return PARSE_FAIL;
4750
4751 if (!skip_past_char (str, '}'))
4752 {
4753 set_syntax_error (_("expected '}'"));
4754 return PARSE_FAIL;
4755 }
4756 }
4757 else
4758 regno = 0x00;
4759
4760 return regno;
4761 }
4762
4763 /* Parse streaming mode operand for SMSTART and SMSTOP.
4764
4765 {SM | ZA}
4766
4767 Function returns 's' if SM or 'z' if ZM is parsed. Otherwise PARSE_FAIL.
4768 */
4769 static int
4770 parse_sme_sm_za (char **str)
4771 {
4772 char *p, *q;
4773
4774 p = q = *str;
4775 while (ISALPHA (*q))
4776 q++;
4777
4778 if ((q - p != 2)
4779 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4780 {
4781 set_syntax_error (_("expected SM or ZA operand"));
4782 return PARSE_FAIL;
4783 }
4784
4785 *str = q;
4786 return TOLOWER (p[0]);
4787 }
4788
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-null, it receives the register's flag bits (or 0 for
   an implementation-defined S<...> name).  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, stopping at the
     first character that cannot be part of a register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Range-check each field of the generic encoding.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the 16-bit system register encoding.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* A known name: diagnose (but still accept and encode) names the
	 selected processor does not support, and warn about deprecated
	 names.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags,
					       &o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4863
4864 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4865 for the option, or NULL. */
4866
4867 static const aarch64_sys_ins_reg *
4868 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4869 {
4870 char *p, *q;
4871 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4872 const aarch64_sys_ins_reg *o;
4873
4874 p = buf;
4875 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4876 if (p < buf + (sizeof (buf) - 1))
4877 *p++ = TOLOWER (*q);
4878 *p = '\0';
4879
4880 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4881 valid system register. This is enforced by construction of the hash
4882 table. */
4883 if (p - buf != q - *str)
4884 return NULL;
4885
4886 o = str_hash_find (sys_ins_regs, buf);
4887 if (!o)
4888 return NULL;
4889
4890 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4891 o->name, o->value, o->flags, 0))
4892 as_bad (_("selected processor does not support system register "
4893 "name '%s'"), buf);
4894 if (aarch64_sys_reg_deprecated_p (o->flags))
4895 as_warn (_("system register name '%s' is deprecated and may be "
4896 "removed in a future release"), buf);
4897
4898 *str = q;
4899 return o;
4900 }
4901 \f
/* Helper macros used by the operand parsers in md_assemble's orbit.
   They assume locals named STR (parse position), REG, VAL and
   IMM_REG_TYPE, plus a FAILURE label, in the enclosing function.  */

/* Skip past character CHR, or branch to FAILURE.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of type REGTYPE into REG, or branch to FAILURE.  */
#define po_reg_or_fail(regtype) do {				\
    reg = aarch64_reg_parse (&str, regtype, NULL);		\
    if (!reg)							\
      goto failure;						\
} while (0)

/* Parse an integer or FP register of type REG_TYPE, filling in the
   operand's register number and inherent qualifier, or branch to
   FAILURE with an "expected register" diagnostic.  */
#define po_int_fp_reg_or_fail(reg_type) do {			\
    reg = parse_reg (&str);					\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_expected_reg_error (reg_type, reg, 0);		\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = inherent_reg_qualifier (reg);		\
  } while (0)

/* Parse a constant immediate into VAL with no range check ("nc"),
   or branch to FAILURE.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or branch to FAILURE.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse an enumeration keyword (or immediate) from ARRAY into VAL,
   or branch to FAILURE.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Like po_enum_or_fail but accept keywords only, never immediates
   (REG_TYPE_MAX disables immediate parsing).  */
#define po_strict_enum_or_fail(array) do {			\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), REG_TYPE_MAX))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR and branch to FAILURE if it is false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4956 \f
/* A primitive log calculator: return floor(log2(N)) for N >= 1, and 0
   when N is 0.  */

static inline unsigned int
get_log2 (unsigned int n)
{
  unsigned int log = 0;

  for (; n > 1; n >>= 1)
    log++;
  return log;
}
4970
/* Encode IMM into the 12-bit imm field (bits [21:10]) of an add/sub
   immediate instruction.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned int imm12_lsb = 10;
  return imm << imm12_lsb;
}
4977
/* Encode CNT into the shift-amount field (bit 22) of an add/sub
   immediate instruction.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned int shift_lsb = 22;
  return cnt << shift_lsb;
}
4984
4985
/* Encode the 21-bit imm field of an ADR instruction: the low two bits
   go to immlo (bits [30:29]) and the high nineteen to immhi
   (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;		  /* [1:0]  -> [30:29] */
  uint32_t immhi = (imm >> 2) & 0x7ffff;  /* [20:2] -> [23:5]  */
  return (immlo << 29) | (immhi << 5);
}
4993
/* Encode IMM into the 16-bit immediate field (bits [20:5]) of a move
   wide immediate instruction.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned int imm16_lsb = 5;
  return imm << imm16_lsb;
}
5000
/* Encode the 26-bit offset (bits [25:0]) of an unconditional branch.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t imm26_mask = (1u << 26) - 1;
  return ofs & imm26_mask;
}
5007
/* Encode the 19-bit offset (bits [23:5]) of a conditional branch or a
   compare & branch instruction.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t imm19_mask = (1u << 19) - 1;
  return (ofs & imm19_mask) << 5;
}
5014
/* Encode the 19-bit offset (bits [23:5]) of a load-literal
   instruction.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t imm19_mask = (1u << 19) - 1;
  return (ofs & imm19_mask) << 5;
}
5021
/* Encode the 14-bit offset (bits [18:5]) of a test & branch
   instruction.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t imm14_mask = (1u << 14) - 1;
  return (ofs & imm14_mask) << 5;
}
5028
/* Encode the 16-bit imm field (bits [20:5]) of SVC/HVC/SMC.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned int imm16_lsb = 5;
  return imm << imm16_lsb;
}
5035
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling the op
   bit (bit 30).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
5042
/* Reencode a MOVN/MOVZ opcode as MOVZ by setting the opc bit
   (bit 30).  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode | movz_bit;
}
5048
/* Reencode a MOVN/MOVZ opcode as MOVN by clearing the opc bit
   (bit 30).  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode & ~movz_bit;
}
5054
5055 /* Overall per-instruction processing. */
5056
5057 /* We need to be able to fix up arbitrary expressions in some statements.
5058 This is so that we can handle symbols that are an arbitrary distance from
5059 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5060 which returns part of an address in a form which will be valid for
5061 a data instruction. We do this by pushing the expression into a symbol
5062 in the expr_section, and creating a fix for that. */
5063
5064 static fixS *
5065 fix_new_aarch64 (fragS * frag,
5066 int where,
5067 short int size,
5068 expressionS * exp,
5069 int pc_rel,
5070 int reloc)
5071 {
5072 fixS *new_fix;
5073
5074 switch (exp->X_op)
5075 {
5076 case O_constant:
5077 case O_symbol:
5078 case O_add:
5079 case O_subtract:
5080 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5081 break;
5082
5083 default:
5084 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5085 pc_rel, reloc);
5086 break;
5087 }
5088 return new_fix;
5089 }
5090 \f
/* Diagnostics on operands errors.  */

/* Non-zero (the default) to emit the verbose, multi-line operand error
   message.  Cleared by the -mno-verbose-error command-line option.  */
static int verbose_error_p = 1;
5096
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging: human-readable names
   for the aarch64_operand_error_kind enum values, indexed by the enum,
   used by the DEBUG_TRACE output.  Keep in sync with the enum.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_INVALID_VG_SIZE",
  "AARCH64_OPDE_REG_LIST_LENGTH",
  "AARCH64_OPDE_REG_LIST_STRIDE",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_OTHER_ERROR",
  "AARCH64_OPDE_INVALID_REGNO",
};
#endif /* DEBUG_AARCH64 */
5119
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enum's numerical order encoding
     severity.  These asserts document (and verify at runtime) the
     ordering assumptions that must hold for that to be valid.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VG_SIZE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_REG_LIST_LENGTH > AARCH64_OPDE_INVALID_VG_SIZE);
  gas_assert (AARCH64_OPDE_REG_LIST_STRIDE > AARCH64_OPDE_REG_LIST_LENGTH);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST_STRIDE);
  gas_assert (AARCH64_OPDE_INVALID_REGNO > AARCH64_OPDE_OTHER_ERROR);
  return lhs > rhs;
}
5146
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  Returns a pointer to a static
   buffer, overwritten on each call.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5175
5176 static void
5177 reset_aarch64_instruction (aarch64_instruction *instruction)
5178 {
5179 memset (instruction, '\0', sizeof (aarch64_instruction));
5180 instruction->reloc.type = BFD_RELOC_UNUSED;
5181 }
5182
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* A single recorded operand error: the opcode template it was found
   against, the error details, and a link to the next record.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;	/* Template the error was matched against.  */
  aarch64_operand_error detail;	/* Kind, operand index and message.  */
  struct operand_error_record *next;	/* Singly-linked list link.  */
};

typedef struct operand_error_record operand_error_record;

/* Head and tail of a singly-linked list of operand_error_record.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
5202
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled from previous lines to avoid repeated
   allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5214
5215 /* Initialize the data structure that stores the operand mismatch
5216 information on assembling one line of the assembly code. */
5217 static void
5218 init_operand_error_report (void)
5219 {
5220 if (operand_error_report.head != NULL)
5221 {
5222 gas_assert (operand_error_report.tail != NULL);
5223 operand_error_report.tail->next = free_opnd_error_record_nodes;
5224 free_opnd_error_record_nodes = operand_error_report.head;
5225 operand_error_report.head = NULL;
5226 operand_error_report.tail = NULL;
5227 return;
5228 }
5229 gas_assert (operand_error_report.tail == NULL);
5230 }
5231
5232 /* Return TRUE if some operand error has been recorded during the
5233 parsing of the current assembly line using the opcode *OPCODE;
5234 otherwise return FALSE. */
5235 static inline bool
5236 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5237 {
5238 operand_error_record *record = operand_error_report.head;
5239 return record && record->opcode == opcode;
5240 }
5241
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record, reusing a free node when available.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Either a fresh record for this opcode, or a replacement for the
     existing, less significant error.  */
  record->detail = new_record->detail;
}
5293
5294 static inline void
5295 record_operand_error_info (const aarch64_opcode *opcode,
5296 aarch64_operand_error *error_info)
5297 {
5298 operand_error_record record;
5299 record.opcode = opcode;
5300 record.detail = *error_info;
5301 add_operand_error_record (&record);
5302 }
5303
5304 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5305 error message *ERROR, for operand IDX (count from 0). */
5306
5307 static void
5308 record_operand_error (const aarch64_opcode *opcode, int idx,
5309 enum aarch64_operand_error_kind kind,
5310 const char* error)
5311 {
5312 aarch64_operand_error info;
5313 memset(&info, 0, sizeof (info));
5314 info.index = idx;
5315 info.kind = kind;
5316 info.error = error;
5317 info.non_fatal = false;
5318 record_operand_error_info (opcode, &info);
5319 }
5320
5321 static void
5322 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5323 enum aarch64_operand_error_kind kind,
5324 const char* error, const int *extra_data)
5325 {
5326 aarch64_operand_error info;
5327 info.index = idx;
5328 info.kind = kind;
5329 info.error = error;
5330 info.data[0].i = extra_data[0];
5331 info.data[1].i = extra_data[1];
5332 info.data[2].i = extra_data[2];
5333 info.non_fatal = false;
5334 record_operand_error_info (opcode, &info);
5335 }
5336
5337 static void
5338 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5339 const char* error, int lower_bound,
5340 int upper_bound)
5341 {
5342 int data[3] = {lower_bound, upper_bound, 0};
5343 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5344 error, data);
5345 }
5346
/* Remove the operand error record for *OPCODE, if one exists, returning
   its node to the free list.  Only the head of the report list can
   belong to *OPCODE (see add_operand_error_record).  */
static void ATTRIBUTE_UNUSED
remove_operand_error_record (const aarch64_opcode *opcode)
{
  if (opcode_has_operand_error_p (opcode))
    {
      operand_error_record* record = operand_error_report.head;
      gas_assert (record != NULL && operand_error_report.tail != NULL);
      /* Unlink the head record and push it onto the free list.  */
      operand_error_report.head = record->next;
      record->next = free_opnd_error_record_nodes;
      free_opnd_error_record_nodes = record;
      if (operand_error_report.head == NULL)
	{
	  /* The list is now empty; clear the stale tail pointer too.  */
	  gas_assert (operand_error_report.tail == record);
	  operand_error_report.tail = NULL;
	}
    }
}
5365
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
   "Best" means the sequence agreeing with the largest number of the
   instruction's operand qualifiers.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list; an empty
	 sequence marks the end of the meaningful entries.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers this sequence matches.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
5415
5416 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5417 corresponding operands in *INSTR. */
5418
5419 static inline void
5420 assign_qualifier_sequence (aarch64_inst *instr,
5421 const aarch64_opnd_qualifier_t *qualifiers)
5422 {
5423 int i = 0;
5424 int num_opnds = aarch64_num_of_operands (instr->opcode);
5425 gas_assert (num_opnds);
5426 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5427 instr->operands[i].qualifier = *qualifiers;
5428 }
5429
5430 /* Callback used by aarch64_print_operand to apply STYLE to the
5431 disassembler output created from FMT and ARGS. The STYLER object holds
5432 any required state. Must return a pointer to a string (created from FMT
5433 and ARGS) that will continue to be valid until the complete disassembled
5434 instruction has been printed.
5435
5436 We don't currently add any styling to the output of the disassembler as
5437 used within assembler error messages, and so STYLE is ignored here. A
5438 new string is allocated on the obstack help within STYLER and returned
5439 to the caller. */
5440
5441 static const char *aarch64_apply_style
5442 (struct aarch64_styler *styler,
5443 enum disassembler_style style ATTRIBUTE_UNUSED,
5444 const char *fmt, va_list args)
5445 {
5446 int res;
5447 char *ptr;
5448 struct obstack *stack = (struct obstack *) styler->state;
5449 va_list ap;
5450
5451 /* Calculate the required space. */
5452 va_copy (ap, args);
5453 res = vsnprintf (NULL, 0, fmt, ap);
5454 va_end (ap);
5455 gas_assert (res >= 0);
5456
5457 /* Allocate space on the obstack and format the result. */
5458 ptr = (char *) obstack_alloc (stack, res + 1);
5459 res = vsnprintf (ptr, (res + 1), fmt, args);
5460 gas_assert (res >= 0);
5461
5462 return ptr;
5463 }
5464
/* Print operands for the diagnosis purpose: append to BUF the textual
   form of OPNDS, as described by OPCODE, separated by ", ".  BUF must
   already contain a NUL-terminated string with room for the result.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Styled fragments are accumulated on a local obstack; see
     aarch64_apply_style.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  obstack_free (&content, NULL);
}
5516
/* Send to stderr a string as information, prefixed with the current
   file name and line number (when known) and "Info: ".  FORMAT and the
   following arguments are as for printf.  */

static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file = as_where (&line);
  va_list args;

  if (file != NULL)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
5540
/* See if the AARCH64_OPDE_SYNTAX_ERROR error described by DETAIL
   relates to registers or register lists.  If so, return a string that
   reports the error against "operand %d", otherwise return null.

   NOTE(review): from the code below, DETAIL->data appears to hold:
     data[0].i - flags for register types acceptable as a plain operand
		 (possibly with SEF_DEFAULT_ERROR set);
     data[1].i - flags for register types expected inside a register list
		 (possibly with SEF_IN_REGLIST set);
     data[2].i - flags for the register type that was actually found.
   Confirm against the producer of these records before relying on this.

   The checks below are ordered from most to least specific; do not
   reorder them.  */

static const char *
get_reg_error_message (const aarch64_operand_error *detail)
{
  /* Handle the case where we found a register that was expected
     to be in a register list outside of a register list.  */
  if ((detail->data[1].i & detail->data[2].i) != 0
      && (detail->data[1].i & SEF_IN_REGLIST) == 0)
    return _("missing braces at operand %d");

  /* If some opcodes expected a register, and we found a register,
     complain about the difference.  */
  if (detail->data[2].i)
    {
      /* Prefer the in-list expectation when we were parsing inside a
	 list, otherwise fall back to the plain-operand expectation.  */
      unsigned int expected = (detail->data[1].i & SEF_IN_REGLIST
			       ? detail->data[1].i & ~SEF_IN_REGLIST
			       : detail->data[0].i & ~SEF_DEFAULT_ERROR);
      const char *msg = get_reg_expected_msg (expected, detail->data[2].i);
      if (!msg)
	msg = N_("unexpected register type at operand %d");
      return msg;
    }

  /* Handle the case where we got to the point of trying to parse a
     register within a register list, but didn't find a known register.  */
  if (detail->data[1].i & SEF_IN_REGLIST)
    {
      unsigned int expected = detail->data[1].i & ~SEF_IN_REGLIST;
      const char *msg = get_reg_expected_msg (expected, 0);
      if (!msg)
	msg = _("invalid register list at operand %d");
      return msg;
    }

  /* Punt if register-related problems weren't the only errors.  */
  if (detail->data[0].i & SEF_DEFAULT_ERROR)
    return NULL;

  /* Handle the case where the only acceptable things are registers.  */
  if (detail->data[1].i == 0)
    {
      const char *msg = get_reg_expected_msg (detail->data[0].i, 0);
      if (!msg)
	msg = _("expected a register at operand %d");
      return msg;
    }

  /* Handle the case where the only acceptable things are register lists,
     and there was no opening '{'.  */
  if (detail->data[0].i == 0)
    return _("expected '{' at operand %d");

  return _("expected a register or register list at operand %d");
}
5598
5599 /* Output one operand error record. */
5600
5601 static void
5602 output_operand_error_record (const operand_error_record *record, char *str)
5603 {
5604 const aarch64_operand_error *detail = &record->detail;
5605 int idx = detail->index;
5606 const aarch64_opcode *opcode = record->opcode;
5607 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
5608 : AARCH64_OPND_NIL);
5609
5610 typedef void (*handler_t)(const char *format, ...);
5611 handler_t handler = detail->non_fatal ? as_warn : as_bad;
5612 const char *msg = detail->error;
5613
5614 switch (detail->kind)
5615 {
5616 case AARCH64_OPDE_NIL:
5617 gas_assert (0);
5618 break;
5619
5620 case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
5621 handler (_("this `%s' should have an immediately preceding `%s'"
5622 " -- `%s'"),
5623 detail->data[0].s, detail->data[1].s, str);
5624 break;
5625
5626 case AARCH64_OPDE_EXPECTED_A_AFTER_B:
5627 handler (_("the preceding `%s' should be followed by `%s` rather"
5628 " than `%s` -- `%s'"),
5629 detail->data[1].s, detail->data[0].s, opcode->name, str);
5630 break;
5631
5632 case AARCH64_OPDE_SYNTAX_ERROR:
5633 if (!msg && idx >= 0)
5634 {
5635 msg = get_reg_error_message (detail);
5636 if (msg)
5637 {
5638 char *full_msg = xasprintf (msg, idx + 1);
5639 handler (_("%s -- `%s'"), full_msg, str);
5640 free (full_msg);
5641 break;
5642 }
5643 }
5644 /* Fall through. */
5645
5646 case AARCH64_OPDE_RECOVERABLE:
5647 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
5648 case AARCH64_OPDE_OTHER_ERROR:
5649 /* Use the prepared error message if there is, otherwise use the
5650 operand description string to describe the error. */
5651 if (msg != NULL)
5652 {
5653 if (idx < 0)
5654 handler (_("%s -- `%s'"), msg, str);
5655 else
5656 handler (_("%s at operand %d -- `%s'"),
5657 msg, idx + 1, str);
5658 }
5659 else
5660 {
5661 gas_assert (idx >= 0);
5662 handler (_("operand %d must be %s -- `%s'"), idx + 1,
5663 aarch64_get_operand_desc (opd_code), str);
5664 }
5665 break;
5666
5667 case AARCH64_OPDE_INVALID_VARIANT:
5668 handler (_("operand mismatch -- `%s'"), str);
5669 if (verbose_error_p)
5670 {
5671 /* We will try to correct the erroneous instruction and also provide
5672 more information e.g. all other valid variants.
5673
5674 The string representation of the corrected instruction and other
5675 valid variants are generated by
5676
5677 1) obtaining the intermediate representation of the erroneous
5678 instruction;
5679 2) manipulating the IR, e.g. replacing the operand qualifier;
5680 3) printing out the instruction by calling the printer functions
5681 shared with the disassembler.
5682
5683 The limitation of this method is that the exact input assembly
5684 line cannot be accurately reproduced in some cases, for example an
5685 optional operand present in the actual assembly line will be
5686 omitted in the output; likewise for the optional syntax rules,
5687 e.g. the # before the immediate. Another limitation is that the
5688 assembly symbols and relocation operations in the assembly line
5689 currently cannot be printed out in the error report. Last but not
5690 least, when there is other error(s) co-exist with this error, the
5691 'corrected' instruction may be still incorrect, e.g. given
5692 'ldnp h0,h1,[x0,#6]!'
5693 this diagnosis will provide the version:
5694 'ldnp s0,s1,[x0,#6]!'
5695 which is still not right. */
5696 size_t len = strlen (get_mnemonic_name (str));
5697 int i, qlf_idx;
5698 bool result;
5699 char buf[2048];
5700 aarch64_inst *inst_base = &inst.base;
5701 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
5702
5703 /* Init inst. */
5704 reset_aarch64_instruction (&inst);
5705 inst_base->opcode = opcode;
5706
5707 /* Reset the error report so that there is no side effect on the
5708 following operand parsing. */
5709 init_operand_error_report ();
5710
5711 /* Fill inst. */
5712 result = parse_operands (str + len, opcode)
5713 && programmer_friendly_fixup (&inst);
5714 gas_assert (result);
5715 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
5716 NULL, NULL, insn_sequence);
5717 gas_assert (!result);
5718
5719 /* Find the most matched qualifier sequence. */
5720 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
5721 gas_assert (qlf_idx > -1);
5722
5723 /* Assign the qualifiers. */
5724 assign_qualifier_sequence (inst_base,
5725 opcode->qualifiers_list[qlf_idx]);
5726
5727 /* Print the hint. */
5728 output_info (_(" did you mean this?"));
5729 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5730 print_operands (buf, opcode, inst_base->operands);
5731 output_info (_(" %s"), buf);
5732
5733 /* Print out other variant(s) if there is any. */
5734 if (qlf_idx != 0 ||
5735 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
5736 output_info (_(" other valid variant(s):"));
5737
5738 /* For each pattern. */
5739 qualifiers_list = opcode->qualifiers_list;
5740 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5741 {
5742 /* Most opcodes has much fewer patterns in the list.
5743 First NIL qualifier indicates the end in the list. */
5744 if (empty_qualifier_sequence_p (*qualifiers_list))
5745 break;
5746
5747 if (i != qlf_idx)
5748 {
5749 /* Mnemonics name. */
5750 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
5751
5752 /* Assign the qualifiers. */
5753 assign_qualifier_sequence (inst_base, *qualifiers_list);
5754
5755 /* Print instruction. */
5756 print_operands (buf, opcode, inst_base->operands);
5757
5758 output_info (_(" %s"), buf);
5759 }
5760 }
5761 }
5762 break;
5763
5764 case AARCH64_OPDE_UNTIED_IMMS:
5765 handler (_("operand %d must have the same immediate value "
5766 "as operand 1 -- `%s'"),
5767 detail->index + 1, str);
5768 break;
5769
5770 case AARCH64_OPDE_UNTIED_OPERAND:
5771 handler (_("operand %d must be the same register as operand 1 -- `%s'"),
5772 detail->index + 1, str);
5773 break;
5774
5775 case AARCH64_OPDE_INVALID_REGNO:
5776 handler (_("%s%d-%s%d expected at operand %d -- `%s'"),
5777 detail->data[0].s, detail->data[1].i,
5778 detail->data[0].s, detail->data[2].i, idx + 1, str);
5779 break;
5780
5781 case AARCH64_OPDE_OUT_OF_RANGE:
5782 if (detail->data[0].i != detail->data[1].i)
5783 handler (_("%s out of range %d to %d at operand %d -- `%s'"),
5784 msg ? msg : _("immediate value"),
5785 detail->data[0].i, detail->data[1].i, idx + 1, str);
5786 else
5787 handler (_("%s must be %d at operand %d -- `%s'"),
5788 msg ? msg : _("immediate value"),
5789 detail->data[0].i, idx + 1, str);
5790 break;
5791
5792 case AARCH64_OPDE_INVALID_VG_SIZE:
5793 if (detail->data[0].i == 0)
5794 handler (_("unexpected vector group size at operand %d -- `%s'"),
5795 idx + 1, str);
5796 else
5797 handler (_("operand %d must have a vector group size of %d -- `%s'"),
5798 idx + 1, detail->data[0].i, str);
5799 break;
5800
5801 case AARCH64_OPDE_REG_LIST_LENGTH:
5802 if (detail->data[0].i == (1 << 1))
5803 handler (_("expected a single-register list at operand %d -- `%s'"),
5804 idx + 1, str);
5805 else if ((detail->data[0].i & -detail->data[0].i) == detail->data[0].i)
5806 handler (_("expected a list of %d registers at operand %d -- `%s'"),
5807 get_log2 (detail->data[0].i), idx + 1, str);
5808 else if (detail->data[0].i == 0x14)
5809 handler (_("expected a list of %d or %d registers at"
5810 " operand %d -- `%s'"),
5811 2, 4, idx + 1, str);
5812 else
5813 handler (_("invalid number of registers in the list"
5814 " at operand %d -- `%s'"), idx + 1, str);
5815 break;
5816
5817 case AARCH64_OPDE_REG_LIST_STRIDE:
5818 if (detail->data[0].i == (1 << 1))
5819 handler (_("the register list must have a stride of %d"
5820 " at operand %d -- `%s'"), 1, idx + 1, str);
5821 else if (detail->data[0].i == 0x12 || detail->data[0].i == 0x102)
5822 handler (_("the register list must have a stride of %d or %d"
5823 " at operand %d -- `%s`"), 1,
5824 detail->data[0].i == 0x12 ? 4 : 8, idx + 1, str);
5825 else
5826 handler (_("invalid register stride at operand %d -- `%s'"),
5827 idx + 1, str);
5828 break;
5829
5830 case AARCH64_OPDE_UNALIGNED:
5831 handler (_("immediate value must be a multiple of "
5832 "%d at operand %d -- `%s'"),
5833 detail->data[0].i, idx + 1, str);
5834 break;
5835
5836 default:
5837 gas_assert (0);
5838 break;
5839 }
5840 }
5841
5842 /* Return true if the presence of error A against an instruction means
5843 that error B should not be reported. This is only used as a first pass,
5844 to pick the kind of error that we should report. */
5845
5846 static bool
5847 better_error_p (operand_error_record *a, operand_error_record *b)
5848 {
5849 /* For errors reported during parsing, prefer errors that relate to
5850 later operands, since that implies that the earlier operands were
5851 syntactically valid.
5852
5853 For example, if we see a register R instead of an immediate in
5854 operand N, we'll report that as a recoverable "immediate operand
5855 required" error. This is because there is often another opcode
5856 entry that accepts a register operand N, and any errors about R
5857 should be reported against the register forms of the instruction.
5858 But if no such register form exists, the recoverable error should
5859 still win over a syntax error against operand N-1.
5860
5861 For these purposes, count an error reported at the end of the
5862 assembly string as equivalent to an error reported against the
5863 final operand. This means that opcode entries that expect more
5864 operands win over "unexpected characters following instruction". */
5865 if (a->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR
5866 && b->detail.kind <= AARCH64_OPDE_FATAL_SYNTAX_ERROR)
5867 {
5868 int a_index = (a->detail.index < 0
5869 ? aarch64_num_of_operands (a->opcode) - 1
5870 : a->detail.index);
5871 int b_index = (b->detail.index < 0
5872 ? aarch64_num_of_operands (b->opcode) - 1
5873 : b->detail.index);
5874 if (a_index != b_index)
5875 return a_index > b_index;
5876 }
5877 return operand_error_higher_severity_p (a->detail.kind, b->detail.kind);
5878 }
5879
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information has
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* First pass: find the error kind of the highest severity among the
     records, using better_error_p to compare them pairwise.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      if (curr->detail.kind == AARCH64_OPDE_SYNTAX_ERROR)
	{
	  DEBUG_TRACE ("\t%s [%x, %x, %x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i, curr->detail.data[1].i,
		       curr->detail.data[2].i);
	}
      else if (curr->detail.kind == AARCH64_OPDE_REG_LIST_LENGTH
	       || curr->detail.kind == AARCH64_OPDE_REG_LIST_STRIDE)
	{
	  DEBUG_TRACE ("\t%s [%x]",
		       operand_mismatch_kind_names[curr->detail.kind],
		       curr->detail.data[0].i);
	}
      else
	{
	  DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
	}
      if ((!non_fatal_only || curr->detail.non_fatal)
	  && (!record || better_error_p (curr, record)))
	record = curr;
    }

  kind = (record ? record->detail.kind : AARCH64_OPDE_NIL);
  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Second pass: pick one of the errors of KIND to report.  */
  record = NULL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (!record || curr->detail.index > record->detail.index)
	record = curr;
      else if (curr->detail.index == record->detail.index
	       && !record->detail.error)
	{
	  if (curr->detail.error)
	    record = curr;
	  else if (kind == AARCH64_OPDE_SYNTAX_ERROR)
	    {
	      /* Merge the register-expectation flags of equally-ranked
		 syntax errors so that the report covers all templates.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      record->detail.data[1].i |= curr->detail.data[1].i;
	      record->detail.data[2].i |= curr->detail.data[2].i;
	      DEBUG_TRACE ("\t--> %s [%x, %x, %x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i, curr->detail.data[1].i,
			   curr->detail.data[2].i);
	    }
	  else if (kind == AARCH64_OPDE_REG_LIST_LENGTH
		   || kind == AARCH64_OPDE_REG_LIST_STRIDE)
	    {
	      /* Likewise merge the acceptable length/stride masks.  */
	      record->detail.data[0].i |= curr->detail.data[0].i;
	      DEBUG_TRACE ("\t--> %s [%x]",
			   operand_mismatch_kind_names[kind],
			   curr->detail.data[0].i);
	    }
	  /* Pick the variant with the closest match.  */
	  else if (kind == AARCH64_OPDE_INVALID_VARIANT
		   && record->detail.data[0].i > curr->detail.data[0].i)
	    record = curr;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (record);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
6015 \f
/* Store the 32-bit instruction INSN into BUF, always in little-endian
   byte order regardless of host endianness.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Emit the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = insn >> (8 * i);
}
6026
/* Read a 32-bit little-endian instruction back out of BUF.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;

  return ((uint32_t) p[0]
	  | ((uint32_t) p[1] << 8)
	  | ((uint32_t) p[2] << 16)
	  | ((uint32_t) p[3] << 24));
}
6036
/* Emit the assembled instruction held in the global INST into the
   current frag and create any fix-up its relocation requires.
   NEW_INST, if non-NULL, is attached to the fix-up so that later
   relocation processing can refer back to the instruction.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room for one 4-byte instruction in the current frag.  */
  to = frag_more (INSN_SIZE);

  /* Mark the frag as containing AArch64 instructions.  */
  frag_now->tc_frag_data.recorded = 1;

  /* Write the encoded instruction, always little-endian.  */
  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      /* Internal fix-ups carry extra operand/flag data for our own
	 relocation handling.  */
      if (aarch64_gas_internal_fixup_p ())
	{
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Record the instruction for DWARF line-number information.  */
  dwarf2_emit_insn (INSN_SIZE);
}
6070
/* Link together opcodes of the same name.  */

struct templates
{
  /* One opcode entry carrying this mnemonic.  */
  const aarch64_opcode *opcode;
  /* Next entry with the same mnemonic, or NULL at the end of the chain.  */
  struct templates *next;
};

typedef struct templates templates;
6080
6081 static templates *
6082 lookup_mnemonic (const char *start, int len)
6083 {
6084 templates *templ = NULL;
6085
6086 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
6087 return templ;
6088 }
6089
6090 /* Subroutine of md_assemble, responsible for looking up the primary
6091 opcode from the mnemonic the user wrote. BASE points to the beginning
6092 of the mnemonic, DOT points to the first '.' within the mnemonic
6093 (if any) and END points to the end of the mnemonic. */
6094
6095 static templates *
6096 opcode_lookup (char *base, char *dot, char *end)
6097 {
6098 const aarch64_cond *cond;
6099 char condname[16];
6100 int len;
6101
6102 if (dot == end)
6103 return 0;
6104
6105 inst.cond = COND_ALWAYS;
6106
6107 /* Handle a possible condition. */
6108 if (dot)
6109 {
6110 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
6111 if (!cond)
6112 return 0;
6113 inst.cond = cond->value;
6114 len = dot - base;
6115 }
6116 else
6117 len = end - base;
6118
6119 if (inst.cond == COND_ALWAYS)
6120 {
6121 /* Look for unaffixed mnemonic. */
6122 return lookup_mnemonic (base, len);
6123 }
6124 else if (len <= 13)
6125 {
6126 /* append ".c" to mnemonic if conditional */
6127 memcpy (condname, base, len);
6128 memcpy (condname + len, ".c", 2);
6129 base = condname;
6130 len += 2;
6131 return lookup_mnemonic (base, len);
6132 }
6133
6134 return NULL;
6135 }
6136
6137 /* Process an optional operand that is found omitted from the assembly line.
6138 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
6139 instruction's opcode entry while IDX is the index of this omitted operand.
6140 */
6141
static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The value to substitute for the omitted operand comes from the
     opcode table entry.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  /* Only genuinely optional, not-yet-parsed operands may be filled in.  */
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default value is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Register-lane operands: the default names the register; any lane
       index is left as initialized by the caller.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate-style operands: the default is the immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted "MUL #n" multiplier defaults to MUL #1.  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate needs no relocation.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Table-backed operands: the default indexes the relevant table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6235
6236 /* Process the relocation type for move wide instructions.
6237 Return TRUE on success; otherwise return FALSE. */
6238
static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* 32-bit (W) destination registers cannot address the upper 16-bit
     groups (G2/G3).  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not be combined with relocations that expect to select
     between MOVZ and MOVN or that set the whole register.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Derive the implicit LSL amount from which 16-bit group (G0..G3) the
     relocation selects.  */
  switch (inst.reloc.type)
    {
    /* G0: bits [15:0].  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1: bits [31:16].  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2: bits [47:32]; only valid for 64-bit (X) destinations.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* G3: bits [63:48]; only valid for 64-bit (X) destinations.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6337
6338 /* Determine and return the real reloc type code for an instruction
6339 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
6340
static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows are indexed by pseudo reloc type (relative to
     BFD_RELOC_AARCH64_LDST_LO12), columns by log2 of the transfer size
     (0 = byte .. 4 = 128-bit).  The TLS rows have no 128-bit variant.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  /* Only the pseudo reloc types that form the rows above are accepted.  */
  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand has no explicit qualifier, infer it from the
     qualifier of the transfer register (operand 0).  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_log2 (aarch64_get_qualifier_esize (opd1_qlf));

  /* TLS variants stop at 64-bit accesses; see the table above.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6425
6426 /* Check whether a register list REGINFO is valid. The registers have type
6427 REG_TYPE and must be numbered in increasing order (modulo the register
6428 bank size). They must have a consistent stride.
6429
6430 Return true if the list is valid, describing it in LIST if so. */
6431
6432 static bool
6433 reg_list_valid_p (uint32_t reginfo, struct aarch64_reglist *list,
6434 aarch64_reg_type reg_type)
6435 {
6436 uint32_t i, nb_regs, prev_regno, incr, mask;
6437 mask = reg_type_mask (reg_type);
6438
6439 nb_regs = 1 + (reginfo & 0x3);
6440 reginfo >>= 2;
6441 prev_regno = reginfo & 0x1f;
6442 incr = 1;
6443
6444 list->first_regno = prev_regno;
6445 list->num_regs = nb_regs;
6446
6447 for (i = 1; i < nb_regs; ++i)
6448 {
6449 uint32_t curr_regno, curr_incr;
6450 reginfo >>= 5;
6451 curr_regno = reginfo & 0x1f;
6452 curr_incr = (curr_regno - prev_regno) & mask;
6453 if (curr_incr == 0)
6454 return false;
6455 else if (i == 1)
6456 incr = curr_incr;
6457 else if (curr_incr != incr)
6458 return false;
6459 prev_regno = curr_regno;
6460 }
6461
6462 list->stride = incr;
6463 return true;
6464 }
6465
6466 /* Generic instruction operand parser. This does no encoding and no
6467 semantic validation; it merely squirrels values away in the inst
6468 structure. Returns TRUE or FALSE depending on whether the
6469 specified grammar matched. */
6470
6471 static bool
6472 parse_operands (char *str, const aarch64_opcode *opcode)
6473 {
6474 int i;
6475 char *backtrack_pos = 0;
6476 const enum aarch64_opnd *operands = opcode->operands;
6477 aarch64_reg_type imm_reg_type;
6478
6479 clear_error ();
6480 skip_whitespace (str);
6481
6482 if (AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SME2))
6483 imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP_PN;
6484 else if (AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE)
6485 || AARCH64_CPU_HAS_FEATURE (*opcode->avariant, SVE2))
6486 imm_reg_type = REG_TYPE_R_ZR_SP_BHSDQ_VZP;
6487 else
6488 imm_reg_type = REG_TYPE_R_ZR_BHSDQ_V;
6489
6490 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6491 {
6492 int64_t val;
6493 const reg_entry *reg;
6494 int comma_skipped_p = 0;
6495 struct vector_type_el vectype;
6496 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6497 aarch64_opnd_info *info = &inst.base.operands[i];
6498 aarch64_reg_type reg_type;
6499
6500 DEBUG_TRACE ("parse operand %d", i);
6501
6502 /* Assign the operand code. */
6503 info->type = operands[i];
6504
6505 if (optional_operand_p (opcode, i))
6506 {
6507 /* Remember where we are in case we need to backtrack. */
6508 gas_assert (!backtrack_pos);
6509 backtrack_pos = str;
6510 }
6511
6512 /* Expect comma between operands; the backtrack mechanism will take
6513 care of cases of omitted optional operand. */
6514 if (i > 0 && ! skip_past_char (&str, ','))
6515 {
6516 set_syntax_error (_("comma expected between operands"));
6517 goto failure;
6518 }
6519 else
6520 comma_skipped_p = 1;
6521
6522 switch (operands[i])
6523 {
6524 case AARCH64_OPND_Rd:
6525 case AARCH64_OPND_Rn:
6526 case AARCH64_OPND_Rm:
6527 case AARCH64_OPND_Rt:
6528 case AARCH64_OPND_Rt2:
6529 case AARCH64_OPND_X16:
6530 case AARCH64_OPND_Rs:
6531 case AARCH64_OPND_Ra:
6532 case AARCH64_OPND_Rt_LS64:
6533 case AARCH64_OPND_Rt_SYS:
6534 case AARCH64_OPND_PAIRREG:
6535 case AARCH64_OPND_SVE_Rm:
6536 po_int_fp_reg_or_fail (REG_TYPE_R_ZR);
6537
6538 /* In LS64 load/store instructions Rt register number must be even
6539 and <=22. */
6540 if (operands[i] == AARCH64_OPND_Rt_LS64)
6541 {
6542 /* We've already checked if this is valid register.
6543 This will check if register number (Rt) is not undefined for
6544 LS64 instructions:
6545 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6546 if ((info->reg.regno & 0x18) == 0x18
6547 || (info->reg.regno & 0x01) == 0x01)
6548 {
6549 set_syntax_error
6550 (_("invalid Rt register number in 64-byte load/store"));
6551 goto failure;
6552 }
6553 }
6554 else if (operands[i] == AARCH64_OPND_X16)
6555 {
6556 if (info->reg.regno != 16)
6557 {
6558 goto failure;
6559 }
6560 }
6561 break;
6562
6563 case AARCH64_OPND_Rd_SP:
6564 case AARCH64_OPND_Rn_SP:
6565 case AARCH64_OPND_Rt_SP:
6566 case AARCH64_OPND_SVE_Rn_SP:
6567 case AARCH64_OPND_Rm_SP:
6568 po_int_fp_reg_or_fail (REG_TYPE_R_SP);
6569 break;
6570
6571 case AARCH64_OPND_Rm_EXT:
6572 case AARCH64_OPND_Rm_SFT:
6573 po_misc_or_fail (parse_shifter_operand
6574 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6575 ? SHIFTED_ARITH_IMM
6576 : SHIFTED_LOGIC_IMM)));
6577 if (!info->shifter.operator_present)
6578 {
6579 /* Default to LSL if not present. Libopcodes prefers shifter
6580 kind to be explicit. */
6581 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6582 info->shifter.kind = AARCH64_MOD_LSL;
6583 /* For Rm_EXT, libopcodes will carry out further check on whether
6584 or not stack pointer is used in the instruction (Recall that
6585 "the extend operator is not optional unless at least one of
6586 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6587 }
6588 break;
6589
6590 case AARCH64_OPND_Fd:
6591 case AARCH64_OPND_Fn:
6592 case AARCH64_OPND_Fm:
6593 case AARCH64_OPND_Fa:
6594 case AARCH64_OPND_Ft:
6595 case AARCH64_OPND_Ft2:
6596 case AARCH64_OPND_Sd:
6597 case AARCH64_OPND_Sn:
6598 case AARCH64_OPND_Sm:
6599 case AARCH64_OPND_SVE_VZn:
6600 case AARCH64_OPND_SVE_Vd:
6601 case AARCH64_OPND_SVE_Vm:
6602 case AARCH64_OPND_SVE_Vn:
6603 po_int_fp_reg_or_fail (REG_TYPE_BHSDQ);
6604 break;
6605
6606 case AARCH64_OPND_SVE_Pd:
6607 case AARCH64_OPND_SVE_Pg3:
6608 case AARCH64_OPND_SVE_Pg4_5:
6609 case AARCH64_OPND_SVE_Pg4_10:
6610 case AARCH64_OPND_SVE_Pg4_16:
6611 case AARCH64_OPND_SVE_Pm:
6612 case AARCH64_OPND_SVE_Pn:
6613 case AARCH64_OPND_SVE_Pt:
6614 case AARCH64_OPND_SME_Pm:
6615 reg_type = REG_TYPE_P;
6616 goto vector_reg;
6617
6618 case AARCH64_OPND_SVE_Za_5:
6619 case AARCH64_OPND_SVE_Za_16:
6620 case AARCH64_OPND_SVE_Zd:
6621 case AARCH64_OPND_SVE_Zm_5:
6622 case AARCH64_OPND_SVE_Zm_16:
6623 case AARCH64_OPND_SVE_Zn:
6624 case AARCH64_OPND_SVE_Zt:
6625 case AARCH64_OPND_SME_Zm:
6626 reg_type = REG_TYPE_Z;
6627 goto vector_reg;
6628
6629 case AARCH64_OPND_SVE_PNd:
6630 case AARCH64_OPND_SVE_PNg4_10:
6631 case AARCH64_OPND_SVE_PNn:
6632 case AARCH64_OPND_SVE_PNt:
6633 case AARCH64_OPND_SME_PNd3:
6634 case AARCH64_OPND_SME_PNg3:
6635 case AARCH64_OPND_SME_PNn:
6636 reg_type = REG_TYPE_PN;
6637 goto vector_reg;
6638
6639 case AARCH64_OPND_Va:
6640 case AARCH64_OPND_Vd:
6641 case AARCH64_OPND_Vn:
6642 case AARCH64_OPND_Vm:
6643 reg_type = REG_TYPE_V;
6644 vector_reg:
6645 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6646 if (!reg)
6647 goto failure;
6648 if (vectype.defined & NTA_HASINDEX)
6649 goto failure;
6650
6651 info->reg.regno = reg->number;
6652 if ((reg_type == REG_TYPE_P
6653 || reg_type == REG_TYPE_PN
6654 || reg_type == REG_TYPE_Z)
6655 && vectype.type == NT_invtype)
6656 /* Unqualified P and Z registers are allowed in certain
6657 contexts. Rely on F_STRICT qualifier checking to catch
6658 invalid uses. */
6659 info->qualifier = AARCH64_OPND_QLF_NIL;
6660 else
6661 {
6662 info->qualifier = vectype_to_qualifier (&vectype);
6663 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6664 goto failure;
6665 }
6666 break;
6667
6668 case AARCH64_OPND_VdD1:
6669 case AARCH64_OPND_VnD1:
6670 reg = aarch64_reg_parse (&str, REG_TYPE_V, &vectype);
6671 if (!reg)
6672 goto failure;
6673 if (vectype.type != NT_d || vectype.index != 1)
6674 {
6675 set_fatal_syntax_error
6676 (_("the top half of a 128-bit FP/SIMD register is expected"));
6677 goto failure;
6678 }
6679 info->reg.regno = reg->number;
6680 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6681 here; it is correct for the purpose of encoding/decoding since
6682 only the register number is explicitly encoded in the related
6683 instructions, although this appears a bit hacky. */
6684 info->qualifier = AARCH64_OPND_QLF_S_D;
6685 break;
6686
6687 case AARCH64_OPND_SVE_Zm3_INDEX:
6688 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6689 case AARCH64_OPND_SVE_Zm3_19_INDEX:
6690 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6691 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6692 case AARCH64_OPND_SVE_Zm4_INDEX:
6693 case AARCH64_OPND_SVE_Zn_INDEX:
6694 case AARCH64_OPND_SME_Zm_INDEX1:
6695 case AARCH64_OPND_SME_Zm_INDEX2:
6696 case AARCH64_OPND_SME_Zm_INDEX3_1:
6697 case AARCH64_OPND_SME_Zm_INDEX3_2:
6698 case AARCH64_OPND_SME_Zm_INDEX3_10:
6699 case AARCH64_OPND_SME_Zm_INDEX4_1:
6700 case AARCH64_OPND_SME_Zm_INDEX4_10:
6701 case AARCH64_OPND_SME_Zn_INDEX1_16:
6702 case AARCH64_OPND_SME_Zn_INDEX2_15:
6703 case AARCH64_OPND_SME_Zn_INDEX2_16:
6704 case AARCH64_OPND_SME_Zn_INDEX3_14:
6705 case AARCH64_OPND_SME_Zn_INDEX3_15:
6706 case AARCH64_OPND_SME_Zn_INDEX4_14:
6707 reg_type = REG_TYPE_Z;
6708 goto vector_reg_index;
6709
6710 case AARCH64_OPND_Ed:
6711 case AARCH64_OPND_En:
6712 case AARCH64_OPND_Em:
6713 case AARCH64_OPND_Em16:
6714 case AARCH64_OPND_SM3_IMM2:
6715 reg_type = REG_TYPE_V;
6716 vector_reg_index:
6717 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6718 if (!reg)
6719 goto failure;
6720 if (!(vectype.defined & NTA_HASINDEX))
6721 goto failure;
6722
6723 if (reg->type == REG_TYPE_Z && vectype.type == NT_invtype)
6724 /* Unqualified Zn[index] is allowed in LUTI2 instructions. */
6725 info->qualifier = AARCH64_OPND_QLF_NIL;
6726 else
6727 {
6728 if (vectype.type == NT_invtype)
6729 goto failure;
6730 info->qualifier = vectype_to_qualifier (&vectype);
6731 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6732 goto failure;
6733 }
6734
6735 info->reglane.regno = reg->number;
6736 info->reglane.index = vectype.index;
6737 break;
6738
6739 case AARCH64_OPND_SVE_ZnxN:
6740 case AARCH64_OPND_SVE_ZtxN:
6741 case AARCH64_OPND_SME_Zdnx2:
6742 case AARCH64_OPND_SME_Zdnx4:
6743 case AARCH64_OPND_SME_Zmx2:
6744 case AARCH64_OPND_SME_Zmx4:
6745 case AARCH64_OPND_SME_Znx2:
6746 case AARCH64_OPND_SME_Znx4:
6747 case AARCH64_OPND_SME_Ztx2_STRIDED:
6748 case AARCH64_OPND_SME_Ztx4_STRIDED:
6749 reg_type = REG_TYPE_Z;
6750 goto vector_reg_list;
6751
6752 case AARCH64_OPND_SME_Pdx2:
6753 case AARCH64_OPND_SME_PdxN:
6754 reg_type = REG_TYPE_P;
6755 goto vector_reg_list;
6756
6757 case AARCH64_OPND_LVn:
6758 case AARCH64_OPND_LVt:
6759 case AARCH64_OPND_LVt_AL:
6760 case AARCH64_OPND_LEt:
6761 reg_type = REG_TYPE_V;
6762 vector_reg_list:
6763 if (reg_type == REG_TYPE_Z
6764 && get_opcode_dependent_value (opcode) == 1
6765 && *str != '{')
6766 {
6767 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6768 if (!reg)
6769 goto failure;
6770 info->reglist.first_regno = reg->number;
6771 info->reglist.num_regs = 1;
6772 info->reglist.stride = 1;
6773 }
6774 else
6775 {
6776 val = parse_vector_reg_list (&str, reg_type, &vectype);
6777 if (val == PARSE_FAIL)
6778 goto failure;
6779
6780 if (! reg_list_valid_p (val, &info->reglist, reg_type))
6781 {
6782 set_fatal_syntax_error (_("invalid register list"));
6783 goto failure;
6784 }
6785
6786 if ((int) vectype.width > 0 && *str != ',')
6787 {
6788 set_fatal_syntax_error
6789 (_("expected element type rather than vector type"));
6790 goto failure;
6791 }
6792 }
6793 if (operands[i] == AARCH64_OPND_LEt)
6794 {
6795 if (!(vectype.defined & NTA_HASINDEX))
6796 goto failure;
6797 info->reglist.has_index = 1;
6798 info->reglist.index = vectype.index;
6799 }
6800 else
6801 {
6802 if (vectype.defined & NTA_HASINDEX)
6803 goto failure;
6804 if (!(vectype.defined & NTA_HASTYPE))
6805 {
6806 if (reg_type == REG_TYPE_Z || reg_type == REG_TYPE_P)
6807 set_fatal_syntax_error (_("missing type suffix"));
6808 goto failure;
6809 }
6810 }
6811 info->qualifier = vectype_to_qualifier (&vectype);
6812 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6813 goto failure;
6814 break;
6815
6816 case AARCH64_OPND_CRn:
6817 case AARCH64_OPND_CRm:
6818 {
6819 char prefix = *(str++);
6820 if (prefix != 'c' && prefix != 'C')
6821 goto failure;
6822
6823 po_imm_nc_or_fail ();
6824 if (val > 15)
6825 {
6826 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6827 goto failure;
6828 }
6829 info->qualifier = AARCH64_OPND_QLF_CR;
6830 info->imm.value = val;
6831 break;
6832 }
6833
6834 case AARCH64_OPND_SHLL_IMM:
6835 case AARCH64_OPND_IMM_VLSR:
6836 po_imm_or_fail (1, 64);
6837 info->imm.value = val;
6838 break;
6839
6840 case AARCH64_OPND_CCMP_IMM:
6841 case AARCH64_OPND_SIMM5:
6842 case AARCH64_OPND_FBITS:
6843 case AARCH64_OPND_TME_UIMM16:
6844 case AARCH64_OPND_UIMM4:
6845 case AARCH64_OPND_UIMM4_ADDG:
6846 case AARCH64_OPND_UIMM10:
6847 case AARCH64_OPND_UIMM3_OP1:
6848 case AARCH64_OPND_UIMM3_OP2:
6849 case AARCH64_OPND_IMM_VLSL:
6850 case AARCH64_OPND_IMM:
6851 case AARCH64_OPND_IMM_2:
6852 case AARCH64_OPND_WIDTH:
6853 case AARCH64_OPND_SVE_INV_LIMM:
6854 case AARCH64_OPND_SVE_LIMM:
6855 case AARCH64_OPND_SVE_LIMM_MOV:
6856 case AARCH64_OPND_SVE_SHLIMM_PRED:
6857 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6858 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6859 case AARCH64_OPND_SME_SHRIMM4:
6860 case AARCH64_OPND_SME_SHRIMM5:
6861 case AARCH64_OPND_SVE_SHRIMM_PRED:
6862 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6863 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6864 case AARCH64_OPND_SVE_SIMM5:
6865 case AARCH64_OPND_SVE_SIMM5B:
6866 case AARCH64_OPND_SVE_SIMM6:
6867 case AARCH64_OPND_SVE_SIMM8:
6868 case AARCH64_OPND_SVE_UIMM3:
6869 case AARCH64_OPND_SVE_UIMM7:
6870 case AARCH64_OPND_SVE_UIMM8:
6871 case AARCH64_OPND_SVE_UIMM8_53:
6872 case AARCH64_OPND_IMM_ROT1:
6873 case AARCH64_OPND_IMM_ROT2:
6874 case AARCH64_OPND_IMM_ROT3:
6875 case AARCH64_OPND_SVE_IMM_ROT1:
6876 case AARCH64_OPND_SVE_IMM_ROT2:
6877 case AARCH64_OPND_SVE_IMM_ROT3:
6878 case AARCH64_OPND_CSSC_SIMM8:
6879 case AARCH64_OPND_CSSC_UIMM8:
6880 po_imm_nc_or_fail ();
6881 info->imm.value = val;
6882 break;
6883
6884 case AARCH64_OPND_SVE_AIMM:
6885 case AARCH64_OPND_SVE_ASIMM:
6886 po_imm_nc_or_fail ();
6887 info->imm.value = val;
6888 skip_whitespace (str);
6889 if (skip_past_comma (&str))
6890 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6891 else
6892 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6893 break;
6894
6895 case AARCH64_OPND_SVE_PATTERN:
6896 po_enum_or_fail (aarch64_sve_pattern_array);
6897 info->imm.value = val;
6898 break;
6899
6900 case AARCH64_OPND_SVE_PATTERN_SCALED:
6901 po_enum_or_fail (aarch64_sve_pattern_array);
6902 info->imm.value = val;
6903 if (skip_past_comma (&str)
6904 && !parse_shift (&str, info, SHIFTED_MUL))
6905 goto failure;
6906 if (!info->shifter.operator_present)
6907 {
6908 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6909 info->shifter.kind = AARCH64_MOD_MUL;
6910 info->shifter.amount = 1;
6911 }
6912 break;
6913
6914 case AARCH64_OPND_SVE_PRFOP:
6915 po_enum_or_fail (aarch64_sve_prfop_array);
6916 info->imm.value = val;
6917 break;
6918
6919 case AARCH64_OPND_UIMM7:
6920 po_imm_or_fail (0, 127);
6921 info->imm.value = val;
6922 break;
6923
6924 case AARCH64_OPND_IDX:
6925 case AARCH64_OPND_MASK:
6926 case AARCH64_OPND_BIT_NUM:
6927 case AARCH64_OPND_IMMR:
6928 case AARCH64_OPND_IMMS:
6929 po_imm_or_fail (0, 63);
6930 info->imm.value = val;
6931 break;
6932
6933 case AARCH64_OPND_IMM0:
6934 po_imm_nc_or_fail ();
6935 if (val != 0)
6936 {
6937 set_fatal_syntax_error (_("immediate zero expected"));
6938 goto failure;
6939 }
6940 info->imm.value = 0;
6941 break;
6942
6943 case AARCH64_OPND_FPIMM0:
6944 {
6945 int qfloat;
6946 bool res1 = false, res2 = false;
6947 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6948 it is probably not worth the effort to support it. */
6949 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6950 imm_reg_type))
6951 && (error_p ()
6952 || !(res2 = parse_constant_immediate (&str, &val,
6953 imm_reg_type))))
6954 goto failure;
6955 if ((res1 && qfloat == 0) || (res2 && val == 0))
6956 {
6957 info->imm.value = 0;
6958 info->imm.is_fp = 1;
6959 break;
6960 }
6961 set_fatal_syntax_error (_("immediate zero expected"));
6962 goto failure;
6963 }
6964
6965 case AARCH64_OPND_IMM_MOV:
6966 {
6967 char *saved = str;
6968 if (reg_name_p (str, REG_TYPE_R_ZR_SP)
6969 || reg_name_p (str, REG_TYPE_V))
6970 goto failure;
6971 str = saved;
6972 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6973 GE_OPT_PREFIX, REJECT_ABSENT));
6974 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6975 later. fix_mov_imm_insn will try to determine a machine
6976 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6977 message if the immediate cannot be moved by a single
6978 instruction. */
6979 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6980 inst.base.operands[i].skip = 1;
6981 }
6982 break;
6983
6984 case AARCH64_OPND_SIMD_IMM:
6985 case AARCH64_OPND_SIMD_IMM_SFT:
6986 if (! parse_big_immediate (&str, &val, imm_reg_type))
6987 goto failure;
6988 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6989 /* addr_off_p */ 0,
6990 /* need_libopcodes_p */ 1,
6991 /* skip_p */ 1);
6992 /* Parse shift.
6993 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6994 shift, we don't check it here; we leave the checking to
6995 the libopcodes (operand_general_constraint_met_p). By
6996 doing this, we achieve better diagnostics. */
6997 if (skip_past_comma (&str)
6998 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6999 goto failure;
7000 if (!info->shifter.operator_present
7001 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
7002 {
7003 /* Default to LSL if not present. Libopcodes prefers shifter
7004 kind to be explicit. */
7005 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7006 info->shifter.kind = AARCH64_MOD_LSL;
7007 }
7008 break;
7009
7010 case AARCH64_OPND_FPIMM:
7011 case AARCH64_OPND_SIMD_FPIMM:
7012 case AARCH64_OPND_SVE_FPIMM8:
7013 {
7014 int qfloat;
7015 bool dp_p;
7016
7017 dp_p = double_precision_operand_p (&inst.base.operands[0]);
7018 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
7019 || !aarch64_imm_float_p (qfloat))
7020 {
7021 if (!error_p ())
7022 set_fatal_syntax_error (_("invalid floating-point"
7023 " constant"));
7024 goto failure;
7025 }
7026 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
7027 inst.base.operands[i].imm.is_fp = 1;
7028 }
7029 break;
7030
7031 case AARCH64_OPND_SVE_I1_HALF_ONE:
7032 case AARCH64_OPND_SVE_I1_HALF_TWO:
7033 case AARCH64_OPND_SVE_I1_ZERO_ONE:
7034 {
7035 int qfloat;
7036 bool dp_p;
7037
7038 dp_p = double_precision_operand_p (&inst.base.operands[0]);
7039 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
7040 {
7041 if (!error_p ())
7042 set_fatal_syntax_error (_("invalid floating-point"
7043 " constant"));
7044 goto failure;
7045 }
7046 inst.base.operands[i].imm.value = qfloat;
7047 inst.base.operands[i].imm.is_fp = 1;
7048 }
7049 break;
7050
7051 case AARCH64_OPND_LIMM:
7052 po_misc_or_fail (parse_shifter_operand (&str, info,
7053 SHIFTED_LOGIC_IMM));
7054 if (info->shifter.operator_present)
7055 {
7056 set_fatal_syntax_error
7057 (_("shift not allowed for bitmask immediate"));
7058 goto failure;
7059 }
7060 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7061 /* addr_off_p */ 0,
7062 /* need_libopcodes_p */ 1,
7063 /* skip_p */ 1);
7064 break;
7065
7066 case AARCH64_OPND_AIMM:
7067 if (opcode->op == OP_ADD)
7068 /* ADD may have relocation types. */
7069 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
7070 SHIFTED_ARITH_IMM));
7071 else
7072 po_misc_or_fail (parse_shifter_operand (&str, info,
7073 SHIFTED_ARITH_IMM));
7074 switch (inst.reloc.type)
7075 {
7076 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7077 info->shifter.amount = 12;
7078 break;
7079 case BFD_RELOC_UNUSED:
7080 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7081 if (info->shifter.kind != AARCH64_MOD_NONE)
7082 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
7083 inst.reloc.pc_rel = 0;
7084 break;
7085 default:
7086 break;
7087 }
7088 info->imm.value = 0;
7089 if (!info->shifter.operator_present)
7090 {
7091 /* Default to LSL if not present. Libopcodes prefers shifter
7092 kind to be explicit. */
7093 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7094 info->shifter.kind = AARCH64_MOD_LSL;
7095 }
7096 break;
7097
7098 case AARCH64_OPND_HALF:
7099 {
7100 /* #<imm16> or relocation. */
7101 int internal_fixup_p;
7102 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
7103 if (internal_fixup_p)
7104 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
7105 skip_whitespace (str);
7106 if (skip_past_comma (&str))
7107 {
7108 /* {, LSL #<shift>} */
7109 if (! aarch64_gas_internal_fixup_p ())
7110 {
7111 set_fatal_syntax_error (_("can't mix relocation modifier "
7112 "with explicit shift"));
7113 goto failure;
7114 }
7115 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
7116 }
7117 else
7118 inst.base.operands[i].shifter.amount = 0;
7119 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
7120 inst.base.operands[i].imm.value = 0;
7121 if (! process_movw_reloc_info ())
7122 goto failure;
7123 }
7124 break;
7125
7126 case AARCH64_OPND_EXCEPTION:
7127 case AARCH64_OPND_UNDEFINED:
7128 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
7129 imm_reg_type));
7130 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7131 /* addr_off_p */ 0,
7132 /* need_libopcodes_p */ 0,
7133 /* skip_p */ 1);
7134 break;
7135
7136 case AARCH64_OPND_NZCV:
7137 {
7138 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
7139 if (nzcv != NULL)
7140 {
7141 str += 4;
7142 info->imm.value = nzcv->value;
7143 break;
7144 }
7145 po_imm_or_fail (0, 15);
7146 info->imm.value = val;
7147 }
7148 break;
7149
7150 case AARCH64_OPND_COND:
7151 case AARCH64_OPND_COND1:
7152 {
7153 char *start = str;
7154 do
7155 str++;
7156 while (ISALPHA (*str));
7157 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
7158 if (info->cond == NULL)
7159 {
7160 set_syntax_error (_("invalid condition"));
7161 goto failure;
7162 }
7163 else if (operands[i] == AARCH64_OPND_COND1
7164 && (info->cond->value & 0xe) == 0xe)
7165 {
7166 /* Do not allow AL or NV. */
7167 set_default_error ();
7168 goto failure;
7169 }
7170 }
7171 break;
7172
7173 case AARCH64_OPND_ADDR_ADRP:
7174 po_misc_or_fail (parse_adrp (&str));
7175 /* Clear the value as operand needs to be relocated. */
7176 info->imm.value = 0;
7177 break;
7178
7179 case AARCH64_OPND_ADDR_PCREL14:
7180 case AARCH64_OPND_ADDR_PCREL19:
7181 case AARCH64_OPND_ADDR_PCREL21:
7182 case AARCH64_OPND_ADDR_PCREL26:
7183 po_misc_or_fail (parse_address (&str, info));
7184 if (!info->addr.pcrel)
7185 {
7186 set_syntax_error (_("invalid pc-relative address"));
7187 goto failure;
7188 }
7189 if (inst.gen_lit_pool
7190 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
7191 {
7192 /* Only permit "=value" in the literal load instructions.
7193 The literal will be generated by programmer_friendly_fixup. */
7194 set_syntax_error (_("invalid use of \"=immediate\""));
7195 goto failure;
7196 }
7197 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
7198 {
7199 set_syntax_error (_("unrecognized relocation suffix"));
7200 goto failure;
7201 }
7202 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
7203 {
7204 info->imm.value = inst.reloc.exp.X_add_number;
7205 inst.reloc.type = BFD_RELOC_UNUSED;
7206 }
7207 else
7208 {
7209 info->imm.value = 0;
7210 if (inst.reloc.type == BFD_RELOC_UNUSED)
7211 switch (opcode->iclass)
7212 {
7213 case compbranch:
7214 case condbranch:
7215 /* e.g. CBZ or B.COND */
7216 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7217 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
7218 break;
7219 case testbranch:
7220 /* e.g. TBZ */
7221 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
7222 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
7223 break;
7224 case branch_imm:
7225 /* e.g. B or BL */
7226 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
7227 inst.reloc.type =
7228 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
7229 : BFD_RELOC_AARCH64_JUMP26;
7230 break;
7231 case loadlit:
7232 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7233 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
7234 break;
7235 case pcreladdr:
7236 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
7237 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
7238 break;
7239 default:
7240 gas_assert (0);
7241 abort ();
7242 }
7243 inst.reloc.pc_rel = 1;
7244 }
7245 break;
7246
7247 case AARCH64_OPND_ADDR_SIMPLE:
7248 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7249 {
7250 /* [<Xn|SP>{, #<simm>}] */
7251 char *start = str;
7252 /* First use the normal address-parsing routines, to get
7253 the usual syntax errors. */
7254 po_misc_or_fail (parse_address (&str, info));
7255 if (info->addr.pcrel || info->addr.offset.is_reg
7256 || !info->addr.preind || info->addr.postind
7257 || info->addr.writeback)
7258 {
7259 set_syntax_error (_("invalid addressing mode"));
7260 goto failure;
7261 }
7262
7263 /* Then retry, matching the specific syntax of these addresses. */
7264 str = start;
7265 po_char_or_fail ('[');
7266 po_reg_or_fail (REG_TYPE_R64_SP);
7267 /* Accept optional ", #0". */
7268 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7269 && skip_past_char (&str, ','))
7270 {
7271 skip_past_char (&str, '#');
7272 if (! skip_past_char (&str, '0'))
7273 {
7274 set_fatal_syntax_error
7275 (_("the optional immediate offset can only be 0"));
7276 goto failure;
7277 }
7278 }
7279 po_char_or_fail (']');
7280 break;
7281 }
7282
7283 case AARCH64_OPND_ADDR_REGOFF:
7284 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7285 po_misc_or_fail (parse_address (&str, info));
7286 regoff_addr:
7287 if (info->addr.pcrel || !info->addr.offset.is_reg
7288 || !info->addr.preind || info->addr.postind
7289 || info->addr.writeback)
7290 {
7291 set_syntax_error (_("invalid addressing mode"));
7292 goto failure;
7293 }
7294 if (!info->shifter.operator_present)
7295 {
7296 /* Default to LSL if not present. Libopcodes prefers shifter
7297 kind to be explicit. */
7298 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7299 info->shifter.kind = AARCH64_MOD_LSL;
7300 }
7301 /* Qualifier to be deduced by libopcodes. */
7302 break;
7303
7304 case AARCH64_OPND_ADDR_SIMM7:
7305 po_misc_or_fail (parse_address (&str, info));
7306 if (info->addr.pcrel || info->addr.offset.is_reg
7307 || (!info->addr.preind && !info->addr.postind))
7308 {
7309 set_syntax_error (_("invalid addressing mode"));
7310 goto failure;
7311 }
7312 if (inst.reloc.type != BFD_RELOC_UNUSED)
7313 {
7314 set_syntax_error (_("relocation not allowed"));
7315 goto failure;
7316 }
7317 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7318 /* addr_off_p */ 1,
7319 /* need_libopcodes_p */ 1,
7320 /* skip_p */ 0);
7321 break;
7322
7323 case AARCH64_OPND_ADDR_SIMM9:
7324 case AARCH64_OPND_ADDR_SIMM9_2:
7325 case AARCH64_OPND_ADDR_SIMM11:
7326 case AARCH64_OPND_ADDR_SIMM13:
7327 po_misc_or_fail (parse_address (&str, info));
7328 if (info->addr.pcrel || info->addr.offset.is_reg
7329 || (!info->addr.preind && !info->addr.postind)
7330 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7331 && info->addr.writeback))
7332 {
7333 set_syntax_error (_("invalid addressing mode"));
7334 goto failure;
7335 }
7336 if (inst.reloc.type != BFD_RELOC_UNUSED)
7337 {
7338 set_syntax_error (_("relocation not allowed"));
7339 goto failure;
7340 }
7341 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7342 /* addr_off_p */ 1,
7343 /* need_libopcodes_p */ 1,
7344 /* skip_p */ 0);
7345 break;
7346
7347 case AARCH64_OPND_ADDR_SIMM10:
7348 case AARCH64_OPND_ADDR_OFFSET:
7349 po_misc_or_fail (parse_address (&str, info));
7350 if (info->addr.pcrel || info->addr.offset.is_reg
7351 || !info->addr.preind || info->addr.postind)
7352 {
7353 set_syntax_error (_("invalid addressing mode"));
7354 goto failure;
7355 }
7356 if (inst.reloc.type != BFD_RELOC_UNUSED)
7357 {
7358 set_syntax_error (_("relocation not allowed"));
7359 goto failure;
7360 }
7361 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7362 /* addr_off_p */ 1,
7363 /* need_libopcodes_p */ 1,
7364 /* skip_p */ 0);
7365 break;
7366
7367 case AARCH64_OPND_ADDR_UIMM12:
7368 po_misc_or_fail (parse_address (&str, info));
7369 if (info->addr.pcrel || info->addr.offset.is_reg
7370 || !info->addr.preind || info->addr.writeback)
7371 {
7372 set_syntax_error (_("invalid addressing mode"));
7373 goto failure;
7374 }
7375 if (inst.reloc.type == BFD_RELOC_UNUSED)
7376 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7377 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7378 || (inst.reloc.type
7379 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7380 || (inst.reloc.type
7381 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7382 || (inst.reloc.type
7383 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7384 || (inst.reloc.type
7385 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7386 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7387 /* Leave qualifier to be determined by libopcodes. */
7388 break;
7389
7390 case AARCH64_OPND_SIMD_ADDR_POST:
7391 /* [<Xn|SP>], <Xm|#<amount>> */
7392 po_misc_or_fail (parse_address (&str, info));
7393 if (!info->addr.postind || !info->addr.writeback)
7394 {
7395 set_syntax_error (_("invalid addressing mode"));
7396 goto failure;
7397 }
7398 if (!info->addr.offset.is_reg)
7399 {
7400 if (inst.reloc.exp.X_op == O_constant)
7401 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7402 else
7403 {
7404 set_fatal_syntax_error
7405 (_("writeback value must be an immediate constant"));
7406 goto failure;
7407 }
7408 }
7409 /* No qualifier. */
7410 break;
7411
7412 case AARCH64_OPND_SME_SM_ZA:
7413 /* { SM | ZA } */
7414 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7415 {
7416 set_syntax_error (_("unknown or missing PSTATE field name"));
7417 goto failure;
7418 }
7419 info->reg.regno = val;
7420 break;
7421
7422 case AARCH64_OPND_SME_PnT_Wm_imm:
7423 if (!parse_dual_indexed_reg (&str, REG_TYPE_P,
7424 &info->indexed_za, &qualifier, 0))
7425 goto failure;
7426 info->qualifier = qualifier;
7427 break;
7428
7429 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7430 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7431 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7432 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7433 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7434 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7435 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7436 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7437 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7438 case AARCH64_OPND_SVE_ADDR_RI_U6:
7439 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7440 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7441 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7442 /* [X<n>{, #imm, MUL VL}]
7443 [X<n>{, #imm}]
7444 but recognizing SVE registers. */
7445 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7446 &offset_qualifier));
7447 if (base_qualifier != AARCH64_OPND_QLF_X)
7448 {
7449 set_syntax_error (_("invalid addressing mode"));
7450 goto failure;
7451 }
7452 sve_regimm:
7453 if (info->addr.pcrel || info->addr.offset.is_reg
7454 || !info->addr.preind || info->addr.writeback)
7455 {
7456 set_syntax_error (_("invalid addressing mode"));
7457 goto failure;
7458 }
7459 if (inst.reloc.type != BFD_RELOC_UNUSED
7460 || inst.reloc.exp.X_op != O_constant)
7461 {
7462 /* Make sure this has priority over
7463 "invalid addressing mode". */
7464 set_fatal_syntax_error (_("constant offset required"));
7465 goto failure;
7466 }
7467 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7468 break;
7469
7470 case AARCH64_OPND_SVE_ADDR_R:
7471 /* [<Xn|SP>{, <R><m>}]
7472 but recognizing SVE registers. */
7473 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7474 &offset_qualifier));
7475 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7476 {
7477 offset_qualifier = AARCH64_OPND_QLF_X;
7478 info->addr.offset.is_reg = 1;
7479 info->addr.offset.regno = 31;
7480 }
7481 else if (base_qualifier != AARCH64_OPND_QLF_X
7482 || offset_qualifier != AARCH64_OPND_QLF_X)
7483 {
7484 set_syntax_error (_("invalid addressing mode"));
7485 goto failure;
7486 }
7487 goto regoff_addr;
7488
7489 case AARCH64_OPND_SVE_ADDR_RR:
7490 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7491 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7492 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7493 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7494 case AARCH64_OPND_SVE_ADDR_RX:
7495 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7496 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7497 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7498 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7499 but recognizing SVE registers. */
7500 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7501 &offset_qualifier));
7502 if (base_qualifier != AARCH64_OPND_QLF_X
7503 || offset_qualifier != AARCH64_OPND_QLF_X)
7504 {
7505 set_syntax_error (_("invalid addressing mode"));
7506 goto failure;
7507 }
7508 goto regoff_addr;
7509
7510 case AARCH64_OPND_SVE_ADDR_RZ:
7511 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7512 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7513 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7514 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7515 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7516 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7517 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7518 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7519 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7520 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7521 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7522 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7523 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7524 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7525 &offset_qualifier));
7526 if (base_qualifier != AARCH64_OPND_QLF_X
7527 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7528 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7529 {
7530 set_syntax_error (_("invalid addressing mode"));
7531 goto failure;
7532 }
7533 info->qualifier = offset_qualifier;
7534 goto regoff_addr;
7535
7536 case AARCH64_OPND_SVE_ADDR_ZX:
7537 /* [Zn.<T>{, <Xm>}]. */
7538 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7539 &offset_qualifier));
7540 /* Things to check:
7541 base_qualifier either S_S or S_D
7542 offset_qualifier must be X
7543 */
7544 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7545 && base_qualifier != AARCH64_OPND_QLF_S_D)
7546 || offset_qualifier != AARCH64_OPND_QLF_X)
7547 {
7548 set_syntax_error (_("invalid addressing mode"));
7549 goto failure;
7550 }
7551 info->qualifier = base_qualifier;
7552 if (!info->addr.offset.is_reg || info->addr.pcrel
7553 || !info->addr.preind || info->addr.writeback
7554 || info->shifter.operator_present != 0)
7555 {
7556 set_syntax_error (_("invalid addressing mode"));
7557 goto failure;
7558 }
7559 info->shifter.kind = AARCH64_MOD_LSL;
7560 break;
7561
7562
7563 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7564 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7565 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7566 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7567 /* [Z<n>.<T>{, #imm}] */
7568 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7569 &offset_qualifier));
7570 if (base_qualifier != AARCH64_OPND_QLF_S_S
7571 && base_qualifier != AARCH64_OPND_QLF_S_D)
7572 {
7573 set_syntax_error (_("invalid addressing mode"));
7574 goto failure;
7575 }
7576 info->qualifier = base_qualifier;
7577 goto sve_regimm;
7578
7579 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7580 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7581 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7582 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7583 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7584
7585 We don't reject:
7586
7587 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7588
7589 here since we get better error messages by leaving it to
7590 the qualifier checking routines. */
7591 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7592 &offset_qualifier));
7593 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7594 && base_qualifier != AARCH64_OPND_QLF_S_D)
7595 || offset_qualifier != base_qualifier)
7596 {
7597 set_syntax_error (_("invalid addressing mode"));
7598 goto failure;
7599 }
7600 info->qualifier = base_qualifier;
7601 goto regoff_addr;
7602
7603 case AARCH64_OPND_SYSREG:
7604 {
7605 uint32_t sysreg_flags;
7606 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7607 &sysreg_flags)) == PARSE_FAIL)
7608 {
7609 set_syntax_error (_("unknown or missing system register name"));
7610 goto failure;
7611 }
7612 inst.base.operands[i].sysreg.value = val;
7613 inst.base.operands[i].sysreg.flags = sysreg_flags;
7614 break;
7615 }
7616
7617 case AARCH64_OPND_PSTATEFIELD:
7618 {
7619 uint32_t sysreg_flags;
7620 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7621 &sysreg_flags)) == PARSE_FAIL)
7622 {
7623 set_syntax_error (_("unknown or missing PSTATE field name"));
7624 goto failure;
7625 }
7626 inst.base.operands[i].pstatefield = val;
7627 inst.base.operands[i].sysreg.flags = sysreg_flags;
7628 break;
7629 }
7630
7631 case AARCH64_OPND_SYSREG_IC:
7632 inst.base.operands[i].sysins_op =
7633 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7634 goto sys_reg_ins;
7635
7636 case AARCH64_OPND_SYSREG_DC:
7637 inst.base.operands[i].sysins_op =
7638 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7639 goto sys_reg_ins;
7640
7641 case AARCH64_OPND_SYSREG_AT:
7642 inst.base.operands[i].sysins_op =
7643 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7644 goto sys_reg_ins;
7645
7646 case AARCH64_OPND_SYSREG_SR:
7647 inst.base.operands[i].sysins_op =
7648 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7649 goto sys_reg_ins;
7650
7651 case AARCH64_OPND_SYSREG_TLBI:
7652 inst.base.operands[i].sysins_op =
7653 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7654 sys_reg_ins:
7655 if (inst.base.operands[i].sysins_op == NULL)
7656 {
7657 set_fatal_syntax_error ( _("unknown or missing operation name"));
7658 goto failure;
7659 }
7660 break;
7661
7662 case AARCH64_OPND_BARRIER:
7663 case AARCH64_OPND_BARRIER_ISB:
7664 val = parse_barrier (&str);
7665 if (val != PARSE_FAIL
7666 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7667 {
7668 /* ISB only accepts options name 'sy'. */
7669 set_syntax_error
7670 (_("the specified option is not accepted in ISB"));
7671 /* Turn off backtrack as this optional operand is present. */
7672 backtrack_pos = 0;
7673 goto failure;
7674 }
7675 if (val != PARSE_FAIL
7676 && operands[i] == AARCH64_OPND_BARRIER)
7677 {
7678 /* Regular barriers accept options CRm (C0-C15).
7679 DSB nXS barrier variant accepts values > 15. */
7680 if (val < 0 || val > 15)
7681 {
7682 set_syntax_error (_("the specified option is not accepted in DSB"));
7683 goto failure;
7684 }
7685 }
7686 /* This is an extension to accept a 0..15 immediate. */
7687 if (val == PARSE_FAIL)
7688 po_imm_or_fail (0, 15);
7689 info->barrier = aarch64_barrier_options + val;
7690 break;
7691
7692 case AARCH64_OPND_BARRIER_DSB_NXS:
7693 val = parse_barrier (&str);
7694 if (val != PARSE_FAIL)
7695 {
7696 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7697 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7698 {
7699 set_syntax_error (_("the specified option is not accepted in DSB"));
7700 /* Turn off backtrack as this optional operand is present. */
7701 backtrack_pos = 0;
7702 goto failure;
7703 }
7704 }
7705 else
7706 {
7707 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7708 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7709 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7710 goto failure;
7711 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7712 {
7713 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7714 goto failure;
7715 }
7716 }
7717 /* Option index is encoded as 2-bit value in val<3:2>. */
7718 val = (val >> 2) - 4;
7719 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7720 break;
7721
7722 case AARCH64_OPND_PRFOP:
7723 val = parse_pldop (&str);
7724 /* This is an extension to accept a 0..31 immediate. */
7725 if (val == PARSE_FAIL)
7726 po_imm_or_fail (0, 31);
7727 inst.base.operands[i].prfop = aarch64_prfops + val;
7728 break;
7729
7730 case AARCH64_OPND_RPRFMOP:
7731 po_enum_or_fail (aarch64_rprfmop_array);
7732 info->imm.value = val;
7733 break;
7734
7735 case AARCH64_OPND_BARRIER_PSB:
7736 if (!parse_hint_opt (opcode->name, &str, &(info->hint_option)))
7737 goto failure;
7738 break;
7739
7740 case AARCH64_OPND_SME_ZT0:
7741 po_reg_or_fail (REG_TYPE_ZT0);
7742 break;
7743
7744 case AARCH64_OPND_SME_ZT0_INDEX:
7745 reg = aarch64_reg_parse (&str, REG_TYPE_ZT0, &vectype);
7746 if (!reg || vectype.type != NT_invtype)
7747 goto failure;
7748 if (!(vectype.defined & NTA_HASINDEX))
7749 {
7750 set_syntax_error (_("missing register index"));
7751 goto failure;
7752 }
7753 info->imm.value = vectype.index;
7754 break;
7755
7756 case AARCH64_OPND_SME_ZT0_LIST:
7757 if (*str != '{')
7758 {
7759 set_expected_reglist_error (REG_TYPE_ZT0, parse_reg (&str));
7760 goto failure;
7761 }
7762 str++;
7763 if (!parse_typed_reg (&str, REG_TYPE_ZT0, &vectype, PTR_IN_REGLIST))
7764 goto failure;
7765 if (*str != '}')
7766 {
7767 set_syntax_error (_("expected '}' after ZT0"));
7768 goto failure;
7769 }
7770 str++;
7771 break;
7772
7773 case AARCH64_OPND_SME_PNn3_INDEX1:
7774 case AARCH64_OPND_SME_PNn3_INDEX2:
7775 reg = aarch64_reg_parse (&str, REG_TYPE_PN, &vectype);
7776 if (!reg)
7777 goto failure;
7778 if (!(vectype.defined & NTA_HASINDEX))
7779 {
7780 set_syntax_error (_("missing register index"));
7781 goto failure;
7782 }
7783 info->reglane.regno = reg->number;
7784 info->reglane.index = vectype.index;
7785 if (vectype.type == NT_invtype)
7786 info->qualifier = AARCH64_OPND_QLF_NIL;
7787 else
7788 info->qualifier = vectype_to_qualifier (&vectype);
7789 break;
7790
7791 case AARCH64_OPND_BARRIER_GCSB:
7792 if (!parse_hint_opt (opcode->name, &str, &(info->hint_option)))
7793 goto failure;
7794 break;
7795
7796 case AARCH64_OPND_BTI_TARGET:
7797 if (!parse_hint_opt (opcode->name, &str, &(info->hint_option)))
7798 goto failure;
7799 break;
7800
7801 case AARCH64_OPND_SME_ZAda_2b:
7802 case AARCH64_OPND_SME_ZAda_3b:
7803 reg = parse_reg_with_qual (&str, REG_TYPE_ZAT, &qualifier, 0);
7804 if (!reg)
7805 goto failure;
7806 info->reg.regno = reg->number;
7807 info->qualifier = qualifier;
7808 break;
7809
7810 case AARCH64_OPND_SME_ZA_HV_idx_src:
7811 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
7812 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7813 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
7814 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7815 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr
7816 ? !parse_sme_za_hv_tiles_operand_with_braces (&str,
7817 &info->indexed_za,
7818 &qualifier)
7819 : !parse_dual_indexed_reg (&str, REG_TYPE_ZATHV,
7820 &info->indexed_za, &qualifier, 0))
7821 goto failure;
7822 info->qualifier = qualifier;
7823 break;
7824
7825 case AARCH64_OPND_SME_list_of_64bit_tiles:
7826 val = parse_sme_list_of_64bit_tiles (&str);
7827 if (val == PARSE_FAIL)
7828 goto failure;
7829 info->imm.value = val;
7830 break;
7831
7832 case AARCH64_OPND_SME_ZA_array_off1x4:
7833 case AARCH64_OPND_SME_ZA_array_off2x2:
7834 case AARCH64_OPND_SME_ZA_array_off2x4:
7835 case AARCH64_OPND_SME_ZA_array_off3_0:
7836 case AARCH64_OPND_SME_ZA_array_off3_5:
7837 case AARCH64_OPND_SME_ZA_array_off3x2:
7838 case AARCH64_OPND_SME_ZA_array_off4:
7839 if (!parse_dual_indexed_reg (&str, REG_TYPE_ZA,
7840 &info->indexed_za, &qualifier, 0))
7841 goto failure;
7842 info->qualifier = qualifier;
7843 break;
7844
7845 case AARCH64_OPND_SME_VLxN_10:
7846 case AARCH64_OPND_SME_VLxN_13:
7847 po_strict_enum_or_fail (aarch64_sme_vlxn_array);
7848 info->imm.value = val;
7849 break;
7850
7851 case AARCH64_OPND_MOPS_ADDR_Rd:
7852 case AARCH64_OPND_MOPS_ADDR_Rs:
7853 po_char_or_fail ('[');
7854 if (!parse_x0_to_x30 (&str, info))
7855 goto failure;
7856 po_char_or_fail (']');
7857 po_char_or_fail ('!');
7858 break;
7859
7860 case AARCH64_OPND_MOPS_WB_Rn:
7861 if (!parse_x0_to_x30 (&str, info))
7862 goto failure;
7863 po_char_or_fail ('!');
7864 break;
7865
7866 case AARCH64_OPND_LSE128_Rt:
7867 case AARCH64_OPND_LSE128_Rt2:
7868 po_int_fp_reg_or_fail (REG_TYPE_R_64);
7869 break;
7870
7871 default:
7872 as_fatal (_("unhandled operand code %d"), operands[i]);
7873 }
7874
7875 /* If we get here, this operand was successfully parsed. */
7876 inst.base.operands[i].present = 1;
7877 continue;
7878
7879 failure:
7880 /* The parse routine should already have set the error, but in case
7881 not, set a default one here. */
7882 if (! error_p ())
7883 set_default_error ();
7884
7885 if (! backtrack_pos)
7886 goto parse_operands_return;
7887
7888 {
7889 /* We reach here because this operand is marked as optional, and
7890 either no operand was supplied or the operand was supplied but it
7891 was syntactically incorrect. In the latter case we report an
7892 error. In the former case we perform a few more checks before
7893 dropping through to the code to insert the default operand. */
7894
7895 char *tmp = backtrack_pos;
7896 char endchar = END_OF_INSN;
7897
7898 if (i != (aarch64_num_of_operands (opcode) - 1))
7899 endchar = ',';
7900 skip_past_char (&tmp, ',');
7901
7902 if (*tmp != endchar)
7903 /* The user has supplied an operand in the wrong format. */
7904 goto parse_operands_return;
7905
7906 /* Make sure there is not a comma before the optional operand.
7907 For example the fifth operand of 'sys' is optional:
7908
7909 sys #0,c0,c0,#0, <--- wrong
7910 sys #0,c0,c0,#0 <--- correct. */
7911 if (comma_skipped_p && i && endchar == END_OF_INSN)
7912 {
7913 set_fatal_syntax_error
7914 (_("unexpected comma before the omitted optional operand"));
7915 goto parse_operands_return;
7916 }
7917 }
7918
7919 /* Reaching here means we are dealing with an optional operand that is
7920 omitted from the assembly line. */
7921 gas_assert (optional_operand_p (opcode, i));
7922 info->present = 0;
7923 process_omitted_operand (operands[i], opcode, i, info);
7924
7925 /* Try again, skipping the optional operand at backtrack_pos. */
7926 str = backtrack_pos;
7927 backtrack_pos = 0;
7928
7929 /* Clear any error record after the omitted optional operand has been
7930 successfully handled. */
7931 clear_error ();
7932 }
7933
7934 /* Check if we have parsed all the operands. */
7935 if (*str != '\0' && ! error_p ())
7936 {
7937 /* Set I to the index of the last present operand; this is
7938 for the purpose of diagnostics. */
7939 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7940 ;
7941 set_fatal_syntax_error
7942 (_("unexpected characters following instruction"));
7943 }
7944
7945 parse_operands_return:
7946
7947 if (error_p ())
7948 {
7949 inst.parsing_error.index = i;
7950 DEBUG_TRACE ("parsing FAIL: %s - %s",
7951 operand_mismatch_kind_names[inst.parsing_error.kind],
7952 inst.parsing_error.error);
7953 /* Record the operand error properly; this is useful when there
7954 are multiple instruction templates for a mnemonic name, so that
7955 later on, we can select the error that most closely describes
7956 the problem. */
7957 record_operand_error_info (opcode, &inst.parsing_error);
7958 return false;
7959 }
7960 else
7961 {
7962 DEBUG_TRACE ("parsing SUCCESS");
7963 return true;
7964 }
7965 }
7966
/* It does some fix-up to provide some programmer friendly feature while
   keeping the libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  /* A W register with a valid bit number is accepted as a
	     programmer-friendly form; hand the preferred X form to
	     libopcodes.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 32-bit literal even though the destination
	     qualifier is X.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      /* No fix-up needed for the other instruction classes.  */
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
8074
8075 /* Check for loads and stores that will cause unpredictable behavior. */
8076
8077 static void
8078 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
8079 {
8080 aarch64_inst *base = &instr->base;
8081 const aarch64_opcode *opcode = base->opcode;
8082 const aarch64_opnd_info *opnds = base->operands;
8083 switch (opcode->iclass)
8084 {
8085 case ldst_pos:
8086 case ldst_imm9:
8087 case ldst_imm10:
8088 case ldst_unscaled:
8089 case ldst_unpriv:
8090 /* Loading/storing the base register is unpredictable if writeback. */
8091 if ((aarch64_get_operand_class (opnds[0].type)
8092 == AARCH64_OPND_CLASS_INT_REG)
8093 && opnds[0].reg.regno == opnds[1].addr.base_regno
8094 && opnds[1].addr.base_regno != REG_SP
8095 /* Exempt STG/STZG/ST2G/STZ2G. */
8096 && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
8097 && opnds[1].addr.writeback)
8098 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
8099 break;
8100
8101 case ldstpair_off:
8102 case ldstnapair_offs:
8103 case ldstpair_indexed:
8104 /* Loading/storing the base register is unpredictable if writeback. */
8105 if ((aarch64_get_operand_class (opnds[0].type)
8106 == AARCH64_OPND_CLASS_INT_REG)
8107 && (opnds[0].reg.regno == opnds[2].addr.base_regno
8108 || opnds[1].reg.regno == opnds[2].addr.base_regno)
8109 && opnds[2].addr.base_regno != REG_SP
8110 /* Exempt STGP. */
8111 && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
8112 && opnds[2].addr.writeback)
8113 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
8114 /* Load operations must load different registers. */
8115 if ((opcode->opcode & (1 << 22))
8116 && opnds[0].reg.regno == opnds[1].reg.regno)
8117 as_warn (_("unpredictable load of register pair -- `%s'"), str);
8118 break;
8119
8120 case ldstexcl:
8121 if ((aarch64_get_operand_class (opnds[0].type)
8122 == AARCH64_OPND_CLASS_INT_REG)
8123 && (aarch64_get_operand_class (opnds[1].type)
8124 == AARCH64_OPND_CLASS_INT_REG))
8125 {
8126 if ((opcode->opcode & (1 << 22)))
8127 {
8128 /* It is unpredictable if load-exclusive pair with Rt == Rt2. */
8129 if ((opcode->opcode & (1 << 21))
8130 && opnds[0].reg.regno == opnds[1].reg.regno)
8131 as_warn (_("unpredictable load of register pair -- `%s'"), str);
8132 }
8133 else
8134 {
8135 /* Store-Exclusive is unpredictable if Rt == Rs. */
8136 if (opnds[0].reg.regno == opnds[1].reg.regno)
8137 as_warn
8138 (_("unpredictable: identical transfer and status registers"
8139 " --`%s'"),str);
8140
8141 if (opnds[0].reg.regno == opnds[2].reg.regno)
8142 {
8143 if (!(opcode->opcode & (1 << 21)))
8144 /* Store-Exclusive is unpredictable if Rn == Rs. */
8145 as_warn
8146 (_("unpredictable: identical base and status registers"
8147 " --`%s'"),str);
8148 else
8149 /* Store-Exclusive pair is unpredictable if Rt2 == Rs. */
8150 as_warn
8151 (_("unpredictable: "
8152 "identical transfer and status registers"
8153 " --`%s'"),str);
8154 }
8155
8156 /* Store-Exclusive pair is unpredictable if Rn == Rs. */
8157 if ((opcode->opcode & (1 << 21))
8158 && opnds[0].reg.regno == opnds[3].reg.regno
8159 && opnds[3].reg.regno != REG_SP)
8160 as_warn (_("unpredictable: identical base and status registers"
8161 " --`%s'"),str);
8162 }
8163 }
8164 break;
8165
8166 default:
8167 break;
8168 }
8169 }
8170
8171 static void
8172 force_automatic_sequence_close (void)
8173 {
8174 struct aarch64_segment_info_type *tc_seg_info;
8175
8176 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
8177 if (tc_seg_info->insn_sequence.instr)
8178 {
8179 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
8180 _("previous `%s' sequence has not been closed"),
8181 tc_seg_info->insn_sequence.instr->opcode->name);
8182 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
8183 }
8184 }
8185
8186 /* A wrapper function to interface with libopcodes on encoding and
8187 record the error message if there is any.
8188
8189 Return TRUE on success; otherwise return FALSE. */
8190
8191 static bool
8192 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
8193 aarch64_insn *code)
8194 {
8195 aarch64_operand_error error_info;
8196 memset (&error_info, '\0', sizeof (error_info));
8197 error_info.kind = AARCH64_OPDE_NIL;
8198 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
8199 && !error_info.non_fatal)
8200 return true;
8201
8202 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
8203 record_operand_error_info (opcode, &error_info);
8204 return error_info.non_fatal;
8205 }
8206
8207 #ifdef DEBUG_AARCH64
8208 static inline void
8209 dump_opcode_operands (const aarch64_opcode *opcode)
8210 {
8211 int i = 0;
8212 while (opcode->operands[i] != AARCH64_OPND_NIL)
8213 {
8214 aarch64_verbose ("\t\t opnd%d: %s", i,
8215 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
8216 ? aarch64_get_operand_name (opcode->operands[i])
8217 : aarch64_get_operand_desc (opcode->operands[i]));
8218 ++i;
8219 }
8220 }
8221 #endif /* DEBUG_AARCH64 */
8222
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  Diagnostics are reported via
   as_bad/as_warn and the operand-error machinery; nothing is emitted
   when assembly fails.  */

void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  Remember the first '.' so that suffixed
     mnemonics (e.g. "b.eq") can be looked up correctly.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* Register aliases (".req"-style) are only attempted when the
     mnemonic contains no '.'.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Reset the per-instruction state but preserve the already-parsed
     condition code (set by the mnemonic-suffix handling).  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, apply programmer-friendly fix-ups, then encode; any
	 failure falls through to try the next template.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!aarch64_cpu_supports_inst_p (cpu_variant, inst_base))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template did not match; restore a clean instruction state
	 before trying the next one for the same mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8377
8378 /* Various frobbings of labels and their addresses. */
8379
/* Called at the start of each input line; forget any label seen on the
   previous line so md_assemble only aligns labels on its own line.  */

void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
8385
/* Record SYM as the most recently defined label and emit DWARF
   line-number information for it.  */

void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8393
/* Section change hook: close any instruction sequence still open in
   the section being left.  */

void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8400
8401 int
8402 aarch64_data_in_code (void)
8403 {
8404 if (startswith (input_line_pointer + 1, "data:"))
8405 {
8406 *input_line_pointer = '/';
8407 input_line_pointer += 5;
8408 *input_line_pointer = 0;
8409 return 1;
8410 }
8411
8412 return 0;
8413 }
8414
/* Canonicalize NAME in place by stripping a trailing "/data" suffix
   (as produced by aarch64_data_in_code).  Return NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8425 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF defines a canonical register name; REGDEF_ALIAS differs only
   in its final field (false rather than true) to mark alternative
   spellings such as ip0/fp/lr.  REGNUM/REGNUMS paste a numeric (and
   optional slice) suffix onto the prefix.  REGSET31 covers registers
   0-30 only; register 31 is added separately by REGSET, since its name
   is context-dependent (sp/zr entries below).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET16S(p,s,t) \
  REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
  REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
  REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
  REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, ZR_32), REGDEF (WZR, 31, ZR_32),
  REGDEF (xzr, 31, ZR_64), REGDEF (XZR, 31, ZR_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, V), REGSET (V, V),

  /* SVE vector registers.  */
  REGSET (z, Z), REGSET (Z, Z),

  /* SVE predicate(-as-mask) registers.  */
  REGSET16 (p, P), REGSET16 (P, P),

  /* SVE predicate-as-counter registers.  */
  REGSET16 (pn, PN), REGSET16 (PN, PN),

  /* SME ZA.  We model this as a register because it acts syntactically
     like ZA0H, supporting qualifier suffixes and indexing.  */
  REGDEF (za, 0, ZA), REGDEF (ZA, 0, ZA),

  /* SME ZA tile registers.  */
  REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),

  /* SME ZA tile registers (horizontal slice).  */
  REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),

  /* SME ZA tile registers (vertical slice).  */
  REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV),

  /* SME2 ZT0.  */
  REGDEF (zt0, 0, ZT0), REGDEF (ZT0, 0, ZT0)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
8520
/* Build the table of NZCV condition-flag operand names.  B packs the
   four flag bits into a 4-bit value with N as the most significant bit
   (N << 3 | Z << 2 | C << 1 | V); an uppercase letter in the name means
   the corresponding bit is set.  The N/n etc. macros are local helpers
   and are #undef'd immediately after the table.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8558 \f
8559 /* MD interface: bits in the object file. */
8560
8561 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8562 for use in the a.out file, and stores them in the array pointed to by buf.
8563 This knows about the endian-ness of the target machine and does
8564 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8565 2 (short) and 4 (long) Floating numbers are put out as a series of
8566 LITTLENUMS (shorts, here at least). */
8567
8568 void
8569 md_number_to_chars (char *buf, valueT val, int n)
8570 {
8571 if (target_big_endian)
8572 number_to_chars_bigendian (buf, val, n);
8573 else
8574 number_to_chars_littleendian (buf, val, n);
8575 }
8576
8577 /* MD interface: Sections. */
8578
8579 /* Estimate the size of a frag before relaxing. Assume everything fits in
8580 4 bytes. */
8581
8582 int
8583 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8584 {
8585 fragp->fr_var = 4;
8586 return 4;
8587 }
8588
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra rounding is applied here; sections keep their natural
     size.  */
  return size;
}
8596
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total amount of padding this frag has to supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach the next 4-byte boundary; they become part of
     the fixed portion and are zero-filled, since no instruction can
     start at a misaligned address.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating (fr_var) part is a single NOP word.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8654
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping-symbol state, so there is
   nothing to initialise.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}
8667
#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      /* Fill frags hold data, not instructions.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
8700
8701 /* Whether SFrame stack trace info is supported. */
8702
8703 bool
8704 aarch64_support_sframe_p (void)
8705 {
8706 /* At this time, SFrame is supported for aarch64 only. */
8707 return (aarch64_abi == AARCH64_ABI_LP64);
8708 }
8709
/* Specify if RA tracking is needed.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  /* The return-address register is always tracked on AArch64.  */
  return true;
}
8717
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  /* RA tracking is always enabled (see aarch64_sframe_ra_tracking_p),
     so no fixed RA offset is meaningful.  */
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8726
8727 /* Get the abi/arch indentifier for SFrame. */
8728
8729 unsigned char
8730 aarch64_sframe_get_abi_arch (void)
8731 {
8732 unsigned char sframe_abi_arch = 0;
8733
8734 if (aarch64_support_sframe_p ())
8735 {
8736 sframe_abi_arch = target_big_endian
8737 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8738 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8739 }
8740
8741 return sframe_abi_arch;
8742 }
8743
8744 #endif /* OBJ_ELF */
8745 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* At function entry the CFA is defined as SP + 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8753
8754 /* Convert REGNAME to a DWARF-2 register number. */
8755
8756 int
8757 tc_aarch64_regname_to_dw2regnum (char *regname)
8758 {
8759 const reg_entry *reg = parse_reg (&regname);
8760 if (reg == NULL)
8761 return -1;
8762
8763 switch (reg->type)
8764 {
8765 case REG_TYPE_SP_32:
8766 case REG_TYPE_SP_64:
8767 case REG_TYPE_R_32:
8768 case REG_TYPE_R_64:
8769 return reg->number;
8770
8771 case REG_TYPE_FP_B:
8772 case REG_TYPE_FP_H:
8773 case REG_TYPE_FP_S:
8774 case REG_TYPE_FP_D:
8775 case REG_TYPE_FP_Q:
8776 return reg->number + 64;
8777
8778 default:
8779 break;
8780 }
8781 return -1;
8782 }
8783
8784 /* Implement DWARF2_ADDR_SIZE. */
8785
8786 int
8787 aarch64_dwarf2_addr_size (void)
8788 {
8789 if (ilp32_p)
8790 return 4;
8791 else if (llp64_p)
8792 return 8;
8793 return bfd_arch_bits_per_address (stdoutput) / 8;
8794 }
8795
8796 /* MD interface: Symbol and relocation handling. */
8797
8798 /* Return the address within the segment that a PC-relative fixup is
8799 relative to. For AArch64 PC-relative fixups applied to instructions
8800 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
8801
8802 long
8803 md_pcrel_from_section (fixS * fixP, segT seg)
8804 {
8805 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
8806
8807 /* If this is pc-relative and we are going to emit a relocation
8808 then we just want to put out any pipeline compensation that the linker
8809 will need. Otherwise we want to use the calculated base. */
8810 if (fixP->fx_pcrel
8811 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
8812 || aarch64_force_relocation (fixP)))
8813 base = 0;
8814
8815 /* AArch64 should be consistent for all pc-relative relocations. */
8816 return base + AARCH64_PCREL_OFFSET;
8817 }
8818
8819 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
8820 Otherwise we have no need to default values of symbols. */
8821
8822 symbolS *
8823 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
8824 {
8825 #ifdef OBJ_ELF
8826 if (name[0] == '_' && name[1] == 'G'
8827 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
8828 {
8829 if (!GOT_symbol)
8830 {
8831 if (symbol_find (name))
8832 as_bad (_("GOT already in the symbol table"));
8833
8834 GOT_symbol = symbol_new (name, undefined_section,
8835 &zero_address_frag, 0);
8836 }
8837
8838 return GOT_symbol;
8839 }
8840 #endif
8841
8842 return 0;
8843 }
8844
8845 /* Return non-zero if the indicated VALUE has overflowed the maximum
8846 range expressible by a unsigned number with the indicated number of
8847 BITS. */
8848
8849 static bool
8850 unsigned_overflow (valueT value, unsigned bits)
8851 {
8852 valueT lim;
8853 if (bits >= sizeof (valueT) * 8)
8854 return false;
8855 lim = (valueT) 1 << bits;
8856 return (value >= lim);
8857 }
8858
8859
8860 /* Return non-zero if the indicated VALUE has overflowed the maximum
8861 range expressible by an signed number with the indicated number of
8862 BITS. */
8863
8864 static bool
8865 signed_overflow (offsetT value, unsigned bits)
8866 {
8867 offsetT lim;
8868 if (bits >= sizeof (offsetT) * 8)
8869 return false;
8870 lim = (offsetT) 1 << (bits - 1);
8871 return (value < -lim || value >= lim);
8872 }
8873
8874 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
8875 unsigned immediate offset load/store instruction, try to encode it as
8876 an unscaled, 9-bit, signed immediate offset load/store instruction.
8877 Return TRUE if it is successful; otherwise return FALSE.
8878
8879 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
8880 in response to the standard LDR/STR mnemonics when the immediate offset is
8881 unambiguous, i.e. when it is negative or unaligned. */
8882
8883 static bool
8884 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
8885 {
8886 int idx;
8887 enum aarch64_op new_op;
8888 const aarch64_opcode *new_opcode;
8889
8890 gas_assert (instr->opcode->iclass == ldst_pos);
8891
8892 switch (instr->opcode->op)
8893 {
8894 case OP_LDRB_POS:new_op = OP_LDURB; break;
8895 case OP_STRB_POS: new_op = OP_STURB; break;
8896 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
8897 case OP_LDRH_POS: new_op = OP_LDURH; break;
8898 case OP_STRH_POS: new_op = OP_STURH; break;
8899 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
8900 case OP_LDR_POS: new_op = OP_LDUR; break;
8901 case OP_STR_POS: new_op = OP_STUR; break;
8902 case OP_LDRF_POS: new_op = OP_LDURV; break;
8903 case OP_STRF_POS: new_op = OP_STURV; break;
8904 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
8905 case OP_PRFM_POS: new_op = OP_PRFUM; break;
8906 default: new_op = OP_NIL; break;
8907 }
8908
8909 if (new_op == OP_NIL)
8910 return false;
8911
8912 new_opcode = aarch64_get_opcode (new_op);
8913 gas_assert (new_opcode != NULL);
8914
8915 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
8916 instr->opcode->op, new_opcode->op);
8917
8918 aarch64_replace_opcode (instr, new_opcode);
8919
8920 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
8921 qualifier matching may fail because the out-of-date qualifier will
8922 prevent the operand being updated with a new and correct qualifier. */
8923 idx = aarch64_operand_index (instr->opcode->operands,
8924 AARCH64_OPND_ADDR_SIMM9);
8925 gas_assert (idx == 1);
8926 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
8927
8928 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
8929
8930 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
8931 insn_sequence))
8932 return false;
8933
8934 return true;
8935 }
8936
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias first, so it is preferred when both wide
	 moves would work (see the reversability note above).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN); the contract above only
	 allows MOVZ, MOVN or ORR.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8997
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Address of the instruction word being patched.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      /* EXCEPTION immediates need field encoding; UNDEFINED's 16-bit
	 immediate is OR-ed in directly.  */
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6  Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6  Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6  Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6  Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2  221111111111
		  1  098|7654|3 2  109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12       Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12       Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12       Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12       Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  If the scaled encoding fails, fall back to the
	 unscaled (LDUR/STUR) form where the opcode allows it.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
9176
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Address of the bytes being patched within the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* The MOVW family share one patching routine (movw_common below);
       SCALE selects which 16-bit chunk of the value the insn moves.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Narrow the generic reloc to the 32- or 64-bit variant matching
	 the selected ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32_SECREL:
    case BFD_RELOC_16_SECIDX:
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated struct aarch64_inst, if any.
     N.B. currently there are a very limited number of fix-up types that
     actually use this field, so the impact on performance should be
     minimal.  */
  free (fixP->tc_fix_data.inst);

  return;
}
9569
9570 /* Translate internal representation of relocation info to BFD target
9571 format. */
9572
9573 arelent *
9574 tc_gen_reloc (asection * section, fixS * fixp)
9575 {
9576 arelent *reloc;
9577 bfd_reloc_code_real_type code;
9578
9579 reloc = XNEW (arelent);
9580
9581 reloc->sym_ptr_ptr = XNEW (asymbol *);
9582 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9583 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9584
9585 if (fixp->fx_pcrel)
9586 {
9587 if (section->use_rela_p)
9588 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9589 else
9590 fixp->fx_offset = reloc->address;
9591 }
9592 reloc->addend = fixp->fx_offset;
9593
9594 code = fixp->fx_r_type;
9595 switch (code)
9596 {
9597 case BFD_RELOC_16:
9598 if (fixp->fx_pcrel)
9599 code = BFD_RELOC_16_PCREL;
9600 break;
9601
9602 case BFD_RELOC_32:
9603 if (fixp->fx_pcrel)
9604 code = BFD_RELOC_32_PCREL;
9605 break;
9606
9607 case BFD_RELOC_64:
9608 if (fixp->fx_pcrel)
9609 code = BFD_RELOC_64_PCREL;
9610 break;
9611
9612 default:
9613 break;
9614 }
9615
9616 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9617 if (reloc->howto == NULL)
9618 {
9619 as_bad_where (fixp->fx_file, fixp->fx_line,
9620 _
9621 ("cannot represent %s relocation in this object file format"),
9622 bfd_get_reloc_code_name (code));
9623 return NULL;
9624 }
9625
9626 return reloc;
9627 }
9628
9629 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9630
9631 void
9632 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9633 {
9634 bfd_reloc_code_real_type type;
9635 int pcrel = 0;
9636
9637 #ifdef TE_PE
9638 if (exp->X_op == O_secrel)
9639 {
9640 exp->X_op = O_symbol;
9641 type = BFD_RELOC_32_SECREL;
9642 }
9643 else if (exp->X_op == O_secidx)
9644 {
9645 exp->X_op = O_symbol;
9646 type = BFD_RELOC_16_SECIDX;
9647 }
9648 else
9649 {
9650 #endif
9651 /* Pick a reloc.
9652 FIXME: @@ Should look at CPU word size. */
9653 switch (size)
9654 {
9655 case 1:
9656 type = BFD_RELOC_8;
9657 break;
9658 case 2:
9659 type = BFD_RELOC_16;
9660 break;
9661 case 4:
9662 type = BFD_RELOC_32;
9663 break;
9664 case 8:
9665 type = BFD_RELOC_64;
9666 break;
9667 default:
9668 as_bad (_("cannot do %u-byte relocation"), size);
9669 type = BFD_RELOC_UNUSED;
9670 break;
9671 }
9672 #ifdef TE_PE
9673 }
9674 #endif
9675
9676 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9677 }
9678
9679 /* Implement md_after_parse_args. This is the earliest time we need to decide
9680 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9681
9682 void
9683 aarch64_after_parse_args (void)
9684 {
9685 if (aarch64_abi != AARCH64_ABI_NONE)
9686 return;
9687
9688 #ifdef OBJ_ELF
9689 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9690 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9691 aarch64_abi = AARCH64_ABI_ILP32;
9692 else
9693 aarch64_abi = AARCH64_ABI_LP64;
9694 #else
9695 aarch64_abi = AARCH64_ABI_LLP64;
9696 #endif
9697 }
9698
9699 #ifdef OBJ_ELF
9700 const char *
9701 elf64_aarch64_target_format (void)
9702 {
9703 #ifdef TE_CLOUDABI
9704 /* FIXME: What to do for ilp32_p ? */
9705 if (target_big_endian)
9706 return "elf64-bigaarch64-cloudabi";
9707 else
9708 return "elf64-littleaarch64-cloudabi";
9709 #else
9710 if (target_big_endian)
9711 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9712 else
9713 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9714 #endif
9715 }
9716
/* Per-symbol hook run while writing out the symbol table; simply
   defers to the generic ELF implementation.  SYMP is the symbol,
   *PUNTP tells the caller whether to discard it.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9722 #elif defined OBJ_COFF
/* Return the COFF/PE target format name; only little-endian PE
   output is supported.  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9728 #endif
9729
9730 /* MD interface: Finalization. */
9731
9732 /* A good place to do this, although this was probably not intended
9733 for this kind of use. We need to dump the literal pool before
9734 references are made to a null symbol pointer. */
9735
9736 void
9737 aarch64_cleanup (void)
9738 {
9739 literal_pool *pool;
9740
9741 for (pool = list_of_pools; pool; pool = pool->next)
9742 {
9743 /* Put it at the end of the relevant section. */
9744 subseg_set (pool->section, pool->sub_section);
9745 s_ltorg (0);
9746 }
9747 }
9748
9749 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called once per output section via
   bfd_map_over_sections.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* The mapping symbol sits exactly on the boundary with the next
	 frag; scan forward to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9813 #endif
9814
/* Adjust the symbol table before it is written out.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags
     (see check_mapping_symbols above).  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9827
/* Insert (KEY, VALUE) into TABLE.  The final 0 argument requests
   non-replacing insertion, so an existing entry for KEY is presumably
   left untouched — confirm against str_hash_insert's contract.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9833
/* Like checked_hash_insert, but additionally assert that KEY fits in
   the fixed-size buffers used for system register names.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9840
9841 static void
9842 fill_instruction_hash_table (void)
9843 {
9844 const aarch64_opcode *opcode = aarch64_opcode_table;
9845
9846 while (opcode->name != NULL)
9847 {
9848 templates *templ, *new_templ;
9849 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9850
9851 new_templ = XNEW (templates);
9852 new_templ->opcode = opcode;
9853 new_templ->next = NULL;
9854
9855 if (!templ)
9856 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9857 else
9858 {
9859 new_templ->next = templ->next;
9860 templ->next = new_templ;
9861 }
9862 ++opcode;
9863 }
9864 }
9865
/* Copy at most NUM characters of SRC into DST, upper-casing each one,
   and NUL-terminate DST.  DST must have room for NUM + 1 bytes.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t remaining = num;

  while (remaining-- > 0 && *src != '\0')
    *dst++ = TOUPPER (*src++);
  *dst = '\0';
}
9874
9875 /* Assume STR point to a lower-case string, allocate, convert and return
9876 the corresponding upper-case string. */
9877 static inline const char*
9878 get_upper_str (const char *str)
9879 {
9880 char *ret;
9881 size_t len = strlen (str);
9882 ret = XNEWVEC (char, len + 1);
9883 convert_to_upper (ret, str, len);
9884 return ret;
9885 }
9886
9887 /* MD interface: Initialization. */
9888
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables backing mnemonic and operand lookup during
     assembly.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  /* Chain every opcode template under its mnemonic.  */
  fill_instruction_hash_table ();

  /* System register and pstate-field tables (NULL-name terminated).  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  /* Cache-maintenance (IC/DC), address-translation (AT), TLB
     invalidation (TLBI) and speculation-restriction (SR) operands.  */
  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* Register names and NZCV condition-flag immediates.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* FIXME - is there a better way to do it ?  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29; /* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
10058
10059 /* Command line processing. */
10060
/* Single-letter options: -m<...> takes an argument.  */
const char *md_shortopts = "m:";

/* Endianness flags: a bi-endian target accepts both -EB and -EL;
   otherwise only the flag matching the configured byte order is
   defined.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
10085
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Simple boolean options handled by md_parse_option via table
   lookup.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
10108
struct aarch64_cpu_option_table
{
  const char *name;		/* -mcpu= name to match.  */
  const aarch64_feature_set value;  /* Features implied by this CPU.  */
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ALL_FEATURES, NULL},
  {"cortex-a34", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_CPU_FEATURES (V8A, 1, CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_CPU_FEATURES (V8_2A, 3, RCPC, F16, DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
					 SSBS), "Cortex-A76AE"},
  {"cortex-a77", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
				       SSBS), "Cortex-A77"},
  {"cortex-a65", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
				       SSBS), "Cortex-A65"},
  {"cortex-a65ae", AARCH64_CPU_FEATURES (V8_2A, 4, F16, RCPC, DOTPROD,
					 SSBS), "Cortex-A65AE"},
  {"cortex-a78", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
				       SSBS, PROFILE), "Cortex-A78"},
  {"cortex-a78ae", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
					 SSBS, PROFILE), "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_CPU_FEATURES (V8_2A, 7, DOTPROD, F16, FLAGM,
					PAC, PROFILE, RCPC, SSBS),
   "Cortex-A78C"},
  {"cortex-a510", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
					SVE2_BITPERM), "Cortex-A510"},
  {"cortex-a520", AARCH64_CPU_FEATURES (V9_2A, 2, MEMTAG, SVE2_BITPERM),
   "Cortex-A520"},
  {"cortex-a710", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
					SVE2_BITPERM), "Cortex-A710"},
  {"cortex-a720", AARCH64_CPU_FEATURES (V9_2A, 3, MEMTAG, PROFILE,
					SVE2_BITPERM), "Cortex-A720"},
  {"ares", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
				 PROFILE), "Ares"},
  {"exynos-m1", AARCH64_CPU_FEATURES (V8A, 3, CRC, SHA2, AES),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_CPU_FEATURES (V8A, 4, CRC, SHA2, AES, RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
					SSBS), "Neoverse E1"},
  {"neoverse-n1", AARCH64_CPU_FEATURES (V8_2A, 4, RCPC, F16, DOTPROD,
					PROFILE), "Neoverse N1"},
  {"neoverse-n2", AARCH64_CPU_FEATURES (V8_5A, 8, BFLOAT16, I8MM, F16,
					SVE, SVE2, SVE2_BITPERM, MEMTAG,
					RNG), "Neoverse N2"},
  {"neoverse-v1", AARCH64_CPU_FEATURES (V8_4A, 8, PROFILE, CVADP, SVE,
					SSBS, RNG, F16, BFLOAT16, I8MM),
   "Neoverse V1"},
  {"qdf24xx", AARCH64_CPU_FEATURES (V8A, 4, CRC, SHA2, AES, RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_CPU_FEATURES (V8_4A, 3, SHA2, AES, PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_CPU_FEATURES (V8A, 3, CRC, SHA2, AES),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_CPU_FEATURES (V8_1A, 2, SHA2, AES),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_FEATURES (V8A), "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_FEATURES (V8A), "APM X-Gene 1"},
  {"xgene2", AARCH64_CPU_FEATURES (V8A, 1, CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_FEATURES (V8R), "Cortex-R82"},
  {"cortex-x1", AARCH64_CPU_FEATURES (V8_2A, 5, F16, RCPC, DOTPROD,
				      SSBS, PROFILE), "Cortex-X1"},
  {"cortex-x2", AARCH64_CPU_FEATURES (V9A, 4, BFLOAT16, I8MM, MEMTAG,
				      SVE2_BITPERM), "Cortex-X2"},
  {"cortex-x4", AARCH64_CPU_FEATURES (V9_2A, 3, MEMTAG, PROFILE,
				      SVE2_BITPERM), "Cortex-X4"},
  {"generic", AARCH64_ARCH_FEATURES (V8A), NULL},

  /* Sentinel: terminates the table.  */
  {NULL, AARCH64_NO_FEATURES, NULL}
};
10198
struct aarch64_arch_option_table
{
  const char *name;		/* -march= name to match.  */
  const aarch64_feature_set value;  /* Features implied by this arch.  */
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ALL_FEATURES},
  {"armv8-a", AARCH64_ARCH_FEATURES (V8A)},
  {"armv8.1-a", AARCH64_ARCH_FEATURES (V8_1A)},
  {"armv8.2-a", AARCH64_ARCH_FEATURES (V8_2A)},
  {"armv8.3-a", AARCH64_ARCH_FEATURES (V8_3A)},
  {"armv8.4-a", AARCH64_ARCH_FEATURES (V8_4A)},
  {"armv8.5-a", AARCH64_ARCH_FEATURES (V8_5A)},
  {"armv8.6-a", AARCH64_ARCH_FEATURES (V8_6A)},
  {"armv8.7-a", AARCH64_ARCH_FEATURES (V8_7A)},
  {"armv8.8-a", AARCH64_ARCH_FEATURES (V8_8A)},
  {"armv8.9-a", AARCH64_ARCH_FEATURES (V8_9A)},
  {"armv8-r",	AARCH64_ARCH_FEATURES (V8R)},
  {"armv9-a",	AARCH64_ARCH_FEATURES (V9A)},
  {"armv9.1-a", AARCH64_ARCH_FEATURES (V9_1A)},
  {"armv9.2-a", AARCH64_ARCH_FEATURES (V9_2A)},
  {"armv9.3-a", AARCH64_ARCH_FEATURES (V9_3A)},
  {"armv9.4-a", AARCH64_ARCH_FEATURES (V9_4A)},
  /* Sentinel: terminates the table.  */
  {NULL, AARCH64_NO_FEATURES}
};
10227
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;		/* Extension name (+<name>).  */
  const aarch64_feature_set value;  /* Features the extension enables.  */
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* Extension table used by aarch64_parse_features; the REQUIRE column
   drives the transitive enable/disable closures computed by
   aarch64_feature_enable_set / aarch64_feature_disable_set.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (CRC), AARCH64_NO_FEATURES},
  {"crypto",		AARCH64_FEATURES (2, AES, SHA2),
			AARCH64_FEATURE (SIMD)},
  {"fp",		AARCH64_FEATURE (FP), AARCH64_NO_FEATURES},
  {"lse",		AARCH64_FEATURE (LSE), AARCH64_NO_FEATURES},
  {"lse128",		AARCH64_FEATURES (2, LSE, LSE128), AARCH64_NO_FEATURES},
  {"simd",		AARCH64_FEATURE (SIMD), AARCH64_FEATURE (FP)},
  {"pan",		AARCH64_FEATURE (PAN), AARCH64_NO_FEATURES},
  {"lor",		AARCH64_FEATURE (LOR), AARCH64_NO_FEATURES},
  {"ras",		AARCH64_FEATURE (RAS), AARCH64_NO_FEATURES},
  {"rdma",		AARCH64_FEATURE (RDMA), AARCH64_FEATURE (SIMD)},
  {"fp16",		AARCH64_FEATURE (F16), AARCH64_FEATURE (FP)},
  {"fp16fml",		AARCH64_FEATURE (F16_FML), AARCH64_FEATURE (F16)},
  {"profile",		AARCH64_FEATURE (PROFILE), AARCH64_NO_FEATURES},
  {"sve",		AARCH64_FEATURE (SVE), AARCH64_FEATURE (COMPNUM)},
  {"tme",		AARCH64_FEATURE (TME), AARCH64_NO_FEATURES},
  {"compnum",		AARCH64_FEATURE (COMPNUM),
			AARCH64_FEATURES (2, F16, SIMD)},
  {"rcpc",		AARCH64_FEATURE (RCPC), AARCH64_NO_FEATURES},
  {"dotprod",		AARCH64_FEATURE (DOTPROD), AARCH64_FEATURE (SIMD)},
  {"sha2",		AARCH64_FEATURE (SHA2), AARCH64_FEATURE (FP)},
  {"sb",		AARCH64_FEATURE (SB), AARCH64_NO_FEATURES},
  {"predres",		AARCH64_FEATURE (PREDRES), AARCH64_NO_FEATURES},
  {"aes",		AARCH64_FEATURE (AES), AARCH64_FEATURE (SIMD)},
  {"sm4",		AARCH64_FEATURE (SM4), AARCH64_FEATURE (SIMD)},
  {"sha3",		AARCH64_FEATURE (SHA3), AARCH64_FEATURE (SHA2)},
  {"rng",		AARCH64_FEATURE (RNG), AARCH64_NO_FEATURES},
  {"ssbs",		AARCH64_FEATURE (SSBS), AARCH64_NO_FEATURES},
  {"memtag",		AARCH64_FEATURE (MEMTAG), AARCH64_NO_FEATURES},
  {"sve2",		AARCH64_FEATURE (SVE2), AARCH64_FEATURE (SVE)},
  {"sve2-sm4",		AARCH64_FEATURE (SVE2_SM4),
			AARCH64_FEATURES (2, SVE2, SM4)},
  {"sve2-aes",		AARCH64_FEATURE (SVE2_AES),
			AARCH64_FEATURES (2, SVE2, AES)},
  {"sve2-sha3",		AARCH64_FEATURE (SVE2_SHA3),
			AARCH64_FEATURES (2, SVE2, SHA3)},
  {"sve2-bitperm",	AARCH64_FEATURE (SVE2_BITPERM),
			AARCH64_FEATURE (SVE2)},
  {"sme",		AARCH64_FEATURE (SME),
			AARCH64_FEATURES (2, SVE2, BFLOAT16)},
  {"sme-f64",		AARCH64_FEATURE (SME_F64F64), AARCH64_FEATURE (SME)},
  {"sme-f64f64",	AARCH64_FEATURE (SME_F64F64), AARCH64_FEATURE (SME)},
  {"sme-i64",		AARCH64_FEATURE (SME_I16I64), AARCH64_FEATURE (SME)},
  {"sme-i16i64",	AARCH64_FEATURE (SME_I16I64), AARCH64_FEATURE (SME)},
  {"sme2",		AARCH64_FEATURE (SME2), AARCH64_FEATURE (SME)},
  {"bf16",		AARCH64_FEATURE (BFLOAT16), AARCH64_FEATURE (FP)},
  {"i8mm",		AARCH64_FEATURE (I8MM), AARCH64_FEATURE (SIMD)},
  {"f32mm",		AARCH64_FEATURE (F32MM), AARCH64_FEATURE (SVE)},
  {"f64mm",		AARCH64_FEATURE (F64MM), AARCH64_FEATURE (SVE)},
  {"ls64",		AARCH64_FEATURE (LS64), AARCH64_NO_FEATURES},
  {"flagm",		AARCH64_FEATURE (FLAGM), AARCH64_NO_FEATURES},
  {"pauth",		AARCH64_FEATURE (PAC), AARCH64_NO_FEATURES},
  {"mops",		AARCH64_FEATURE (MOPS), AARCH64_NO_FEATURES},
  {"hbc",		AARCH64_FEATURE (HBC), AARCH64_NO_FEATURES},
  {"cssc",		AARCH64_FEATURE (CSSC), AARCH64_NO_FEATURES},
  {"chk",		AARCH64_FEATURE (CHK), AARCH64_NO_FEATURES},
  {"gcs",		AARCH64_FEATURE (GCS), AARCH64_NO_FEATURES},
  {"the",		AARCH64_FEATURE (THE), AARCH64_NO_FEATURES},
  /* Sentinel: terminates the table.  */
  {NULL,		AARCH64_NO_FEATURES, AARCH64_NO_FEATURES},
};
10297
/* Table entry for prefix-matched long options (-mabi=, -mcpu=,
   -march=) whose suffix is decoded by a callback.  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
10305
10306 /* Transitive closure of features depending on set. */
10307 static aarch64_feature_set
10308 aarch64_feature_disable_set (aarch64_feature_set set)
10309 {
10310 const struct aarch64_option_cpu_value_table *opt;
10311 aarch64_feature_set prev = AARCH64_NO_FEATURES;
10312
10313 while (!AARCH64_CPU_HAS_ALL_FEATURES (prev, set))
10314 {
10315 prev = set;
10316 for (opt = aarch64_features; opt->name != NULL; opt++)
10317 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10318 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10319 }
10320 return set;
10321 }
10322
10323 /* Transitive closure of dependencies of set. */
10324 static aarch64_feature_set
10325 aarch64_feature_enable_set (aarch64_feature_set set)
10326 {
10327 const struct aarch64_option_cpu_value_table *opt;
10328 aarch64_feature_set prev = AARCH64_NO_FEATURES;
10329
10330 while (!AARCH64_CPU_HAS_ALL_FEATURES (prev, set))
10331 {
10332 prev = set;
10333 for (opt = aarch64_features; opt->name != NULL; opt++)
10334 if (AARCH64_CPU_HAS_ALL_FEATURES (set, opt->value))
10335 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10336 }
10337 return set;
10338 }
10339
10340 static int
10341 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10342 bool ext_only)
10343 {
10344 /* We insist on extensions being added before being removed. We achieve
10345 this by using the ADDING_VALUE variable to indicate whether we are
10346 adding an extension (1) or removing it (0) and only allowing it to
10347 change in the order -1 -> 1 -> 0. */
10348 int adding_value = -1;
10349 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10350
10351 /* Copy the feature set, so that we can modify it. */
10352 *ext_set = **opt_p;
10353 *opt_p = ext_set;
10354
10355 while (str != NULL && *str != 0)
10356 {
10357 const struct aarch64_option_cpu_value_table *opt;
10358 const char *ext = NULL;
10359 int optlen;
10360
10361 if (!ext_only)
10362 {
10363 if (*str != '+')
10364 {
10365 as_bad (_("invalid architectural extension"));
10366 return 0;
10367 }
10368
10369 ext = strchr (++str, '+');
10370 }
10371
10372 if (ext != NULL)
10373 optlen = ext - str;
10374 else
10375 optlen = strlen (str);
10376
10377 if (optlen >= 2 && startswith (str, "no"))
10378 {
10379 if (adding_value != 0)
10380 adding_value = 0;
10381 optlen -= 2;
10382 str += 2;
10383 }
10384 else if (optlen > 0)
10385 {
10386 if (adding_value == -1)
10387 adding_value = 1;
10388 else if (adding_value != 1)
10389 {
10390 as_bad (_("must specify extensions to add before specifying "
10391 "those to remove"));
10392 return false;
10393 }
10394 }
10395
10396 if (optlen == 0)
10397 {
10398 as_bad (_("missing architectural extension"));
10399 return 0;
10400 }
10401
10402 gas_assert (adding_value != -1);
10403
10404 for (opt = aarch64_features; opt->name != NULL; opt++)
10405 if (strncmp (opt->name, str, optlen) == 0)
10406 {
10407 aarch64_feature_set set;
10408
10409 /* Add or remove the extension. */
10410 if (adding_value)
10411 {
10412 set = aarch64_feature_enable_set (opt->value);
10413 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10414 }
10415 else
10416 {
10417 set = aarch64_feature_disable_set (opt->value);
10418 AARCH64_CLEAR_FEATURES (*ext_set, *ext_set, set);
10419 }
10420 break;
10421 }
10422
10423 if (opt->name == NULL)
10424 {
10425 as_bad (_("unknown architectural extension `%s'"), str);
10426 return 0;
10427 }
10428
10429 str = ext;
10430 };
10431
10432 return 1;
10433 }
10434
10435 static int
10436 aarch64_parse_cpu (const char *str)
10437 {
10438 const struct aarch64_cpu_option_table *opt;
10439 const char *ext = strchr (str, '+');
10440 size_t optlen;
10441
10442 if (ext != NULL)
10443 optlen = ext - str;
10444 else
10445 optlen = strlen (str);
10446
10447 if (optlen == 0)
10448 {
10449 as_bad (_("missing cpu name `%s'"), str);
10450 return 0;
10451 }
10452
10453 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10454 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10455 {
10456 mcpu_cpu_opt = &opt->value;
10457 if (ext != NULL)
10458 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10459
10460 return 1;
10461 }
10462
10463 as_bad (_("unknown cpu `%s'"), str);
10464 return 0;
10465 }
10466
10467 static int
10468 aarch64_parse_arch (const char *str)
10469 {
10470 const struct aarch64_arch_option_table *opt;
10471 const char *ext = strchr (str, '+');
10472 size_t optlen;
10473
10474 if (ext != NULL)
10475 optlen = ext - str;
10476 else
10477 optlen = strlen (str);
10478
10479 if (optlen == 0)
10480 {
10481 as_bad (_("missing architecture name `%s'"), str);
10482 return 0;
10483 }
10484
10485 for (opt = aarch64_archs; opt->name != NULL; opt++)
10486 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10487 {
10488 march_cpu_opt = &opt->value;
10489 if (ext != NULL)
10490 return aarch64_parse_features (ext, &march_cpu_opt, false);
10491
10492 return 1;
10493 }
10494
10495 as_bad (_("unknown architecture `%s'\n"), str);
10496 return 0;
10497 }
10498
/* ABIs.  */

/* Association of a command-line ABI name with the internal
   aarch64_abi_type value it selects.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name as written after -mabi=.  */
  enum aarch64_abi_type value;	/* Corresponding internal ABI code.  */
};

/* Table of the ABIs selectable with -mabi=.  ELF configurations offer
   ilp32 and lp64; non-ELF configurations offer only llp64.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10514
10515 static int
10516 aarch64_parse_abi (const char *str)
10517 {
10518 unsigned int i;
10519
10520 if (str[0] == '\0')
10521 {
10522 as_bad (_("missing abi name `%s'"), str);
10523 return 0;
10524 }
10525
10526 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10527 if (strcmp (str, aarch64_abis[i].name) == 0)
10528 {
10529 aarch64_abi = aarch64_abis[i].value;
10530 return 1;
10531 }
10532
10533 as_bad (_("unknown abi `%s'\n"), str);
10534 return 0;
10535 }
10536
/* Long-form command-line options ("-mabi=", "-mcpu=", "-march=").
   Each entry pairs the option prefix (without the leading dash) with a
   usage string and the parser invoked on the text after the '='.  The
   final NULL entry terminates the table for the loops in
   md_parse_option and md_show_usage.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10546
10547 int
10548 md_parse_option (int c, const char *arg)
10549 {
10550 struct aarch64_option_table *opt;
10551 struct aarch64_long_option_table *lopt;
10552
10553 switch (c)
10554 {
10555 #ifdef OPTION_EB
10556 case OPTION_EB:
10557 target_big_endian = 1;
10558 break;
10559 #endif
10560
10561 #ifdef OPTION_EL
10562 case OPTION_EL:
10563 target_big_endian = 0;
10564 break;
10565 #endif
10566
10567 case 'a':
10568 /* Listing option. Just ignore these, we don't support additional
10569 ones. */
10570 return 0;
10571
10572 default:
10573 for (opt = aarch64_opts; opt->option != NULL; opt++)
10574 {
10575 if (c == opt->option[0]
10576 && ((arg == NULL && opt->option[1] == 0)
10577 || streq (arg, opt->option + 1)))
10578 {
10579 /* If the option is deprecated, tell the user. */
10580 if (opt->deprecated != NULL)
10581 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
10582 arg ? arg : "", _(opt->deprecated));
10583
10584 if (opt->var != NULL)
10585 *opt->var = opt->value;
10586
10587 return 1;
10588 }
10589 }
10590
10591 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10592 {
10593 /* These options are expected to have an argument. */
10594 if (c == lopt->option[0]
10595 && arg != NULL
10596 && startswith (arg, lopt->option + 1))
10597 {
10598 /* If the option is deprecated, tell the user. */
10599 if (lopt->deprecated != NULL)
10600 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
10601 _(lopt->deprecated));
10602
10603 /* Call the sup-option parser. */
10604 return lopt->func (arg + strlen (lopt->option) - 1);
10605 }
10606 }
10607
10608 return 0;
10609 }
10610
10611 return 1;
10612 }
10613
10614 void
10615 md_show_usage (FILE * fp)
10616 {
10617 struct aarch64_option_table *opt;
10618 struct aarch64_long_option_table *lopt;
10619
10620 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10621
10622 for (opt = aarch64_opts; opt->option != NULL; opt++)
10623 if (opt->help != NULL)
10624 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10625
10626 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10627 if (lopt->help != NULL)
10628 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10629
10630 #ifdef OPTION_EB
10631 fprintf (fp, _("\
10632 -EB assemble code for a big-endian cpu\n"));
10633 #endif
10634
10635 #ifdef OPTION_EL
10636 fprintf (fp, _("\
10637 -EL assemble code for a little-endian cpu\n"));
10638 #endif
10639 }
10640
10641 /* Parse a .cpu directive. */
10642
10643 static void
10644 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10645 {
10646 const struct aarch64_cpu_option_table *opt;
10647 char saved_char;
10648 char *name;
10649 char *ext;
10650 size_t optlen;
10651
10652 name = input_line_pointer;
10653 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10654 saved_char = *input_line_pointer;
10655 *input_line_pointer = 0;
10656
10657 ext = strchr (name, '+');
10658
10659 if (ext != NULL)
10660 optlen = ext - name;
10661 else
10662 optlen = strlen (name);
10663
10664 /* Skip the first "all" entry. */
10665 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10666 if (strlen (opt->name) == optlen
10667 && strncmp (name, opt->name, optlen) == 0)
10668 {
10669 mcpu_cpu_opt = &opt->value;
10670 if (ext != NULL)
10671 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10672 return;
10673
10674 cpu_variant = *mcpu_cpu_opt;
10675
10676 *input_line_pointer = saved_char;
10677 demand_empty_rest_of_line ();
10678 return;
10679 }
10680 as_bad (_("unknown cpu `%s'"), name);
10681 *input_line_pointer = saved_char;
10682 ignore_rest_of_line ();
10683 }
10684
10685
10686 /* Parse a .arch directive. */
10687
10688 static void
10689 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10690 {
10691 const struct aarch64_arch_option_table *opt;
10692 char saved_char;
10693 char *name;
10694 char *ext;
10695 size_t optlen;
10696
10697 name = input_line_pointer;
10698 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10699 saved_char = *input_line_pointer;
10700 *input_line_pointer = 0;
10701
10702 ext = strchr (name, '+');
10703
10704 if (ext != NULL)
10705 optlen = ext - name;
10706 else
10707 optlen = strlen (name);
10708
10709 /* Skip the first "all" entry. */
10710 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10711 if (strlen (opt->name) == optlen
10712 && strncmp (name, opt->name, optlen) == 0)
10713 {
10714 mcpu_cpu_opt = &opt->value;
10715 if (ext != NULL)
10716 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10717 return;
10718
10719 cpu_variant = *mcpu_cpu_opt;
10720
10721 *input_line_pointer = saved_char;
10722 demand_empty_rest_of_line ();
10723 return;
10724 }
10725
10726 as_bad (_("unknown architecture `%s'\n"), name);
10727 *input_line_pointer = saved_char;
10728 ignore_rest_of_line ();
10729 }
10730
10731 /* Parse a .arch_extension directive. */
10732
10733 static void
10734 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10735 {
10736 char saved_char;
10737 char *ext = input_line_pointer;
10738
10739 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10740 saved_char = *input_line_pointer;
10741 *input_line_pointer = 0;
10742
10743 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10744 return;
10745
10746 cpu_variant = *mcpu_cpu_opt;
10747
10748 *input_line_pointer = saved_char;
10749 demand_empty_rest_of_line ();
10750 }
10751
/* Copy symbol information.  */

/* Copy the AArch64-specific symbol flag word from SRC to DEST.
   NOTE(review): presumably invoked through the target's
   copy-symbol-attributes hook when one symbol is made an alias of
   another -- confirm against the TC_COPY_SYMBOL_ATTRIBUTES
   definition in tc-aarch64.h.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10759
10760 #ifdef OBJ_ELF
10761 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10762 This is needed so AArch64 specific st_other values can be independently
10763 specified for an IFUNC resolver (that is called by the dynamic linker)
10764 and the symbol it resolves (aliased to the resolver). In particular,
10765 if a function symbol has special st_other value set via directives,
10766 then attaching an IFUNC resolver to that symbol should not override
10767 the st_other setting. Requiring the directive on the IFUNC resolver
10768 symbol would be unexpected and problematic in C code, where the two
10769 symbols appear as two independent function declarations. */
10770
10771 void
10772 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10773 {
10774 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10775 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10776 /* If size is unset, copy size from src. Because we don't track whether
10777 .size has been used, we can't differentiate .size dest, 0 from the case
10778 where dest's size is unset. */
10779 if (!destelf->size && S_GET_SIZE (dest) == 0)
10780 {
10781 if (srcelf->size)
10782 {
10783 destelf->size = XNEW (expressionS);
10784 *destelf->size = *srcelf->size;
10785 }
10786 S_SET_SIZE (dest, S_GET_SIZE (src));
10787 }
10788 }
10789 #endif