1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30
31 #include "aarch64-opc.h"
32
33 #ifdef DEBUG_AARCH64
34 int debug_dump = FALSE;
35 #endif /* DEBUG_AARCH64 */
36
37 /* Helper functions to determine which operand is to be used to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
39
40 static inline bfd_boolean
41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
42 {
43 return ((qualifier >= AARCH64_OPND_QLF_V_8B
44 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
45 : FALSE);
46 }
47
48 static inline bfd_boolean
49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
50 {
51 return ((qualifier >= AARCH64_OPND_QLF_S_B
52 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
53 : FALSE);
54 }
55
56 enum data_pattern
57 {
58 DP_UNKNOWN,
59 DP_VECTOR_3SAME,
60 DP_VECTOR_LONG,
61 DP_VECTOR_WIDE,
62 DP_VECTOR_ACROSS_LANES,
63 };
64
65 static const char significant_operand_index [] =
66 {
67 0, /* DP_UNKNOWN, by default using operand 0. */
68 0, /* DP_VECTOR_3SAME */
69 1, /* DP_VECTOR_LONG */
70 2, /* DP_VECTOR_WIDE */
71 1, /* DP_VECTOR_ACROSS_LANES */
72 };
73
74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
75 the data pattern.
76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77 corresponds to one of a sequence of operands. */
78
79 static enum data_pattern
80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
81 {
82 if (vector_qualifier_p (qualifiers[0]) == TRUE)
83 {
84 /* e.g. v.4s, v.4s, v.4s
85 or v.4h, v.4h, v.h[3]. */
86 if (qualifiers[0] == qualifiers[1]
87 && vector_qualifier_p (qualifiers[2]) == TRUE
88 && (aarch64_get_qualifier_esize (qualifiers[0])
89 == aarch64_get_qualifier_esize (qualifiers[1]))
90 && (aarch64_get_qualifier_esize (qualifiers[0])
91 == aarch64_get_qualifier_esize (qualifiers[2])))
92 return DP_VECTOR_3SAME;
93 /* e.g. v.8h, v.8b, v.8b.
94 or v.4s, v.4h, v.h[2].
95 or v.8h, v.16b. */
96 if (vector_qualifier_p (qualifiers[1]) == TRUE
97 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
98 && (aarch64_get_qualifier_esize (qualifiers[0])
99 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
100 return DP_VECTOR_LONG;
101 /* e.g. v.8h, v.8h, v.8b. */
102 if (qualifiers[0] == qualifiers[1]
103 && vector_qualifier_p (qualifiers[2]) == TRUE
104 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
105 && (aarch64_get_qualifier_esize (qualifiers[0])
106 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
107 && (aarch64_get_qualifier_esize (qualifiers[0])
108 == aarch64_get_qualifier_esize (qualifiers[1])))
109 return DP_VECTOR_WIDE;
110 }
111 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
112 {
113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
114 if (vector_qualifier_p (qualifiers[1]) == TRUE
115 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
116 return DP_VECTOR_ACROSS_LANES;
117 }
118
119 return DP_UNKNOWN;
120 }
121
122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
124 /* N.B. it is possible to do some optimization that doesn't call
125    get_data_pattern each time we need to select an operand.  We can
126    either cache the calculated result or statically generate the data;
127    however, it is not obvious that the optimization will bring significant
128    benefit.  */
129
130 int
131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
132 {
133 return
134 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
135 }
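/* Illustration: for UADDL <Vd>.8H, <Vn>.8B, <Vm>.8B the qualifier sequence
   (8H, 8B, 8B) matches DP_VECTOR_LONG above, so the function returns 1 and
   operand 1 (the narrower source register) is the one used to encode and
   decode the size:Q fields.  */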
136 \f
137 const aarch64_field fields[] =
138 {
139 { 0, 0 }, /* NIL. */
140 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
141 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
142 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
143 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
144 { 5, 19 }, /* imm19: e.g. in CBZ. */
145 { 5, 19 }, /* immhi: e.g. in ADRP. */
146 { 29, 2 }, /* immlo: e.g. in ADRP. */
147 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
148 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
149 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
150 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
151 { 0, 5 }, /* Rt: in load/store instructions. */
152 { 0, 5 }, /* Rd: in many integer instructions. */
153 { 5, 5 }, /* Rn: in many integer instructions. */
154 { 10, 5 }, /* Rt2: in load/store pair instructions. */
155 { 10, 5 }, /* Ra: in fp instructions. */
156 { 5, 3 }, /* op2: in the system instructions. */
157 { 8, 4 }, /* CRm: in the system instructions. */
158 { 12, 4 }, /* CRn: in the system instructions. */
159 { 16, 3 }, /* op1: in the system instructions. */
160 { 19, 2 }, /* op0: in the system instructions. */
161 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
162 { 12, 4 }, /* cond: condition flags as a source operand. */
163 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
164 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
165 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
166 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
167 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
168 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
169 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
170 { 12, 1 }, /* S: in load/store reg offset instructions. */
171 { 21, 2 }, /* hw: in move wide constant instructions. */
172 { 22, 2 }, /* opc: in load/store reg offset instructions. */
173 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
174 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
175 { 22, 2 }, /* type: floating point type field in fp data inst. */
176 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
177 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
178 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
179 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
180 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
181 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
182 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
183 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
184 { 5, 14 }, /* imm14: in test bit and branch instructions. */
185 { 5, 16 }, /* imm16: in exception instructions. */
186 { 0, 26 }, /* imm26: in unconditional branch instructions. */
187 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
188 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
189 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
190 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
191 { 22, 1 }, /* N: in logical (immediate) instructions. */
192 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
193 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
194 { 31, 1 }, /* sf: in integer data processing instructions. */
195 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
196 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
197 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
198 { 31, 1 }, /* b5: in the test bit and branch instructions. */
199 { 19, 5 }, /* b40: in the test bit and branch instructions. */
200 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
201 };
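/* A minimal sketch of how an entry in FIELDS is used, assuming the LSB and
   WIDTH members and the FLD_Rd enumerator declared in aarch64-opc.h:
   extract the destination register number from an instruction word.  */
#if 0
static inline unsigned int
example_extract_rd (aarch64_insn code)
{
  const aarch64_field *fld = &fields[FLD_Rd];
  return (code >> fld->lsb) & ((1u << fld->width) - 1);
}
#endif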
202
203 enum aarch64_operand_class
204 aarch64_get_operand_class (enum aarch64_opnd type)
205 {
206 return aarch64_operands[type].op_class;
207 }
208
209 const char *
210 aarch64_get_operand_name (enum aarch64_opnd type)
211 {
212 return aarch64_operands[type].name;
213 }
214
215 /* Get operand description string.
216    This is usually for diagnostic purposes.  */
217 const char *
218 aarch64_get_operand_desc (enum aarch64_opnd type)
219 {
220 return aarch64_operands[type].desc;
221 }
222
223 /* Table of all conditional affixes. */
224 const aarch64_cond aarch64_conds[16] =
225 {
226 {{"eq"}, 0x0},
227 {{"ne"}, 0x1},
228 {{"cs", "hs"}, 0x2},
229 {{"cc", "lo", "ul"}, 0x3},
230 {{"mi"}, 0x4},
231 {{"pl"}, 0x5},
232 {{"vs"}, 0x6},
233 {{"vc"}, 0x7},
234 {{"hi"}, 0x8},
235 {{"ls"}, 0x9},
236 {{"ge"}, 0xa},
237 {{"lt"}, 0xb},
238 {{"gt"}, 0xc},
239 {{"le"}, 0xd},
240 {{"al"}, 0xe},
241 {{"nv"}, 0xf},
242 };
243
244 const aarch64_cond *
245 get_cond_from_value (aarch64_insn value)
246 {
247 assert (value < 16);
248 return &aarch64_conds[(unsigned int) value];
249 }
250
251 const aarch64_cond *
252 get_inverted_cond (const aarch64_cond *cond)
253 {
254 return &aarch64_conds[cond->value ^ 0x1];
255 }
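/* Illustration: inverting a condition simply flips bit 0 of its encoding,
   e.g. EQ (0x0) <-> NE (0x1) and GE (0xa) <-> LT (0xb), matching the table
   above.  */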
256
257 /* Table describing the operand extension/shifting operators; indexed by
258 enum aarch64_modifier_kind.
259
260 The value column provides the most common values for encoding modifiers,
261 which enables table-driven encoding/decoding for the modifiers. */
262 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
263 {
264 {"none", 0x0},
265 {"msl", 0x0},
266 {"ror", 0x3},
267 {"asr", 0x2},
268 {"lsr", 0x1},
269 {"lsl", 0x0},
270 {"uxtb", 0x0},
271 {"uxth", 0x1},
272 {"uxtw", 0x2},
273 {"uxtx", 0x3},
274 {"sxtb", 0x4},
275 {"sxth", 0x5},
276 {"sxtw", 0x6},
277 {"sxtx", 0x7},
278 {NULL, 0},
279 };
280
281 enum aarch64_modifier_kind
282 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
283 {
284 return desc - aarch64_operand_modifiers;
285 }
286
287 aarch64_insn
288 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
289 {
290 return aarch64_operand_modifiers[kind].value;
291 }
292
293 enum aarch64_modifier_kind
294 aarch64_get_operand_modifier_from_value (aarch64_insn value,
295 bfd_boolean extend_p)
296 {
297 if (extend_p == TRUE)
298 return AARCH64_MOD_UXTB + value;
299 else
300 return AARCH64_MOD_LSL - value;
301 }
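/* Illustration: because the table above is indexed by enum
   aarch64_modifier_kind, a decoded extend value of 2 with EXTEND_P == TRUE
   yields AARCH64_MOD_UXTW, while a decoded shift value of 2 with
   EXTEND_P == FALSE yields AARCH64_MOD_ASR (whose table value is 0x2).  */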
302
303 bfd_boolean
304 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
305 {
306 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
307 ? TRUE : FALSE;
308 }
309
310 static inline bfd_boolean
311 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
312 {
313 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
314 ? TRUE : FALSE;
315 }
316
317 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
318 {
319 { "#0x00", 0x0 },
320 { "oshld", 0x1 },
321 { "oshst", 0x2 },
322 { "osh", 0x3 },
323 { "#0x04", 0x4 },
324 { "nshld", 0x5 },
325 { "nshst", 0x6 },
326 { "nsh", 0x7 },
327 { "#0x08", 0x8 },
328 { "ishld", 0x9 },
329 { "ishst", 0xa },
330 { "ish", 0xb },
331 { "#0x0c", 0xc },
332 { "ld", 0xd },
333 { "st", 0xe },
334 { "sy", 0xf },
335 };
336
337 /* op -> op: load = 0 instruction = 1 store = 2
338 l -> level: 1-3
339 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
340 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
341 const struct aarch64_name_value_pair aarch64_prfops[32] =
342 {
343 { "pldl1keep", B(0, 1, 0) },
344 { "pldl1strm", B(0, 1, 1) },
345 { "pldl2keep", B(0, 2, 0) },
346 { "pldl2strm", B(0, 2, 1) },
347 { "pldl3keep", B(0, 3, 0) },
348 { "pldl3strm", B(0, 3, 1) },
349 { NULL, 0x06 },
350 { NULL, 0x07 },
351 { "plil1keep", B(1, 1, 0) },
352 { "plil1strm", B(1, 1, 1) },
353 { "plil2keep", B(1, 2, 0) },
354 { "plil2strm", B(1, 2, 1) },
355 { "plil3keep", B(1, 3, 0) },
356 { "plil3strm", B(1, 3, 1) },
357 { NULL, 0x0e },
358 { NULL, 0x0f },
359 { "pstl1keep", B(2, 1, 0) },
360 { "pstl1strm", B(2, 1, 1) },
361 { "pstl2keep", B(2, 2, 0) },
362 { "pstl2strm", B(2, 2, 1) },
363 { "pstl3keep", B(2, 3, 0) },
364 { "pstl3strm", B(2, 3, 1) },
365 { NULL, 0x16 },
366 { NULL, 0x17 },
367 { NULL, 0x18 },
368 { NULL, 0x19 },
369 { NULL, 0x1a },
370 { NULL, 0x1b },
371 { NULL, 0x1c },
372 { NULL, 0x1d },
373 { NULL, 0x1e },
374 { NULL, 0x1f },
375 };
376 #undef B
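/* Worked example: "pstl2strm" is B (2, 2, 1)
   = (2 << 3) | ((2 - 1) << 1) | 1 = 0x13, matching its position above.  */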
377 \f
378 /* Utilities on value constraint. */
379
380 static inline int
381 value_in_range_p (int64_t value, int low, int high)
382 {
383 return (value >= low && value <= high) ? 1 : 0;
384 }
385
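/* Return non-zero if VALUE is a multiple of ALIGN; ALIGN is expected to be
   a power of two.  */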
386 static inline int
387 value_aligned_p (int64_t value, int align)
388 {
389 return ((value & (align - 1)) == 0) ? 1 : 0;
390 }
391
392 /* A signed value fits in a field. */
393 static inline int
394 value_fit_signed_field_p (int64_t value, unsigned width)
395 {
396 assert (width < 32);
397 if (width < sizeof (value) * 8)
398 {
399 int64_t lim = (int64_t)1 << (width - 1);
400 if (value >= -lim && value < lim)
401 return 1;
402 }
403 return 0;
404 }
405
406 /* An unsigned value fits in a field. */
407 static inline int
408 value_fit_unsigned_field_p (int64_t value, unsigned width)
409 {
410 assert (width < 32);
411 if (width < sizeof (value) * 8)
412 {
413 int64_t lim = (int64_t)1 << width;
414 if (value >= 0 && value < lim)
415 return 1;
416 }
417 return 0;
418 }
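/* Illustration: with WIDTH == 9, value_fit_signed_field_p accepts
   -256 .. 255 and value_fit_unsigned_field_p accepts 0 .. 511.  */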
419
420 /* Return 1 if OPERAND is SP or WSP. */
421 int
422 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
423 {
424 return ((aarch64_get_operand_class (operand->type)
425 == AARCH64_OPND_CLASS_INT_REG)
426 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
427 && operand->reg.regno == 31);
428 }
429
430 /* Return 1 if OPERAND is XZR or WZR.  */
431 int
432 aarch64_zero_register_p (const aarch64_opnd_info *operand)
433 {
434 return ((aarch64_get_operand_class (operand->type)
435 == AARCH64_OPND_CLASS_INT_REG)
436 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
437 && operand->reg.regno == 31);
438 }
439
440 /* Return true if the operand *OPERAND, which has the operand code
441    OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
442    qualified by the qualifier TARGET.  */
443
444 static inline int
445 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
446 aarch64_opnd_qualifier_t target)
447 {
448 switch (operand->qualifier)
449 {
450 case AARCH64_OPND_QLF_W:
451 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
452 return 1;
453 break;
454 case AARCH64_OPND_QLF_X:
455 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
456 return 1;
457 break;
458 case AARCH64_OPND_QLF_WSP:
459 if (target == AARCH64_OPND_QLF_W
460 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
461 return 1;
462 break;
463 case AARCH64_OPND_QLF_SP:
464 if (target == AARCH64_OPND_QLF_X
465 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
466 return 1;
467 break;
468 default:
469 break;
470 }
471
472 return 0;
473 }
474
475 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
476 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
477
478    Return NIL if more than one expected qualifier is found.  */
479
480 aarch64_opnd_qualifier_t
481 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
482 int idx,
483 const aarch64_opnd_qualifier_t known_qlf,
484 int known_idx)
485 {
486 int i, saved_i;
487
488 /* Special case.
489
490 When the known qualifier is NIL, we have to assume that there is only
491 one qualifier sequence in the *QSEQ_LIST and return the corresponding
492 qualifier directly. One scenario is that for instruction
493 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
494 which has only one possible valid qualifier sequence
495 NIL, S_D
496 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
497 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
498
499 Because the qualifier NIL has dual roles in the qualifier sequence:
500    it can mean no qualifier for the operand, or the qualifier sequence is
501 not in use (when all qualifiers in the sequence are NILs), we have to
502 handle this special case here. */
503 if (known_qlf == AARCH64_OPND_NIL)
504 {
505 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
506 return qseq_list[0][idx];
507 }
508
509 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
510 {
511 if (qseq_list[i][known_idx] == known_qlf)
512 {
513 if (saved_i != -1)
514 	  /* More than one sequence is found to have KNOWN_QLF at
515 KNOWN_IDX. */
516 return AARCH64_OPND_NIL;
517 saved_i = i;
518 }
519 }
520
521 return qseq_list[saved_i][idx];
522 }
523
524 enum operand_qualifier_kind
525 {
526 OQK_NIL,
527 OQK_OPD_VARIANT,
528 OQK_VALUE_IN_RANGE,
529 OQK_MISC,
530 };
531
532 /* Operand qualifier description. */
533 struct operand_qualifier_data
534 {
535 /* The usage of the three data fields depends on the qualifier kind. */
536 int data0;
537 int data1;
538 int data2;
539 /* Description. */
540 const char *desc;
541 /* Kind. */
542 enum operand_qualifier_kind kind;
543 };
544
545 /* Indexed by the operand qualifier enumerators. */
546 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
547 {
548 {0, 0, 0, "NIL", OQK_NIL},
549
550 /* Operand variant qualifiers.
551 First 3 fields:
552 element size, number of elements and common value for encoding. */
553
554 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
555 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
556 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
557 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
558
559 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
560 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
561 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
562 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
563 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
564
565 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
566 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
567 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
568 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
569 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
570 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
571 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
572 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
573 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
574
575 /* Qualifiers constraining the value range.
576 First 3 fields:
577 Lower bound, higher bound, unused. */
578
579 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
580 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
581 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
582 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
583 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
584 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
585
586   /* Qualifiers for miscellaneous purposes.
587      First 3 fields:
588      unused, unused and unused.  */
589 
590   {0, 0, 0, "lsl", OQK_MISC},
591   {0, 0, 0, "msl", OQK_MISC},
592 
593   {0, 0, 0, "retrieving", OQK_MISC},
594 };
595
596 static inline bfd_boolean
597 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
598 {
599 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
600 ? TRUE : FALSE;
601 }
602
603 static inline bfd_boolean
604 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
605 {
606 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
607 ? TRUE : FALSE;
608 }
609
610 const char*
611 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
612 {
613 return aarch64_opnd_qualifiers[qualifier].desc;
614 }
615
616 /* Given an operand qualifier, return the expected data element size
617 of a qualified operand. */
618 unsigned char
619 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
620 {
621 assert (operand_variant_qualifier_p (qualifier) == TRUE);
622 return aarch64_opnd_qualifiers[qualifier].data0;
623 }
624
625 unsigned char
626 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
627 {
628 assert (operand_variant_qualifier_p (qualifier) == TRUE);
629 return aarch64_opnd_qualifiers[qualifier].data1;
630 }
631
632 aarch64_insn
633 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
634 {
635 assert (operand_variant_qualifier_p (qualifier) == TRUE);
636 return aarch64_opnd_qualifiers[qualifier].data2;
637 }
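/* A minimal sketch, assuming the AARCH64_OPND_QLF_V_4S enumerator from
   include/opcode/aarch64.h: the "4s" arrangement has a 4-byte element size,
   4 elements and the standard encoding value 0x5, per the table above.  */
#if 0
static void
example_query_qualifier (void)
{
  assert (aarch64_get_qualifier_esize (AARCH64_OPND_QLF_V_4S) == 4);
  assert (aarch64_get_qualifier_nelem (AARCH64_OPND_QLF_V_4S) == 4);
  assert (aarch64_get_qualifier_standard_value (AARCH64_OPND_QLF_V_4S) == 0x5);
}
#endif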
638
639 static int
640 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
641 {
642 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
643 return aarch64_opnd_qualifiers[qualifier].data0;
644 }
645
646 static int
647 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
648 {
649 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
650 return aarch64_opnd_qualifiers[qualifier].data1;
651 }
652
653 #ifdef DEBUG_AARCH64
654 void
655 aarch64_verbose (const char *str, ...)
656 {
657 va_list ap;
658 va_start (ap, str);
659 printf ("#### ");
660 vprintf (str, ap);
661 printf ("\n");
662 va_end (ap);
663 }
664
665 static inline void
666 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
667 {
668 int i;
669 printf ("#### \t");
670 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
671 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
672 printf ("\n");
673 }
674
675 static void
676 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
677 const aarch64_opnd_qualifier_t *qualifier)
678 {
679 int i;
680 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
681
682 aarch64_verbose ("dump_match_qualifiers:");
683 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
684 curr[i] = opnd[i].qualifier;
685 dump_qualifier_sequence (curr);
686 aarch64_verbose ("against");
687 dump_qualifier_sequence (qualifier);
688 }
689 #endif /* DEBUG_AARCH64 */
690
691 /* TODO: improve this; we could have an extra field at run time to
692    store the number of operands rather than calculating it every time.  */
693
694 int
695 aarch64_num_of_operands (const aarch64_opcode *opcode)
696 {
697 int i = 0;
698 const enum aarch64_opnd *opnds = opcode->operands;
699 while (opnds[i++] != AARCH64_OPND_NIL)
700 ;
701 --i;
702 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
703 return i;
704 }
705
706 /* Find the best-matched qualifier sequence in *QUALIFIERS_LIST for INST.
707    If it succeeds, fill the found sequence in *RET and return 1; else return 0.
708 
709    N.B. on entry, it is very likely that only some operands in *INST
710    have had their qualifiers established.
711
712 If STOP_AT is not -1, the function will only try to match
713 the qualifier sequence for operands before and including the operand
714 of index STOP_AT; and on success *RET will only be filled with the first
715 (STOP_AT+1) qualifiers.
716
717    A couple of examples of the matching algorithm:
718
719 X,W,NIL should match
720 X,W,NIL
721
722 NIL,NIL should match
723 X ,NIL
724
725 Apart from serving the main encoding routine, this can also be called
726 during or after the operand decoding. */
727
728 int
729 aarch64_find_best_match (const aarch64_inst *inst,
730 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
731 int stop_at, aarch64_opnd_qualifier_t *ret)
732 {
733 int found = 0;
734 int i, num_opnds;
735 const aarch64_opnd_qualifier_t *qualifiers;
736
737 num_opnds = aarch64_num_of_operands (inst->opcode);
738 if (num_opnds == 0)
739 {
740 DEBUG_TRACE ("SUCCEED: no operand");
741 return 1;
742 }
743
744 if (stop_at < 0 || stop_at >= num_opnds)
745 stop_at = num_opnds - 1;
746
747 /* For each pattern. */
748 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
749 {
750 int j;
751 qualifiers = *qualifiers_list;
752
753 /* Start as positive. */
754 found = 1;
755
756 DEBUG_TRACE ("%d", i);
757 #ifdef DEBUG_AARCH64
758 if (debug_dump)
759 dump_match_qualifiers (inst->operands, qualifiers);
760 #endif
761
762       /* Most opcodes have far fewer patterns in the list.
763 	 The first NIL qualifier indicates the end of the list.  */
764 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
765 {
766 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
767 if (i)
768 found = 0;
769 break;
770 }
771
772 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
773 {
774 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
775 {
776 /* Either the operand does not have qualifier, or the qualifier
777 for the operand needs to be deduced from the qualifier
778 sequence.
779 In the latter case, any constraint checking related with
780 the obtained qualifier should be done later in
781 operand_general_constraint_met_p. */
782 continue;
783 }
784 else if (*qualifiers != inst->operands[j].qualifier)
785 {
786 /* Unless the target qualifier can also qualify the operand
787 (which has already had a non-nil qualifier), non-equal
788 qualifiers are generally un-matched. */
789 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
790 continue;
791 else
792 {
793 found = 0;
794 break;
795 }
796 }
797 else
798 continue; /* Equal qualifiers are certainly matched. */
799 }
800
801 /* Qualifiers established. */
802 if (found == 1)
803 break;
804 }
805
806 if (found == 1)
807 {
808 /* Fill the result in *RET. */
809 int j;
810 qualifiers = *qualifiers_list;
811
812 DEBUG_TRACE ("complete qualifiers using list %d", i);
813 #ifdef DEBUG_AARCH64
814 if (debug_dump)
815 dump_qualifier_sequence (qualifiers);
816 #endif
817
818 for (j = 0; j <= stop_at; ++j, ++qualifiers)
819 ret[j] = *qualifiers;
820 for (; j < AARCH64_MAX_OPND_NUM; ++j)
821 ret[j] = AARCH64_OPND_QLF_NIL;
822
823 DEBUG_TRACE ("SUCCESS");
824 return 1;
825 }
826
827 DEBUG_TRACE ("FAIL");
828 return 0;
829 }
830
831 /* Operand qualifier matching and resolving.
832
833 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
834 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
835
836    If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
837 succeeds. */
838
839 static int
840 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
841 {
842 int i;
843 aarch64_opnd_qualifier_seq_t qualifiers;
844
845 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
846 qualifiers))
847 {
848 DEBUG_TRACE ("matching FAIL");
849 return 0;
850 }
851
852 /* Update the qualifiers. */
853 if (update_p == TRUE)
854 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
855 {
856 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
857 break;
858 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
859 "update %s with %s for operand %d",
860 aarch64_get_qualifier_name (inst->operands[i].qualifier),
861 aarch64_get_qualifier_name (qualifiers[i]), i);
862 inst->operands[i].qualifier = qualifiers[i];
863 }
864
865 DEBUG_TRACE ("matching SUCCESS");
866 return 1;
867 }
868
869 /* Return TRUE if VALUE is a wide constant that can be moved into a general
870 register by MOVZ.
871
872    IS32 indicates whether VALUE is a 32-bit immediate or not.
873    If SHIFT_AMOUNT is not NULL, then on returning TRUE the logical left shift
874    amount will be returned in *SHIFT_AMOUNT.  */
875
876 bfd_boolean
877 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
878 {
879 int amount;
880
881 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
882
883 if (is32)
884 {
885 /* Allow all zeros or all ones in top 32-bits, so that
886 32-bit constant expressions like ~0x80000000 are
887 permitted. */
888 uint64_t ext = value;
889 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
890 /* Immediate out of range. */
891 return FALSE;
892 value &= (int64_t) 0xffffffff;
893 }
894
895 /* first, try movz then movn */
896 amount = -1;
897 if ((value & ((int64_t) 0xffff << 0)) == value)
898 amount = 0;
899 else if ((value & ((int64_t) 0xffff << 16)) == value)
900 amount = 16;
901 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
902 amount = 32;
903 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
904 amount = 48;
905
906 if (amount == -1)
907 {
908 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
909 return FALSE;
910 }
911
912 if (shift_amount != NULL)
913 *shift_amount = amount;
914
915 DEBUG_TRACE ("exit TRUE with amount %d", amount);
916
917 return TRUE;
918 }
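/* A minimal usage sketch: 0xffff0000 can be materialized by a single MOVZ
   with a logical left shift of 16.  */
#if 0
static void
example_wide_constant (void)
{
  unsigned int shift;
  if (aarch64_wide_constant_p (0xffff0000, /* is32 */ 1, &shift))
    printf ("single MOVZ, LSL #%u\n", shift);	/* Prints 16.  */
}
#endif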
919
920 /* Build the accepted values for immediate logical SIMD instructions.
921
922 The standard encodings of the immediate value are:
923 N imms immr SIMD size R S
924 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
925 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
926 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
927 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
928 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
929 0 11110s 00000r 2 UInt(r) UInt(s)
930 where all-ones value of S is reserved.
931
932 Let's call E the SIMD size.
933
934 The immediate value is: S+1 bits '1' rotated to the right by R.
935
936 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
937 (remember S != E - 1). */
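/* Worked example: 0x0000ffff0000ffff is 16 consecutive ones (S = 15) in a
   32-bit element rotated right by R = 0, so it is encoded as N = 0,
   immr = 000000 and imms = 001111.  */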
938
939 #define TOTAL_IMM_NB 5334
940
941 typedef struct
942 {
943 uint64_t imm;
944 aarch64_insn encoding;
945 } simd_imm_encoding;
946
947 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
948
949 static int
950 simd_imm_encoding_cmp(const void *i1, const void *i2)
951 {
952 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
953 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
954
955 if (imm1->imm < imm2->imm)
956 return -1;
957 if (imm1->imm > imm2->imm)
958 return +1;
959 return 0;
960 }
961
962 /* immediate bitfield standard encoding
963 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
964 1 ssssss rrrrrr 64 rrrrrr ssssss
965 0 0sssss 0rrrrr 32 rrrrr sssss
966 0 10ssss 00rrrr 16 rrrr ssss
967 0 110sss 000rrr 8 rrr sss
968 0 1110ss 0000rr 4 rr ss
969 0 11110s 00000r 2 r s */
970 static inline int
971 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
972 {
973 return (is64 << 12) | (r << 6) | s;
974 }
975
976 static void
977 build_immediate_table (void)
978 {
979 uint32_t log_e, e, s, r, s_mask;
980 uint64_t mask, imm;
981 int nb_imms;
982 int is64;
983
984 nb_imms = 0;
985 for (log_e = 1; log_e <= 6; log_e++)
986 {
987 /* Get element size. */
988 e = 1u << log_e;
989 if (log_e == 6)
990 {
991 is64 = 1;
992 mask = 0xffffffffffffffffull;
993 s_mask = 0;
994 }
995 else
996 {
997 is64 = 0;
998 mask = (1ull << e) - 1;
999 /* log_e s_mask
1000 1 ((1 << 4) - 1) << 2 = 111100
1001 2 ((1 << 3) - 1) << 3 = 111000
1002 3 ((1 << 2) - 1) << 4 = 110000
1003 4 ((1 << 1) - 1) << 5 = 100000
1004 5 ((1 << 0) - 1) << 6 = 000000 */
1005 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1006 }
1007 for (s = 0; s < e - 1; s++)
1008 for (r = 0; r < e; r++)
1009 {
1010 /* s+1 consecutive bits to 1 (s < 63) */
1011 imm = (1ull << (s + 1)) - 1;
1012 /* rotate right by r */
1013 if (r != 0)
1014 imm = (imm >> r) | ((imm << (e - r)) & mask);
1015 /* replicate the constant depending on SIMD size */
1016 switch (log_e)
1017 {
1018 case 1: imm = (imm << 2) | imm;
1019 case 2: imm = (imm << 4) | imm;
1020 case 3: imm = (imm << 8) | imm;
1021 case 4: imm = (imm << 16) | imm;
1022 case 5: imm = (imm << 32) | imm;
1023 case 6: break;
1024 default: abort ();
1025 }
1026 simd_immediates[nb_imms].imm = imm;
1027 simd_immediates[nb_imms].encoding =
1028 encode_immediate_bitfield(is64, s | s_mask, r);
1029 nb_imms++;
1030 }
1031 }
1032 assert (nb_imms == TOTAL_IMM_NB);
1033 qsort(simd_immediates, nb_imms,
1034 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1035 }
1036
1037 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1038 be accepted by logical (immediate) instructions
1039 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1040
1041 IS32 indicates whether or not VALUE is a 32-bit immediate.
1042 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1043 VALUE will be returned in *ENCODING. */
1044
1045 bfd_boolean
1046 aarch64_logical_immediate_p (uint64_t value, int is32, aarch64_insn *encoding)
1047 {
1048 simd_imm_encoding imm_enc;
1049 const simd_imm_encoding *imm_encoding;
1050 static bfd_boolean initialized = FALSE;
1051
1052 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1053 value, is32);
1054
1055 if (initialized == FALSE)
1056 {
1057 build_immediate_table ();
1058 initialized = TRUE;
1059 }
1060
1061 if (is32)
1062 {
1063 /* Allow all zeros or all ones in top 32-bits, so that
1064 constant expressions like ~1 are permitted. */
1065 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1066 return FALSE;
1067
1068 /* Replicate the 32 lower bits to the 32 upper bits. */
1069 value &= 0xffffffff;
1070 value |= value << 32;
1071 }
1072
1073 imm_enc.imm = value;
1074 imm_encoding = (const simd_imm_encoding *)
1075 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1076 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1077 if (imm_encoding == NULL)
1078 {
1079 DEBUG_TRACE ("exit with FALSE");
1080 return FALSE;
1081 }
1082 if (encoding != NULL)
1083 *encoding = imm_encoding->encoding;
1084 DEBUG_TRACE ("exit with TRUE");
1085 return TRUE;
1086 }
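/* A minimal usage sketch: test whether a bitmask is a valid 64-bit logical
   immediate and obtain its 13-bit N:immr:imms encoding.  */
#if 0
static void
example_logical_immediate (void)
{
  aarch64_insn enc;
  if (aarch64_logical_immediate_p (0x0000ffff0000ffffULL, /* is32 */ 0, &enc))
    printf ("N:immr:imms = 0x%x\n", (unsigned int) enc);	/* Prints 0xf.  */
}
#endif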
1087
1088 /* If 64-bit immediate IMM is in the format of
1089 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1090 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1091 of value "abcdefgh". Otherwise return -1. */
1092 int
1093 aarch64_shrink_expanded_imm8 (uint64_t imm)
1094 {
1095 int i, ret;
1096 uint32_t byte;
1097
1098 ret = 0;
1099 for (i = 0; i < 8; i++)
1100 {
1101 byte = (imm >> (8 * i)) & 0xff;
1102 if (byte == 0xff)
1103 ret |= 1 << i;
1104 else if (byte != 0x00)
1105 return -1;
1106 }
1107 return ret;
1108 }
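/* Worked example: 0xff00ff0000ff00ff has bytes a..h = 1,0,1,0,0,1,0,1, so
   the function returns 0b10100101 = 0xa5; a value containing any byte other
   than 0x00 or 0xff makes it return -1.  */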
1109
1110 /* Utility inline functions for operand_general_constraint_met_p. */
1111
1112 static inline void
1113 set_error (aarch64_operand_error *mismatch_detail,
1114 enum aarch64_operand_error_kind kind, int idx,
1115 const char* error)
1116 {
1117 if (mismatch_detail == NULL)
1118 return;
1119 mismatch_detail->kind = kind;
1120 mismatch_detail->index = idx;
1121 mismatch_detail->error = error;
1122 }
1123
1124 static inline void
1125 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1126 const char* error)
1127 {
1128 if (mismatch_detail == NULL)
1129 return;
1130 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1131 }
1132
1133 static inline void
1134 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1135 int idx, int lower_bound, int upper_bound,
1136 const char* error)
1137 {
1138 if (mismatch_detail == NULL)
1139 return;
1140 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1141 mismatch_detail->data[0] = lower_bound;
1142 mismatch_detail->data[1] = upper_bound;
1143 }
1144
1145 static inline void
1146 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1147 int idx, int lower_bound, int upper_bound)
1148 {
1149 if (mismatch_detail == NULL)
1150 return;
1151 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1152 _("immediate value"));
1153 }
1154
1155 static inline void
1156 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1157 int idx, int lower_bound, int upper_bound)
1158 {
1159 if (mismatch_detail == NULL)
1160 return;
1161 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1162 _("immediate offset"));
1163 }
1164
1165 static inline void
1166 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1167 int idx, int lower_bound, int upper_bound)
1168 {
1169 if (mismatch_detail == NULL)
1170 return;
1171 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1172 _("register number"));
1173 }
1174
1175 static inline void
1176 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1177 int idx, int lower_bound, int upper_bound)
1178 {
1179 if (mismatch_detail == NULL)
1180 return;
1181 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1182 _("register element index"));
1183 }
1184
1185 static inline void
1186 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1187 int idx, int lower_bound, int upper_bound)
1188 {
1189 if (mismatch_detail == NULL)
1190 return;
1191 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1192 _("shift amount"));
1193 }
1194
1195 static inline void
1196 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1197 int alignment)
1198 {
1199 if (mismatch_detail == NULL)
1200 return;
1201 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1202 mismatch_detail->data[0] = alignment;
1203 }
1204
1205 static inline void
1206 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1207 int expected_num)
1208 {
1209 if (mismatch_detail == NULL)
1210 return;
1211 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1212 mismatch_detail->data[0] = expected_num;
1213 }
1214
1215 static inline void
1216 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1217 const char* error)
1218 {
1219 if (mismatch_detail == NULL)
1220 return;
1221 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1222 }
1223
1224 /* General constraint checking based on operand code.
1225
1226 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1227 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1228
1229 This function has to be called after the qualifiers for all operands
1230 have been resolved.
1231
1232    A mismatch error message is returned in *MISMATCH_DETAIL upon request,
1233    i.e. when MISMATCH_DETAIL is non-NULL.  This avoids generating error
1234    messages during disassembly, where they are not wanted.  We avoid the
1235    dynamic construction of strings of error messages here (i.e. in
1236    libopcodes), as it is costly and complicated; instead, we
1237 use a combination of error code, static string and some integer data to
1238 represent an error. */
1239
1240 static int
1241 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1242 enum aarch64_opnd type,
1243 const aarch64_opcode *opcode,
1244 aarch64_operand_error *mismatch_detail)
1245 {
1246 unsigned num;
1247 unsigned char size;
1248 int64_t imm;
1249 const aarch64_opnd_info *opnd = opnds + idx;
1250 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1251
1252 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1253
1254 switch (aarch64_operands[type].op_class)
1255 {
1256 case AARCH64_OPND_CLASS_INT_REG:
1257 /* <Xt> may be optional in some IC and TLBI instructions. */
1258 if (type == AARCH64_OPND_Rt_SYS)
1259 {
1260 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1261 == AARCH64_OPND_CLASS_SYSTEM));
1262 if (opnds[1].present && !opnds[0].sysins_op->has_xt)
1263 {
1264 set_other_error (mismatch_detail, idx, _("extraneous register"));
1265 return 0;
1266 }
1267 if (!opnds[1].present && opnds[0].sysins_op->has_xt)
1268 {
1269 set_other_error (mismatch_detail, idx, _("missing register"));
1270 return 0;
1271 }
1272 }
1273 switch (qualifier)
1274 {
1275 case AARCH64_OPND_QLF_WSP:
1276 case AARCH64_OPND_QLF_SP:
1277 if (!aarch64_stack_pointer_p (opnd))
1278 {
1279 set_other_error (mismatch_detail, idx,
1280 _("stack pointer register expected"));
1281 return 0;
1282 }
1283 break;
1284 default:
1285 break;
1286 }
1287 break;
1288
1289 case AARCH64_OPND_CLASS_ADDRESS:
1290 /* Check writeback. */
1291 switch (opcode->iclass)
1292 {
1293 case ldst_pos:
1294 case ldst_unscaled:
1295 case ldstnapair_offs:
1296 case ldstpair_off:
1297 case ldst_unpriv:
1298 if (opnd->addr.writeback == 1)
1299 {
1300 set_syntax_error (mismatch_detail, idx,
1301 _("unexpected address writeback"));
1302 return 0;
1303 }
1304 break;
1305 case ldst_imm9:
1306 case ldstpair_indexed:
1307 case asisdlsep:
1308 case asisdlsop:
1309 if (opnd->addr.writeback == 0)
1310 {
1311 set_syntax_error (mismatch_detail, idx,
1312 _("address writeback expected"));
1313 return 0;
1314 }
1315 break;
1316 default:
1317 assert (opnd->addr.writeback == 0);
1318 break;
1319 }
1320 switch (type)
1321 {
1322 case AARCH64_OPND_ADDR_SIMM7:
1323 	  /* Scaled signed 7-bit immediate offset.  */
1324 /* Get the size of the data element that is accessed, which may be
1325 different from that of the source register size,
1326 e.g. in strb/ldrb. */
1327 size = aarch64_get_qualifier_esize (opnd->qualifier);
1328 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1329 {
1330 set_offset_out_of_range_error (mismatch_detail, idx,
1331 -64 * size, 63 * size);
1332 return 0;
1333 }
1334 if (!value_aligned_p (opnd->addr.offset.imm, size))
1335 {
1336 set_unaligned_error (mismatch_detail, idx, size);
1337 return 0;
1338 }
1339 break;
1340 case AARCH64_OPND_ADDR_SIMM9:
1341 	  /* Unscaled signed 9-bit immediate offset.  */
1342 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1343 {
1344 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1345 return 0;
1346 }
1347 break;
1348
1349 case AARCH64_OPND_ADDR_SIMM9_2:
1350 	  /* Unscaled signed 9-bit immediate offset, which has to be negative
1351 or unaligned. */
1352 size = aarch64_get_qualifier_esize (qualifier);
1353 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1354 && !value_aligned_p (opnd->addr.offset.imm, size))
1355 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1356 return 1;
1357 set_other_error (mismatch_detail, idx,
1358 _("negative or unaligned offset expected"));
1359 return 0;
1360
1361 case AARCH64_OPND_SIMD_ADDR_POST:
1362 /* AdvSIMD load/store multiple structures, post-index. */
1363 assert (idx == 1);
1364 if (opnd->addr.offset.is_reg)
1365 {
1366 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1367 return 1;
1368 else
1369 {
1370 set_other_error (mismatch_detail, idx,
1371 _("invalid register offset"));
1372 return 0;
1373 }
1374 }
1375 else
1376 {
1377 const aarch64_opnd_info *prev = &opnds[idx-1];
1378 unsigned num_bytes; /* total number of bytes transferred. */
1379 /* The opcode dependent area stores the number of elements in
1380 each structure to be loaded/stored. */
1381 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1382 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1383 		/* Special handling of loading single structure to all lanes.  */
1384 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1385 * aarch64_get_qualifier_esize (prev->qualifier);
1386 else
1387 num_bytes = prev->reglist.num_regs
1388 * aarch64_get_qualifier_esize (prev->qualifier)
1389 * aarch64_get_qualifier_nelem (prev->qualifier);
1390 if ((int) num_bytes != opnd->addr.offset.imm)
1391 {
1392 set_other_error (mismatch_detail, idx,
1393 _("invalid post-increment amount"));
1394 return 0;
1395 }
1396 }
1397 break;
1398
1399 case AARCH64_OPND_ADDR_REGOFF:
1400 /* Get the size of the data element that is accessed, which may be
1401 different from that of the source register size,
1402 e.g. in strb/ldrb. */
1403 size = aarch64_get_qualifier_esize (opnd->qualifier);
1404 /* It is either no shift or shift by the binary logarithm of SIZE. */
1405 if (opnd->shifter.amount != 0
1406 && opnd->shifter.amount != (int)get_logsz (size))
1407 {
1408 set_other_error (mismatch_detail, idx,
1409 _("invalid shift amount"));
1410 return 0;
1411 }
1412 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1413 operators. */
1414 switch (opnd->shifter.kind)
1415 {
1416 case AARCH64_MOD_UXTW:
1417 case AARCH64_MOD_LSL:
1418 case AARCH64_MOD_SXTW:
1419 case AARCH64_MOD_SXTX: break;
1420 default:
1421 set_other_error (mismatch_detail, idx,
1422 _("invalid extend/shift operator"));
1423 return 0;
1424 }
1425 break;
1426
1427 case AARCH64_OPND_ADDR_UIMM12:
1428 imm = opnd->addr.offset.imm;
1429 /* Get the size of the data element that is accessed, which may be
1430 different from that of the source register size,
1431 e.g. in strb/ldrb. */
1432 size = aarch64_get_qualifier_esize (qualifier);
1433 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1434 {
1435 set_offset_out_of_range_error (mismatch_detail, idx,
1436 0, 4095 * size);
1437 return 0;
1438 }
1439 if (!value_aligned_p (opnd->addr.offset.imm, size))
1440 {
1441 set_unaligned_error (mismatch_detail, idx, size);
1442 return 0;
1443 }
1444 break;
1445
1446 case AARCH64_OPND_ADDR_PCREL14:
1447 case AARCH64_OPND_ADDR_PCREL19:
1448 case AARCH64_OPND_ADDR_PCREL21:
1449 case AARCH64_OPND_ADDR_PCREL26:
1450 imm = opnd->imm.value;
1451 if (operand_need_shift_by_two (get_operand_from_code (type)))
1452 {
1453 	      /* The offset value in a PC-relative branch instruction is always
1454 4-byte aligned and is encoded without the lowest 2 bits. */
1455 if (!value_aligned_p (imm, 4))
1456 {
1457 set_unaligned_error (mismatch_detail, idx, 4);
1458 return 0;
1459 }
1460 /* Right shift by 2 so that we can carry out the following check
1461 canonically. */
1462 imm >>= 2;
1463 }
1464 size = get_operand_fields_width (get_operand_from_code (type));
1465 if (!value_fit_signed_field_p (imm, size))
1466 {
1467 set_other_error (mismatch_detail, idx,
1468 _("immediate out of range"));
1469 return 0;
1470 }
1471 break;
1472
1473 default:
1474 break;
1475 }
1476 break;
1477
1478 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1479 /* The opcode dependent area stores the number of elements in
1480 each structure to be loaded/stored. */
1481 num = get_opcode_dependent_value (opcode);
1482 switch (type)
1483 {
1484 case AARCH64_OPND_LVt:
1485 assert (num >= 1 && num <= 4);
1486 /* Unless LD1/ST1, the number of registers should be equal to that
1487 of the structure elements. */
1488 if (num != 1 && opnd->reglist.num_regs != num)
1489 {
1490 set_reg_list_error (mismatch_detail, idx, num);
1491 return 0;
1492 }
1493 break;
1494 case AARCH64_OPND_LVt_AL:
1495 case AARCH64_OPND_LEt:
1496 assert (num >= 1 && num <= 4);
1497 /* The number of registers should be equal to that of the structure
1498 elements. */
1499 if (opnd->reglist.num_regs != num)
1500 {
1501 set_reg_list_error (mismatch_detail, idx, num);
1502 return 0;
1503 }
1504 break;
1505 default:
1506 break;
1507 }
1508 break;
1509
1510 case AARCH64_OPND_CLASS_IMMEDIATE:
1511 /* Constraint check on immediate operand. */
1512 imm = opnd->imm.value;
1513 /* E.g. imm_0_31 constrains value to be 0..31. */
1514 if (qualifier_value_in_range_constraint_p (qualifier)
1515 && !value_in_range_p (imm, get_lower_bound (qualifier),
1516 get_upper_bound (qualifier)))
1517 {
1518 set_imm_out_of_range_error (mismatch_detail, idx,
1519 get_lower_bound (qualifier),
1520 get_upper_bound (qualifier));
1521 return 0;
1522 }
1523
1524 switch (type)
1525 {
1526 case AARCH64_OPND_AIMM:
1527 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1528 {
1529 set_other_error (mismatch_detail, idx,
1530 _("invalid shift operator"));
1531 return 0;
1532 }
1533 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1534 {
1535 set_other_error (mismatch_detail, idx,
1536 _("shift amount expected to be 0 or 12"));
1537 return 0;
1538 }
1539 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1540 {
1541 set_other_error (mismatch_detail, idx,
1542 _("immediate out of range"));
1543 return 0;
1544 }
1545 break;
1546
1547 case AARCH64_OPND_HALF:
1548 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1549 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1550 {
1551 set_other_error (mismatch_detail, idx,
1552 _("invalid shift operator"));
1553 return 0;
1554 }
1555 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1556 if (!value_aligned_p (opnd->shifter.amount, 16))
1557 {
1558 set_other_error (mismatch_detail, idx,
1559 _("shift amount should be a multiple of 16"));
1560 return 0;
1561 }
1562 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1563 {
1564 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1565 0, size * 8 - 16);
1566 return 0;
1567 }
1568 if (opnd->imm.value < 0)
1569 {
1570 set_other_error (mismatch_detail, idx,
1571 _("negative immediate value not allowed"));
1572 return 0;
1573 }
1574 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1575 {
1576 set_other_error (mismatch_detail, idx,
1577 _("immediate out of range"));
1578 return 0;
1579 }
1580 break;
1581
1582 case AARCH64_OPND_IMM_MOV:
1583 {
1584 int is32 = aarch64_get_qualifier_esize (opnds[0].qualifier) == 4;
1585 imm = opnd->imm.value;
1586 assert (idx == 1);
1587 switch (opcode->op)
1588 {
1589 case OP_MOV_IMM_WIDEN:
1590 imm = ~imm;
1591 /* Fall through... */
1592 case OP_MOV_IMM_WIDE:
1593 if (!aarch64_wide_constant_p (imm, is32, NULL))
1594 {
1595 set_other_error (mismatch_detail, idx,
1596 _("immediate out of range"));
1597 return 0;
1598 }
1599 break;
1600 case OP_MOV_IMM_LOG:
1601 if (!aarch64_logical_immediate_p (imm, is32, NULL))
1602 {
1603 set_other_error (mismatch_detail, idx,
1604 _("immediate out of range"));
1605 return 0;
1606 }
1607 break;
1608 default:
1609 assert (0);
1610 return 0;
1611 }
1612 }
1613 break;
1614
1615 case AARCH64_OPND_NZCV:
1616 case AARCH64_OPND_CCMP_IMM:
1617 case AARCH64_OPND_EXCEPTION:
1618 case AARCH64_OPND_UIMM4:
1619 case AARCH64_OPND_UIMM7:
1620 case AARCH64_OPND_UIMM3_OP1:
1621 case AARCH64_OPND_UIMM3_OP2:
1622 size = get_operand_fields_width (get_operand_from_code (type));
1623 assert (size < 32);
1624 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1625 {
1626 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1627 (1 << size) - 1);
1628 return 0;
1629 }
1630 break;
1631
1632 case AARCH64_OPND_WIDTH:
1633 assert (idx == 3 && opnds[idx-1].type == AARCH64_OPND_IMM
1634 && opnds[0].type == AARCH64_OPND_Rd);
1635 size = get_upper_bound (qualifier);
1636 if (opnd->imm.value + opnds[idx-1].imm.value > size)
1637 /* lsb+width <= reg.size */
1638 {
1639 set_imm_out_of_range_error (mismatch_detail, idx, 1,
1640 size - opnds[idx-1].imm.value);
1641 return 0;
1642 }
1643 break;
1644
1645 case AARCH64_OPND_LIMM:
1646 {
1647 int is32 = opnds[0].qualifier == AARCH64_OPND_QLF_W;
1648 uint64_t uimm = opnd->imm.value;
1649 if (opcode->op == OP_BIC)
1650 uimm = ~uimm;
1651 if (aarch64_logical_immediate_p (uimm, is32, NULL) == FALSE)
1652 {
1653 set_other_error (mismatch_detail, idx,
1654 _("immediate out of range"));
1655 return 0;
1656 }
1657 }
1658 break;
1659
1660 case AARCH64_OPND_IMM0:
1661 case AARCH64_OPND_FPIMM0:
1662 if (opnd->imm.value != 0)
1663 {
1664 set_other_error (mismatch_detail, idx,
1665 _("immediate zero expected"));
1666 return 0;
1667 }
1668 break;
1669
1670 case AARCH64_OPND_SHLL_IMM:
1671 assert (idx == 2);
1672 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
1673 if (opnd->imm.value != size)
1674 {
1675 set_other_error (mismatch_detail, idx,
1676 _("invalid shift amount"));
1677 return 0;
1678 }
1679 break;
1680
1681 case AARCH64_OPND_IMM_VLSL:
1682 size = aarch64_get_qualifier_esize (qualifier);
1683 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
1684 {
1685 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1686 size * 8 - 1);
1687 return 0;
1688 }
1689 break;
1690
1691 case AARCH64_OPND_IMM_VLSR:
1692 size = aarch64_get_qualifier_esize (qualifier);
1693 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
1694 {
1695 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
1696 return 0;
1697 }
1698 break;
1699
1700 case AARCH64_OPND_SIMD_IMM:
1701 case AARCH64_OPND_SIMD_IMM_SFT:
1702 /* Qualifier check. */
1703 switch (qualifier)
1704 {
1705 case AARCH64_OPND_QLF_LSL:
1706 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1707 {
1708 set_other_error (mismatch_detail, idx,
1709 _("invalid shift operator"));
1710 return 0;
1711 }
1712 break;
1713 case AARCH64_OPND_QLF_MSL:
1714 if (opnd->shifter.kind != AARCH64_MOD_MSL)
1715 {
1716 set_other_error (mismatch_detail, idx,
1717 _("invalid shift operator"));
1718 return 0;
1719 }
1720 break;
1721 case AARCH64_OPND_QLF_NIL:
1722 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1723 {
1724 set_other_error (mismatch_detail, idx,
1725 _("shift is not permitted"));
1726 return 0;
1727 }
1728 break;
1729 default:
1730 assert (0);
1731 return 0;
1732 }
1733 /* Is the immediate valid? */
1734 assert (idx == 1);
1735 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
1736 {
1737 /* uimm8 or simm8 */
1738 if (!value_in_range_p (opnd->imm.value, -128, 255))
1739 {
1740 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
1741 return 0;
1742 }
1743 }
1744 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
1745 {
1746 /* uimm64 is not
1747 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1748 ffffffffgggggggghhhhhhhh'. */
1749 set_other_error (mismatch_detail, idx,
1750 _("invalid value for immediate"));
1751 return 0;
1752 }
1753 /* Is the shift amount valid? */
1754 switch (opnd->shifter.kind)
1755 {
1756 case AARCH64_MOD_LSL:
1757 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1758 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
1759 {
1760 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
1761 (size - 1) * 8);
1762 return 0;
1763 }
1764 if (!value_aligned_p (opnd->shifter.amount, 8))
1765 {
1766 set_unaligned_error (mismatch_detail, idx, 8);
1767 return 0;
1768 }
1769 break;
1770 case AARCH64_MOD_MSL:
1771 	    /* Only 8 and 16 are valid shift amounts.  */
1772 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
1773 {
1774 set_other_error (mismatch_detail, idx,
1775 			       _("shift amount expected to be 8 or 16"));
1776 return 0;
1777 }
1778 break;
1779 default:
1780 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1781 {
1782 set_other_error (mismatch_detail, idx,
1783 _("invalid shift operator"));
1784 return 0;
1785 }
1786 break;
1787 }
1788 break;
1789
1790 case AARCH64_OPND_FPIMM:
1791 case AARCH64_OPND_SIMD_FPIMM:
1792 if (opnd->imm.is_fp == 0)
1793 {
1794 set_other_error (mismatch_detail, idx,
1795 _("floating-point immediate expected"));
1796 return 0;
1797 }
1798 /* The value is expected to be an 8-bit floating-point constant with
1799 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1800 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1801 instruction). */
1802 if (!value_in_range_p (opnd->imm.value, 0, 255))
1803 {
1804 set_other_error (mismatch_detail, idx,
1805 _("immediate out of range"));
1806 return 0;
1807 }
1808 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1809 {
1810 set_other_error (mismatch_detail, idx,
1811 _("invalid shift operator"));
1812 return 0;
1813 }
1814 break;
1815
1816 default:
1817 break;
1818 }
1819 break;
1820
1821 case AARCH64_OPND_CLASS_CP_REG:
1822 /* Cn or Cm: 4-bit opcode field named for historical reasons.
1823 valid range: C0 - C15. */
1824 if (opnd->reg.regno > 15)
1825 {
1826 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1827 return 0;
1828 }
1829 break;
1830
1831 case AARCH64_OPND_CLASS_SYSTEM:
1832 switch (type)
1833 {
1834 case AARCH64_OPND_PSTATEFIELD:
1835 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
1836 /* MSR SPSel, #uimm4
1837 Uses uimm4 as a control value to select the stack pointer: if
1838 bit 0 is set it selects the current exception level's stack
1839 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
1840 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1841 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
1842 {
1843 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1844 return 0;
1845 }
1846 break;
1847 default:
1848 break;
1849 }
1850 break;
1851
1852 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
1853 /* Get the upper bound for the element index. */
1854 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1855 /* Index out-of-range. */
1856 if (!value_in_range_p (opnd->reglane.index, 0, num))
1857 {
1858 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1859 return 0;
1860 }
1861 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1862 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
1863 number is encoded in "size:M:Rm":
1864 size <Vm>
1865 00 RESERVED
1866 01 0:Rm
1867 10 M:Rm
1868 11 RESERVED */
1869 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
1870 && !value_in_range_p (opnd->reglane.regno, 0, 15))
1871 {
1872 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1873 return 0;
1874 }
1875 break;
1876
1877 case AARCH64_OPND_CLASS_MODIFIED_REG:
1878 assert (idx == 1 || idx == 2);
1879 switch (type)
1880 {
1881 case AARCH64_OPND_Rm_EXT:
1882 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
1883 && opnd->shifter.kind != AARCH64_MOD_LSL)
1884 {
1885 set_other_error (mismatch_detail, idx,
1886 _("extend operator expected"));
1887 return 0;
1888 }
1889 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
1890 (i.e. SP), in which case it defaults to LSL. The LSL alias is
1891 only valid when "Rd" or "Rn" is '11111', and is preferred in that
1892 case. */
1893 if (!aarch64_stack_pointer_p (opnds + 0)
1894 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
1895 {
1896 if (!opnd->shifter.operator_present)
1897 {
1898 set_other_error (mismatch_detail, idx,
1899 _("missing extend operator"));
1900 return 0;
1901 }
1902 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
1903 {
1904 set_other_error (mismatch_detail, idx,
1905 _("'LSL' operator not allowed"));
1906 return 0;
1907 }
1908 }
1909 assert (opnd->shifter.operator_present /* Default to LSL. */
1910 || opnd->shifter.kind == AARCH64_MOD_LSL);
1911 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
1912 {
1913 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
1914 return 0;
1915 }
1916 /* In the 64-bit form, the final register operand is written as Wm
1917 for all but the (possibly omitted) UXTX/LSL and SXTX
1918 operators.
1919 N.B. GAS allows X register to be used with any operator as a
1920 programming convenience. */
1921 if (qualifier == AARCH64_OPND_QLF_X
1922 && opnd->shifter.kind != AARCH64_MOD_LSL
1923 && opnd->shifter.kind != AARCH64_MOD_UXTX
1924 && opnd->shifter.kind != AARCH64_MOD_SXTX)
1925 {
1926 set_other_error (mismatch_detail, idx, _("W register expected"));
1927 return 0;
1928 }
1929 break;
1930
1931 case AARCH64_OPND_Rm_SFT:
1932 /* ROR is not available to the shifted register operand in
1933 arithmetic instructions. */
1934 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
1935 {
1936 set_other_error (mismatch_detail, idx,
1937 _("shift operator expected"));
1938 return 0;
1939 }
1940 if (opnd->shifter.kind == AARCH64_MOD_ROR
1941 && opcode->iclass != log_shift)
1942 {
1943 set_other_error (mismatch_detail, idx,
1944 _("'ROR' operator not allowed"));
1945 return 0;
1946 }
1947 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
1948 if (!value_in_range_p (opnd->shifter.amount, 0, num))
1949 {
1950 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
1951 return 0;
1952 }
1953 break;
1954
1955 default:
1956 break;
1957 }
1958 break;
1959
1960 default:
1961 break;
1962 }
1963
1964 return 1;
1965 }
1966
1967 /* Main entrypoint for the operand constraint checking.
1968
1969 Return 1 if operands of *INST meet the constraints applied by the operand
1970 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
1971 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
1972 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
1973 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
1974 error kind when it is notified that an instruction does not pass the check).
1975
1976 Un-determined operand qualifiers may get established during the process. */
1977
1978 int
1979 aarch64_match_operands_constraint (aarch64_inst *inst,
1980 aarch64_operand_error *mismatch_detail)
1981 {
1982 int i;
1983
1984 DEBUG_TRACE ("enter");
1985
1986 /* Match operands' qualifiers.
1987 *INST has already had qualifiers established for some, if not all, of
1988 its operands; we need to find out whether these established
1989 qualifiers match one of the qualifier sequences in
1990 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
1991 the corresponding qualifier in that sequence.
1992 Only basic operand constraint checking is done here; the more thorough
1993 constraint checking will be carried out by operand_general_constraint_met_p,
1994 which has to be called after this in order to get all of the operands'
1995 qualifiers established. */
1996 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
1997 {
1998 DEBUG_TRACE ("FAIL on operand qualifier matching");
1999 if (mismatch_detail)
2000 {
2001 /* Return an error type to indicate that it is a qualifier
2002 matching failure; we don't care which operand it is, as there
2003 is enough information in the opcode table to reproduce it. */
2004 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2005 mismatch_detail->index = -1;
2006 mismatch_detail->error = NULL;
2007 }
2008 return 0;
2009 }
2010
2011 /* Match operands' constraint. */
2012 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2013 {
2014 enum aarch64_opnd type = inst->opcode->operands[i];
2015 if (type == AARCH64_OPND_NIL)
2016 break;
2017 if (inst->operands[i].skip)
2018 {
2019 DEBUG_TRACE ("skip the incomplete operand %d", i);
2020 continue;
2021 }
2022 if (operand_general_constraint_met_p (inst->operands, i, type,
2023 inst->opcode, mismatch_detail) == 0)
2024 {
2025 DEBUG_TRACE ("FAIL on operand %d", i);
2026 return 0;
2027 }
2028 }
2029
2030 DEBUG_TRACE ("PASS");
2031
2032 return 1;
2033 }
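
/* A minimal usage sketch (INST is assumed to be an already-populated
   instruction and report_error a hypothetical diagnostic routine of the
   caller; neither is defined in this file):

     aarch64_operand_error detail;
     if (!aarch64_match_operands_constraint (inst, &detail))
       report_error (&detail);

   DETAIL.kind, DETAIL.index and DETAIL.error then describe the failing
   constraint, as set up above.  */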
2034
2035 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2036 Also updates the TYPE of each INST->OPERANDS with the corresponding
2037 value of OPCODE->OPERANDS.
2038
2039 Note that some operand qualifiers may need to be manually cleared by
2040 the caller before it further calls aarch64_opcode_encode; doing
2041 so helps the qualifier matching facilities work
2042 properly. */
2043
2044 const aarch64_opcode*
2045 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2046 {
2047 int i;
2048 const aarch64_opcode *old = inst->opcode;
2049
2050 inst->opcode = opcode;
2051
2052 /* Update the operand types. */
2053 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2054 {
2055 inst->operands[i].type = opcode->operands[i];
2056 if (opcode->operands[i] == AARCH64_OPND_NIL)
2057 break;
2058 }
2059
2060 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2061
2062 return old;
2063 }
2064
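/* Return the index of OPERAND in the OPERANDS array (which is terminated
   by AARCH64_OPND_NIL), or -1 if OPERAND is not found; for example,
   aarch64_operand_index (opcode->operands, AARCH64_OPND_Rn) gives the
   position of the Rn operand in OPCODE's operand list, if it has one.  */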
2065 int
2066 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2067 {
2068 int i;
2069 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2070 if (operands[i] == operand)
2071 return i;
2072 else if (operands[i] == AARCH64_OPND_NIL)
2073 break;
2074 return -1;
2075 }
2076 \f
2077 /* [0][0] 32-bit integer regs with sp Wn
2078 [0][1] 64-bit integer regs with sp Xn sf=1
2079 [1][0] 32-bit integer regs with #0 Wn
2080 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2081 static const char *int_reg[2][2][32] = {
2082 #define R32 "w"
2083 #define R64 "x"
2084 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2085 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2086 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2087 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" },
2088 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2089 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2090 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2091 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } },
2092 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2093 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2094 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2095 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
2096 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2097 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2098 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2099 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
2100 #undef R64
2101 #undef R32
2102 };
2103
2104 /* Return the integer register name.
2105 If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg. */
2106
2107 static inline const char *
2108 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2109 {
2110 const int has_zr = sp_reg_p ? 0 : 1;
2111 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2112 return int_reg[has_zr][is_64][regno];
2113 }
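
/* For example, get_int_reg_name (31, AARCH64_OPND_QLF_X, 1) returns "sp",
   while with SP_REG_P == 0 the same register number yields "xzr".  */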
2114
2115 /* Like get_int_reg_name, but IS_64 is always 1. */
2116
2117 static inline const char *
2118 get_64bit_int_reg_name (int regno, int sp_reg_p)
2119 {
2120 const int has_zr = sp_reg_p ? 0 : 1;
2121 return int_reg[has_zr][1][regno];
2122 }
2123
2124 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2125
2126 typedef union
2127 {
2128 uint64_t i;
2129 double d;
2130 } double_conv_t;
2131
2132 typedef union
2133 {
2134 uint32_t i;
2135 float f;
2136 } single_conv_t;
2137
2138 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2139 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2140 (depending on the type of the instruction). IMM8 will be expanded to a
2141 single-precision floating-point value (IS_DP == 0) or a double-precision
2142 floating-point value (IS_DP == 1). The expanded value is returned. */
2143
2144 static uint64_t
2145 expand_fp_imm (int is_dp, uint32_t imm8)
2146 {
2147 uint64_t imm;
2148 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2149
2150 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2151 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2152 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2153 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2154 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
2155 if (is_dp)
2156 {
2157 imm = (imm8_7 << (63-32)) /* imm8<7> */
2158 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2159 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2160 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2161 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2162 imm <<= 32;
2163 }
2164 else
2165 {
2166 imm = (imm8_7 << 31) /* imm8<7> */
2167 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2168 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2169 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2170 }
2171
2172 return imm;
2173 }
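
/* As a worked example of the expansion above: IMM8 == 0x70 (sign 0,
   imm8<6:4> == 0b111, fraction 0b0000) expands to the single-precision
   pattern 0x3f800000, i.e. 1.0, while IMM8 == 0x00 expands to
   0x40000000, i.e. 2.0.  */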
2174
2175 /* Produce the string representation of the register list operand *OPND
2176 in the buffer pointed to by BUF of size SIZE. */
2177 static void
2178 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd)
2179 {
2180 const int num_regs = opnd->reglist.num_regs;
2181 const int first_reg = opnd->reglist.first_regno;
2182 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2183 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2184 char tb[8]; /* Temporary buffer. */
2185
2186 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2187 assert (num_regs >= 1 && num_regs <= 4);
2188
2189 /* Prepare the index if any. */
2190 if (opnd->reglist.has_index)
2191 snprintf (tb, 8, "[%d]", opnd->reglist.index);
2192 else
2193 tb[0] = '\0';
2194
2195 /* The hyphenated form is preferred for disassembly if there are
2196 more than two registers in the list, and the register numbers
2197 are monotonically increasing in increments of one. */
2198 if (num_regs > 2 && last_reg > first_reg)
2199 snprintf (buf, size, "{v%d.%s-v%d.%s}%s", first_reg, qlf_name,
2200 last_reg, qlf_name, tb);
2201 else
2202 {
2203 const int reg0 = first_reg;
2204 const int reg1 = (first_reg + 1) & 0x1f;
2205 const int reg2 = (first_reg + 2) & 0x1f;
2206 const int reg3 = (first_reg + 3) & 0x1f;
2207
2208 switch (num_regs)
2209 {
2210 case 1:
2211 snprintf (buf, size, "{v%d.%s}%s", reg0, qlf_name, tb);
2212 break;
2213 case 2:
2214 snprintf (buf, size, "{v%d.%s, v%d.%s}%s", reg0, qlf_name,
2215 reg1, qlf_name, tb);
2216 break;
2217 case 3:
2218 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0, qlf_name,
2219 reg1, qlf_name, reg2, qlf_name, tb);
2220 break;
2221 case 4:
2222 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
2223 reg0, qlf_name, reg1, qlf_name, reg2, qlf_name,
2224 reg3, qlf_name, tb);
2225 break;
2226 }
2227 }
2228 }
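
/* For example, a list of four registers starting at v4 with qualifier 4s
   is printed as "{v4.4s-v7.4s}", while a two-register list starting at
   v31 with qualifier 8b wraps around and is printed as
   "{v31.8b, v0.8b}".  */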
2229
2230 /* Produce the string representation of the register offset address operand
2231 *OPND in the buffer pointed to by BUF of size SIZE. */
2232 static void
2233 print_register_offset_address (char *buf, size_t size,
2234 const aarch64_opnd_info *opnd)
2235 {
2236 const size_t tblen = 16;
2237 char tb[tblen]; /* Temporary buffer. */
2238 bfd_boolean lsl_p = FALSE; /* Is LSL shift operator? */
2239 bfd_boolean wm_p = FALSE; /* Should Rm be Wm? */
2240 bfd_boolean print_extend_p = TRUE;
2241 bfd_boolean print_amount_p = TRUE;
2242 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2243
2244 switch (opnd->shifter.kind)
2245 {
2246 case AARCH64_MOD_UXTW: wm_p = TRUE; break;
2247 case AARCH64_MOD_LSL : lsl_p = TRUE; break;
2248 case AARCH64_MOD_SXTW: wm_p = TRUE; break;
2249 case AARCH64_MOD_SXTX: break;
2250 default: assert (0);
2251 }
2252
2253 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2254 || !opnd->shifter.amount_present))
2255 {
2256 /* Don't print the shift/extend amount when the amount is zero and
2257 it is not the special case of an 8-bit load/store instruction. */
2258 print_amount_p = FALSE;
2259 /* Likewise, no need to print the shift operator LSL in such a
2260 situation. */
2261 if (lsl_p)
2262 print_extend_p = FALSE;
2263 }
2264
2265 /* Prepare for the extend/shift. */
2266 if (print_extend_p)
2267 {
2268 if (print_amount_p)
2269 snprintf (tb, tblen, ",%s #%d", shift_name, opnd->shifter.amount);
2270 else
2271 snprintf (tb, tblen, ",%s", shift_name);
2272 }
2273 else
2274 tb[0] = '\0';
2275
2276 snprintf (buf, size, "[%s,%c%d%s]",
2277 get_64bit_int_reg_name (opnd->addr.base_regno, 1),
2278 wm_p ? 'w' : 'x', opnd->addr.offset.regno, tb);
2279 }
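
/* For example, a base register x3 with a w7 offset and a UXTW #2 extend
   is printed as "[x3,w7,uxtw #2]", while an LSL by zero with no explicit
   amount is omitted altogether, giving e.g. "[x0,x1]".  */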
2280
2281 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2282 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2283 PC, PCREL_P and ADDRESS are used to pass in and return information about
2284 the PC-relative address calculation, where the PC value is passed in
2285 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P is non-NULL)
2286 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2287 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2288
2289 The function serves both the disassembler and the assembler diagnostics
2290 issuer, which is the reason why it lives in this file. */
2291
2292 void
2293 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2294 const aarch64_opcode *opcode,
2295 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2296 bfd_vma *address)
2297 {
2298 int i;
2299 const char *name = NULL;
2300 const aarch64_opnd_info *opnd = opnds + idx;
2301 enum aarch64_modifier_kind kind;
2302 uint64_t addr;
2303
2304 buf[0] = '\0';
2305 if (pcrel_p)
2306 *pcrel_p = 0;
2307
2308 switch (opnd->type)
2309 {
2310 case AARCH64_OPND_Rd:
2311 case AARCH64_OPND_Rn:
2312 case AARCH64_OPND_Rm:
2313 case AARCH64_OPND_Rt:
2314 case AARCH64_OPND_Rt2:
2315 case AARCH64_OPND_Rs:
2316 case AARCH64_OPND_Ra:
2317 case AARCH64_OPND_Rt_SYS:
2318 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2319 the <ic_op>, therefore we use opnd->present to override the
2320 generic optional-ness information. */
2321 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2322 break;
2323 /* Omit the operand, e.g. RET. */
2324 if (optional_operand_p (opcode, idx)
2325 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2326 break;
2327 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2328 || opnd->qualifier == AARCH64_OPND_QLF_X);
2329 snprintf (buf, size, "%s",
2330 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2331 break;
2332
2333 case AARCH64_OPND_Rd_SP:
2334 case AARCH64_OPND_Rn_SP:
2335 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2336 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2337 || opnd->qualifier == AARCH64_OPND_QLF_X
2338 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2339 snprintf (buf, size, "%s",
2340 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2341 break;
2342
2343 case AARCH64_OPND_Rm_EXT:
2344 kind = opnd->shifter.kind;
2345 assert (idx == 1 || idx == 2);
2346 if ((aarch64_stack_pointer_p (opnds)
2347 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2348 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2349 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2350 && kind == AARCH64_MOD_UXTW)
2351 || (opnd->qualifier == AARCH64_OPND_QLF_X
2352 && kind == AARCH64_MOD_UXTX)))
2353 {
2354 /* 'LSL' is the preferred form in this case. */
2355 kind = AARCH64_MOD_LSL;
2356 if (opnd->shifter.amount == 0)
2357 {
2358 /* Shifter omitted. */
2359 snprintf (buf, size, "%s",
2360 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2361 break;
2362 }
2363 }
2364 if (opnd->shifter.amount)
2365 snprintf (buf, size, "%s, %s #%d",
2366 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2367 aarch64_operand_modifiers[kind].name,
2368 opnd->shifter.amount);
2369 else
2370 snprintf (buf, size, "%s, %s",
2371 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2372 aarch64_operand_modifiers[kind].name);
2373 break;
2374
2375 case AARCH64_OPND_Rm_SFT:
2376 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2377 || opnd->qualifier == AARCH64_OPND_QLF_X);
2378 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
2379 snprintf (buf, size, "%s",
2380 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2381 else
2382 snprintf (buf, size, "%s, %s #%d",
2383 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2384 aarch64_operand_modifiers[opnd->shifter.kind].name,
2385 opnd->shifter.amount);
2386 break;
2387
2388 case AARCH64_OPND_Fd:
2389 case AARCH64_OPND_Fn:
2390 case AARCH64_OPND_Fm:
2391 case AARCH64_OPND_Fa:
2392 case AARCH64_OPND_Ft:
2393 case AARCH64_OPND_Ft2:
2394 case AARCH64_OPND_Sd:
2395 case AARCH64_OPND_Sn:
2396 case AARCH64_OPND_Sm:
2397 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
2398 opnd->reg.regno);
2399 break;
2400
2401 case AARCH64_OPND_Vd:
2402 case AARCH64_OPND_Vn:
2403 case AARCH64_OPND_Vm:
2404 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
2405 aarch64_get_qualifier_name (opnd->qualifier));
2406 break;
2407
2408 case AARCH64_OPND_Ed:
2409 case AARCH64_OPND_En:
2410 case AARCH64_OPND_Em:
2411 snprintf (buf, size, "v%d.%s[%d]", opnd->reglane.regno,
2412 aarch64_get_qualifier_name (opnd->qualifier),
2413 opnd->reglane.index);
2414 break;
2415
2416 case AARCH64_OPND_VdD1:
2417 case AARCH64_OPND_VnD1:
2418 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
2419 break;
2420
2421 case AARCH64_OPND_LVn:
2422 case AARCH64_OPND_LVt:
2423 case AARCH64_OPND_LVt_AL:
2424 case AARCH64_OPND_LEt:
2425 print_register_list (buf, size, opnd);
2426 break;
2427
2428 case AARCH64_OPND_Cn:
2429 case AARCH64_OPND_Cm:
2430 snprintf (buf, size, "C%d", opnd->reg.regno);
2431 break;
2432
2433 case AARCH64_OPND_IDX:
2434 case AARCH64_OPND_IMM:
2435 case AARCH64_OPND_WIDTH:
2436 case AARCH64_OPND_UIMM3_OP1:
2437 case AARCH64_OPND_UIMM3_OP2:
2438 case AARCH64_OPND_BIT_NUM:
2439 case AARCH64_OPND_IMM_VLSL:
2440 case AARCH64_OPND_IMM_VLSR:
2441 case AARCH64_OPND_SHLL_IMM:
2442 case AARCH64_OPND_IMM0:
2443 case AARCH64_OPND_IMMR:
2444 case AARCH64_OPND_IMMS:
2445 case AARCH64_OPND_FBITS:
2446 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2447 break;
2448
2449 case AARCH64_OPND_IMM_MOV:
2450 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2451 {
2452 case 4: /* e.g. MOV Wd, #<imm32>. */
2453 {
2454 int imm32 = opnd->imm.value;
2455 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
2456 }
2457 break;
2458 case 8: /* e.g. MOV Xd, #<imm64>. */
2459 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
2460 opnd->imm.value, opnd->imm.value);
2461 break;
2462 default: assert (0);
2463 }
2464 break;
2465
2466 case AARCH64_OPND_FPIMM0:
2467 snprintf (buf, size, "#0.0");
2468 break;
2469
2470 case AARCH64_OPND_LIMM:
2471 case AARCH64_OPND_AIMM:
2472 case AARCH64_OPND_HALF:
2473 if (opnd->shifter.amount)
2474 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
2475 opnd->shifter.amount);
2476 else
2477 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2478 break;
2479
2480 case AARCH64_OPND_SIMD_IMM:
2481 case AARCH64_OPND_SIMD_IMM_SFT:
2482 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
2483 || opnd->shifter.kind == AARCH64_MOD_NONE)
2484 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2485 else
2486 snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
2487 aarch64_operand_modifiers[opnd->shifter.kind].name,
2488 opnd->shifter.amount);
2489 break;
2490
2491 case AARCH64_OPND_FPIMM:
2492 case AARCH64_OPND_SIMD_FPIMM:
2493 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2494 {
2495 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
2496 {
2497 single_conv_t c;
2498 c.i = expand_fp_imm (0, opnd->imm.value);
2499 snprintf (buf, size, "#%.18e", c.f);
2500 }
2501 break;
2502 case 8: /* e.g. FMOV <Dd>, #<imm>. */
2503 {
2504 double_conv_t c;
2505 c.i = expand_fp_imm (1, opnd->imm.value);
2506 snprintf (buf, size, "#%.18e", c.d);
2507 }
2508 break;
2509 default: assert (0);
2510 }
2511 break;
2512
2513 case AARCH64_OPND_CCMP_IMM:
2514 case AARCH64_OPND_NZCV:
2515 case AARCH64_OPND_EXCEPTION:
2516 case AARCH64_OPND_UIMM4:
2517 case AARCH64_OPND_UIMM7:
2518 if (optional_operand_p (opcode, idx) == TRUE
2519 && (opnd->imm.value ==
2520 (int64_t) get_optional_operand_default_value (opcode)))
2521 /* Omit the operand, e.g. DCPS1. */
2522 break;
2523 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
2524 break;
2525
2526 case AARCH64_OPND_COND:
2527 snprintf (buf, size, "%s", opnd->cond->names[0]);
2528 break;
2529
2530 case AARCH64_OPND_ADDR_ADRP:
2531 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
2532 + opnd->imm.value;
2533 if (pcrel_p)
2534 *pcrel_p = 1;
2535 if (address)
2536 *address = addr;
2537 /* This is not necessary during disassembly, as print_address_func
2538 in the disassemble_info will take care of the printing. But some
2539 other callers may still be interested in getting the string in *BUF,
2540 so we do the snprintf here regardless. */
2541 snprintf (buf, size, "#0x%" PRIx64, addr);
2542 break;
2543
2544 case AARCH64_OPND_ADDR_PCREL14:
2545 case AARCH64_OPND_ADDR_PCREL19:
2546 case AARCH64_OPND_ADDR_PCREL21:
2547 case AARCH64_OPND_ADDR_PCREL26:
2548 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
2549 if (pcrel_p)
2550 *pcrel_p = 1;
2551 if (address)
2552 *address = addr;
2553 /* This is not necessary during disassembly, as print_address_func
2554 in the disassemble_info will take care of the printing. But some
2555 other callers may still be interested in getting the string in *BUF,
2556 so we do the snprintf here regardless. */
2557 snprintf (buf, size, "#0x%" PRIx64, addr);
2558 break;
2559
2560 case AARCH64_OPND_ADDR_SIMPLE:
2561 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
2562 case AARCH64_OPND_SIMD_ADDR_POST:
2563 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2564 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
2565 {
2566 if (opnd->addr.offset.is_reg)
2567 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
2568 else
2569 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
2570 }
2571 else
2572 snprintf (buf, size, "[%s]", name);
2573 break;
2574
2575 case AARCH64_OPND_ADDR_REGOFF:
2576 print_register_offset_address (buf, size, opnd);
2577 break;
2578
2579 case AARCH64_OPND_ADDR_SIMM7:
2580 case AARCH64_OPND_ADDR_SIMM9:
2581 case AARCH64_OPND_ADDR_SIMM9_2:
2582 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2583 if (opnd->addr.writeback)
2584 {
2585 if (opnd->addr.preind)
2586 snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm);
2587 else
2588 snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm);
2589 }
2590 else
2591 {
2592 if (opnd->addr.offset.imm)
2593 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2594 else
2595 snprintf (buf, size, "[%s]", name);
2596 }
2597 break;
2598
2599 case AARCH64_OPND_ADDR_UIMM12:
2600 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2601 if (opnd->addr.offset.imm)
2602 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2603 else
2604 snprintf (buf, size, "[%s]", name);
2605 break;
2606
2607 case AARCH64_OPND_SYSREG:
2608 for (i = 0; aarch64_sys_regs[i].name; ++i)
2609 if (aarch64_sys_regs[i].value == opnd->sysreg)
2610 break;
2611 if (aarch64_sys_regs[i].name)
2612 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
2613 else
2614 {
2615 /* Implementation defined system register. */
2616 unsigned int value = opnd->sysreg;
2617 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
2618 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
2619 value & 0x7);
2620 }
2621 break;
2622
2623 case AARCH64_OPND_PSTATEFIELD:
2624 for (i = 0; aarch64_pstatefields[i].name; ++i)
2625 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2626 break;
2627 assert (aarch64_pstatefields[i].name);
2628 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
2629 break;
2630
2631 case AARCH64_OPND_SYSREG_AT:
2632 case AARCH64_OPND_SYSREG_DC:
2633 case AARCH64_OPND_SYSREG_IC:
2634 case AARCH64_OPND_SYSREG_TLBI:
2635 snprintf (buf, size, "%s", opnd->sysins_op->template);
2636 break;
2637
2638 case AARCH64_OPND_BARRIER:
2639 snprintf (buf, size, "%s", opnd->barrier->name);
2640 break;
2641
2642 case AARCH64_OPND_BARRIER_ISB:
2643 /* Operand can be omitted, e.g. in ISB. */
2644 if (! optional_operand_p (opcode, idx)
2645 || (opnd->barrier->value
2646 != get_optional_operand_default_value (opcode)))
2647 snprintf (buf, size, "#0x%x", opnd->barrier->value);
2648 break;
2649
2650 case AARCH64_OPND_PRFOP:
2651 if (opnd->prfop->name != NULL)
2652 snprintf (buf, size, "%s", opnd->prfop->name);
2653 else
2654 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
2655 break;
2656
2657 default:
2658 assert (0);
2659 }
2660 }
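
/* A minimal usage sketch (INST and PC are assumed to come from the
   caller, e.g. a disassembler loop; neither is defined here):

     char str[128];
     int pcrel;
     bfd_vma target;
     aarch64_print_operand (str, sizeof (str), pc, inst->opcode,
                            inst->operands, 0, &pcrel, &target);

   STR then holds the textual form of operand 0; if PCREL is non-zero,
   TARGET holds the computed PC-relative target address.  */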
2661 \f
2662 #define CPENC(op0,op1,crn,crm,op2) \
2663 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
2664 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
2665 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
2666 /* for 3.9.10 System Instructions */
2667 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
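
/* For example, "nzcv" below is CPEN_(3,C2,0), i.e. CPENC (3, 3, 4, C2, 0),
   which packs op0=3, op1=3, CRn=4, CRm=2, op2=0 into
   (3 << 14) | (3 << 11) | (4 << 7) | (2 << 3) | 0 == 0xda10; this is the
   same field layout decoded by the s<op0>_<op1>_c<CRn>_c<CRm>_<op2>
   fallback used above when printing an unrecognised system register.  */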
2668
2669 #define C0 0
2670 #define C1 1
2671 #define C2 2
2672 #define C3 3
2673 #define C4 4
2674 #define C5 5
2675 #define C6 6
2676 #define C7 7
2677 #define C8 8
2678 #define C9 9
2679 #define C10 10
2680 #define C11 11
2681 #define C12 12
2682 #define C13 13
2683 #define C14 14
2684 #define C15 15
2685
2686 /* TODO: there are two more issues that need to be resolved:
2687 1. handle read-only and write-only system registers
2688 2. handle cpu-implementation-defined system registers. */
2689 const struct aarch64_name_value_pair aarch64_sys_regs [] =
2690 {
2691 { "spsr_el1", CPEN_(0,C0,0) }, /* = spsr_svc */
2692 { "elr_el1", CPEN_(0,C0,1) },
2693 { "sp_el0", CPEN_(0,C1,0) },
2694 { "spsel", CPEN_(0,C2,0) },
2695 { "daif", CPEN_(3,C2,1) },
2696 { "currentel", CPEN_(0,C2,2) }, /* RO */
2697 { "nzcv", CPEN_(3,C2,0) },
2698 { "fpcr", CPEN_(3,C4,0) },
2699 { "fpsr", CPEN_(3,C4,1) },
2700 { "dspsr_el0", CPEN_(3,C5,0) },
2701 { "dlr_el0", CPEN_(3,C5,1) },
2702 { "spsr_el2", CPEN_(4,C0,0) }, /* = spsr_hyp */
2703 { "elr_el2", CPEN_(4,C0,1) },
2704 { "sp_el1", CPEN_(4,C1,0) },
2705 { "spsr_irq", CPEN_(4,C3,0) },
2706 { "spsr_abt", CPEN_(4,C3,1) },
2707 { "spsr_und", CPEN_(4,C3,2) },
2708 { "spsr_fiq", CPEN_(4,C3,3) },
2709 { "spsr_el3", CPEN_(6,C0,0) },
2710 { "elr_el3", CPEN_(6,C0,1) },
2711 { "sp_el2", CPEN_(6,C1,0) },
2712 { "spsr_svc", CPEN_(0,C0,0) }, /* = spsr_el1 */
2713 { "spsr_hyp", CPEN_(4,C0,0) }, /* = spsr_el2 */
2714 { "midr_el1", CPENC(3,0,C0,C0,0) }, /* RO */
2715 { "ctr_el0", CPENC(3,3,C0,C0,1) }, /* RO */
2716 { "mpidr_el1", CPENC(3,0,C0,C0,5) }, /* RO */
2717 { "revidr_el1", CPENC(3,0,C0,C0,6) }, /* RO */
2718 { "aidr_el1", CPENC(3,1,C0,C0,7) }, /* RO */
2719 { "dczid_el0", CPENC(3,3,C0,C0,7) }, /* RO */
2720 { "id_dfr0_el1", CPENC(3,0,C0,C1,2) }, /* RO */
2721 { "id_pfr0_el1", CPENC(3,0,C0,C1,0) }, /* RO */
2722 { "id_pfr1_el1", CPENC(3,0,C0,C1,1) }, /* RO */
2723 { "id_afr0_el1", CPENC(3,0,C0,C1,3) }, /* RO */
2724 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4) }, /* RO */
2725 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5) }, /* RO */
2726 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6) }, /* RO */
2727 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7) }, /* RO */
2728 { "id_isar0_el1", CPENC(3,0,C0,C2,0) }, /* RO */
2729 { "id_isar1_el1", CPENC(3,0,C0,C2,1) }, /* RO */
2730 { "id_isar2_el1", CPENC(3,0,C0,C2,2) }, /* RO */
2731 { "id_isar3_el1", CPENC(3,0,C0,C2,3) }, /* RO */
2732 { "id_isar4_el1", CPENC(3,0,C0,C2,4) }, /* RO */
2733 { "id_isar5_el1", CPENC(3,0,C0,C2,5) }, /* RO */
2734 { "mvfr0_el1", CPENC(3,0,C0,C3,0) }, /* RO */
2735 { "mvfr1_el1", CPENC(3,0,C0,C3,1) }, /* RO */
2736 { "mvfr2_el1", CPENC(3,0,C0,C3,2) }, /* RO */
2737 { "ccsidr_el1", CPENC(3,1,C0,C0,0) }, /* RO */
2738 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0) }, /* RO */
2739 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1) }, /* RO */
2740 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0) }, /* RO */
2741 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1) }, /* RO */
2742 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0) }, /* RO */
2743 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1) }, /* RO */
2744 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0) }, /* RO */
2745 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1) }, /* RO */
2746 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4) }, /* RO */
2747 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5) }, /* RO */
2748 { "clidr_el1", CPENC(3,1,C0,C0,1) }, /* RO */
2749 { "csselr_el1", CPENC(3,2,C0,C0,0) }, /* RO */
2750 { "vpidr_el2", CPENC(3,4,C0,C0,0) },
2751 { "vmpidr_el2", CPENC(3,4,C0,C0,5) },
2752 { "sctlr_el1", CPENC(3,0,C1,C0,0) },
2753 { "sctlr_el2", CPENC(3,4,C1,C0,0) },
2754 { "sctlr_el3", CPENC(3,6,C1,C0,0) },
2755 { "actlr_el1", CPENC(3,0,C1,C0,1) },
2756 { "actlr_el2", CPENC(3,4,C1,C0,1) },
2757 { "actlr_el3", CPENC(3,6,C1,C0,1) },
2758 { "cpacr_el1", CPENC(3,0,C1,C0,2) },
2759 { "cptr_el2", CPENC(3,4,C1,C1,2) },
2760 { "cptr_el3", CPENC(3,6,C1,C1,2) },
2761 { "scr_el3", CPENC(3,6,C1,C1,0) },
2762 { "hcr_el2", CPENC(3,4,C1,C1,0) },
2763 { "mdcr_el2", CPENC(3,4,C1,C1,1) },
2764 { "mdcr_el3", CPENC(3,6,C1,C3,1) },
2765 { "hstr_el2", CPENC(3,4,C1,C1,3) },
2766 { "hacr_el2", CPENC(3,4,C1,C1,7) },
2767 { "ttbr0_el1", CPENC(3,0,C2,C0,0) },
2768 { "ttbr1_el1", CPENC(3,0,C2,C0,1) },
2769 { "ttbr0_el2", CPENC(3,4,C2,C0,0) },
2770 { "ttbr0_el3", CPENC(3,6,C2,C0,0) },
2771 { "vttbr_el2", CPENC(3,4,C2,C1,0) },
2772 { "tcr_el1", CPENC(3,0,C2,C0,2) },
2773 { "tcr_el2", CPENC(3,4,C2,C0,2) },
2774 { "tcr_el3", CPENC(3,6,C2,C0,2) },
2775 { "vtcr_el2", CPENC(3,4,C2,C1,2) },
2776 { "afsr0_el1", CPENC(3,0,C5,C1,0) },
2777 { "afsr1_el1", CPENC(3,0,C5,C1,1) },
2778 { "afsr0_el2", CPENC(3,4,C5,C1,0) },
2779 { "afsr1_el2", CPENC(3,4,C5,C1,1) },
2780 { "afsr0_el3", CPENC(3,6,C5,C1,0) },
2781 { "afsr1_el3", CPENC(3,6,C5,C1,1) },
2782 { "esr_el1", CPENC(3,0,C5,C2,0) },
2783 { "esr_el2", CPENC(3,4,C5,C2,0) },
2784 { "esr_el3", CPENC(3,6,C5,C2,0) },
2785 { "fpexc32_el2", CPENC(3,4,C5,C3,0) },
2786 { "far_el1", CPENC(3,0,C6,C0,0) },
2787 { "far_el2", CPENC(3,4,C6,C0,0) },
2788 { "far_el3", CPENC(3,6,C6,C0,0) },
2789 { "hpfar_el2", CPENC(3,4,C6,C0,4) },
2790 { "par_el1", CPENC(3,0,C7,C4,0) },
2791 { "mair_el1", CPENC(3,0,C10,C2,0) },
2792 { "mair_el2", CPENC(3,4,C10,C2,0) },
2793 { "mair_el3", CPENC(3,6,C10,C2,0) },
2794 { "amair_el1", CPENC(3,0,C10,C3,0) },
2795 { "amair_el2", CPENC(3,4,C10,C3,0) },
2796 { "amair_el3", CPENC(3,6,C10,C3,0) },
2797 { "vbar_el1", CPENC(3,0,C12,C0,0) },
2798 { "vbar_el2", CPENC(3,4,C12,C0,0) },
2799 { "vbar_el3", CPENC(3,6,C12,C0,0) },
2800 { "rvbar_el1", CPENC(3,0,C12,C0,1) }, /* RO */
2801 { "rvbar_el2", CPENC(3,4,C12,C0,1) }, /* RO */
2802 { "rvbar_el3", CPENC(3,6,C12,C0,1) }, /* RO */
2803 { "rmr_el1", CPENC(3,0,C12,C0,2) },
2804 { "rmr_el2", CPENC(3,4,C12,C0,2) },
2805 { "rmr_el3", CPENC(3,6,C12,C0,2) },
2806 { "isr_el1", CPENC(3,0,C12,C1,0) }, /* RO */
2807 { "contextidr_el1", CPENC(3,0,C13,C0,1) },
2808 { "tpidr_el0", CPENC(3,3,C13,C0,2) },
2809 { "tpidrro_el0", CPENC(3,3,C13,C0,3) }, /* RO */
2810 { "tpidr_el1", CPENC(3,0,C13,C0,4) },
2811 { "tpidr_el2", CPENC(3,4,C13,C0,2) },
2812 { "tpidr_el3", CPENC(3,6,C13,C0,2) },
2813 { "teecr32_el1", CPENC(2,2,C0, C0,0) }, /* See section 3.9.7.1 */
2814 { "cntfrq_el0", CPENC(3,3,C14,C0,0) }, /* RO */
2815 { "cntpct_el0", CPENC(3,3,C14,C0,1) }, /* RO */
2816 { "cntvct_el0", CPENC(3,3,C14,C0,2) }, /* RO */
2817 { "cntvoff_el2", CPENC(3,4,C14,C0,3) },
2818 { "cntkctl_el1", CPENC(3,0,C14,C1,0) },
2819 { "cnthctl_el2", CPENC(3,4,C14,C1,0) },
2820 { "cntp_tval_el0", CPENC(3,3,C14,C2,0) },
2821 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1) },
2822 { "cntp_cval_el0", CPENC(3,3,C14,C2,2) },
2823 { "cntv_tval_el0", CPENC(3,3,C14,C3,0) },
2824 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1) },
2825 { "cntv_cval_el0", CPENC(3,3,C14,C3,2) },
2826 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0) },
2827 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1) },
2828 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2) },
2829 { "cntps_tval_el1", CPENC(3,7,C14,C2,0) },
2830 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1) },
2831 { "cntps_cval_el1", CPENC(3,7,C14,C2,2) },
2832 { "dacr32_el2", CPENC(3,4,C3,C0,0) },
2833 { "ifsr32_el2", CPENC(3,4,C5,C0,1) },
2834 { "teehbr32_el1", CPENC(2,2,C1,C0,0) },
2835 { "sder32_el3", CPENC(3,6,C1,C1,1) },
2836 { "mdscr_el1", CPENC(2,0,C0, C2, 2) },
2837 { "mdccsr_el0", CPENC(2,3,C0, C1, 0) }, /* r */
2838 { "mdccint_el1", CPENC(2,0,C0, C2, 0) },
2839 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0) },
2840 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0) }, /* r */
2841 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0) }, /* w */
2842 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2) }, /* r */
2843 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2) }, /* w */
2844 { "oseccr_el1", CPENC(2,0,C0, C6, 2) },
2845 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0) },
2846 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4) },
2847 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4) },
2848 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4) },
2849 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4) },
2850 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4) },
2851 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4) },
2852 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4) },
2853 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4) },
2854 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4) },
2855 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4) },
2856 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4) },
2857 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4) },
2858 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4) },
2859 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4) },
2860 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4) },
2861 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4) },
2862 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5) },
2863 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5) },
2864 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5) },
2865 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5) },
2866 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5) },
2867 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5) },
2868 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5) },
2869 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5) },
2870 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5) },
2871 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5) },
2872 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5) },
2873 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5) },
2874 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5) },
2875 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5) },
2876 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5) },
2877 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5) },
2878 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6) },
2879 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6) },
2880 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6) },
2881 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6) },
2882 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6) },
2883 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6) },
2884 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6) },
2885 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6) },
2886 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6) },
2887 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6) },
2888 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6) },
2889 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6) },
2890 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6) },
2891 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6) },
2892 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6) },
2893 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6) },
2894 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7) },
2895 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7) },
2896 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7) },
2897 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7) },
2898 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7) },
2899 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7) },
2900 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7) },
2901 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7) },
2902 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7) },
2903 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7) },
2904 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7) },
2905 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7) },
2906 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7) },
2907 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7) },
2908 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7) },
2909 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7) },
2910 { "mdrar_el1", CPENC(2,0,C1, C0, 0) }, /* r */
2911 { "oslar_el1", CPENC(2,0,C1, C0, 4) }, /* w */
2912 { "oslsr_el1", CPENC(2,0,C1, C1, 4) }, /* r */
2913 { "osdlr_el1", CPENC(2,0,C1, C3, 4) },
2914 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4) },
2915 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6) },
2916 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6) },
2917 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6) }, /* r */
2918
2919 { "pmcr_el0", CPENC(3,3,C9,C12, 0) },
2920 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1) },
2921 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2) },
2922 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3) },
2923 { "pmswinc_el0", CPENC(3,3,C9,C12, 4) }, /* w */
2924 { "pmselr_el0", CPENC(3,3,C9,C12, 5) },
2925 { "pmceid0_el0", CPENC(3,3,C9,C12, 6) }, /* r */
2926 { "pmceid1_el0", CPENC(3,3,C9,C12, 7) }, /* r */
2927 { "pmccntr_el0", CPENC(3,3,C9,C13, 0) },
2928 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1) },
2929 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2) },
2930 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0) },
2931 { "pmintenset_el1", CPENC(3,0,C9,C14, 1) },
2932 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2) },
2933 { "pmovsset_el0", CPENC(3,3,C9,C14, 3) },
2934 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0) },
2935 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1) },
2936 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2) },
2937 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3) },
2938 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4) },
2939 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5) },
2940 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6) },
2941 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7) },
2942 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0) },
2943 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1) },
2944 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2) },
2945 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3) },
2946 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4) },
2947 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5) },
2948 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6) },
2949 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7) },
2950 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0) },
2951 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1) },
2952 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2) },
2953 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3) },
2954 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4) },
2955 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5) },
2956 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6) },
2957 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7) },
2958 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0) },
2959 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1) },
2960 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2) },
2961 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3) },
2962 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4) },
2963 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5) },
2964 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6) },
2965 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0) },
2966 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1) },
2967 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2) },
2968 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3) },
2969 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4) },
2970 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5) },
2971 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6) },
2972 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7) },
2973 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0) },
2974 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1) },
2975 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2) },
2976 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3) },
2977 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4) },
2978 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5) },
2979 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6) },
2980 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7) },
2981 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0) },
2982 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1) },
2983 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2) },
2984 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3) },
2985 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4) },
2986 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5) },
2987 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6) },
2988 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7) },
2989 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0) },
2990 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1) },
2991 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2) },
2992 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3) },
2993 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4) },
2994 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5) },
2995 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6) },
2996 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7) },
2997 { 0, CPENC(0,0,0,0,0) },
2998 };
2999
3000 const struct aarch64_name_value_pair aarch64_pstatefields [] =
3001 {
3002 { "spsel", 0x05 },
3003 { "daifset", 0x1e },
3004 { "daifclr", 0x1f },
3005 { 0, CPENC(0,0,0,0,0) },
3006 };
3007
3008 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
3009 {
3010 { "ialluis", CPENS(0,C7,C1,0), 0 },
3011 { "iallu", CPENS(0,C7,C5,0), 0 },
3012 { "ivau", CPENS(3,C7,C5,1), 1 },
3013 { 0, CPENS(0,0,0,0), 0 }
3014 };
3015
3016 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
3017 {
3018 { "zva", CPENS(3,C7,C4,1), 1 },
3019 { "ivac", CPENS(0,C7,C6,1), 1 },
3020 { "isw", CPENS(0,C7,C6,2), 1 },
3021 { "cvac", CPENS(3,C7,C10,1), 1 },
3022 { "csw", CPENS(0,C7,C10,2), 1 },
3023 { "cvau", CPENS(3,C7,C11,1), 1 },
3024 { "civac", CPENS(3,C7,C14,1), 1 },
3025 { "cisw", CPENS(0,C7,C14,2), 1 },
3026 { 0, CPENS(0,0,0,0), 0 }
3027 };
3028
3029 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
3030 {
3031 { "s1e1r", CPENS(0,C7,C8,0), 1 },
3032 { "s1e1w", CPENS(0,C7,C8,1), 1 },
3033 { "s1e0r", CPENS(0,C7,C8,2), 1 },
3034 { "s1e0w", CPENS(0,C7,C8,3), 1 },
3035 { "s12e1r", CPENS(4,C7,C8,4), 1 },
3036 { "s12e1w", CPENS(4,C7,C8,5), 1 },
3037 { "s12e0r", CPENS(4,C7,C8,6), 1 },
3038 { "s12e0w", CPENS(4,C7,C8,7), 1 },
3039 { "s1e2r", CPENS(4,C7,C8,0), 1 },
3040 { "s1e2w", CPENS(4,C7,C8,1), 1 },
3041 { "s1e3r", CPENS(6,C7,C8,0), 1 },
3042 { "s1e3w", CPENS(6,C7,C8,1), 1 },
3043 { 0, CPENS(0,0,0,0), 0 }
3044 };
3045
3046 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
3047 {
3048 { "vmalle1", CPENS(0,C8,C7,0), 0 },
3049 { "vae1", CPENS(0,C8,C7,1), 1 },
3050 { "aside1", CPENS(0,C8,C7,2), 1 },
3051 { "vaae1", CPENS(0,C8,C7,3), 1 },
3052 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
3053 { "vae1is", CPENS(0,C8,C3,1), 1 },
3054 { "aside1is", CPENS(0,C8,C3,2), 1 },
3055 { "vaae1is", CPENS(0,C8,C3,3), 1 },
3056 { "ipas2e1is", CPENS(4,C8,C0,1), 1 },
3057 { "ipas2le1is",CPENS(4,C8,C0,5), 1 },
3058 { "ipas2e1", CPENS(4,C8,C4,1), 1 },
3059 { "ipas2le1", CPENS(4,C8,C4,5), 1 },
3060 { "vae2", CPENS(4,C8,C7,1), 1 },
3061 { "vae2is", CPENS(4,C8,C3,1), 1 },
3062 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
3063 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
3064 { "vae3", CPENS(6,C8,C7,1), 1 },
3065 { "vae3is", CPENS(6,C8,C3,1), 1 },
3066 { "alle2", CPENS(4,C8,C7,0), 0 },
3067 { "alle2is", CPENS(4,C8,C3,0), 0 },
3068 { "alle1", CPENS(4,C8,C7,4), 0 },
3069 { "alle1is", CPENS(4,C8,C3,4), 0 },
3070 { "alle3", CPENS(6,C8,C7,0), 0 },
3071 { "alle3is", CPENS(6,C8,C3,0), 0 },
3072 { "vale1is", CPENS(0,C8,C3,5), 1 },
3073 { "vale2is", CPENS(4,C8,C3,5), 1 },
3074 { "vale3is", CPENS(6,C8,C3,5), 1 },
3075 { "vaale1is", CPENS(0,C8,C3,7), 1 },
3076 { "vale1", CPENS(0,C8,C7,5), 1 },
3077 { "vale2", CPENS(4,C8,C7,5), 1 },
3078 { "vale3", CPENS(6,C8,C7,5), 1 },
3079 { "vaale1", CPENS(0,C8,C7,7), 1 },
3080 { 0, CPENS(0,0,0,0), 0 }
3081 };
3082
3083 #undef C0
3084 #undef C1
3085 #undef C2
3086 #undef C3
3087 #undef C4
3088 #undef C5
3089 #undef C6
3090 #undef C7
3091 #undef C8
3092 #undef C9
3093 #undef C10
3094 #undef C11
3095 #undef C12
3096 #undef C13
3097 #undef C14
3098 #undef C15
3099
3100 /* Include the opcode description table as well as the operand description
3101 table. */
3102 #include "aarch64-tbl.h"