aarch64: Resync field names
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2023 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
34 #ifdef DEBUG_AARCH64
35 int debug_dump = false;
36 #endif /* DEBUG_AARCH64 */
37
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
41 /* 0-7. */
42 "pow2",
43 "vl1",
44 "vl2",
45 "vl3",
46 "vl4",
47 "vl5",
48 "vl6",
49 "vl7",
50 /* 8-15. */
51 "vl8",
52 "vl16",
53 "vl32",
54 "vl64",
55 "vl128",
56 "vl256",
57 0,
58 0,
59 /* 16-23. */
60 0,
61 0,
62 0,
63 0,
64 0,
65 0,
66 0,
67 0,
68 /* 24-31. */
69 0,
70 0,
71 0,
72 0,
73 0,
74 "mul4",
75 "mul3",
76 "all"
77 };
78
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
82 /* 0-7. */
83 "pldl1keep",
84 "pldl1strm",
85 "pldl2keep",
86 "pldl2strm",
87 "pldl3keep",
88 "pldl3strm",
89 0,
90 0,
91 /* 8-15. */
92 "pstl1keep",
93 "pstl1strm",
94 "pstl2keep",
95 "pstl2strm",
96 "pstl3keep",
97 "pstl3strm",
98 0,
99 0
100 };
101
102 /* Helper functions to determine which operand should be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bool
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return (qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q);
110 }
111
112 static inline bool
113 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
114 {
115 return (qualifier >= AARCH64_OPND_QLF_S_B
116 && qualifier <= AARCH64_OPND_QLF_S_Q);
117 }
118
119 enum data_pattern
120 {
121 DP_UNKNOWN,
122 DP_VECTOR_3SAME,
123 DP_VECTOR_LONG,
124 DP_VECTOR_WIDE,
125 DP_VECTOR_ACROSS_LANES,
126 };
127
128 static const char significant_operand_index [] =
129 {
130 0, /* DP_UNKNOWN, by default using operand 0. */
131 0, /* DP_VECTOR_3SAME */
132 1, /* DP_VECTOR_LONG */
133 2, /* DP_VECTOR_WIDE */
134 1, /* DP_VECTOR_ACROSS_LANES */
135 };
136
137 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
138 the data pattern.
139 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
140 corresponds to one of a sequence of operands. */
141
142 static enum data_pattern
143 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
144 {
145 if (vector_qualifier_p (qualifiers[0]))
146 {
147 /* e.g. v.4s, v.4s, v.4s
148 or v.4h, v.4h, v.h[3]. */
149 if (qualifiers[0] == qualifiers[1]
150 && vector_qualifier_p (qualifiers[2])
151 && (aarch64_get_qualifier_esize (qualifiers[0])
152 == aarch64_get_qualifier_esize (qualifiers[1]))
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[2])))
155 return DP_VECTOR_3SAME;
156 /* e.g. v.8h, v.8b, v.8b.
157 or v.4s, v.4h, v.h[2].
158 or v.8h, v.16b. */
159 if (vector_qualifier_p (qualifiers[1])
160 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
161 && (aarch64_get_qualifier_esize (qualifiers[0])
162 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
163 return DP_VECTOR_LONG;
164 /* e.g. v.8h, v.8h, v.8b. */
165 if (qualifiers[0] == qualifiers[1]
166 && vector_qualifier_p (qualifiers[2])
167 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
168 && (aarch64_get_qualifier_esize (qualifiers[0])
169 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[1])))
172 return DP_VECTOR_WIDE;
173 }
174 else if (fp_qualifier_p (qualifiers[0]))
175 {
176 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
177 if (vector_qualifier_p (qualifiers[1])
178 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
179 return DP_VECTOR_ACROSS_LANES;
180 }
181
182 return DP_UNKNOWN;
183 }
184
185 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
186 the AdvSIMD instructions. */
187 /* N.B. it is possible to do some optimization that avoids calling
188 get_data_pattern each time an operand needs to be selected. We could
189 either cache the calculated result or statically generate the data;
190 however, it is not obvious that the optimization would bring a significant
191 benefit. */
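/* For example, for a long operation such as SADDL Vd.8H, Vn.8B, Vm.8B
   (DP_VECTOR_LONG), operand 1 is selected, since its 8B qualifier supplies
   the size:Q encoding; for a wide operation such as SADDW Vd.8H, Vn.8H,
   Vm.8B (DP_VECTOR_WIDE), operand 2 is selected instead.  */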
192
193 int
194 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
195 {
196 return
197 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
198 }
199 \f
200 /* Instruction bit-fields.
201 Keep synced with 'enum aarch64_field_kind'. */
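/* For example, the Rd entry { 0, 5 } below describes a field whose least
   significant bit is instruction bit 0 and whose width is 5 bits, i.e.
   bits [4,0]; users of this table typically extract such a field as
   (code >> lsb) & ((1 << width) - 1).  */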
202 const aarch64_field fields[] =
203 {
204 { 0, 0 }, /* NIL. */
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6_10: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_15: in rmif instructions. */
244 { 11, 4 }, /* imm4_11: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_0: in rmif instructions. */
246 { 10, 4 }, /* imm4_10: in adddg/subg instructions. */
247 { 5, 4 }, /* imm4_5: in SME instructions. */
248 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
249 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
250 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
251 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
252 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
253 { 5, 14 }, /* imm14: in test bit and branch instructions. */
254 { 5, 16 }, /* imm16_5: in exception instructions. */
255 { 0, 16 }, /* imm16_0: in udf instruction. */
256 { 0, 26 }, /* imm26: in unconditional branch instructions. */
257 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
258 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
259 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
260 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
261 { 22, 1 }, /* S_imm10: in LDRAA and LDRAB instructions. */
262 { 22, 1 }, /* N: in logical (immediate) instructions. */
263 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
264 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
265 { 31, 1 }, /* sf: in integer data processing instructions. */
266 { 30, 1 }, /* lse_sz: in LSE extension atomic instructions. */
267 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
268 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
269 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
270 { 31, 1 }, /* b5: in the test bit and branch instructions. */
271 { 19, 5 }, /* b40: in the test bit and branch instructions. */
272 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
273 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
274 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
275 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
276 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
277 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
278 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
279 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
280 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
281 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
282 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
283 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
284 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
285 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
286 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
287 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
288 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
289 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
290 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
291 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
292 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
293 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
294 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
295 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
296 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
297 { 5, 1 }, /* SVE_i1: single-bit immediate. */
298 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
299 { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
300 { 19, 2 }, /* SVE_i3h2: two high bits of 3-bit immediate, bits [20,19]. */
301 { 20, 1 }, /* SVE_i2h: high bit of 2-bit immediate, bit 20. */
302 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
303 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
304 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
305 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
306 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
307 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
308 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
309 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
310 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
311 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
312 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
313 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
314 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
315 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
316 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
317 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
318 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
319 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
320 { 30, 1 }, /* SVE_sz2: 1-bit element size select. */
321 { 16, 4 }, /* SVE_tsz: triangular size select. */
322 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
323 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
324 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
325 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
326 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
327 { 0, 2 }, /* SME_ZAda_2b: tile ZA0-ZA3. */
328 { 0, 3 }, /* SME_ZAda_3b: tile ZA0-ZA7. */
329 { 22, 2 }, /* SME_size_22: size<1>, size<0> class field, [23:22]. */
330 { 16, 1 }, /* SME_Q: Q class bit, bit 16. */
331 { 15, 1 }, /* SME_V: (horizontal / vertical tiles), bit 15. */
332 { 13, 2 }, /* SME_Rv: vector select register W12-W15, bits [14:13]. */
333 { 13, 3 }, /* SME_Pm: second source scalable predicate register P0-P7. */
334 { 0, 8 }, /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0]. */
335 { 16, 2 }, /* SME_Rm: index base register W12-W15 [17:16]. */
336 { 23, 1 }, /* SME_i1: immediate field, bit 23. */
337 { 22, 1 }, /* SME_tszh: immediate and qualifier field, bit 22. */
338 { 18, 3 }, /* SME_tszl: immediate and qualifier field, bits [20:18]. */
339 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
340 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
341 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
342 { 12, 2 }, /* SM3_imm2: Indexed element SM3 2 bits index immediate. */
343 { 22, 1 }, /* sz: 1-bit element size select. */
344 { 10, 2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>. */
345 { 10, 8 }, /* CSSC_imm8. */
346 };
347
348 enum aarch64_operand_class
349 aarch64_get_operand_class (enum aarch64_opnd type)
350 {
351 return aarch64_operands[type].op_class;
352 }
353
354 const char *
355 aarch64_get_operand_name (enum aarch64_opnd type)
356 {
357 return aarch64_operands[type].name;
358 }
359
360 /* Get operand description string.
361 This is usually used for diagnostic purposes. */
362 const char *
363 aarch64_get_operand_desc (enum aarch64_opnd type)
364 {
365 return aarch64_operands[type].desc;
366 }
367
368 /* Table of all conditional affixes. */
369 const aarch64_cond aarch64_conds[16] =
370 {
371 {{"eq", "none"}, 0x0},
372 {{"ne", "any"}, 0x1},
373 {{"cs", "hs", "nlast"}, 0x2},
374 {{"cc", "lo", "ul", "last"}, 0x3},
375 {{"mi", "first"}, 0x4},
376 {{"pl", "nfrst"}, 0x5},
377 {{"vs"}, 0x6},
378 {{"vc"}, 0x7},
379 {{"hi", "pmore"}, 0x8},
380 {{"ls", "plast"}, 0x9},
381 {{"ge", "tcont"}, 0xa},
382 {{"lt", "tstop"}, 0xb},
383 {{"gt"}, 0xc},
384 {{"le"}, 0xd},
385 {{"al"}, 0xe},
386 {{"nv"}, 0xf},
387 };
388
389 const aarch64_cond *
390 get_cond_from_value (aarch64_insn value)
391 {
392 assert (value < 16);
393 return &aarch64_conds[(unsigned int) value];
394 }
395
396 const aarch64_cond *
397 get_inverted_cond (const aarch64_cond *cond)
398 {
399 return &aarch64_conds[cond->value ^ 0x1];
400 }
401
402 /* Table describing the operand extension/shifting operators; indexed by
403 enum aarch64_modifier_kind.
404
405 The value column provides the most common values for encoding modifiers,
406 which enables table-driven encoding/decoding for the modifiers. */
407 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
408 {
409 {"none", 0x0},
410 {"msl", 0x0},
411 {"ror", 0x3},
412 {"asr", 0x2},
413 {"lsr", 0x1},
414 {"lsl", 0x0},
415 {"uxtb", 0x0},
416 {"uxth", 0x1},
417 {"uxtw", 0x2},
418 {"uxtx", 0x3},
419 {"sxtb", 0x4},
420 {"sxth", 0x5},
421 {"sxtw", 0x6},
422 {"sxtx", 0x7},
423 {"mul", 0x0},
424 {"mul vl", 0x0},
425 {NULL, 0},
426 };
427
428 enum aarch64_modifier_kind
429 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
430 {
431 return desc - aarch64_operand_modifiers;
432 }
433
434 aarch64_insn
435 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
436 {
437 return aarch64_operand_modifiers[kind].value;
438 }
439
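/* For example, given the table above, the value 0x2 converts to
   AARCH64_MOD_UXTW when EXTEND_P and to AARCH64_MOD_ASR otherwise
   (assuming enum aarch64_modifier_kind follows the table order, as
   aarch64_get_operand_modifier above relies on).  */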
440 enum aarch64_modifier_kind
441 aarch64_get_operand_modifier_from_value (aarch64_insn value,
442 bool extend_p)
443 {
444 if (extend_p)
445 return AARCH64_MOD_UXTB + value;
446 else
447 return AARCH64_MOD_LSL - value;
448 }
449
450 bool
451 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
452 {
453 return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
454 }
455
456 static inline bool
457 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
458 {
459 return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
460 }
461
462 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
463 {
464 { "#0x00", 0x0 },
465 { "oshld", 0x1 },
466 { "oshst", 0x2 },
467 { "osh", 0x3 },
468 { "#0x04", 0x4 },
469 { "nshld", 0x5 },
470 { "nshst", 0x6 },
471 { "nsh", 0x7 },
472 { "#0x08", 0x8 },
473 { "ishld", 0x9 },
474 { "ishst", 0xa },
475 { "ish", 0xb },
476 { "#0x0c", 0xc },
477 { "ld", 0xd },
478 { "st", 0xe },
479 { "sy", 0xf },
480 };
481
482 const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
483 { /* CRm<3:2> #imm */
484 { "oshnxs", 16 }, /* 00 16 */
485 { "nshnxs", 20 }, /* 01 20 */
486 { "ishnxs", 24 }, /* 10 24 */
487 { "synxs", 28 }, /* 11 28 */
488 };
489
490 /* Table describing the operands supported by the aliases of the HINT
491 instruction.
492
493 The name column is the operand that is accepted for the alias. The value
494 column is the hint number of the alias. The list of operands is terminated
495 by NULL in the name column. */
496
497 const struct aarch64_name_value_pair aarch64_hint_options[] =
498 {
499 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
500 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
501 { "csync", HINT_OPD_CSYNC }, /* PSB CSYNC. */
502 { "c", HINT_OPD_C }, /* BTI C. */
503 { "j", HINT_OPD_J }, /* BTI J. */
504 { "jc", HINT_OPD_JC }, /* BTI JC. */
505 { NULL, HINT_OPD_NULL },
506 };
507
508 /* op -> op: load = 0 instruction = 1 store = 2
509 l -> level: 1-3
510 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
511 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
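/* For example, B (2, 2, 1) is ((2 << 3) | ((2 - 1) << 1) | 1) == 0x13,
   the value used for "pstl2strm" below: a store prefetch (op == 2)
   targeting L2 (l == 2) with a streaming/non-temporal hint (t == 1).  */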
512 const struct aarch64_name_value_pair aarch64_prfops[32] =
513 {
514 { "pldl1keep", B(0, 1, 0) },
515 { "pldl1strm", B(0, 1, 1) },
516 { "pldl2keep", B(0, 2, 0) },
517 { "pldl2strm", B(0, 2, 1) },
518 { "pldl3keep", B(0, 3, 0) },
519 { "pldl3strm", B(0, 3, 1) },
520 { NULL, 0x06 },
521 { NULL, 0x07 },
522 { "plil1keep", B(1, 1, 0) },
523 { "plil1strm", B(1, 1, 1) },
524 { "plil2keep", B(1, 2, 0) },
525 { "plil2strm", B(1, 2, 1) },
526 { "plil3keep", B(1, 3, 0) },
527 { "plil3strm", B(1, 3, 1) },
528 { NULL, 0x0e },
529 { NULL, 0x0f },
530 { "pstl1keep", B(2, 1, 0) },
531 { "pstl1strm", B(2, 1, 1) },
532 { "pstl2keep", B(2, 2, 0) },
533 { "pstl2strm", B(2, 2, 1) },
534 { "pstl3keep", B(2, 3, 0) },
535 { "pstl3strm", B(2, 3, 1) },
536 { NULL, 0x16 },
537 { NULL, 0x17 },
538 { NULL, 0x18 },
539 { NULL, 0x19 },
540 { NULL, 0x1a },
541 { NULL, 0x1b },
542 { NULL, 0x1c },
543 { NULL, 0x1d },
544 { NULL, 0x1e },
545 { NULL, 0x1f },
546 };
547 #undef B
548 \f
549 /* Utilities on value constraint. */
550
551 static inline int
552 value_in_range_p (int64_t value, int low, int high)
553 {
554 return (value >= low && value <= high) ? 1 : 0;
555 }
556
557 /* Return true if VALUE is a multiple of ALIGN. */
558 static inline int
559 value_aligned_p (int64_t value, int align)
560 {
561 return (value % align) == 0;
562 }
563
564 /* Return non-zero if VALUE fits in a signed field of WIDTH bits. */
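/* For example, a 6-bit signed field holds values in [-32, 31], so -32
   fits but -33 does not.  */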
565 static inline int
566 value_fit_signed_field_p (int64_t value, unsigned width)
567 {
568 assert (width < 32);
569 if (width < sizeof (value) * 8)
570 {
571 int64_t lim = (uint64_t) 1 << (width - 1);
572 if (value >= -lim && value < lim)
573 return 1;
574 }
575 return 0;
576 }
577
578 /* Return non-zero if VALUE fits in an unsigned field of WIDTH bits. */
579 static inline int
580 value_fit_unsigned_field_p (int64_t value, unsigned width)
581 {
582 assert (width < 32);
583 if (width < sizeof (value) * 8)
584 {
585 int64_t lim = (uint64_t) 1 << width;
586 if (value >= 0 && value < lim)
587 return 1;
588 }
589 return 0;
590 }
591
592 /* Return 1 if OPERAND is SP or WSP. */
593 int
594 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
595 {
596 return ((aarch64_get_operand_class (operand->type)
597 == AARCH64_OPND_CLASS_INT_REG)
598 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
599 && operand->reg.regno == 31);
600 }
601
602 /* Return 1 if OPERAND is XZR or WZR. */
603 int
604 aarch64_zero_register_p (const aarch64_opnd_info *operand)
605 {
606 return ((aarch64_get_operand_class (operand->type)
607 == AARCH64_OPND_CLASS_INT_REG)
608 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
609 && operand->reg.regno == 31);
610 }
611
612 /* Return true if the operand *OPERAND, which has the operand code
613 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
614 qualified by the qualifier TARGET. */
615
616 static inline int
617 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
618 aarch64_opnd_qualifier_t target)
619 {
620 switch (operand->qualifier)
621 {
622 case AARCH64_OPND_QLF_W:
623 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
624 return 1;
625 break;
626 case AARCH64_OPND_QLF_X:
627 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
628 return 1;
629 break;
630 case AARCH64_OPND_QLF_WSP:
631 if (target == AARCH64_OPND_QLF_W
632 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
633 return 1;
634 break;
635 case AARCH64_OPND_QLF_SP:
636 if (target == AARCH64_OPND_QLF_X
637 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
638 return 1;
639 break;
640 default:
641 break;
642 }
643
644 return 0;
645 }
646
647 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
648 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
649
650 Return NIL if more than one expected qualifier is found. */
651
652 aarch64_opnd_qualifier_t
653 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
654 int idx,
655 const aarch64_opnd_qualifier_t known_qlf,
656 int known_idx)
657 {
658 int i, saved_i;
659
660 /* Special case.
661
662 When the known qualifier is NIL, we have to assume that there is only
663 one qualifier sequence in the *QSEQ_LIST and return the corresponding
664 qualifier directly. One scenario is that for instruction
665 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
666 which has only one possible valid qualifier sequence
667 NIL, S_D
668 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
669 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
670
671 Because the qualifier NIL has dual roles in the qualifier sequence:
672 it can mean no qualifier for the operand, or the qualifier sequence is
673 not in use (when all qualifiers in the sequence are NILs), we have to
674 handle this special case here. */
675 if (known_qlf == AARCH64_OPND_NIL)
676 {
677 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
678 return qseq_list[0][idx];
679 }
680
681 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
682 {
683 if (qseq_list[i][known_idx] == known_qlf)
684 {
685 if (saved_i != -1)
686 /* More than one sequence is found to have KNOWN_QLF at
687 KNOWN_IDX. */
688 return AARCH64_OPND_NIL;
689 saved_i = i;
690 }
691 }
692
693 return qseq_list[saved_i][idx];
694 }
695
696 enum operand_qualifier_kind
697 {
698 OQK_NIL,
699 OQK_OPD_VARIANT,
700 OQK_VALUE_IN_RANGE,
701 OQK_MISC,
702 };
703
704 /* Operand qualifier description. */
705 struct operand_qualifier_data
706 {
707 /* The usage of the three data fields depends on the qualifier kind. */
708 int data0;
709 int data1;
710 int data2;
711 /* Description. */
712 const char *desc;
713 /* Kind. */
714 enum operand_qualifier_kind kind;
715 };
716
717 /* Indexed by the operand qualifier enumerators. */
718 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
719 {
720 {0, 0, 0, "NIL", OQK_NIL},
721
722 /* Operand variant qualifiers.
723 First 3 fields:
724 element size, number of elements and common value for encoding. */
725
726 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
727 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
728 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
729 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
730
731 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
732 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
733 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
734 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
735 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
736 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
737 {4, 1, 0x0, "2h", OQK_OPD_VARIANT},
738
739 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
740 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
741 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
742 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
743 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
744 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
745 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
746 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
747 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
748 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
749 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
750
751 {0, 0, 0, "z", OQK_OPD_VARIANT},
752 {0, 0, 0, "m", OQK_OPD_VARIANT},
753
754 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
755 {16, 0, 0, "tag", OQK_OPD_VARIANT},
756
757 /* Qualifiers constraining the value range.
758 First 3 fields:
759 Lower bound, higher bound, unused. */
760
761 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
762 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
763 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
764 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
765 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
766 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
767 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
768
769 /* Qualifiers for miscellaneous purposes.
770 First 3 fields:
771 unused, unused and unused. */
772
773 {0, 0, 0, "lsl", 0},
774 {0, 0, 0, "msl", 0},
775
776 {0, 0, 0, "retrieving", 0},
777 };
778
779 static inline bool
780 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
781 {
782 return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
783 }
784
785 static inline bool
786 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
787 {
788 return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
789 }
790
791 const char*
792 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
793 {
794 return aarch64_opnd_qualifiers[qualifier].desc;
795 }
796
797 /* Given an operand qualifier, return the expected data element size
798 of a qualified operand. */
799 unsigned char
800 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
801 {
802 assert (operand_variant_qualifier_p (qualifier));
803 return aarch64_opnd_qualifiers[qualifier].data0;
804 }
805
806 unsigned char
807 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
808 {
809 assert (operand_variant_qualifier_p (qualifier));
810 return aarch64_opnd_qualifiers[qualifier].data1;
811 }
812
813 aarch64_insn
814 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
815 {
816 assert (operand_variant_qualifier_p (qualifier));
817 return aarch64_opnd_qualifiers[qualifier].data2;
818 }
819
820 static int
821 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
822 {
823 assert (qualifier_value_in_range_constraint_p (qualifier));
824 return aarch64_opnd_qualifiers[qualifier].data0;
825 }
826
827 static int
828 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
829 {
830 assert (qualifier_value_in_range_constraint_p (qualifier));
831 return aarch64_opnd_qualifiers[qualifier].data1;
832 }
833
834 #ifdef DEBUG_AARCH64
835 void
836 aarch64_verbose (const char *str, ...)
837 {
838 va_list ap;
839 va_start (ap, str);
840 printf ("#### ");
841 vprintf (str, ap);
842 printf ("\n");
843 va_end (ap);
844 }
845
846 static inline void
847 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
848 {
849 int i;
850 printf ("#### \t");
851 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
852 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
853 printf ("\n");
854 }
855
856 static void
857 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
858 const aarch64_opnd_qualifier_t *qualifier)
859 {
860 int i;
861 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
862
863 aarch64_verbose ("dump_match_qualifiers:");
864 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
865 curr[i] = opnd[i].qualifier;
866 dump_qualifier_sequence (curr);
867 aarch64_verbose ("against");
868 dump_qualifier_sequence (qualifier);
869 }
870 #endif /* DEBUG_AARCH64 */
871
872 /* This function checks if the instruction described by OPCODE is a
873 destructive instruction, based on the usage of its operands. It does not
874 recognize unary destructive instructions. */
875 bool
876 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
877 {
878 int i = 0;
879 const enum aarch64_opnd *opnds = opcode->operands;
880
881 if (opnds[0] == AARCH64_OPND_NIL)
882 return false;
883
884 while (opnds[++i] != AARCH64_OPND_NIL)
885 if (opnds[i] == opnds[0])
886 return true;
887
888 return false;
889 }
890
891 /* TODO: improve this; we could have an extra field at runtime to
892 store the number of operands rather than calculating it every time. */
893
894 int
895 aarch64_num_of_operands (const aarch64_opcode *opcode)
896 {
897 int i = 0;
898 const enum aarch64_opnd *opnds = opcode->operands;
899 while (opnds[i++] != AARCH64_OPND_NIL)
900 ;
901 --i;
902 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
903 return i;
904 }
905
906 /* Find the best-matching qualifier sequence in *QUALIFIERS_LIST for INST.
907 If one is found, fill it in *RET and return 1; otherwise return 0.
908
909 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
910 This is always 0 if the function succeeds.
911
912 N.B. on entry, it is very likely that only some operands in *INST
913 have had their qualifiers established.
914
915 If STOP_AT is not -1, the function will only try to match
916 the qualifier sequence for operands before and including the operand
917 of index STOP_AT; and on success *RET will only be filled with the first
918 (STOP_AT+1) qualifiers.
919
920 A couple of examples of the matching algorithm:
921
922 X,W,NIL should match
923 X,W,NIL
924
925 NIL,NIL should match
926 X ,NIL
927
928 Apart from serving the main encoding routine, this can also be called
929 during or after the operand decoding. */
930
931 int
932 aarch64_find_best_match (const aarch64_inst *inst,
933 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
934 int stop_at, aarch64_opnd_qualifier_t *ret,
935 int *invalid_count)
936 {
937 int i, num_opnds, invalid, min_invalid;
938 const aarch64_opnd_qualifier_t *qualifiers;
939
940 num_opnds = aarch64_num_of_operands (inst->opcode);
941 if (num_opnds == 0)
942 {
943 DEBUG_TRACE ("SUCCEED: no operand");
944 *invalid_count = 0;
945 return 1;
946 }
947
948 if (stop_at < 0 || stop_at >= num_opnds)
949 stop_at = num_opnds - 1;
950
951 /* For each pattern. */
952 min_invalid = num_opnds;
953 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
954 {
955 int j;
956 qualifiers = *qualifiers_list;
957
958 /* Start as positive. */
959 invalid = 0;
960
961 DEBUG_TRACE ("%d", i);
962 #ifdef DEBUG_AARCH64
963 if (debug_dump)
964 dump_match_qualifiers (inst->operands, qualifiers);
965 #endif
966
967 /* The first entry should be taken literally, even if it's an empty
968 qualifier sequence. (This matters for strict testing.) In other
969 positions an empty sequence acts as a terminator. */
970 if (i > 0 && empty_qualifier_sequence_p (qualifiers))
971 break;
972
973 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
974 {
975 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL
976 && !(inst->opcode->flags & F_STRICT))
977 {
978 /* Either the operand does not have a qualifier, or the qualifier
979 for the operand needs to be deduced from the qualifier
980 sequence.
981 In the latter case, any constraint checking related to
982 the obtained qualifier should be done later in
983 operand_general_constraint_met_p. */
984 continue;
985 }
986 else if (*qualifiers != inst->operands[j].qualifier)
987 {
988 /* Unless the target qualifier can also qualify the operand
989 (which has already had a non-nil qualifier), non-equal
990 qualifiers generally do not match. */
991 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
992 continue;
993 else
994 invalid += 1;
995 }
996 else
997 continue; /* Equal qualifiers are certainly matched. */
998 }
999
1000 if (min_invalid > invalid)
1001 min_invalid = invalid;
1002
1003 /* Qualifiers established. */
1004 if (min_invalid == 0)
1005 break;
1006 }
1007
1008 *invalid_count = min_invalid;
1009 if (min_invalid == 0)
1010 {
1011 /* Fill the result in *RET. */
1012 int j;
1013 qualifiers = *qualifiers_list;
1014
1015 DEBUG_TRACE ("complete qualifiers using list %d", i);
1016 #ifdef DEBUG_AARCH64
1017 if (debug_dump)
1018 dump_qualifier_sequence (qualifiers);
1019 #endif
1020
1021 for (j = 0; j <= stop_at; ++j, ++qualifiers)
1022 ret[j] = *qualifiers;
1023 for (; j < AARCH64_MAX_OPND_NUM; ++j)
1024 ret[j] = AARCH64_OPND_QLF_NIL;
1025
1026 DEBUG_TRACE ("SUCCESS");
1027 return 1;
1028 }
1029
1030 DEBUG_TRACE ("FAIL");
1031 return 0;
1032 }
1033
1034 /* Operand qualifier matching and resolving.
1035
1036 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1037 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1038
1039 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
1040 This is always 0 if the function succeeds.
1041
1042 If UPDATE_P, update the qualifier(s) in *INST after the matching
1043 succeeds. */
1044
1045 static int
1046 match_operands_qualifier (aarch64_inst *inst, bool update_p,
1047 int *invalid_count)
1048 {
1049 int i;
1050 aarch64_opnd_qualifier_seq_t qualifiers;
1051
1052 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1053 qualifiers, invalid_count))
1054 {
1055 DEBUG_TRACE ("matching FAIL");
1056 return 0;
1057 }
1058
1059 /* Update the qualifiers. */
1060 if (update_p)
1061 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1062 {
1063 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1064 break;
1065 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1066 "update %s with %s for operand %d",
1067 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1068 aarch64_get_qualifier_name (qualifiers[i]), i);
1069 inst->operands[i].qualifier = qualifiers[i];
1070 }
1071
1072 DEBUG_TRACE ("matching SUCCESS");
1073 return 1;
1074 }
1075
1076 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1077 register by MOVZ.
1078
1079 IS32 indicates whether value is a 32-bit immediate or not.
1080 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1081 amount will be returned in *SHIFT_AMOUNT. */
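/* For example, 0xabcd00000000 is such a constant (0xabcd shifted left by
   32 bits), so the function returns true with *SHIFT_AMOUNT set to 32,
   whereas 0xabcd0000ffff is rejected because more than one 16-bit chunk
   is non-zero.  */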
1082
1083 bool
1084 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1085 {
1086 int amount;
1087
1088 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1089
1090 if (is32)
1091 {
1092 /* Allow all zeros or all ones in top 32-bits, so that
1093 32-bit constant expressions like ~0x80000000 are
1094 permitted. */
1095 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1096 /* Immediate out of range. */
1097 return false;
1098 value &= 0xffffffff;
1099 }
1100
1101 /* first, try movz then movn */
1102 amount = -1;
1103 if ((value & ((uint64_t) 0xffff << 0)) == value)
1104 amount = 0;
1105 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1106 amount = 16;
1107 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1108 amount = 32;
1109 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1110 amount = 48;
1111
1112 if (amount == -1)
1113 {
1114 DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1115 return false;
1116 }
1117
1118 if (shift_amount != NULL)
1119 *shift_amount = amount;
1120
1121 DEBUG_TRACE ("exit true with amount %d", amount);
1122
1123 return true;
1124 }
1125
1126 /* Build the accepted values for immediate logical SIMD instructions.
1127
1128 The standard encodings of the immediate value are:
1129 N imms immr SIMD size R S
1130 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1131 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1132 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1133 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1134 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1135 0 11110s 00000r 2 UInt(r) UInt(s)
1136 where all-ones value of S is reserved.
1137
1138 Let's call E the SIMD size.
1139
1140 The immediate value is S+1 consecutive '1' bits, rotated right by R.
1141
1142 The total number of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1143 (remember S != E - 1). */
1144
1145 #define TOTAL_IMM_NB 5334
1146
1147 typedef struct
1148 {
1149 uint64_t imm;
1150 aarch64_insn encoding;
1151 } simd_imm_encoding;
1152
1153 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1154
1155 static int
1156 simd_imm_encoding_cmp(const void *i1, const void *i2)
1157 {
1158 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1159 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1160
1161 if (imm1->imm < imm2->imm)
1162 return -1;
1163 if (imm1->imm > imm2->imm)
1164 return +1;
1165 return 0;
1166 }
1167
1168 /* immediate bitfield standard encoding
1169 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1170 1 ssssss rrrrrr 64 rrrrrr ssssss
1171 0 0sssss 0rrrrr 32 rrrrr sssss
1172 0 10ssss 00rrrr 16 rrrr ssss
1173 0 110sss 000rrr 8 rrr sss
1174 0 1110ss 0000rr 4 rr ss
1175 0 11110s 00000r 2 r s */
1176 static inline int
1177 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1178 {
1179 return (is64 << 12) | (r << 6) | s;
1180 }
1181
1182 static void
1183 build_immediate_table (void)
1184 {
1185 uint32_t log_e, e, s, r, s_mask;
1186 uint64_t mask, imm;
1187 int nb_imms;
1188 int is64;
1189
1190 nb_imms = 0;
1191 for (log_e = 1; log_e <= 6; log_e++)
1192 {
1193 /* Get element size. */
1194 e = 1u << log_e;
1195 if (log_e == 6)
1196 {
1197 is64 = 1;
1198 mask = 0xffffffffffffffffull;
1199 s_mask = 0;
1200 }
1201 else
1202 {
1203 is64 = 0;
1204 mask = (1ull << e) - 1;
1205 /* log_e s_mask
1206 1 ((1 << 4) - 1) << 2 = 111100
1207 2 ((1 << 3) - 1) << 3 = 111000
1208 3 ((1 << 2) - 1) << 4 = 110000
1209 4 ((1 << 1) - 1) << 5 = 100000
1210 5 ((1 << 0) - 1) << 6 = 000000 */
1211 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1212 }
1213 for (s = 0; s < e - 1; s++)
1214 for (r = 0; r < e; r++)
1215 {
1216 /* s+1 consecutive bits to 1 (s < 63) */
1217 imm = (1ull << (s + 1)) - 1;
1218 /* rotate right by r */
1219 if (r != 0)
1220 imm = (imm >> r) | ((imm << (e - r)) & mask);
1221 /* replicate the constant depending on SIMD size */
1222 switch (log_e)
1223 {
1224 case 1: imm = (imm << 2) | imm;
1225 /* Fall through. */
1226 case 2: imm = (imm << 4) | imm;
1227 /* Fall through. */
1228 case 3: imm = (imm << 8) | imm;
1229 /* Fall through. */
1230 case 4: imm = (imm << 16) | imm;
1231 /* Fall through. */
1232 case 5: imm = (imm << 32) | imm;
1233 /* Fall through. */
1234 case 6: break;
1235 default: abort ();
1236 }
1237 simd_immediates[nb_imms].imm = imm;
1238 simd_immediates[nb_imms].encoding =
1239 encode_immediate_bitfield(is64, s | s_mask, r);
1240 nb_imms++;
1241 }
1242 }
1243 assert (nb_imms == TOTAL_IMM_NB);
1244 qsort(simd_immediates, nb_imms,
1245 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1246 }
1247
1248 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1249 be accepted by logical (immediate) instructions
1250 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1251
1252 ESIZE is the number of bytes in the decoded immediate value.
1253 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1254 VALUE will be returned in *ENCODING. */
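/* For example (illustrative), 0x00ff00ff00ff00ff with ESIZE 8 is accepted
   and yields the standard encoding N:immr:imms == 0x027, i.e. N == 0,
   immr == 0b000000, imms == 0b100111: a 16-bit element of eight
   consecutive ones with no rotation.  */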
1255
1256 bool
1257 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1258 {
1259 simd_imm_encoding imm_enc;
1260 const simd_imm_encoding *imm_encoding;
1261 static bool initialized = false;
1262 uint64_t upper;
1263 int i;
1264
1265 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1266 value, esize);
1267
1268 if (!initialized)
1269 {
1270 build_immediate_table ();
1271 initialized = true;
1272 }
1273
1274 /* Allow all zeros or all ones in top bits, so that
1275 constant expressions like ~1 are permitted. */
1276 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1277 if ((value & ~upper) != value && (value | upper) != value)
1278 return false;
1279
1280 /* Replicate to a full 64-bit value. */
1281 value &= ~upper;
1282 for (i = esize * 8; i < 64; i *= 2)
1283 value |= (value << i);
1284
1285 imm_enc.imm = value;
1286 imm_encoding = (const simd_imm_encoding *)
1287 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1288 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1289 if (imm_encoding == NULL)
1290 {
1291 DEBUG_TRACE ("exit with false");
1292 return false;
1293 }
1294 if (encoding != NULL)
1295 *encoding = imm_encoding->encoding;
1296 DEBUG_TRACE ("exit with true");
1297 return true;
1298 }
1299
1300 /* If 64-bit immediate IMM is in the format of
1301 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1302 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1303 of value "abcdefgh". Otherwise return -1. */
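/* For example, aarch64_shrink_expanded_imm8 (0x00ffff00000000ff) returns
   0x61 (0b01100001), whereas 0x00ffff00000000fe yields -1 because its
   lowest byte is neither 0x00 nor 0xff.  */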
1304 int
1305 aarch64_shrink_expanded_imm8 (uint64_t imm)
1306 {
1307 int i, ret;
1308 uint32_t byte;
1309
1310 ret = 0;
1311 for (i = 0; i < 8; i++)
1312 {
1313 byte = (imm >> (8 * i)) & 0xff;
1314 if (byte == 0xff)
1315 ret |= 1 << i;
1316 else if (byte != 0x00)
1317 return -1;
1318 }
1319 return ret;
1320 }
1321
1322 /* Utility inline functions for operand_general_constraint_met_p. */
1323
1324 static inline void
1325 set_error (aarch64_operand_error *mismatch_detail,
1326 enum aarch64_operand_error_kind kind, int idx,
1327 const char* error)
1328 {
1329 if (mismatch_detail == NULL)
1330 return;
1331 mismatch_detail->kind = kind;
1332 mismatch_detail->index = idx;
1333 mismatch_detail->error = error;
1334 }
1335
1336 static inline void
1337 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1338 const char* error)
1339 {
1340 if (mismatch_detail == NULL)
1341 return;
1342 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1343 }
1344
1345 static inline void
1346 set_invalid_regno_error (aarch64_operand_error *mismatch_detail, int idx,
1347 const char *prefix, int lower_bound, int upper_bound)
1348 {
1349 if (mismatch_detail == NULL)
1350 return;
1351 set_error (mismatch_detail, AARCH64_OPDE_INVALID_REGNO, idx, NULL);
1352 mismatch_detail->data[0].s = prefix;
1353 mismatch_detail->data[1].i = lower_bound;
1354 mismatch_detail->data[2].i = upper_bound;
1355 }
1356
1357 static inline void
1358 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1359 int idx, int lower_bound, int upper_bound,
1360 const char* error)
1361 {
1362 if (mismatch_detail == NULL)
1363 return;
1364 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1365 mismatch_detail->data[0].i = lower_bound;
1366 mismatch_detail->data[1].i = upper_bound;
1367 }
1368
1369 static inline void
1370 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1371 int idx, int lower_bound, int upper_bound)
1372 {
1373 if (mismatch_detail == NULL)
1374 return;
1375 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1376 _("immediate value"));
1377 }
1378
1379 static inline void
1380 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1381 int idx, int lower_bound, int upper_bound)
1382 {
1383 if (mismatch_detail == NULL)
1384 return;
1385 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1386 _("immediate offset"));
1387 }
1388
1389 static inline void
1390 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1391 int idx, int lower_bound, int upper_bound)
1392 {
1393 if (mismatch_detail == NULL)
1394 return;
1395 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1396 _("register number"));
1397 }
1398
1399 static inline void
1400 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1401 int idx, int lower_bound, int upper_bound)
1402 {
1403 if (mismatch_detail == NULL)
1404 return;
1405 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1406 _("register element index"));
1407 }
1408
1409 static inline void
1410 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1411 int idx, int lower_bound, int upper_bound)
1412 {
1413 if (mismatch_detail == NULL)
1414 return;
1415 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1416 _("shift amount"));
1417 }
1418
1419 /* Report that the MUL modifier in operand IDX should be in the range
1420 [LOWER_BOUND, UPPER_BOUND]. */
1421 static inline void
1422 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1423 int idx, int lower_bound, int upper_bound)
1424 {
1425 if (mismatch_detail == NULL)
1426 return;
1427 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1428 _("multiplier"));
1429 }
1430
1431 static inline void
1432 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1433 int alignment)
1434 {
1435 if (mismatch_detail == NULL)
1436 return;
1437 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1438 mismatch_detail->data[0].i = alignment;
1439 }
1440
1441 static inline void
1442 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1443 int expected_num)
1444 {
1445 if (mismatch_detail == NULL)
1446 return;
1447 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1448 mismatch_detail->data[0].i = 1 << expected_num;
1449 }
1450
1451 static inline void
1452 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1453 const char* error)
1454 {
1455 if (mismatch_detail == NULL)
1456 return;
1457 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1458 }
1459
1460 /* Check that indexed register operand OPND has a register in the range
1461 [MIN_REGNO, MAX_REGNO] and an index in the range [MIN_INDEX, MAX_INDEX].
1462 PREFIX is the register prefix, such as "z" for SVE vector registers. */
1463
1464 static bool
1465 check_reglane (const aarch64_opnd_info *opnd,
1466 aarch64_operand_error *mismatch_detail, int idx,
1467 const char *prefix, int min_regno, int max_regno,
1468 int min_index, int max_index)
1469 {
1470 if (!value_in_range_p (opnd->reglane.regno, min_regno, max_regno))
1471 {
1472 set_invalid_regno_error (mismatch_detail, idx, prefix, min_regno,
1473 max_regno);
1474 return false;
1475 }
1476 if (!value_in_range_p (opnd->reglane.index, min_index, max_index))
1477 {
1478 set_elem_idx_out_of_range_error (mismatch_detail, idx, min_index,
1479 max_index);
1480 return false;
1481 }
1482 return true;
1483 }
1484
1485 /* Check that indexed ZA operand OPND has:
1486
1487 - a selection register in the range [MIN_WREG, MIN_WREG + 3]
1488
1489 - an immediate offset in the range [0, MAX_VALUE]. */
1490
1491 static bool
1492 check_za_access (const aarch64_opnd_info *opnd,
1493 aarch64_operand_error *mismatch_detail, int idx,
1494 int min_wreg, int max_value)
1495 {
1496 if (!value_in_range_p (opnd->indexed_za.index.regno, min_wreg, min_wreg + 3))
1497 {
1498 if (min_wreg == 12)
1499 set_other_error (mismatch_detail, idx,
1500 _("expected a selection register in the"
1501 " range w12-w15"));
1502 else
1503 abort ();
1504 return false;
1505 }
1506
1507 if (!value_in_range_p (opnd->indexed_za.index.imm, 0, max_value))
1508 {
1509 set_offset_out_of_range_error (mismatch_detail, idx, 0, max_value);
1510 return false;
1511 }
1512 return true;
1513 }
1514
1515 /* General constraint checking based on operand code.
1516
1517 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1518 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1519
1520 This function has to be called after the qualifiers for all operands
1521 have been resolved.
1522
1523 A mismatch error message is returned in *MISMATCH_DETAIL upon request,
1524 i.e. when MISMATCH_DETAIL is non-NULL. This avoids generating error
1525 messages during disassembly, where they are not
1526 wanted. We avoid the dynamic construction of error message strings
1527 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1528 use a combination of error code, static string and some integer data to
1529 represent an error. */
1530
1531 static int
1532 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1533 enum aarch64_opnd type,
1534 const aarch64_opcode *opcode,
1535 aarch64_operand_error *mismatch_detail)
1536 {
1537 unsigned num, modifiers, shift;
1538 unsigned char size;
1539 int64_t imm, min_value, max_value;
1540 uint64_t uvalue, mask;
1541 const aarch64_opnd_info *opnd = opnds + idx;
1542 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1543 int i;
1544
1545 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1546
1547 switch (aarch64_operands[type].op_class)
1548 {
1549 case AARCH64_OPND_CLASS_INT_REG:
1550 /* Check pair reg constraints for cas* instructions. */
1551 if (type == AARCH64_OPND_PAIRREG)
1552 {
1553 assert (idx == 1 || idx == 3);
1554 if (opnds[idx - 1].reg.regno % 2 != 0)
1555 {
1556 set_syntax_error (mismatch_detail, idx - 1,
1557 _("reg pair must start from even reg"));
1558 return 0;
1559 }
1560 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1561 {
1562 set_syntax_error (mismatch_detail, idx,
1563 _("reg pair must be contiguous"));
1564 return 0;
1565 }
1566 break;
1567 }
1568
1569 /* <Xt> may be optional in some IC and TLBI instructions. */
1570 if (type == AARCH64_OPND_Rt_SYS)
1571 {
1572 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1573 == AARCH64_OPND_CLASS_SYSTEM));
1574 if (opnds[1].present
1575 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1576 {
1577 set_other_error (mismatch_detail, idx, _("extraneous register"));
1578 return 0;
1579 }
1580 if (!opnds[1].present
1581 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1582 {
1583 set_other_error (mismatch_detail, idx, _("missing register"));
1584 return 0;
1585 }
1586 }
1587 switch (qualifier)
1588 {
1589 case AARCH64_OPND_QLF_WSP:
1590 case AARCH64_OPND_QLF_SP:
1591 if (!aarch64_stack_pointer_p (opnd))
1592 {
1593 set_other_error (mismatch_detail, idx,
1594 _("stack pointer register expected"));
1595 return 0;
1596 }
1597 break;
1598 default:
1599 break;
1600 }
1601 break;
1602
1603 case AARCH64_OPND_CLASS_SVE_REG:
1604 switch (type)
1605 {
1606 case AARCH64_OPND_SVE_Zm3_INDEX:
1607 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1608 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1609 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1610 case AARCH64_OPND_SVE_Zm4_INDEX:
1611 size = get_operand_fields_width (get_operand_from_code (type));
1612 shift = get_operand_specific_data (&aarch64_operands[type]);
1613 if (!check_reglane (opnd, mismatch_detail, idx,
1614 "z", 0, (1 << shift) - 1,
1615 0, (1u << (size - shift)) - 1))
1616 return 0;
1617 break;
1618
1619 case AARCH64_OPND_SVE_Zn_INDEX:
1620 size = aarch64_get_qualifier_esize (opnd->qualifier);
1621 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1622 0, 64 / size - 1))
1623 return 0;
1624 break;
1625
1626 case AARCH64_OPND_SME_PnT_Wm_imm:
1627 size = aarch64_get_qualifier_esize (opnd->qualifier);
1628 max_value = 16 / size - 1;
1629 if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value))
1630 return 0;
1631 break;
1632
1633 default:
1634 break;
1635 }
1636 break;
1637
1638 case AARCH64_OPND_CLASS_SVE_REGLIST:
1639 num = get_opcode_dependent_value (opcode);
1640 if (opnd->reglist.num_regs != num)
1641 {
1642 set_reg_list_error (mismatch_detail, idx, num);
1643 return 0;
1644 }
1645 break;
1646
1647 case AARCH64_OPND_CLASS_ZA_ACCESS:
1648 switch (type)
1649 {
1650 case AARCH64_OPND_SME_ZA_HV_idx_src:
1651 case AARCH64_OPND_SME_ZA_HV_idx_dest:
1652 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
1653 size = aarch64_get_qualifier_esize (opnd->qualifier);
1654 max_value = 16 / size - 1;
1655 if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value))
1656 return 0;
1657 break;
1658
1659 case AARCH64_OPND_SME_ZA_array:
1660 if (!check_za_access (opnd, mismatch_detail, idx, 12, 15))
1661 return 0;
1662 break;
1663
1664 default:
1665 abort ();
1666 }
1667 break;
1668
1669 case AARCH64_OPND_CLASS_PRED_REG:
1670 if (opnd->reg.regno >= 8
1671 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1672 {
1673 set_invalid_regno_error (mismatch_detail, idx, "p", 0, 7);
1674 return 0;
1675 }
1676 break;
1677
1678 case AARCH64_OPND_CLASS_COND:
1679 if (type == AARCH64_OPND_COND1
1680 && (opnds[idx].cond->value & 0xe) == 0xe)
1681 {
1682 /* Don't allow AL or NV. */
1683 set_syntax_error (mismatch_detail, idx, NULL);
1684 }
1685 break;
1686
1687 case AARCH64_OPND_CLASS_ADDRESS:
1688 /* Check writeback. */
1689 switch (opcode->iclass)
1690 {
1691 case ldst_pos:
1692 case ldst_unscaled:
1693 case ldstnapair_offs:
1694 case ldstpair_off:
1695 case ldst_unpriv:
1696 if (opnd->addr.writeback == 1)
1697 {
1698 set_syntax_error (mismatch_detail, idx,
1699 _("unexpected address writeback"));
1700 return 0;
1701 }
1702 break;
1703 case ldst_imm10:
1704 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1705 {
1706 set_syntax_error (mismatch_detail, idx,
1707 _("unexpected address writeback"));
1708 return 0;
1709 }
1710 break;
1711 case ldst_imm9:
1712 case ldstpair_indexed:
1713 case asisdlsep:
1714 case asisdlsop:
1715 if (opnd->addr.writeback == 0)
1716 {
1717 set_syntax_error (mismatch_detail, idx,
1718 _("address writeback expected"));
1719 return 0;
1720 }
1721 break;
1722 default:
1723 assert (opnd->addr.writeback == 0);
1724 break;
1725 }
1726 switch (type)
1727 {
1728 case AARCH64_OPND_ADDR_SIMM7:
1729 /* Scaled signed 7-bit immediate offset. */
1730 /* Get the size of the data element that is accessed, which may be
1731 different from that of the source register size,
1732 e.g. in strb/ldrb. */
1733 size = aarch64_get_qualifier_esize (opnd->qualifier);
1734 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1735 {
1736 set_offset_out_of_range_error (mismatch_detail, idx,
1737 -64 * size, 63 * size);
1738 return 0;
1739 }
1740 if (!value_aligned_p (opnd->addr.offset.imm, size))
1741 {
1742 set_unaligned_error (mismatch_detail, idx, size);
1743 return 0;
1744 }
1745 break;
1746 case AARCH64_OPND_ADDR_OFFSET:
1747 case AARCH64_OPND_ADDR_SIMM9:
1748 /* Unscaled signed 9-bit immediate offset. */
1749 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1750 {
1751 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1752 return 0;
1753 }
1754 break;
1755
1756 case AARCH64_OPND_ADDR_SIMM9_2:
1757 /* Unscaled signed 9-bit immediate offset, which has to be negative
1758 or unaligned. */
1759 size = aarch64_get_qualifier_esize (qualifier);
1760 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1761 && !value_aligned_p (opnd->addr.offset.imm, size))
1762 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1763 return 1;
1764 set_other_error (mismatch_detail, idx,
1765 _("negative or unaligned offset expected"));
1766 return 0;
1767
1768 case AARCH64_OPND_ADDR_SIMM10:
1769 /* Scaled signed 10-bit immediate offset. */
1770 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1771 {
1772 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1773 return 0;
1774 }
1775 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1776 {
1777 set_unaligned_error (mismatch_detail, idx, 8);
1778 return 0;
1779 }
1780 break;
1781
1782 case AARCH64_OPND_ADDR_SIMM11:
1783 /* Signed 11-bit immediate offset (multiple of 16). */
1784 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1785 {
1786 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1787 return 0;
1788 }
1789
1790 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1791 {
1792 set_unaligned_error (mismatch_detail, idx, 16);
1793 return 0;
1794 }
1795 break;
1796
1797 case AARCH64_OPND_ADDR_SIMM13:
1798 /* Signed 13 bits immediate offset (multiple of 16). */
1799 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1800 {
1801 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1802 return 0;
1803 }
1804
1805 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1806 {
1807 set_unaligned_error (mismatch_detail, idx, 16);
1808 return 0;
1809 }
1810 break;
1811
1812 case AARCH64_OPND_SIMD_ADDR_POST:
1813 /* AdvSIMD load/store multiple structures, post-index. */
1814 assert (idx == 1);
1815 if (opnd->addr.offset.is_reg)
1816 {
1817 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1818 return 1;
1819 else
1820 {
1821 set_other_error (mismatch_detail, idx,
1822 _("invalid register offset"));
1823 return 0;
1824 }
1825 }
1826 else
1827 {
1828 const aarch64_opnd_info *prev = &opnds[idx-1];
1829 unsigned num_bytes; /* total number of bytes transferred. */
1830 /* The opcode dependent area stores the number of elements in
1831 each structure to be loaded/stored. */
1832 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1833 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1834 	      /* Special handling of loading single structure to all lanes.  */
1835 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1836 * aarch64_get_qualifier_esize (prev->qualifier);
1837 else
1838 num_bytes = prev->reglist.num_regs
1839 * aarch64_get_qualifier_esize (prev->qualifier)
1840 * aarch64_get_qualifier_nelem (prev->qualifier);
1841 if ((int) num_bytes != opnd->addr.offset.imm)
1842 {
1843 set_other_error (mismatch_detail, idx,
1844 _("invalid post-increment amount"));
1845 return 0;
1846 }
1847 }
1848 break;
1849
1850 case AARCH64_OPND_ADDR_REGOFF:
1851 /* Get the size of the data element that is accessed, which may be
1852 different from that of the source register size,
1853 e.g. in strb/ldrb. */
1854 size = aarch64_get_qualifier_esize (opnd->qualifier);
1855 /* It is either no shift or shift by the binary logarithm of SIZE. */
1856 if (opnd->shifter.amount != 0
1857 && opnd->shifter.amount != (int)get_logsz (size))
1858 {
1859 set_other_error (mismatch_detail, idx,
1860 _("invalid shift amount"));
1861 return 0;
1862 }
1863 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1864 operators. */
1865 switch (opnd->shifter.kind)
1866 {
1867 case AARCH64_MOD_UXTW:
1868 case AARCH64_MOD_LSL:
1869 case AARCH64_MOD_SXTW:
1870 case AARCH64_MOD_SXTX: break;
1871 default:
1872 set_other_error (mismatch_detail, idx,
1873 _("invalid extend/shift operator"));
1874 return 0;
1875 }
1876 break;
1877
1878 case AARCH64_OPND_ADDR_UIMM12:
1879 imm = opnd->addr.offset.imm;
1880 /* Get the size of the data element that is accessed, which may be
1881 different from that of the source register size,
1882 e.g. in strb/ldrb. */
1883 size = aarch64_get_qualifier_esize (qualifier);
1884 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1885 {
1886 set_offset_out_of_range_error (mismatch_detail, idx,
1887 0, 4095 * size);
1888 return 0;
1889 }
1890 if (!value_aligned_p (opnd->addr.offset.imm, size))
1891 {
1892 set_unaligned_error (mismatch_detail, idx, size);
1893 return 0;
1894 }
1895 break;
1896
1897 case AARCH64_OPND_ADDR_PCREL14:
1898 case AARCH64_OPND_ADDR_PCREL19:
1899 case AARCH64_OPND_ADDR_PCREL21:
1900 case AARCH64_OPND_ADDR_PCREL26:
1901 imm = opnd->imm.value;
1902 if (operand_need_shift_by_two (get_operand_from_code (type)))
1903 {
1904 	  /* The offset value in a PC-relative branch instruction is always
1905 4-byte aligned and is encoded without the lowest 2 bits. */
1906 if (!value_aligned_p (imm, 4))
1907 {
1908 set_unaligned_error (mismatch_detail, idx, 4);
1909 return 0;
1910 }
1911 /* Right shift by 2 so that we can carry out the following check
1912 canonically. */
1913 imm >>= 2;
1914 }
1915 size = get_operand_fields_width (get_operand_from_code (type));
1916 if (!value_fit_signed_field_p (imm, size))
1917 {
1918 set_other_error (mismatch_detail, idx,
1919 _("immediate out of range"));
1920 return 0;
1921 }
1922 break;
1923
1924 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
1925 if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
1926 {
1927 set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
1928 return 0;
1929 }
1930 break;
1931
1932 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1933 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1934 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1935 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1936 min_value = -8;
1937 max_value = 7;
1938 sve_imm_offset_vl:
1939 assert (!opnd->addr.offset.is_reg);
1940 assert (opnd->addr.preind);
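      /* NUM is the multiple of the vector length (VL) that the immediate
	 is scaled by and must be aligned to; the operand-specific data
	 holds NUM - 1.  */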
1941 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1942 min_value *= num;
1943 max_value *= num;
1944 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1945 || (opnd->shifter.operator_present
1946 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1947 {
1948 set_other_error (mismatch_detail, idx,
1949 _("invalid addressing mode"));
1950 return 0;
1951 }
1952 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1953 {
1954 set_offset_out_of_range_error (mismatch_detail, idx,
1955 min_value, max_value);
1956 return 0;
1957 }
1958 if (!value_aligned_p (opnd->addr.offset.imm, num))
1959 {
1960 set_unaligned_error (mismatch_detail, idx, num);
1961 return 0;
1962 }
1963 break;
1964
1965 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1966 min_value = -32;
1967 max_value = 31;
1968 goto sve_imm_offset_vl;
1969
1970 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1971 min_value = -256;
1972 max_value = 255;
1973 goto sve_imm_offset_vl;
1974
1975 case AARCH64_OPND_SVE_ADDR_RI_U6:
1976 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1977 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1978 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1979 min_value = 0;
1980 max_value = 63;
1981 sve_imm_offset:
1982 assert (!opnd->addr.offset.is_reg);
1983 assert (opnd->addr.preind);
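      /* NUM is the multiple that the immediate is scaled by and must be
	 aligned to; the operand-specific data holds log2 (NUM).  */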
1984 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1985 min_value *= num;
1986 max_value *= num;
1987 if (opnd->shifter.operator_present
1988 || opnd->shifter.amount_present)
1989 {
1990 set_other_error (mismatch_detail, idx,
1991 _("invalid addressing mode"));
1992 return 0;
1993 }
1994 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1995 {
1996 set_offset_out_of_range_error (mismatch_detail, idx,
1997 min_value, max_value);
1998 return 0;
1999 }
2000 if (!value_aligned_p (opnd->addr.offset.imm, num))
2001 {
2002 set_unaligned_error (mismatch_detail, idx, num);
2003 return 0;
2004 }
2005 break;
2006
2007 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
2008 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
2009 min_value = -8;
2010 max_value = 7;
2011 goto sve_imm_offset;
2012
2013 case AARCH64_OPND_SVE_ADDR_ZX:
2014 /* Everything is already ensured by parse_operands or
2015 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
2016 argument type). */
2017 assert (opnd->addr.offset.is_reg);
2018 assert (opnd->addr.preind);
2019 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
2020 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2021 assert (opnd->shifter.operator_present == 0);
2022 break;
2023
2024 case AARCH64_OPND_SVE_ADDR_R:
2025 case AARCH64_OPND_SVE_ADDR_RR:
2026 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
2027 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
2028 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
2029 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
2030 case AARCH64_OPND_SVE_ADDR_RX:
2031 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
2032 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
2033 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
2034 case AARCH64_OPND_SVE_ADDR_RZ:
2035 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
2036 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
2037 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
2038 modifiers = 1 << AARCH64_MOD_LSL;
2039 sve_rr_operand:
2040 assert (opnd->addr.offset.is_reg);
2041 assert (opnd->addr.preind);
2042 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
2043 && opnd->addr.offset.regno == 31)
2044 {
2045 set_other_error (mismatch_detail, idx,
2046 _("index register xzr is not allowed"));
2047 return 0;
2048 }
2049 if (((1 << opnd->shifter.kind) & modifiers) == 0
2050 || (opnd->shifter.amount
2051 != get_operand_specific_data (&aarch64_operands[type])))
2052 {
2053 set_other_error (mismatch_detail, idx,
2054 _("invalid addressing mode"));
2055 return 0;
2056 }
2057 break;
2058
2059 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
2060 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
2061 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
2062 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
2063 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
2064 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
2065 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
2066 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
2067 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
2068 goto sve_rr_operand;
2069
2070 case AARCH64_OPND_SVE_ADDR_ZI_U5:
2071 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
2072 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
2073 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
2074 min_value = 0;
2075 max_value = 31;
2076 goto sve_imm_offset;
2077
2078 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
2079 modifiers = 1 << AARCH64_MOD_LSL;
2080 sve_zz_operand:
2081 assert (opnd->addr.offset.is_reg);
2082 assert (opnd->addr.preind);
2083 if (((1 << opnd->shifter.kind) & modifiers) == 0
2084 || opnd->shifter.amount < 0
2085 || opnd->shifter.amount > 3)
2086 {
2087 set_other_error (mismatch_detail, idx,
2088 _("invalid addressing mode"));
2089 return 0;
2090 }
2091 break;
2092
2093 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
2094 modifiers = (1 << AARCH64_MOD_SXTW);
2095 goto sve_zz_operand;
2096
2097 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
2098 modifiers = 1 << AARCH64_MOD_UXTW;
2099 goto sve_zz_operand;
2100
2101 default:
2102 break;
2103 }
2104 break;
2105
2106 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2107 if (type == AARCH64_OPND_LEt)
2108 {
2109 /* Get the upper bound for the element index. */
2110 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2111 if (!value_in_range_p (opnd->reglist.index, 0, num))
2112 {
2113 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2114 return 0;
2115 }
2116 }
2117 /* The opcode dependent area stores the number of elements in
2118 each structure to be loaded/stored. */
2119 num = get_opcode_dependent_value (opcode);
2120 switch (type)
2121 {
2122 case AARCH64_OPND_LVt:
2123 assert (num >= 1 && num <= 4);
2124 /* Unless LD1/ST1, the number of registers should be equal to that
2125 of the structure elements. */
2126 if (num != 1 && opnd->reglist.num_regs != num)
2127 {
2128 set_reg_list_error (mismatch_detail, idx, num);
2129 return 0;
2130 }
2131 break;
2132 case AARCH64_OPND_LVt_AL:
2133 case AARCH64_OPND_LEt:
2134 assert (num >= 1 && num <= 4);
2135 /* The number of registers should be equal to that of the structure
2136 elements. */
2137 if (opnd->reglist.num_regs != num)
2138 {
2139 set_reg_list_error (mismatch_detail, idx, num);
2140 return 0;
2141 }
2142 break;
2143 default:
2144 break;
2145 }
2146 break;
2147
2148 case AARCH64_OPND_CLASS_IMMEDIATE:
2149 /* Constraint check on immediate operand. */
2150 imm = opnd->imm.value;
2151 /* E.g. imm_0_31 constrains value to be 0..31. */
2152 if (qualifier_value_in_range_constraint_p (qualifier)
2153 && !value_in_range_p (imm, get_lower_bound (qualifier),
2154 get_upper_bound (qualifier)))
2155 {
2156 set_imm_out_of_range_error (mismatch_detail, idx,
2157 get_lower_bound (qualifier),
2158 get_upper_bound (qualifier));
2159 return 0;
2160 }
2161
2162 switch (type)
2163 {
2164 case AARCH64_OPND_AIMM:
2165 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2166 {
2167 set_other_error (mismatch_detail, idx,
2168 _("invalid shift operator"));
2169 return 0;
2170 }
2171 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2172 {
2173 set_other_error (mismatch_detail, idx,
2174 _("shift amount must be 0 or 12"));
2175 return 0;
2176 }
2177 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2178 {
2179 set_other_error (mismatch_detail, idx,
2180 _("immediate out of range"));
2181 return 0;
2182 }
2183 break;
2184
2185 case AARCH64_OPND_HALF:
2186 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2187 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2188 {
2189 set_other_error (mismatch_detail, idx,
2190 _("invalid shift operator"));
2191 return 0;
2192 }
2193 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2194 if (!value_aligned_p (opnd->shifter.amount, 16))
2195 {
2196 set_other_error (mismatch_detail, idx,
2197 _("shift amount must be a multiple of 16"));
2198 return 0;
2199 }
2200 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2201 {
2202 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2203 0, size * 8 - 16);
2204 return 0;
2205 }
2206 if (opnd->imm.value < 0)
2207 {
2208 set_other_error (mismatch_detail, idx,
2209 _("negative immediate value not allowed"));
2210 return 0;
2211 }
2212 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2213 {
2214 set_other_error (mismatch_detail, idx,
2215 _("immediate out of range"));
2216 return 0;
2217 }
2218 break;
2219
2220 case AARCH64_OPND_IMM_MOV:
2221 {
2222 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2223 imm = opnd->imm.value;
2224 assert (idx == 1);
2225 switch (opcode->op)
2226 {
2227 case OP_MOV_IMM_WIDEN:
2228 imm = ~imm;
2229 /* Fall through. */
2230 case OP_MOV_IMM_WIDE:
2231 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2232 {
2233 set_other_error (mismatch_detail, idx,
2234 _("immediate out of range"));
2235 return 0;
2236 }
2237 break;
2238 case OP_MOV_IMM_LOG:
2239 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2240 {
2241 set_other_error (mismatch_detail, idx,
2242 _("immediate out of range"));
2243 return 0;
2244 }
2245 break;
2246 default:
2247 assert (0);
2248 return 0;
2249 }
2250 }
2251 break;
2252
2253 case AARCH64_OPND_NZCV:
2254 case AARCH64_OPND_CCMP_IMM:
2255 case AARCH64_OPND_EXCEPTION:
2256 case AARCH64_OPND_UNDEFINED:
2257 case AARCH64_OPND_TME_UIMM16:
2258 case AARCH64_OPND_UIMM4:
2259 case AARCH64_OPND_UIMM4_ADDG:
2260 case AARCH64_OPND_UIMM7:
2261 case AARCH64_OPND_UIMM3_OP1:
2262 case AARCH64_OPND_UIMM3_OP2:
2263 case AARCH64_OPND_SVE_UIMM3:
2264 case AARCH64_OPND_SVE_UIMM7:
2265 case AARCH64_OPND_SVE_UIMM8:
2266 case AARCH64_OPND_SVE_UIMM8_53:
2267 case AARCH64_OPND_CSSC_UIMM8:
2268 size = get_operand_fields_width (get_operand_from_code (type));
2269 assert (size < 32);
2270 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2271 {
2272 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2273 (1u << size) - 1);
2274 return 0;
2275 }
2276 break;
2277
2278 case AARCH64_OPND_UIMM10:
2279 /* Scaled unsigned 10 bits immediate offset. */
2280 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2281 {
2282 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2283 return 0;
2284 }
2285
2286 if (!value_aligned_p (opnd->imm.value, 16))
2287 {
2288 set_unaligned_error (mismatch_detail, idx, 16);
2289 return 0;
2290 }
2291 break;
2292
2293 case AARCH64_OPND_SIMM5:
2294 case AARCH64_OPND_SVE_SIMM5:
2295 case AARCH64_OPND_SVE_SIMM5B:
2296 case AARCH64_OPND_SVE_SIMM6:
2297 case AARCH64_OPND_SVE_SIMM8:
2298 case AARCH64_OPND_CSSC_SIMM8:
2299 size = get_operand_fields_width (get_operand_from_code (type));
2300 assert (size < 32);
2301 if (!value_fit_signed_field_p (opnd->imm.value, size))
2302 {
2303 set_imm_out_of_range_error (mismatch_detail, idx,
2304 -(1 << (size - 1)),
2305 (1 << (size - 1)) - 1);
2306 return 0;
2307 }
2308 break;
2309
2310 case AARCH64_OPND_WIDTH:
2311 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2312 && opnds[0].type == AARCH64_OPND_Rd);
2313 size = get_upper_bound (qualifier);
2314 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2315 /* lsb+width <= reg.size */
2316 {
2317 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2318 size - opnds[idx-1].imm.value);
2319 return 0;
2320 }
2321 break;
2322
2323 case AARCH64_OPND_LIMM:
2324 case AARCH64_OPND_SVE_LIMM:
2325 {
2326 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2327 uint64_t uimm = opnd->imm.value;
2328 if (opcode->op == OP_BIC)
2329 uimm = ~uimm;
2330 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2331 {
2332 set_other_error (mismatch_detail, idx,
2333 _("immediate out of range"));
2334 return 0;
2335 }
2336 }
2337 break;
2338
2339 case AARCH64_OPND_IMM0:
2340 case AARCH64_OPND_FPIMM0:
2341 if (opnd->imm.value != 0)
2342 {
2343 set_other_error (mismatch_detail, idx,
2344 _("immediate zero expected"));
2345 return 0;
2346 }
2347 break;
2348
2349 case AARCH64_OPND_IMM_ROT1:
2350 case AARCH64_OPND_IMM_ROT2:
2351 case AARCH64_OPND_SVE_IMM_ROT2:
2352 if (opnd->imm.value != 0
2353 && opnd->imm.value != 90
2354 && opnd->imm.value != 180
2355 && opnd->imm.value != 270)
2356 {
2357 set_other_error (mismatch_detail, idx,
2358 _("rotate expected to be 0, 90, 180 or 270"));
2359 return 0;
2360 }
2361 break;
2362
2363 case AARCH64_OPND_IMM_ROT3:
2364 case AARCH64_OPND_SVE_IMM_ROT1:
2365 case AARCH64_OPND_SVE_IMM_ROT3:
2366 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2367 {
2368 set_other_error (mismatch_detail, idx,
2369 _("rotate expected to be 90 or 270"));
2370 return 0;
2371 }
2372 break;
2373
2374 case AARCH64_OPND_SHLL_IMM:
2375 assert (idx == 2);
2376 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2377 if (opnd->imm.value != size)
2378 {
2379 set_other_error (mismatch_detail, idx,
2380 _("invalid shift amount"));
2381 return 0;
2382 }
2383 break;
2384
2385 case AARCH64_OPND_IMM_VLSL:
2386 size = aarch64_get_qualifier_esize (qualifier);
2387 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2388 {
2389 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2390 size * 8 - 1);
2391 return 0;
2392 }
2393 break;
2394
2395 case AARCH64_OPND_IMM_VLSR:
2396 size = aarch64_get_qualifier_esize (qualifier);
2397 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2398 {
2399 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2400 return 0;
2401 }
2402 break;
2403
2404 case AARCH64_OPND_SIMD_IMM:
2405 case AARCH64_OPND_SIMD_IMM_SFT:
2406 /* Qualifier check. */
2407 switch (qualifier)
2408 {
2409 case AARCH64_OPND_QLF_LSL:
2410 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2411 {
2412 set_other_error (mismatch_detail, idx,
2413 _("invalid shift operator"));
2414 return 0;
2415 }
2416 break;
2417 case AARCH64_OPND_QLF_MSL:
2418 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2419 {
2420 set_other_error (mismatch_detail, idx,
2421 _("invalid shift operator"));
2422 return 0;
2423 }
2424 break;
2425 case AARCH64_OPND_QLF_NIL:
2426 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2427 {
2428 set_other_error (mismatch_detail, idx,
2429 _("shift is not permitted"));
2430 return 0;
2431 }
2432 break;
2433 default:
2434 assert (0);
2435 return 0;
2436 }
2437 /* Is the immediate valid? */
2438 assert (idx == 1);
2439 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2440 {
2441 /* uimm8 or simm8 */
2442 if (!value_in_range_p (opnd->imm.value, -128, 255))
2443 {
2444 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2445 return 0;
2446 }
2447 }
2448 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2449 {
2450 /* uimm64 is not
2451 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2452 ffffffffgggggggghhhhhhhh'. */
2453 set_other_error (mismatch_detail, idx,
2454 _("invalid value for immediate"));
2455 return 0;
2456 }
2457 /* Is the shift amount valid? */
2458 switch (opnd->shifter.kind)
2459 {
2460 case AARCH64_MOD_LSL:
2461 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2462 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2463 {
2464 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2465 (size - 1) * 8);
2466 return 0;
2467 }
2468 if (!value_aligned_p (opnd->shifter.amount, 8))
2469 {
2470 set_unaligned_error (mismatch_detail, idx, 8);
2471 return 0;
2472 }
2473 break;
2474 case AARCH64_MOD_MSL:
2475 	      /* Only 8 and 16 are valid shift amounts.  */
2476 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2477 {
2478 set_other_error (mismatch_detail, idx,
2479 			       _("shift amount must be 8 or 16"));
2480 return 0;
2481 }
2482 break;
2483 default:
2484 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2485 {
2486 set_other_error (mismatch_detail, idx,
2487 _("invalid shift operator"));
2488 return 0;
2489 }
2490 break;
2491 }
2492 break;
2493
2494 case AARCH64_OPND_FPIMM:
2495 case AARCH64_OPND_SIMD_FPIMM:
2496 case AARCH64_OPND_SVE_FPIMM8:
2497 if (opnd->imm.is_fp == 0)
2498 {
2499 set_other_error (mismatch_detail, idx,
2500 _("floating-point immediate expected"));
2501 return 0;
2502 }
2503 /* The value is expected to be an 8-bit floating-point constant with
2504 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2505 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2506 instruction). */
2507 if (!value_in_range_p (opnd->imm.value, 0, 255))
2508 {
2509 set_other_error (mismatch_detail, idx,
2510 _("immediate out of range"));
2511 return 0;
2512 }
2513 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2514 {
2515 set_other_error (mismatch_detail, idx,
2516 _("invalid shift operator"));
2517 return 0;
2518 }
2519 break;
2520
2521 case AARCH64_OPND_SVE_AIMM:
2522 min_value = 0;
2523 sve_aimm:
2524 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2525 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
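      /* MASK covers the low SIZE * 8 bits; the shift is split into two
	 steps so that SIZE == 8 does not require an out-of-range 64-bit
	 shift.  */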
2526 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2527 uvalue = opnd->imm.value;
2528 shift = opnd->shifter.amount;
2529 if (size == 1)
2530 {
2531 if (shift != 0)
2532 {
2533 set_other_error (mismatch_detail, idx,
2534 _("no shift amount allowed for"
2535 " 8-bit constants"));
2536 return 0;
2537 }
2538 }
2539 else
2540 {
2541 if (shift != 0 && shift != 8)
2542 {
2543 set_other_error (mismatch_detail, idx,
2544 _("shift amount must be 0 or 8"));
2545 return 0;
2546 }
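	  /* Canonicalize an unshifted value whose low byte is zero to the
	     equivalent LSL #8 form before range-checking.  */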
2547 if (shift == 0 && (uvalue & 0xff) == 0)
2548 {
2549 shift = 8;
2550 uvalue = (int64_t) uvalue / 256;
2551 }
2552 }
2553 mask >>= shift;
2554 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2555 {
2556 set_other_error (mismatch_detail, idx,
2557 _("immediate too big for element size"));
2558 return 0;
2559 }
2560 uvalue = (uvalue - min_value) & mask;
2561 if (uvalue > 0xff)
2562 {
2563 set_other_error (mismatch_detail, idx,
2564 _("invalid arithmetic immediate"));
2565 return 0;
2566 }
2567 break;
2568
2569 case AARCH64_OPND_SVE_ASIMM:
2570 min_value = -128;
2571 goto sve_aimm;
2572
2573 case AARCH64_OPND_SVE_I1_HALF_ONE:
2574 assert (opnd->imm.is_fp);
2575 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2576 {
2577 set_other_error (mismatch_detail, idx,
2578 _("floating-point value must be 0.5 or 1.0"));
2579 return 0;
2580 }
2581 break;
2582
2583 case AARCH64_OPND_SVE_I1_HALF_TWO:
2584 assert (opnd->imm.is_fp);
2585 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2586 {
2587 set_other_error (mismatch_detail, idx,
2588 _("floating-point value must be 0.5 or 2.0"));
2589 return 0;
2590 }
2591 break;
2592
2593 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2594 assert (opnd->imm.is_fp);
2595 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2596 {
2597 set_other_error (mismatch_detail, idx,
2598 _("floating-point value must be 0.0 or 1.0"));
2599 return 0;
2600 }
2601 break;
2602
2603 case AARCH64_OPND_SVE_INV_LIMM:
2604 {
2605 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2606 uint64_t uimm = ~opnd->imm.value;
2607 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2608 {
2609 set_other_error (mismatch_detail, idx,
2610 _("immediate out of range"));
2611 return 0;
2612 }
2613 }
2614 break;
2615
2616 case AARCH64_OPND_SVE_LIMM_MOV:
2617 {
2618 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2619 uint64_t uimm = opnd->imm.value;
2620 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2621 {
2622 set_other_error (mismatch_detail, idx,
2623 _("immediate out of range"));
2624 return 0;
2625 }
2626 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2627 {
2628 set_other_error (mismatch_detail, idx,
2629 _("invalid replicated MOV immediate"));
2630 return 0;
2631 }
2632 }
2633 break;
2634
2635 case AARCH64_OPND_SVE_PATTERN_SCALED:
2636 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2637 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2638 {
2639 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2640 return 0;
2641 }
2642 break;
2643
2644 case AARCH64_OPND_SVE_SHLIMM_PRED:
2645 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2646 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2647 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2648 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2649 {
2650 set_imm_out_of_range_error (mismatch_detail, idx,
2651 0, 8 * size - 1);
2652 return 0;
2653 }
2654 break;
2655
2656 case AARCH64_OPND_SVE_SHRIMM_PRED:
2657 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2658 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2659 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2660 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2661 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2662 {
2663 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2664 return 0;
2665 }
2666 break;
2667
2668 default:
2669 break;
2670 }
2671 break;
2672
2673 case AARCH64_OPND_CLASS_SYSTEM:
2674 switch (type)
2675 {
2676 case AARCH64_OPND_PSTATEFIELD:
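      /* Find the PSTATE field entry; its flags encode the maximum
	 immediate value accepted by the following UIMM4 operand.  */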
2677 for (i = 0; aarch64_pstatefields[i].name; ++i)
2678 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2679 break;
2680 assert (aarch64_pstatefields[i].name);
2681 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2682 max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
2683 if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
2684 {
2685 set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
2686 return 0;
2687 }
2688 break;
2689 case AARCH64_OPND_PRFOP:
2690 if (opcode->iclass == ldst_regoff && opnd->prfop->value >= 24)
2691 {
2692 set_other_error (mismatch_detail, idx,
2693 _("the register-index form of PRFM does"
2694 " not accept opcodes in the range 24-31"));
2695 return 0;
2696 }
2697 break;
2698 default:
2699 break;
2700 }
2701 break;
2702
2703 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2704 /* Get the upper bound for the element index. */
2705 if (opcode->op == OP_FCMLA_ELEM)
2706 /* FCMLA index range depends on the vector size of other operands
2707 	 and is halved because complex numbers take two elements.  */
2708 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2709 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2710 else
2711 num = 16;
2712 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2713 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2714
2715 /* Index out-of-range. */
2716 if (!value_in_range_p (opnd->reglane.index, 0, num))
2717 {
2718 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2719 return 0;
2720 }
2721 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2722 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2723 number is encoded in "size:M:Rm":
2724 size <Vm>
2725 00 RESERVED
2726 01 0:Rm
2727 10 M:Rm
2728 11 RESERVED */
2729 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2730 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2731 {
2732 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2733 return 0;
2734 }
2735 break;
2736
2737 case AARCH64_OPND_CLASS_MODIFIED_REG:
2738 assert (idx == 1 || idx == 2);
2739 switch (type)
2740 {
2741 case AARCH64_OPND_Rm_EXT:
2742 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2743 && opnd->shifter.kind != AARCH64_MOD_LSL)
2744 {
2745 set_other_error (mismatch_detail, idx,
2746 _("extend operator expected"));
2747 return 0;
2748 }
2749 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2750 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2751 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2752 case. */
2753 if (!aarch64_stack_pointer_p (opnds + 0)
2754 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2755 {
2756 if (!opnd->shifter.operator_present)
2757 {
2758 set_other_error (mismatch_detail, idx,
2759 _("missing extend operator"));
2760 return 0;
2761 }
2762 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2763 {
2764 set_other_error (mismatch_detail, idx,
2765 _("'LSL' operator not allowed"));
2766 return 0;
2767 }
2768 }
2769 assert (opnd->shifter.operator_present /* Default to LSL. */
2770 || opnd->shifter.kind == AARCH64_MOD_LSL);
2771 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2772 {
2773 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2774 return 0;
2775 }
2776 /* In the 64-bit form, the final register operand is written as Wm
2777 for all but the (possibly omitted) UXTX/LSL and SXTX
2778 operators.
2779 N.B. GAS allows X register to be used with any operator as a
2780 programming convenience. */
2781 if (qualifier == AARCH64_OPND_QLF_X
2782 && opnd->shifter.kind != AARCH64_MOD_LSL
2783 && opnd->shifter.kind != AARCH64_MOD_UXTX
2784 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2785 {
2786 set_other_error (mismatch_detail, idx, _("W register expected"));
2787 return 0;
2788 }
2789 break;
2790
2791 case AARCH64_OPND_Rm_SFT:
2792 /* ROR is not available to the shifted register operand in
2793 arithmetic instructions. */
2794 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2795 {
2796 set_other_error (mismatch_detail, idx,
2797 _("shift operator expected"));
2798 return 0;
2799 }
2800 if (opnd->shifter.kind == AARCH64_MOD_ROR
2801 && opcode->iclass != log_shift)
2802 {
2803 set_other_error (mismatch_detail, idx,
2804 _("'ROR' operator not allowed"));
2805 return 0;
2806 }
2807 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2808 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2809 {
2810 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2811 return 0;
2812 }
2813 break;
2814
2815 default:
2816 break;
2817 }
2818 break;
2819
2820 default:
2821 break;
2822 }
2823
2824 return 1;
2825 }
2826
2827 /* Main entrypoint for the operand constraint checking.
2828
2829 Return 1 if operands of *INST meet the constraint applied by the operand
2830 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2831 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2832 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2833 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2834 error kind when it is notified that an instruction does not pass the check).
2835
2836 Un-determined operand qualifiers may get established during the process. */
2837
2838 int
2839 aarch64_match_operands_constraint (aarch64_inst *inst,
2840 aarch64_operand_error *mismatch_detail)
2841 {
2842 int i;
2843
2844 DEBUG_TRACE ("enter");
2845
2846 i = inst->opcode->tied_operand;
2847
2848 if (i > 0)
2849 {
2850 /* Check for tied_operands with specific opcode iclass. */
2851 switch (inst->opcode->iclass)
2852 {
2853 	  /* For SME LDR and STR instructions #imm must have the same numerical
2854 	     value for both operands.  */
2856 case sme_ldr:
2857 case sme_str:
2858 assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array);
2859 assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
2860 if (inst->operands[0].indexed_za.index.imm
2861 != inst->operands[1].addr.offset.imm)
2862 {
2863 if (mismatch_detail)
2864 {
2865 mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
2866 mismatch_detail->index = i;
2867 }
2868 return 0;
2869 }
2870 break;
2871
2872 default:
2873 /* Check for cases where a source register needs to be the same as the
2874 destination register. Do this before matching qualifiers since if
2875 an instruction has both invalid tying and invalid qualifiers,
2876 the error about qualifiers would suggest several alternative
2877 instructions that also have invalid tying. */
2878 if (inst->operands[0].reg.regno
2879 != inst->operands[i].reg.regno)
2880 {
2881 if (mismatch_detail)
2882 {
2883 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2884 mismatch_detail->index = i;
2885 mismatch_detail->error = NULL;
2886 }
2887 return 0;
2888 }
2889 break;
2890 }
2891 }
2892
2893 /* Match operands' qualifier.
2894      *INST has already had qualifiers established for some, if not all, of
2895 its operands; we need to find out whether these established
2896 qualifiers match one of the qualifier sequence in
2897 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2898 with the corresponding qualifier in such a sequence.
2899 Only basic operand constraint checking is done here; the more thorough
2900      constraint checking will be carried out by operand_general_constraint_met_p,
2901      which has to be called after this in order to get all of the operands'
2902 qualifiers established. */
2903 int invalid_count;
2904 if (match_operands_qualifier (inst, true /* update_p */,
2905 &invalid_count) == 0)
2906 {
2907 DEBUG_TRACE ("FAIL on operand qualifier matching");
2908 if (mismatch_detail)
2909 {
2910 /* Return an error type to indicate that it is the qualifier
2911 matching failure; we don't care about which operand as there
2912 	     is enough information in the opcode table to reproduce it.  */
2913 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2914 mismatch_detail->index = -1;
2915 mismatch_detail->error = NULL;
2916 mismatch_detail->data[0].i = invalid_count;
2917 }
2918 return 0;
2919 }
2920
2921 /* Match operands' constraint. */
2922 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2923 {
2924 enum aarch64_opnd type = inst->opcode->operands[i];
2925 if (type == AARCH64_OPND_NIL)
2926 break;
2927 if (inst->operands[i].skip)
2928 {
2929 DEBUG_TRACE ("skip the incomplete operand %d", i);
2930 continue;
2931 }
2932 if (operand_general_constraint_met_p (inst->operands, i, type,
2933 inst->opcode, mismatch_detail) == 0)
2934 {
2935 DEBUG_TRACE ("FAIL on operand %d", i);
2936 return 0;
2937 }
2938 }
2939
2940 DEBUG_TRACE ("PASS");
2941
2942 return 1;
2943 }
2944
2945 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2946 Also updates the TYPE of each INST->OPERANDS with the corresponding
2947 value of OPCODE->OPERANDS.
2948
2949 Note that some operand qualifiers may need to be manually cleared by
2950    the caller before it further calls aarch64_opcode_encode; doing
2951    this helps the qualifier matching facilities work
2952    properly.  */
2953
2954 const aarch64_opcode*
2955 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2956 {
2957 int i;
2958 const aarch64_opcode *old = inst->opcode;
2959
2960 inst->opcode = opcode;
2961
2962 /* Update the operand types. */
2963 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2964 {
2965 inst->operands[i].type = opcode->operands[i];
2966 if (opcode->operands[i] == AARCH64_OPND_NIL)
2967 break;
2968 }
2969
2970 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2971
2972 return old;
2973 }
2974
2975 int
2976 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2977 {
2978 int i;
2979 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2980 if (operands[i] == operand)
2981 return i;
2982 else if (operands[i] == AARCH64_OPND_NIL)
2983 break;
2984 return -1;
2985 }
2986 \f
2987 /* R0...R30, followed by FOR31. */
2988 #define BANK(R, FOR31) \
2989 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
2990 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2991 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2992 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
2993 /* [0][0] 32-bit integer regs with sp Wn
2994 [0][1] 64-bit integer regs with sp Xn sf=1
2995 [1][0] 32-bit integer regs with #0 Wn
2996 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2997 static const char *int_reg[2][2][32] = {
2998 #define R32(X) "w" #X
2999 #define R64(X) "x" #X
3000 { BANK (R32, "wsp"), BANK (R64, "sp") },
3001 { BANK (R32, "wzr"), BANK (R64, "xzr") }
3002 #undef R64
3003 #undef R32
3004 };
3005
3006 /* Names of the SVE vector registers, first with .S suffixes,
3007 then with .D suffixes. */
3008
3009 static const char *sve_reg[2][32] = {
3010 #define ZS(X) "z" #X ".s"
3011 #define ZD(X) "z" #X ".d"
3012 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
3013 #undef ZD
3014 #undef ZS
3015 };
3016 #undef BANK
3017
3018 /* Return the integer register name.
3019    If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg.  */
3020
3021 static inline const char *
3022 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
3023 {
3024 const int has_zr = sp_reg_p ? 0 : 1;
3025 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
3026 return int_reg[has_zr][is_64][regno];
3027 }
3028
3029 /* Like get_int_reg_name, but IS_64 is always 1. */
3030
3031 static inline const char *
3032 get_64bit_int_reg_name (int regno, int sp_reg_p)
3033 {
3034 const int has_zr = sp_reg_p ? 0 : 1;
3035 return int_reg[has_zr][1][regno];
3036 }
3037
3038 /* Get the name of the integer offset register in OPND, using the shift type
3039 to decide whether it's a word or doubleword. */
3040
3041 static inline const char *
3042 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
3043 {
3044 switch (opnd->shifter.kind)
3045 {
3046 case AARCH64_MOD_UXTW:
3047 case AARCH64_MOD_SXTW:
3048 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
3049
3050 case AARCH64_MOD_LSL:
3051 case AARCH64_MOD_SXTX:
3052 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
3053
3054 default:
3055 abort ();
3056 }
3057 }
3058
3059 /* Get the name of the SVE vector offset register in OPND, using the operand
3060 qualifier to decide whether the suffix should be .S or .D. */
3061
3062 static inline const char *
3063 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
3064 {
3065 assert (qualifier == AARCH64_OPND_QLF_S_S
3066 || qualifier == AARCH64_OPND_QLF_S_D);
3067 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
3068 }
3069
3070 /* Types for expanding an encoded 8-bit value to a floating-point value. */
3071
3072 typedef union
3073 {
3074 uint64_t i;
3075 double d;
3076 } double_conv_t;
3077
3078 typedef union
3079 {
3080 uint32_t i;
3081 float f;
3082 } single_conv_t;
3083
3084 typedef union
3085 {
3086 uint32_t i;
3087 float f;
3088 } half_conv_t;
3089
3090 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
3091 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
3092 (depending on the type of the instruction). IMM8 will be expanded to a
3093 single-precision floating-point value (SIZE == 4) or a double-precision
3094 floating-point value (SIZE == 8). A half-precision floating-point value
3095 (SIZE == 2) is expanded to a single-precision floating-point value. The
3096 expanded value is returned. */
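/* For example, an IMM8 of 0x70 expands to 0x3f800000 (1.0f) when SIZE == 4.  */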
3097
3098 static uint64_t
3099 expand_fp_imm (int size, uint32_t imm8)
3100 {
3101 uint64_t imm = 0;
3102 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
3103
3104 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
3105 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
3106 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
3107 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
3108 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
3109 if (size == 8)
3110 {
3111 imm = (imm8_7 << (63-32)) /* imm8<7> */
3112 	  | ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)	*/
3113 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
3114 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
3115 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
3116 imm <<= 32;
3117 }
3118 else if (size == 4 || size == 2)
3119 {
3120 imm = (imm8_7 << 31) /* imm8<7> */
3121 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
3122 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
3123 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
3124 }
3125 else
3126 {
3127 /* An unsupported size. */
3128 assert (0);
3129 }
3130
3131 return imm;
3132 }
3133
3134 /* Return a string based on FMT with the register style applied. */
3135
3136 static const char *
3137 style_reg (struct aarch64_styler *styler, const char *fmt, ...)
3138 {
3139 const char *txt;
3140 va_list ap;
3141
3142 va_start (ap, fmt);
3143 txt = styler->apply_style (styler, dis_style_register, fmt, ap);
3144 va_end (ap);
3145
3146 return txt;
3147 }
3148
3149 /* Return a string based on FMT with the immediate style applied. */
3150
3151 static const char *
3152 style_imm (struct aarch64_styler *styler, const char *fmt, ...)
3153 {
3154 const char *txt;
3155 va_list ap;
3156
3157 va_start (ap, fmt);
3158 txt = styler->apply_style (styler, dis_style_immediate, fmt, ap);
3159 va_end (ap);
3160
3161 return txt;
3162 }
3163
3164 /* Return a string based on FMT with the sub-mnemonic style applied. */
3165
3166 static const char *
3167 style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...)
3168 {
3169 const char *txt;
3170 va_list ap;
3171
3172 va_start (ap, fmt);
3173 txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap);
3174 va_end (ap);
3175
3176 return txt;
3177 }
3178
3179 /* Return a string based on FMT with the address style applied. */
3180
3181 static const char *
3182 style_addr (struct aarch64_styler *styler, const char *fmt, ...)
3183 {
3184 const char *txt;
3185 va_list ap;
3186
3187 va_start (ap, fmt);
3188 txt = styler->apply_style (styler, dis_style_address, fmt, ap);
3189 va_end (ap);
3190
3191 return txt;
3192 }
3193
3194 /* Produce the string representation of the register list operand *OPND
3195 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
3196 the register name that comes before the register number, such as "v". */
3197 static void
3198 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
3199 const char *prefix, struct aarch64_styler *styler)
3200 {
3201 const int num_regs = opnd->reglist.num_regs;
3202 const int first_reg = opnd->reglist.first_regno;
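  /* Register numbers in the list wrap around modulo 32, hence the
     masking with 0x1f below.  */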
3203 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
3204 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
3205 char tb[16]; /* Temporary buffer. */
3206
3207 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
3208 assert (num_regs >= 1 && num_regs <= 4);
3209
3210 /* Prepare the index if any. */
3211 if (opnd->reglist.has_index)
3212 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3213 snprintf (tb, sizeof (tb), "[%s]",
3214 style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100)));
3215 else
3216 tb[0] = '\0';
3217
3218 /* The hyphenated form is preferred for disassembly if there are
3219 more than two registers in the list, and the register numbers
3220 are monotonically increasing in increments of one. */
3221 if (num_regs > 2 && last_reg > first_reg)
3222 snprintf (buf, size, "{%s-%s}%s",
3223 style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name),
3224 style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb);
3225 else
3226 {
3227 const int reg0 = first_reg;
3228 const int reg1 = (first_reg + 1) & 0x1f;
3229 const int reg2 = (first_reg + 2) & 0x1f;
3230 const int reg3 = (first_reg + 3) & 0x1f;
3231
3232 switch (num_regs)
3233 {
3234 case 1:
3235 snprintf (buf, size, "{%s}%s",
3236 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3237 tb);
3238 break;
3239 case 2:
3240 snprintf (buf, size, "{%s, %s}%s",
3241 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3242 style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
3243 tb);
3244 break;
3245 case 3:
3246 snprintf (buf, size, "{%s, %s, %s}%s",
3247 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3248 style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
3249 style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
3250 tb);
3251 break;
3252 case 4:
3253 snprintf (buf, size, "{%s, %s, %s, %s}%s",
3254 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3255 style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
3256 style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
3257 style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name),
3258 tb);
3259 break;
3260 }
3261 }
3262 }
3263
3264 /* Print the register+immediate address in OPND to BUF, which has SIZE
3265 characters. BASE is the name of the base register. */
3266
3267 static void
3268 print_immediate_offset_address (char *buf, size_t size,
3269 const aarch64_opnd_info *opnd,
3270 const char *base,
3271 struct aarch64_styler *styler)
3272 {
3273 if (opnd->addr.writeback)
3274 {
3275 if (opnd->addr.preind)
3276 {
3277 if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3278 snprintf (buf, size, "[%s]!", style_reg (styler, base));
3279 else
3280 snprintf (buf, size, "[%s, %s]!",
3281 style_reg (styler, base),
3282 style_imm (styler, "#%d", opnd->addr.offset.imm));
3283 }
3284 else
3285 snprintf (buf, size, "[%s], %s",
3286 style_reg (styler, base),
3287 style_imm (styler, "#%d", opnd->addr.offset.imm));
3288 }
3289 else
3290 {
3291 if (opnd->shifter.operator_present)
3292 {
3293 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3294 snprintf (buf, size, "[%s, %s, %s]",
3295 style_reg (styler, base),
3296 style_imm (styler, "#%d", opnd->addr.offset.imm),
3297 style_sub_mnem (styler, "mul vl"));
3298 }
3299 else if (opnd->addr.offset.imm)
3300 snprintf (buf, size, "[%s, %s]",
3301 style_reg (styler, base),
3302 style_imm (styler, "#%d", opnd->addr.offset.imm));
3303 else
3304 snprintf (buf, size, "[%s]", style_reg (styler, base));
3305 }
3306 }
3307
3308 /* Produce the string representation of the register offset address operand
3309 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3310 the names of the base and offset registers. */
3311 static void
3312 print_register_offset_address (char *buf, size_t size,
3313 const aarch64_opnd_info *opnd,
3314 const char *base, const char *offset,
3315 struct aarch64_styler *styler)
3316 {
3317 char tb[32]; /* Temporary buffer. */
3318 bool print_extend_p = true;
3319 bool print_amount_p = true;
3320 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3321
3322 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3323 || !opnd->shifter.amount_present))
3324 {
3325       /* Don't print the shift/extend amount when the amount is zero and
3326 	 it is not the special case of an 8-bit load/store instruction.  */
3327 print_amount_p = false;
3328 /* Likewise, no need to print the shift operator LSL in such a
3329 situation. */
3330 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3331 print_extend_p = false;
3332 }
3333
3334 /* Prepare for the extend/shift. */
3335 if (print_extend_p)
3336 {
3337 if (print_amount_p)
3338 snprintf (tb, sizeof (tb), ", %s %s",
3339 style_sub_mnem (styler, shift_name),
3340 style_imm (styler, "#%" PRIi64,
3341 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3342 (opnd->shifter.amount % 100)));
3343 else
3344 snprintf (tb, sizeof (tb), ", %s",
3345 style_sub_mnem (styler, shift_name));
3346 }
3347 else
3348 tb[0] = '\0';
3349
3350 snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base),
3351 style_reg (styler, offset), tb);
3352 }
3353
3354 /* Print ZA tiles from imm8 in ZERO instruction.
3355
3356 The preferred disassembly of this instruction uses the shortest list of tile
3357 names that represent the encoded immediate mask.
3358
3359 For example:
3360 * An all-ones immediate is disassembled as {ZA}.
3361 * An all-zeros immediate is disassembled as an empty list { }.
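   * An immediate of 0x55 (za0.d, za2.d, za4.d and za6.d) is disassembled
     as {za0.h}.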
3362 */
3363 static void
3364 print_sme_za_list (char *buf, size_t size, int mask,
3365 struct aarch64_styler *styler)
3366 {
3367 const char* zan[] = { "za", "za0.h", "za1.h", "za0.s",
3368 "za1.s", "za2.s", "za3.s", "za0.d",
3369 "za1.d", "za2.d", "za3.d", "za4.d",
3370 "za5.d", "za6.d", "za7.d", " " };
3371 const int zan_v[] = { 0xff, 0x55, 0xaa, 0x11,
3372 0x22, 0x44, 0x88, 0x01,
3373 0x02, 0x04, 0x08, 0x10,
3374 0x20, 0x40, 0x80, 0x00 };
3375 int i, k;
3376 const int ZAN_SIZE = sizeof(zan) / sizeof(zan[0]);
3377
3378 k = snprintf (buf, size, "{");
3379 for (i = 0; i < ZAN_SIZE; i++)
3380 {
3381 if ((mask & zan_v[i]) == zan_v[i])
3382 {
3383 mask &= ~zan_v[i];
3384 if (k > 1)
3385 k += snprintf (buf + k, size - k, ", ");
3386
3387 k += snprintf (buf + k, size - k, "%s", style_reg (styler, zan[i]));
3388 }
3389 if (mask == 0)
3390 break;
3391 }
3392 snprintf (buf + k, size - k, "}");
3393 }
3394
3395 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3396 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3397 PC, PCREL_P and ADDRESS are used to pass in and return information about
3398 the PC-relative address calculation, where the PC value is passed in
3399    PC.  If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
3400 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3401 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3402
3403 The function serves both the disassembler and the assembler diagnostics
3404 issuer, which is the reason why it lives in this file. */
3405
3406 void
3407 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3408 const aarch64_opcode *opcode,
3409 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3410 bfd_vma *address, char** notes,
3411 char *comment, size_t comment_size,
3412 aarch64_feature_set features,
3413 struct aarch64_styler *styler)
3414 {
3415 unsigned int i, num_conds;
3416 const char *name = NULL;
3417 const aarch64_opnd_info *opnd = opnds + idx;
3418 enum aarch64_modifier_kind kind;
3419 uint64_t addr, enum_value;
3420
3421 if (comment != NULL)
3422 {
3423 assert (comment_size > 0);
3424 comment[0] = '\0';
3425 }
3426 else
3427 assert (comment_size == 0);
3428
3429 buf[0] = '\0';
3430 if (pcrel_p)
3431 *pcrel_p = 0;
3432
3433 switch (opnd->type)
3434 {
3435 case AARCH64_OPND_Rd:
3436 case AARCH64_OPND_Rn:
3437 case AARCH64_OPND_Rm:
3438 case AARCH64_OPND_Rt:
3439 case AARCH64_OPND_Rt2:
3440 case AARCH64_OPND_Rs:
3441 case AARCH64_OPND_Ra:
3442 case AARCH64_OPND_Rt_LS64:
3443 case AARCH64_OPND_Rt_SYS:
3444 case AARCH64_OPND_PAIRREG:
3445 case AARCH64_OPND_SVE_Rm:
3446 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3447 the <ic_op>, therefore we use opnd->present to override the
3448 generic optional-ness information. */
3449 if (opnd->type == AARCH64_OPND_Rt_SYS)
3450 {
3451 if (!opnd->present)
3452 break;
3453 }
3454 /* Omit the operand, e.g. RET. */
3455 else if (optional_operand_p (opcode, idx)
3456 && (opnd->reg.regno
3457 == get_optional_operand_default_value (opcode)))
3458 break;
3459 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3460 || opnd->qualifier == AARCH64_OPND_QLF_X);
3461 snprintf (buf, size, "%s",
3462 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3463 opnd->qualifier, 0)));
3464 break;
3465
3466 case AARCH64_OPND_Rd_SP:
3467 case AARCH64_OPND_Rn_SP:
3468 case AARCH64_OPND_Rt_SP:
3469 case AARCH64_OPND_SVE_Rn_SP:
3470 case AARCH64_OPND_Rm_SP:
3471 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3472 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3473 || opnd->qualifier == AARCH64_OPND_QLF_X
3474 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3475 snprintf (buf, size, "%s",
3476 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3477 opnd->qualifier, 1)));
3478 break;
3479
3480 case AARCH64_OPND_Rm_EXT:
3481 kind = opnd->shifter.kind;
3482 assert (idx == 1 || idx == 2);
3483 if ((aarch64_stack_pointer_p (opnds)
3484 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3485 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3486 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3487 && kind == AARCH64_MOD_UXTW)
3488 || (opnd->qualifier == AARCH64_OPND_QLF_X
3489 && kind == AARCH64_MOD_UXTX)))
3490 {
3491 /* 'LSL' is the preferred form in this case. */
3492 kind = AARCH64_MOD_LSL;
3493 if (opnd->shifter.amount == 0)
3494 {
3495 /* Shifter omitted. */
3496 snprintf (buf, size, "%s",
3497 style_reg (styler,
3498 get_int_reg_name (opnd->reg.regno,
3499 opnd->qualifier, 0)));
3500 break;
3501 }
3502 }
3503 if (opnd->shifter.amount)
3504 snprintf (buf, size, "%s, %s %s",
3505 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3506 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name),
3507 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3508 else
3509 snprintf (buf, size, "%s, %s",
3510 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3511 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name));
3512 break;
3513
3514 case AARCH64_OPND_Rm_SFT:
3515 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3516 || opnd->qualifier == AARCH64_OPND_QLF_X);
3517 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3518 snprintf (buf, size, "%s",
3519 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3520 opnd->qualifier, 0)));
3521 else
3522 snprintf (buf, size, "%s, %s %s",
3523 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3524 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
3525 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3526 break;
3527
3528 case AARCH64_OPND_Fd:
3529 case AARCH64_OPND_Fn:
3530 case AARCH64_OPND_Fm:
3531 case AARCH64_OPND_Fa:
3532 case AARCH64_OPND_Ft:
3533 case AARCH64_OPND_Ft2:
3534 case AARCH64_OPND_Sd:
3535 case AARCH64_OPND_Sn:
3536 case AARCH64_OPND_Sm:
3537 case AARCH64_OPND_SVE_VZn:
3538 case AARCH64_OPND_SVE_Vd:
3539 case AARCH64_OPND_SVE_Vm:
3540 case AARCH64_OPND_SVE_Vn:
3541 snprintf (buf, size, "%s",
3542 style_reg (styler, "%s%d",
3543 aarch64_get_qualifier_name (opnd->qualifier),
3544 opnd->reg.regno));
3545 break;
3546
3547 case AARCH64_OPND_Va:
3548 case AARCH64_OPND_Vd:
3549 case AARCH64_OPND_Vn:
3550 case AARCH64_OPND_Vm:
3551 snprintf (buf, size, "%s",
3552 style_reg (styler, "v%d.%s", opnd->reg.regno,
3553 aarch64_get_qualifier_name (opnd->qualifier)));
3554 break;
3555
3556 case AARCH64_OPND_Ed:
3557 case AARCH64_OPND_En:
3558 case AARCH64_OPND_Em:
3559 case AARCH64_OPND_Em16:
3560 case AARCH64_OPND_SM3_IMM2:
3561 snprintf (buf, size, "%s[%s]",
3562 style_reg (styler, "v%d.%s", opnd->reglane.regno,
3563 aarch64_get_qualifier_name (opnd->qualifier)),
3564 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3565 break;
3566
3567 case AARCH64_OPND_VdD1:
3568 case AARCH64_OPND_VnD1:
3569 snprintf (buf, size, "%s[%s]",
3570 style_reg (styler, "v%d.d", opnd->reg.regno),
3571 style_imm (styler, "1"));
3572 break;
3573
3574 case AARCH64_OPND_LVn:
3575 case AARCH64_OPND_LVt:
3576 case AARCH64_OPND_LVt_AL:
3577 case AARCH64_OPND_LEt:
3578 print_register_list (buf, size, opnd, "v", styler);
3579 break;
3580
3581 case AARCH64_OPND_SVE_Pd:
3582 case AARCH64_OPND_SVE_Pg3:
3583 case AARCH64_OPND_SVE_Pg4_5:
3584 case AARCH64_OPND_SVE_Pg4_10:
3585 case AARCH64_OPND_SVE_Pg4_16:
3586 case AARCH64_OPND_SVE_Pm:
3587 case AARCH64_OPND_SVE_Pn:
3588 case AARCH64_OPND_SVE_Pt:
3589 case AARCH64_OPND_SME_Pm:
3590 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3591 snprintf (buf, size, "%s",
3592 style_reg (styler, "p%d", opnd->reg.regno));
3593 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3594 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3595 snprintf (buf, size, "%s",
3596 style_reg (styler, "p%d/%s", opnd->reg.regno,
3597 aarch64_get_qualifier_name (opnd->qualifier)));
3598 else
3599 snprintf (buf, size, "%s",
3600 style_reg (styler, "p%d.%s", opnd->reg.regno,
3601 aarch64_get_qualifier_name (opnd->qualifier)));
3602 break;
3603
3604 case AARCH64_OPND_SVE_Za_5:
3605 case AARCH64_OPND_SVE_Za_16:
3606 case AARCH64_OPND_SVE_Zd:
3607 case AARCH64_OPND_SVE_Zm_5:
3608 case AARCH64_OPND_SVE_Zm_16:
3609 case AARCH64_OPND_SVE_Zn:
3610 case AARCH64_OPND_SVE_Zt:
3611 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3612 snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
3613 else
3614 snprintf (buf, size, "%s",
3615 style_reg (styler, "z%d.%s", opnd->reg.regno,
3616 aarch64_get_qualifier_name (opnd->qualifier)));
3617 break;
3618
3619 case AARCH64_OPND_SVE_ZnxN:
3620 case AARCH64_OPND_SVE_ZtxN:
3621 print_register_list (buf, size, opnd, "z", styler);
3622 break;
3623
3624 case AARCH64_OPND_SVE_Zm3_INDEX:
3625 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3626 case AARCH64_OPND_SVE_Zm3_11_INDEX:
3627 case AARCH64_OPND_SVE_Zm4_11_INDEX:
3628 case AARCH64_OPND_SVE_Zm4_INDEX:
3629 case AARCH64_OPND_SVE_Zn_INDEX:
3630 snprintf (buf, size, "%s[%s]",
3631 style_reg (styler, "z%d.%s", opnd->reglane.regno,
3632 aarch64_get_qualifier_name (opnd->qualifier)),
3633 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3634 break;
3635
3636 case AARCH64_OPND_SME_ZAda_2b:
3637 case AARCH64_OPND_SME_ZAda_3b:
3638 snprintf (buf, size, "%s",
3639 style_reg (styler, "za%d.%s", opnd->reg.regno,
3640 aarch64_get_qualifier_name (opnd->qualifier)));
3641 break;
3642
3643 case AARCH64_OPND_SME_ZA_HV_idx_src:
3644 case AARCH64_OPND_SME_ZA_HV_idx_dest:
3645 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
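/* A horizontal or vertical ZA tile slice, e.g. "za0h.s[w12, 3]"
   (illustrative); for the load/store form the whole operand is
   additionally wrapped in braces.  */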
3646 snprintf (buf, size, "%s%s[%s, %s]%s",
3647 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
3648 style_reg (styler, "za%d%c.%s",
3649 opnd->indexed_za.regno,
3650 opnd->indexed_za.v == 1 ? 'v' : 'h',
3651 aarch64_get_qualifier_name (opnd->qualifier)),
3652 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
3653 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
3654 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
3655 break;
3656
3657 case AARCH64_OPND_SME_list_of_64bit_tiles:
3658 print_sme_za_list (buf, size, opnd->reg.regno, styler);
3659 break;
3660
3661 case AARCH64_OPND_SME_ZA_array:
3662 snprintf (buf, size, "%s[%s, %s]",
3663 style_reg (styler, "za"),
3664 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
3665 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm));
3666 break;
3667
3668 case AARCH64_OPND_SME_SM_ZA:
3669 snprintf (buf, size, "%s",
3670 style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za"));
3671 break;
3672
3673 case AARCH64_OPND_SME_PnT_Wm_imm:
3674 snprintf (buf, size, "%s[%s, %s]",
3675 style_reg (styler, "p%d.%s", opnd->indexed_za.regno,
3676 aarch64_get_qualifier_name (opnd->qualifier)),
3677 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
3678 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm));
3679 break;
3680
3681 case AARCH64_OPND_CRn:
3682 case AARCH64_OPND_CRm:
3683 snprintf (buf, size, "%s",
3684 style_reg (styler, "C%" PRIi64, opnd->imm.value));
3685 break;
3686
3687 case AARCH64_OPND_IDX:
3688 case AARCH64_OPND_MASK:
3689 case AARCH64_OPND_IMM:
3690 case AARCH64_OPND_IMM_2:
3691 case AARCH64_OPND_WIDTH:
3692 case AARCH64_OPND_UIMM3_OP1:
3693 case AARCH64_OPND_UIMM3_OP2:
3694 case AARCH64_OPND_BIT_NUM:
3695 case AARCH64_OPND_IMM_VLSL:
3696 case AARCH64_OPND_IMM_VLSR:
3697 case AARCH64_OPND_SHLL_IMM:
3698 case AARCH64_OPND_IMM0:
3699 case AARCH64_OPND_IMMR:
3700 case AARCH64_OPND_IMMS:
3701 case AARCH64_OPND_UNDEFINED:
3702 case AARCH64_OPND_FBITS:
3703 case AARCH64_OPND_TME_UIMM16:
3704 case AARCH64_OPND_SIMM5:
3705 case AARCH64_OPND_SVE_SHLIMM_PRED:
3706 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3707 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3708 case AARCH64_OPND_SVE_SHRIMM_PRED:
3709 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3710 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3711 case AARCH64_OPND_SVE_SIMM5:
3712 case AARCH64_OPND_SVE_SIMM5B:
3713 case AARCH64_OPND_SVE_SIMM6:
3714 case AARCH64_OPND_SVE_SIMM8:
3715 case AARCH64_OPND_SVE_UIMM3:
3716 case AARCH64_OPND_SVE_UIMM7:
3717 case AARCH64_OPND_SVE_UIMM8:
3718 case AARCH64_OPND_SVE_UIMM8_53:
3719 case AARCH64_OPND_IMM_ROT1:
3720 case AARCH64_OPND_IMM_ROT2:
3721 case AARCH64_OPND_IMM_ROT3:
3722 case AARCH64_OPND_SVE_IMM_ROT1:
3723 case AARCH64_OPND_SVE_IMM_ROT2:
3724 case AARCH64_OPND_SVE_IMM_ROT3:
3725 case AARCH64_OPND_CSSC_SIMM8:
3726 case AARCH64_OPND_CSSC_UIMM8:
3727 snprintf (buf, size, "%s",
3728 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3729 break;
3730
3731 case AARCH64_OPND_SVE_I1_HALF_ONE:
3732 case AARCH64_OPND_SVE_I1_HALF_TWO:
3733 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3734 {
3735 single_conv_t c;
3736 c.i = opnd->imm.value;
3737 snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f));
3738 break;
3739 }
3740
3741 case AARCH64_OPND_SVE_PATTERN:
3742 if (optional_operand_p (opcode, idx)
3743 && opnd->imm.value == get_optional_operand_default_value (opcode))
3744 break;
3745 enum_value = opnd->imm.value;
3746 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3747 if (aarch64_sve_pattern_array[enum_value])
3748 snprintf (buf, size, "%s",
3749 style_reg (styler, aarch64_sve_pattern_array[enum_value]));
3750 else
3751 snprintf (buf, size, "%s",
3752 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3753 break;
3754
3755 case AARCH64_OPND_SVE_PATTERN_SCALED:
3756 if (optional_operand_p (opcode, idx)
3757 && !opnd->shifter.operator_present
3758 && opnd->imm.value == get_optional_operand_default_value (opcode))
3759 break;
3760 enum_value = opnd->imm.value;
3761 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3762 if (aarch64_sve_pattern_array[opnd->imm.value])
3763 snprintf (buf, size, "%s",
3764 style_reg (styler,
3765 aarch64_sve_pattern_array[opnd->imm.value]));
3766 else
3767 snprintf (buf, size, "%s",
3768 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3769 if (opnd->shifter.operator_present)
3770 {
3771 size_t len = strlen (buf);
3772 const char *shift_name
3773 = aarch64_operand_modifiers[opnd->shifter.kind].name;
3774 snprintf (buf + len, size - len, ", %s %s",
3775 style_sub_mnem (styler, shift_name),
3776 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3777 }
3778 break;
3779
3780 case AARCH64_OPND_SVE_PRFOP:
3781 enum_value = opnd->imm.value;
3782 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3783 if (aarch64_sve_prfop_array[enum_value])
3784 snprintf (buf, size, "%s",
3785 style_reg (styler, aarch64_sve_prfop_array[enum_value]));
3786 else
3787 snprintf (buf, size, "%s",
3788 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3789 break;
3790
3791 case AARCH64_OPND_IMM_MOV:
3792 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3793 {
3794 case 4: /* e.g. MOV Wd, #<imm32>. */
3795 {
3796 int imm32 = opnd->imm.value;
3797 snprintf (buf, size, "%s",
3798 style_imm (styler, "#0x%-20x", imm32));
3799 snprintf (comment, comment_size, "#%d", imm32);
3800 }
3801 break;
3802 case 8: /* e.g. MOV Xd, #<imm64>. */
3803 snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64,
3804 opnd->imm.value));
3805 snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
3806 break;
3807 default:
3808 snprintf (buf, size, "<invalid>");
3809 break;
3810 }
3811 break;
3812
3813 case AARCH64_OPND_FPIMM0:
3814 snprintf (buf, size, "%s", style_imm (styler, "#0.0"));
3815 break;
3816
3817 case AARCH64_OPND_LIMM:
3818 case AARCH64_OPND_AIMM:
3819 case AARCH64_OPND_HALF:
3820 case AARCH64_OPND_SVE_INV_LIMM:
3821 case AARCH64_OPND_SVE_LIMM:
3822 case AARCH64_OPND_SVE_LIMM_MOV:
3823 if (opnd->shifter.amount)
3824 snprintf (buf, size, "%s, %s %s",
3825 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
3826 style_sub_mnem (styler, "lsl"),
3827 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3828 else
3829 snprintf (buf, size, "%s",
3830 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
3831 break;
3832
3833 case AARCH64_OPND_SIMD_IMM:
3834 case AARCH64_OPND_SIMD_IMM_SFT:
3835 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3836 || opnd->shifter.kind == AARCH64_MOD_NONE)
3837 snprintf (buf, size, "%s",
3838 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
3839 else
3840 snprintf (buf, size, "%s, %s %s",
3841 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
3842 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
3843 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3844 break;
3845
3846 case AARCH64_OPND_SVE_AIMM:
3847 case AARCH64_OPND_SVE_ASIMM:
3848 if (opnd->shifter.amount)
3849 snprintf (buf, size, "%s, %s %s",
3850 style_imm (styler, "#%" PRIi64, opnd->imm.value),
3851 style_sub_mnem (styler, "lsl"),
3852 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3853 else
3854 snprintf (buf, size, "%s",
3855 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3856 break;
3857
3858 case AARCH64_OPND_FPIMM:
3859 case AARCH64_OPND_SIMD_FPIMM:
3860 case AARCH64_OPND_SVE_FPIMM8:
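/* The 8-bit encoded floating-point immediate is expanded to a full
   half/single/double value (element size 2, 4 or 8, taken from
   operand 0) and printed in scientific notation.  */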
3861 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3862 {
3863 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3864 {
3865 half_conv_t c;
3866 c.i = expand_fp_imm (2, opnd->imm.value);
3867 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
3868 }
3869 break;
3870 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3871 {
3872 single_conv_t c;
3873 c.i = expand_fp_imm (4, opnd->imm.value);
3874 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
3875 }
3876 break;
3877 case 8: /* e.g. FMOV <Dd>, #<imm>. */
3878 {
3879 double_conv_t c;
3880 c.i = expand_fp_imm (8, opnd->imm.value);
3881 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d));
3882 }
3883 break;
3884 default:
3885 snprintf (buf, size, "<invalid>");
3886 break;
3887 }
3888 break;
3889
3890 case AARCH64_OPND_CCMP_IMM:
3891 case AARCH64_OPND_NZCV:
3892 case AARCH64_OPND_EXCEPTION:
3893 case AARCH64_OPND_UIMM4:
3894 case AARCH64_OPND_UIMM4_ADDG:
3895 case AARCH64_OPND_UIMM7:
3896 case AARCH64_OPND_UIMM10:
3897 if (optional_operand_p (opcode, idx)
3898 && (opnd->imm.value ==
3899 (int64_t) get_optional_operand_default_value (opcode)))
3900 /* Omit the operand, e.g. DCPS1. */
3901 break;
3902 snprintf (buf, size, "%s",
3903 style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value));
3904 break;
3905
3906 case AARCH64_OPND_COND:
3907 case AARCH64_OPND_COND1:
3908 snprintf (buf, size, "%s",
3909 style_sub_mnem (styler, opnd->cond->names[0]));
3910 num_conds = ARRAY_SIZE (opnd->cond->names);
3911 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3912 {
3913 size_t len = comment != NULL ? strlen (comment) : 0;
3914 if (i == 1)
3915 snprintf (comment + len, comment_size - len, "%s = %s",
3916 opnd->cond->names[0], opnd->cond->names[i]);
3917 else
3918 snprintf (comment + len, comment_size - len, ", %s",
3919 opnd->cond->names[i]);
3920 }
3921 break;
3922
3923 case AARCH64_OPND_ADDR_ADRP:
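/* ADRP targets are 4KiB-page aligned: clear the low 12 bits of the PC
   before adding the operand's immediate.  */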
3924 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3925 + opnd->imm.value;
3926 if (pcrel_p)
3927 *pcrel_p = 1;
3928 if (address)
3929 *address = addr;
3930 /* This is not necessary during disassembly, as print_address_func
3931 in the disassemble_info will take care of the printing. But some
3932 other callers may still be interested in getting the string in *STR,
3933 so we call snprintf here regardless. */
3934 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64 , addr));
3935 break;
3936
3937 case AARCH64_OPND_ADDR_PCREL14:
3938 case AARCH64_OPND_ADDR_PCREL19:
3939 case AARCH64_OPND_ADDR_PCREL21:
3940 case AARCH64_OPND_ADDR_PCREL26:
3941 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3942 if (pcrel_p)
3943 *pcrel_p = 1;
3944 if (address)
3945 *address = addr;
3946 /* This is not necessary during disassembly, as print_address_func
3947 in the disassemble_info will take care of the printing. But some
3948 other callers may still be interested in getting the string in *STR,
3949 so we call snprintf here regardless. */
3950 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
3951 break;
3952
3953 case AARCH64_OPND_ADDR_SIMPLE:
3954 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3955 case AARCH64_OPND_SIMD_ADDR_POST:
3956 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3957 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3958 {
3959 if (opnd->addr.offset.is_reg)
3960 snprintf (buf, size, "[%s], %s",
3961 style_reg (styler, name),
3962 style_reg (styler, "x%d", opnd->addr.offset.regno));
3963 else
3964 snprintf (buf, size, "[%s], %s",
3965 style_reg (styler, name),
3966 style_imm (styler, "#%d", opnd->addr.offset.imm));
3967 }
3968 else
3969 snprintf (buf, size, "[%s]", style_reg (styler, name));
3970 break;
3971
3972 case AARCH64_OPND_ADDR_REGOFF:
3973 case AARCH64_OPND_SVE_ADDR_R:
3974 case AARCH64_OPND_SVE_ADDR_RR:
3975 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3976 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3977 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3978 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
3979 case AARCH64_OPND_SVE_ADDR_RX:
3980 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3981 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3982 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3983 print_register_offset_address
3984 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3985 get_offset_int_reg_name (opnd), styler);
3986 break;
3987
3988 case AARCH64_OPND_SVE_ADDR_ZX:
3989 print_register_offset_address
3990 (buf, size, opnd,
3991 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3992 get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler);
3993 break;
3994
3995 case AARCH64_OPND_SVE_ADDR_RZ:
3996 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3997 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3998 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3999 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
4000 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
4001 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
4002 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
4003 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
4004 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
4005 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
4006 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
4007 print_register_offset_address
4008 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4009 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4010 styler);
4011 break;
4012
4013 case AARCH64_OPND_ADDR_SIMM7:
4014 case AARCH64_OPND_ADDR_SIMM9:
4015 case AARCH64_OPND_ADDR_SIMM9_2:
4016 case AARCH64_OPND_ADDR_SIMM10:
4017 case AARCH64_OPND_ADDR_SIMM11:
4018 case AARCH64_OPND_ADDR_SIMM13:
4019 case AARCH64_OPND_ADDR_OFFSET:
4020 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
4021 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
4022 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
4023 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
4024 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
4025 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
4026 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
4027 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
4028 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
4029 case AARCH64_OPND_SVE_ADDR_RI_U6:
4030 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
4031 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
4032 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
4033 print_immediate_offset_address
4034 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4035 styler);
4036 break;
4037
4038 case AARCH64_OPND_SVE_ADDR_ZI_U5:
4039 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
4040 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
4041 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
4042 print_immediate_offset_address
4043 (buf, size, opnd,
4044 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4045 styler);
4046 break;
4047
4048 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
4049 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
4050 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
4051 print_register_offset_address
4052 (buf, size, opnd,
4053 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4054 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4055 styler);
4056 break;
4057
4058 case AARCH64_OPND_ADDR_UIMM12:
4059 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4060 if (opnd->addr.offset.imm)
4061 snprintf (buf, size, "[%s, %s]",
4062 style_reg (styler, name),
4063 style_imm (styler, "#%d", opnd->addr.offset.imm));
4064 else
4065 snprintf (buf, size, "[%s]", style_reg (styler, name));
4066 break;
4067
4068 case AARCH64_OPND_SYSREG:
4069 for (i = 0; aarch64_sys_regs[i].name; ++i)
4070 {
4071 const aarch64_sys_reg *sr = aarch64_sys_regs + i;
4072
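/* An entry matches exactly when any read/write restriction it carries
   is compatible with the access implied by the instruction and its
   feature requirements are satisfied.  */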
4073 bool exact_match
4074 = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
4075 || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
4076 && AARCH64_CPU_HAS_FEATURE (features, sr->features);
4077
4078 /* Try to find an exact match, but if that fails, return the first
4079 partial match that was found. */
4080 if (aarch64_sys_regs[i].value == opnd->sysreg.value
4081 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
4082 && (name == NULL || exact_match))
4083 {
4084 name = aarch64_sys_regs[i].name;
4085 if (exact_match)
4086 {
4087 if (notes)
4088 *notes = NULL;
4089 break;
4090 }
4091
4092 /* If we didn't match exactly, that means the presence of a flag
4093 indicates what we didn't want for this instruction. E.g. if
4094 F_REG_READ is there, that means we were looking for a write
4095 register. See aarch64_ext_sysreg. */
4096 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
4097 *notes = _("reading from a write-only register");
4098 else if (aarch64_sys_regs[i].flags & F_REG_READ)
4099 *notes = _("writing to a read-only register");
4100 }
4101 }
4102
4103 if (name)
4104 snprintf (buf, size, "%s", style_reg (styler, name));
4105 else
4106 {
4107 /* Implementation defined system register. */
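/* Unpack the fields using the layout that CPENC (defined below) packs
   them into: op0 in bits [15:14], op1 in [13:11], CRn in [10:7],
   CRm in [6:3] and op2 in [2:0].  */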
4108 unsigned int value = opnd->sysreg.value;
4109 snprintf (buf, size, "%s",
4110 style_reg (styler, "s%u_%u_c%u_c%u_%u",
4111 (value >> 14) & 0x3, (value >> 11) & 0x7,
4112 (value >> 7) & 0xf, (value >> 3) & 0xf,
4113 value & 0x7));
4114 }
4115 break;
4116
4117 case AARCH64_OPND_PSTATEFIELD:
4118 for (i = 0; aarch64_pstatefields[i].name; ++i)
4119 if (aarch64_pstatefields[i].value == opnd->pstatefield)
4120 {
4121 /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
4122 SVCRZA and SVCRSMZA. */
4123 uint32_t flags = aarch64_pstatefields[i].flags;
4124 if (flags & F_REG_IN_CRM
4125 && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
4126 != PSTATE_DECODE_CRM (flags)))
4127 continue;
4128 break;
4129 }
4130 assert (aarch64_pstatefields[i].name);
4131 snprintf (buf, size, "%s",
4132 style_reg (styler, aarch64_pstatefields[i].name));
4133 break;
4134
4135 case AARCH64_OPND_SYSREG_AT:
4136 case AARCH64_OPND_SYSREG_DC:
4137 case AARCH64_OPND_SYSREG_IC:
4138 case AARCH64_OPND_SYSREG_TLBI:
4139 case AARCH64_OPND_SYSREG_SR:
4140 snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name));
4141 break;
4142
4143 case AARCH64_OPND_BARRIER:
4144 case AARCH64_OPND_BARRIER_DSB_NXS:
4145 {
4146 if (opnd->barrier->name[0] == '#')
4147 snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name));
4148 else
4149 snprintf (buf, size, "%s",
4150 style_sub_mnem (styler, opnd->barrier->name));
4151 }
4152 break;
4153
4154 case AARCH64_OPND_BARRIER_ISB:
4155 /* Operand can be omitted, e.g. in ISB, where it defaults to SY. */
4156 if (! optional_operand_p (opcode, idx)
4157 || (opnd->barrier->value
4158 != get_optional_operand_default_value (opcode)))
4159 snprintf (buf, size, "%s",
4160 style_imm (styler, "#0x%x", opnd->barrier->value));
4161 break;
4162
4163 case AARCH64_OPND_PRFOP:
4164 if (opnd->prfop->name != NULL)
4165 snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name));
4166 else
4167 snprintf (buf, size, "%s", style_imm (styler, "#0x%02x",
4168 opnd->prfop->value));
4169 break;
4170
4171 case AARCH64_OPND_BARRIER_PSB:
4172 snprintf (buf, size, "%s", style_sub_mnem (styler, "csync"));
4173 break;
4174
4175 case AARCH64_OPND_BTI_TARGET:
4176 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
4177 snprintf (buf, size, "%s",
4178 style_sub_mnem (styler, opnd->hint_option->name));
4179 break;
4180
4181 case AARCH64_OPND_MOPS_ADDR_Rd:
4182 case AARCH64_OPND_MOPS_ADDR_Rs:
4183 snprintf (buf, size, "[%s]!",
4184 style_reg (styler,
4185 get_int_reg_name (opnd->reg.regno,
4186 AARCH64_OPND_QLF_X, 0)));
4187 break;
4188
4189 case AARCH64_OPND_MOPS_WB_Rn:
4190 snprintf (buf, size, "%s!",
4191 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4192 AARCH64_OPND_QLF_X, 0)));
4193 break;
4194
4195 default:
4196 snprintf (buf, size, "<invalid>");
4197 break;
4198 }
4199 }
4200 \f
4201 #define CPENC(op0,op1,crn,crm,op2) \
4202 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
4203 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
4204 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
4205 /* for 3.9.10 System Instructions */
4206 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
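/* The packed value places op0 in bits [15:14], op1 in [13:11], CRn in
   [10:7], CRm in [6:3] and op2 in [2:0].  For example, the sctlr_el1
   entry below, CPENC (3,0,C1,C0,0), evaluates to
   (3 << 14) | (1 << 7) == 0xc080.  */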
4207
4208 #define C0 0
4209 #define C1 1
4210 #define C2 2
4211 #define C3 3
4212 #define C4 4
4213 #define C5 5
4214 #define C6 6
4215 #define C7 7
4216 #define C8 8
4217 #define C9 9
4218 #define C10 10
4219 #define C11 11
4220 #define C12 12
4221 #define C13 13
4222 #define C14 14
4223 #define C15 15
4224
4225 #define SYSREG(name, encoding, flags, features) \
4226 { name, encoding, flags, features }
4227
4228 #define SR_CORE(n,e,f) SYSREG (n,e,f,0)
4229
4230 #define SR_FEAT(n,e,f,feat) \
4231 SYSREG ((n), (e), (f) | F_ARCHEXT, AARCH64_FEATURE_##feat)
4232
4233 #define SR_FEAT2(n,e,f,fe1,fe2) \
4234 SYSREG ((n), (e), (f) | F_ARCHEXT, \
4235 AARCH64_FEATURE_##fe1 | AARCH64_FEATURE_##fe2)
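/* For example, SR_PAN ("pan", CPEN_ (0,C2,3), 0) below expands to
   SYSREG ("pan", CPEN_ (0,C2,3), 0 | F_ARCHEXT, AARCH64_FEATURE_PAN),
   i.e. the entry carries F_ARCHEXT plus the PAN feature requirement.  */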
4236
4237 #define SR_V8_1_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_1)
4238 #define SR_V8_4_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_4)
4239
4240 #define SR_V8_A(n,e,f) SR_FEAT (n,e,f,V8_A)
4241 #define SR_V8_R(n,e,f) SR_FEAT (n,e,f,V8_R)
4242 #define SR_V8_1(n,e,f) SR_FEAT (n,e,f,V8_1)
4243 #define SR_V8_2(n,e,f) SR_FEAT (n,e,f,V8_2)
4244 #define SR_V8_3(n,e,f) SR_FEAT (n,e,f,V8_3)
4245 #define SR_V8_4(n,e,f) SR_FEAT (n,e,f,V8_4)
4246 #define SR_V8_6(n,e,f) SR_FEAT (n,e,f,V8_6)
4247 #define SR_V8_7(n,e,f) SR_FEAT (n,e,f,V8_7)
4248 #define SR_V8_8(n,e,f) SR_FEAT (n,e,f,V8_8)
4249 /* Has no separate libopcodes feature flag, but separated out for clarity. */
4250 #define SR_GIC(n,e,f) SR_CORE (n,e,f)
4251 /* Has no separate libopcodes feature flag, but separated out for clarity. */
4252 #define SR_AMU(n,e,f) SR_FEAT (n,e,f,V8_4)
4253 #define SR_LOR(n,e,f) SR_FEAT (n,e,f,LOR)
4254 #define SR_PAN(n,e,f) SR_FEAT (n,e,f,PAN)
4255 #define SR_RAS(n,e,f) SR_FEAT (n,e,f,RAS)
4256 #define SR_RNG(n,e,f) SR_FEAT (n,e,f,RNG)
4257 #define SR_SME(n,e,f) SR_FEAT (n,e,f,SME)
4258 #define SR_SSBS(n,e,f) SR_FEAT (n,e,f,SSBS)
4259 #define SR_SVE(n,e,f) SR_FEAT (n,e,f,SVE)
4260 #define SR_ID_PFR2(n,e,f) SR_FEAT (n,e,f,ID_PFR2)
4261 #define SR_PROFILE(n,e,f) SR_FEAT (n,e,f,PROFILE)
4262 #define SR_MEMTAG(n,e,f) SR_FEAT (n,e,f,MEMTAG)
4263 #define SR_SCXTNUM(n,e,f) SR_FEAT (n,e,f,SCXTNUM)
4264
4265 #define SR_EXPAND_ELx(f,x) \
4266 f (x, 1), \
4267 f (x, 2), \
4268 f (x, 3), \
4269 f (x, 4), \
4270 f (x, 5), \
4271 f (x, 6), \
4272 f (x, 7), \
4273 f (x, 8), \
4274 f (x, 9), \
4275 f (x, 10), \
4276 f (x, 11), \
4277 f (x, 12), \
4278 f (x, 13), \
4279 f (x, 14), \
4280 f (x, 15),
4281
4282 #define SR_EXPAND_EL12(f) \
4283 SR_EXPAND_ELx (f,1) \
4284 SR_EXPAND_ELx (f,2)
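/* SR_EXPAND_EL12 (f) therefore expands to f (1,1), ..., f (1,15),
   f (2,1), ..., f (2,15): one invocation per index for EL1 and EL2.  */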
4285
4286 /* TODO: there is one more issue that needs to be resolved:
4287 1. handle CPU-implementation-defined system registers.
4288
4289 Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
4290 respectively. If neither of these are set then the register is read-write. */
4291 const aarch64_sys_reg aarch64_sys_regs [] =
4292 {
4293 SR_CORE ("spsr_el1", CPEN_ (0,C0,0), 0), /* = spsr_svc. */
4294 SR_V8_1 ("spsr_el12", CPEN_ (5,C0,0), 0),
4295 SR_CORE ("elr_el1", CPEN_ (0,C0,1), 0),
4296 SR_V8_1 ("elr_el12", CPEN_ (5,C0,1), 0),
4297 SR_CORE ("sp_el0", CPEN_ (0,C1,0), 0),
4298 SR_CORE ("spsel", CPEN_ (0,C2,0), 0),
4299 SR_CORE ("daif", CPEN_ (3,C2,1), 0),
4300 SR_CORE ("currentel", CPEN_ (0,C2,2), F_REG_READ),
4301 SR_PAN ("pan", CPEN_ (0,C2,3), 0),
4302 SR_V8_2 ("uao", CPEN_ (0,C2,4), 0),
4303 SR_CORE ("nzcv", CPEN_ (3,C2,0), 0),
4304 SR_SSBS ("ssbs", CPEN_ (3,C2,6), 0),
4305 SR_CORE ("fpcr", CPEN_ (3,C4,0), 0),
4306 SR_CORE ("fpsr", CPEN_ (3,C4,1), 0),
4307 SR_CORE ("dspsr_el0", CPEN_ (3,C5,0), 0),
4308 SR_CORE ("dlr_el0", CPEN_ (3,C5,1), 0),
4309 SR_CORE ("spsr_el2", CPEN_ (4,C0,0), 0), /* = spsr_hyp. */
4310 SR_CORE ("elr_el2", CPEN_ (4,C0,1), 0),
4311 SR_CORE ("sp_el1", CPEN_ (4,C1,0), 0),
4312 SR_CORE ("spsr_irq", CPEN_ (4,C3,0), 0),
4313 SR_CORE ("spsr_abt", CPEN_ (4,C3,1), 0),
4314 SR_CORE ("spsr_und", CPEN_ (4,C3,2), 0),
4315 SR_CORE ("spsr_fiq", CPEN_ (4,C3,3), 0),
4316 SR_CORE ("spsr_el3", CPEN_ (6,C0,0), 0),
4317 SR_CORE ("elr_el3", CPEN_ (6,C0,1), 0),
4318 SR_CORE ("sp_el2", CPEN_ (6,C1,0), 0),
4319 SR_CORE ("spsr_svc", CPEN_ (0,C0,0), F_DEPRECATED), /* = spsr_el1. */
4320 SR_CORE ("spsr_hyp", CPEN_ (4,C0,0), F_DEPRECATED), /* = spsr_el2. */
4321 SR_CORE ("midr_el1", CPENC (3,0,C0,C0,0), F_REG_READ),
4322 SR_CORE ("ctr_el0", CPENC (3,3,C0,C0,1), F_REG_READ),
4323 SR_CORE ("mpidr_el1", CPENC (3,0,C0,C0,5), F_REG_READ),
4324 SR_CORE ("revidr_el1", CPENC (3,0,C0,C0,6), F_REG_READ),
4325 SR_CORE ("aidr_el1", CPENC (3,1,C0,C0,7), F_REG_READ),
4326 SR_CORE ("dczid_el0", CPENC (3,3,C0,C0,7), F_REG_READ),
4327 SR_CORE ("id_dfr0_el1", CPENC (3,0,C0,C1,2), F_REG_READ),
4328 SR_CORE ("id_dfr1_el1", CPENC (3,0,C0,C3,5), F_REG_READ),
4329 SR_CORE ("id_pfr0_el1", CPENC (3,0,C0,C1,0), F_REG_READ),
4330 SR_CORE ("id_pfr1_el1", CPENC (3,0,C0,C1,1), F_REG_READ),
4331 SR_ID_PFR2 ("id_pfr2_el1", CPENC (3,0,C0,C3,4), F_REG_READ),
4332 SR_CORE ("id_afr0_el1", CPENC (3,0,C0,C1,3), F_REG_READ),
4333 SR_CORE ("id_mmfr0_el1", CPENC (3,0,C0,C1,4), F_REG_READ),
4334 SR_CORE ("id_mmfr1_el1", CPENC (3,0,C0,C1,5), F_REG_READ),
4335 SR_CORE ("id_mmfr2_el1", CPENC (3,0,C0,C1,6), F_REG_READ),
4336 SR_CORE ("id_mmfr3_el1", CPENC (3,0,C0,C1,7), F_REG_READ),
4337 SR_CORE ("id_mmfr4_el1", CPENC (3,0,C0,C2,6), F_REG_READ),
4338 SR_CORE ("id_mmfr5_el1", CPENC (3,0,C0,C3,6), F_REG_READ),
4339 SR_CORE ("id_isar0_el1", CPENC (3,0,C0,C2,0), F_REG_READ),
4340 SR_CORE ("id_isar1_el1", CPENC (3,0,C0,C2,1), F_REG_READ),
4341 SR_CORE ("id_isar2_el1", CPENC (3,0,C0,C2,2), F_REG_READ),
4342 SR_CORE ("id_isar3_el1", CPENC (3,0,C0,C2,3), F_REG_READ),
4343 SR_CORE ("id_isar4_el1", CPENC (3,0,C0,C2,4), F_REG_READ),
4344 SR_CORE ("id_isar5_el1", CPENC (3,0,C0,C2,5), F_REG_READ),
4345 SR_CORE ("id_isar6_el1", CPENC (3,0,C0,C2,7), F_REG_READ),
4346 SR_CORE ("mvfr0_el1", CPENC (3,0,C0,C3,0), F_REG_READ),
4347 SR_CORE ("mvfr1_el1", CPENC (3,0,C0,C3,1), F_REG_READ),
4348 SR_CORE ("mvfr2_el1", CPENC (3,0,C0,C3,2), F_REG_READ),
4349 SR_CORE ("ccsidr_el1", CPENC (3,1,C0,C0,0), F_REG_READ),
4350 SR_V8_3 ("ccsidr2_el1", CPENC (3,1,C0,C0,2), F_REG_READ),
4351 SR_CORE ("id_aa64pfr0_el1", CPENC (3,0,C0,C4,0), F_REG_READ),
4352 SR_CORE ("id_aa64pfr1_el1", CPENC (3,0,C0,C4,1), F_REG_READ),
4353 SR_CORE ("id_aa64dfr0_el1", CPENC (3,0,C0,C5,0), F_REG_READ),
4354 SR_CORE ("id_aa64dfr1_el1", CPENC (3,0,C0,C5,1), F_REG_READ),
4355 SR_CORE ("id_aa64isar0_el1", CPENC (3,0,C0,C6,0), F_REG_READ),
4356 SR_CORE ("id_aa64isar1_el1", CPENC (3,0,C0,C6,1), F_REG_READ),
4357 SR_CORE ("id_aa64isar2_el1", CPENC (3,0,C0,C6,2), F_REG_READ),
4358 SR_CORE ("id_aa64mmfr0_el1", CPENC (3,0,C0,C7,0), F_REG_READ),
4359 SR_CORE ("id_aa64mmfr1_el1", CPENC (3,0,C0,C7,1), F_REG_READ),
4360 SR_CORE ("id_aa64mmfr2_el1", CPENC (3,0,C0,C7,2), F_REG_READ),
4361 SR_CORE ("id_aa64afr0_el1", CPENC (3,0,C0,C5,4), F_REG_READ),
4362 SR_CORE ("id_aa64afr1_el1", CPENC (3,0,C0,C5,5), F_REG_READ),
4363 SR_SVE ("id_aa64zfr0_el1", CPENC (3,0,C0,C4,4), F_REG_READ),
4364 SR_CORE ("clidr_el1", CPENC (3,1,C0,C0,1), F_REG_READ),
4365 SR_CORE ("csselr_el1", CPENC (3,2,C0,C0,0), 0),
4366 SR_CORE ("vpidr_el2", CPENC (3,4,C0,C0,0), 0),
4367 SR_CORE ("vmpidr_el2", CPENC (3,4,C0,C0,5), 0),
4368 SR_CORE ("sctlr_el1", CPENC (3,0,C1,C0,0), 0),
4369 SR_CORE ("sctlr_el2", CPENC (3,4,C1,C0,0), 0),
4370 SR_CORE ("sctlr_el3", CPENC (3,6,C1,C0,0), 0),
4371 SR_V8_1 ("sctlr_el12", CPENC (3,5,C1,C0,0), 0),
4372 SR_CORE ("actlr_el1", CPENC (3,0,C1,C0,1), 0),
4373 SR_CORE ("actlr_el2", CPENC (3,4,C1,C0,1), 0),
4374 SR_CORE ("actlr_el3", CPENC (3,6,C1,C0,1), 0),
4375 SR_CORE ("cpacr_el1", CPENC (3,0,C1,C0,2), 0),
4376 SR_V8_1 ("cpacr_el12", CPENC (3,5,C1,C0,2), 0),
4377 SR_CORE ("cptr_el2", CPENC (3,4,C1,C1,2), 0),
4378 SR_CORE ("cptr_el3", CPENC (3,6,C1,C1,2), 0),
4379 SR_CORE ("scr_el3", CPENC (3,6,C1,C1,0), 0),
4380 SR_CORE ("hcr_el2", CPENC (3,4,C1,C1,0), 0),
4381 SR_CORE ("mdcr_el2", CPENC (3,4,C1,C1,1), 0),
4382 SR_CORE ("mdcr_el3", CPENC (3,6,C1,C3,1), 0),
4383 SR_CORE ("hstr_el2", CPENC (3,4,C1,C1,3), 0),
4384 SR_CORE ("hacr_el2", CPENC (3,4,C1,C1,7), 0),
4385 SR_SVE ("zcr_el1", CPENC (3,0,C1,C2,0), 0),
4386 SR_SVE ("zcr_el12", CPENC (3,5,C1,C2,0), 0),
4387 SR_SVE ("zcr_el2", CPENC (3,4,C1,C2,0), 0),
4388 SR_SVE ("zcr_el3", CPENC (3,6,C1,C2,0), 0),
4389 SR_CORE ("ttbr0_el1", CPENC (3,0,C2,C0,0), 0),
4390 SR_CORE ("ttbr1_el1", CPENC (3,0,C2,C0,1), 0),
4391 SR_V8_A ("ttbr0_el2", CPENC (3,4,C2,C0,0), 0),
4392 SR_V8_1_A ("ttbr1_el2", CPENC (3,4,C2,C0,1), 0),
4393 SR_CORE ("ttbr0_el3", CPENC (3,6,C2,C0,0), 0),
4394 SR_V8_1 ("ttbr0_el12", CPENC (3,5,C2,C0,0), 0),
4395 SR_V8_1 ("ttbr1_el12", CPENC (3,5,C2,C0,1), 0),
4396 SR_V8_A ("vttbr_el2", CPENC (3,4,C2,C1,0), 0),
4397 SR_CORE ("tcr_el1", CPENC (3,0,C2,C0,2), 0),
4398 SR_CORE ("tcr_el2", CPENC (3,4,C2,C0,2), 0),
4399 SR_CORE ("tcr_el3", CPENC (3,6,C2,C0,2), 0),
4400 SR_V8_1 ("tcr_el12", CPENC (3,5,C2,C0,2), 0),
4401 SR_CORE ("vtcr_el2", CPENC (3,4,C2,C1,2), 0),
4402 SR_V8_3 ("apiakeylo_el1", CPENC (3,0,C2,C1,0), 0),
4403 SR_V8_3 ("apiakeyhi_el1", CPENC (3,0,C2,C1,1), 0),
4404 SR_V8_3 ("apibkeylo_el1", CPENC (3,0,C2,C1,2), 0),
4405 SR_V8_3 ("apibkeyhi_el1", CPENC (3,0,C2,C1,3), 0),
4406 SR_V8_3 ("apdakeylo_el1", CPENC (3,0,C2,C2,0), 0),
4407 SR_V8_3 ("apdakeyhi_el1", CPENC (3,0,C2,C2,1), 0),
4408 SR_V8_3 ("apdbkeylo_el1", CPENC (3,0,C2,C2,2), 0),
4409 SR_V8_3 ("apdbkeyhi_el1", CPENC (3,0,C2,C2,3), 0),
4410 SR_V8_3 ("apgakeylo_el1", CPENC (3,0,C2,C3,0), 0),
4411 SR_V8_3 ("apgakeyhi_el1", CPENC (3,0,C2,C3,1), 0),
4412 SR_CORE ("afsr0_el1", CPENC (3,0,C5,C1,0), 0),
4413 SR_CORE ("afsr1_el1", CPENC (3,0,C5,C1,1), 0),
4414 SR_CORE ("afsr0_el2", CPENC (3,4,C5,C1,0), 0),
4415 SR_CORE ("afsr1_el2", CPENC (3,4,C5,C1,1), 0),
4416 SR_CORE ("afsr0_el3", CPENC (3,6,C5,C1,0), 0),
4417 SR_V8_1 ("afsr0_el12", CPENC (3,5,C5,C1,0), 0),
4418 SR_CORE ("afsr1_el3", CPENC (3,6,C5,C1,1), 0),
4419 SR_V8_1 ("afsr1_el12", CPENC (3,5,C5,C1,1), 0),
4420 SR_CORE ("esr_el1", CPENC (3,0,C5,C2,0), 0),
4421 SR_CORE ("esr_el2", CPENC (3,4,C5,C2,0), 0),
4422 SR_CORE ("esr_el3", CPENC (3,6,C5,C2,0), 0),
4423 SR_V8_1 ("esr_el12", CPENC (3,5,C5,C2,0), 0),
4424 SR_RAS ("vsesr_el2", CPENC (3,4,C5,C2,3), 0),
4425 SR_CORE ("fpexc32_el2", CPENC (3,4,C5,C3,0), 0),
4426 SR_RAS ("erridr_el1", CPENC (3,0,C5,C3,0), F_REG_READ),
4427 SR_RAS ("errselr_el1", CPENC (3,0,C5,C3,1), 0),
4428 SR_RAS ("erxfr_el1", CPENC (3,0,C5,C4,0), F_REG_READ),
4429 SR_RAS ("erxctlr_el1", CPENC (3,0,C5,C4,1), 0),
4430 SR_RAS ("erxstatus_el1", CPENC (3,0,C5,C4,2), 0),
4431 SR_RAS ("erxaddr_el1", CPENC (3,0,C5,C4,3), 0),
4432 SR_RAS ("erxmisc0_el1", CPENC (3,0,C5,C5,0), 0),
4433 SR_RAS ("erxmisc1_el1", CPENC (3,0,C5,C5,1), 0),
4434 SR_RAS ("erxmisc2_el1", CPENC (3,0,C5,C5,2), 0),
4435 SR_RAS ("erxmisc3_el1", CPENC (3,0,C5,C5,3), 0),
4436 SR_RAS ("erxpfgcdn_el1", CPENC (3,0,C5,C4,6), 0),
4437 SR_RAS ("erxpfgctl_el1", CPENC (3,0,C5,C4,5), 0),
4438 SR_RAS ("erxpfgf_el1", CPENC (3,0,C5,C4,4), F_REG_READ),
4439 SR_CORE ("far_el1", CPENC (3,0,C6,C0,0), 0),
4440 SR_CORE ("far_el2", CPENC (3,4,C6,C0,0), 0),
4441 SR_CORE ("far_el3", CPENC (3,6,C6,C0,0), 0),
4442 SR_V8_1 ("far_el12", CPENC (3,5,C6,C0,0), 0),
4443 SR_CORE ("hpfar_el2", CPENC (3,4,C6,C0,4), 0),
4444 SR_CORE ("par_el1", CPENC (3,0,C7,C4,0), 0),
4445 SR_CORE ("mair_el1", CPENC (3,0,C10,C2,0), 0),
4446 SR_CORE ("mair_el2", CPENC (3,4,C10,C2,0), 0),
4447 SR_CORE ("mair_el3", CPENC (3,6,C10,C2,0), 0),
4448 SR_V8_1 ("mair_el12", CPENC (3,5,C10,C2,0), 0),
4449 SR_CORE ("amair_el1", CPENC (3,0,C10,C3,0), 0),
4450 SR_CORE ("amair_el2", CPENC (3,4,C10,C3,0), 0),
4451 SR_CORE ("amair_el3", CPENC (3,6,C10,C3,0), 0),
4452 SR_V8_1 ("amair_el12", CPENC (3,5,C10,C3,0), 0),
4453 SR_CORE ("vbar_el1", CPENC (3,0,C12,C0,0), 0),
4454 SR_CORE ("vbar_el2", CPENC (3,4,C12,C0,0), 0),
4455 SR_CORE ("vbar_el3", CPENC (3,6,C12,C0,0), 0),
4456 SR_V8_1 ("vbar_el12", CPENC (3,5,C12,C0,0), 0),
4457 SR_CORE ("rvbar_el1", CPENC (3,0,C12,C0,1), F_REG_READ),
4458 SR_CORE ("rvbar_el2", CPENC (3,4,C12,C0,1), F_REG_READ),
4459 SR_CORE ("rvbar_el3", CPENC (3,6,C12,C0,1), F_REG_READ),
4460 SR_CORE ("rmr_el1", CPENC (3,0,C12,C0,2), 0),
4461 SR_CORE ("rmr_el2", CPENC (3,4,C12,C0,2), 0),
4462 SR_CORE ("rmr_el3", CPENC (3,6,C12,C0,2), 0),
4463 SR_CORE ("isr_el1", CPENC (3,0,C12,C1,0), F_REG_READ),
4464 SR_RAS ("disr_el1", CPENC (3,0,C12,C1,1), 0),
4465 SR_RAS ("vdisr_el2", CPENC (3,4,C12,C1,1), 0),
4466 SR_CORE ("contextidr_el1", CPENC (3,0,C13,C0,1), 0),
4467 SR_V8_1 ("contextidr_el2", CPENC (3,4,C13,C0,1), 0),
4468 SR_V8_1 ("contextidr_el12", CPENC (3,5,C13,C0,1), 0),
4469 SR_RNG ("rndr", CPENC (3,3,C2,C4,0), F_REG_READ),
4470 SR_RNG ("rndrrs", CPENC (3,3,C2,C4,1), F_REG_READ),
4471 SR_MEMTAG ("tco", CPENC (3,3,C4,C2,7), 0),
4472 SR_MEMTAG ("tfsre0_el1", CPENC (3,0,C5,C6,1), 0),
4473 SR_MEMTAG ("tfsr_el1", CPENC (3,0,C5,C6,0), 0),
4474 SR_MEMTAG ("tfsr_el2", CPENC (3,4,C5,C6,0), 0),
4475 SR_MEMTAG ("tfsr_el3", CPENC (3,6,C5,C6,0), 0),
4476 SR_MEMTAG ("tfsr_el12", CPENC (3,5,C5,C6,0), 0),
4477 SR_MEMTAG ("rgsr_el1", CPENC (3,0,C1,C0,5), 0),
4478 SR_MEMTAG ("gcr_el1", CPENC (3,0,C1,C0,6), 0),
4479 SR_MEMTAG ("gmid_el1", CPENC (3,1,C0,C0,4), F_REG_READ),
4480 SR_CORE ("tpidr_el0", CPENC (3,3,C13,C0,2), 0),
4481 SR_CORE ("tpidrro_el0", CPENC (3,3,C13,C0,3), 0),
4482 SR_CORE ("tpidr_el1", CPENC (3,0,C13,C0,4), 0),
4483 SR_CORE ("tpidr_el2", CPENC (3,4,C13,C0,2), 0),
4484 SR_CORE ("tpidr_el3", CPENC (3,6,C13,C0,2), 0),
4485 SR_SCXTNUM ("scxtnum_el0", CPENC (3,3,C13,C0,7), 0),
4486 SR_SCXTNUM ("scxtnum_el1", CPENC (3,0,C13,C0,7), 0),
4487 SR_SCXTNUM ("scxtnum_el2", CPENC (3,4,C13,C0,7), 0),
4488 SR_SCXTNUM ("scxtnum_el12", CPENC (3,5,C13,C0,7), 0),
4489 SR_SCXTNUM ("scxtnum_el3", CPENC (3,6,C13,C0,7), 0),
4490 SR_CORE ("teecr32_el1", CPENC (2,2,C0, C0,0), 0), /* See section 3.9.7.1. */
4491 SR_CORE ("cntfrq_el0", CPENC (3,3,C14,C0,0), 0),
4492 SR_CORE ("cntpct_el0", CPENC (3,3,C14,C0,1), F_REG_READ),
4493 SR_CORE ("cntvct_el0", CPENC (3,3,C14,C0,2), F_REG_READ),
4494 SR_CORE ("cntvoff_el2", CPENC (3,4,C14,C0,3), 0),
4495 SR_CORE ("cntkctl_el1", CPENC (3,0,C14,C1,0), 0),
4496 SR_V8_1 ("cntkctl_el12", CPENC (3,5,C14,C1,0), 0),
4497 SR_CORE ("cnthctl_el2", CPENC (3,4,C14,C1,0), 0),
4498 SR_CORE ("cntp_tval_el0", CPENC (3,3,C14,C2,0), 0),
4499 SR_V8_1 ("cntp_tval_el02", CPENC (3,5,C14,C2,0), 0),
4500 SR_CORE ("cntp_ctl_el0", CPENC (3,3,C14,C2,1), 0),
4501 SR_V8_1 ("cntp_ctl_el02", CPENC (3,5,C14,C2,1), 0),
4502 SR_CORE ("cntp_cval_el0", CPENC (3,3,C14,C2,2), 0),
4503 SR_V8_1 ("cntp_cval_el02", CPENC (3,5,C14,C2,2), 0),
4504 SR_CORE ("cntv_tval_el0", CPENC (3,3,C14,C3,0), 0),
4505 SR_V8_1 ("cntv_tval_el02", CPENC (3,5,C14,C3,0), 0),
4506 SR_CORE ("cntv_ctl_el0", CPENC (3,3,C14,C3,1), 0),
4507 SR_V8_1 ("cntv_ctl_el02", CPENC (3,5,C14,C3,1), 0),
4508 SR_CORE ("cntv_cval_el0", CPENC (3,3,C14,C3,2), 0),
4509 SR_V8_1 ("cntv_cval_el02", CPENC (3,5,C14,C3,2), 0),
4510 SR_CORE ("cnthp_tval_el2", CPENC (3,4,C14,C2,0), 0),
4511 SR_CORE ("cnthp_ctl_el2", CPENC (3,4,C14,C2,1), 0),
4512 SR_CORE ("cnthp_cval_el2", CPENC (3,4,C14,C2,2), 0),
4513 SR_CORE ("cntps_tval_el1", CPENC (3,7,C14,C2,0), 0),
4514 SR_CORE ("cntps_ctl_el1", CPENC (3,7,C14,C2,1), 0),
4515 SR_CORE ("cntps_cval_el1", CPENC (3,7,C14,C2,2), 0),
4516 SR_V8_1 ("cnthv_tval_el2", CPENC (3,4,C14,C3,0), 0),
4517 SR_V8_1 ("cnthv_ctl_el2", CPENC (3,4,C14,C3,1), 0),
4518 SR_V8_1 ("cnthv_cval_el2", CPENC (3,4,C14,C3,2), 0),
4519 SR_CORE ("dacr32_el2", CPENC (3,4,C3,C0,0), 0),
4520 SR_CORE ("ifsr32_el2", CPENC (3,4,C5,C0,1), 0),
4521 SR_CORE ("teehbr32_el1", CPENC (2,2,C1,C0,0), 0),
4522 SR_CORE ("sder32_el3", CPENC (3,6,C1,C1,1), 0),
4523 SR_CORE ("mdscr_el1", CPENC (2,0,C0,C2,2), 0),
4524 SR_CORE ("mdccsr_el0", CPENC (2,3,C0,C1,0), F_REG_READ),
4525 SR_CORE ("mdccint_el1", CPENC (2,0,C0,C2,0), 0),
4526 SR_CORE ("dbgdtr_el0", CPENC (2,3,C0,C4,0), 0),
4527 SR_CORE ("dbgdtrrx_el0", CPENC (2,3,C0,C5,0), F_REG_READ),
4528 SR_CORE ("dbgdtrtx_el0", CPENC (2,3,C0,C5,0), F_REG_WRITE),
4529 SR_CORE ("osdtrrx_el1", CPENC (2,0,C0,C0,2), 0),
4530 SR_CORE ("osdtrtx_el1", CPENC (2,0,C0,C3,2), 0),
4531 SR_CORE ("oseccr_el1", CPENC (2,0,C0,C6,2), 0),
4532 SR_CORE ("dbgvcr32_el2", CPENC (2,4,C0,C7,0), 0),
4533 SR_CORE ("dbgbvr0_el1", CPENC (2,0,C0,C0,4), 0),
4534 SR_CORE ("dbgbvr1_el1", CPENC (2,0,C0,C1,4), 0),
4535 SR_CORE ("dbgbvr2_el1", CPENC (2,0,C0,C2,4), 0),
4536 SR_CORE ("dbgbvr3_el1", CPENC (2,0,C0,C3,4), 0),
4537 SR_CORE ("dbgbvr4_el1", CPENC (2,0,C0,C4,4), 0),
4538 SR_CORE ("dbgbvr5_el1", CPENC (2,0,C0,C5,4), 0),
4539 SR_CORE ("dbgbvr6_el1", CPENC (2,0,C0,C6,4), 0),
4540 SR_CORE ("dbgbvr7_el1", CPENC (2,0,C0,C7,4), 0),
4541 SR_CORE ("dbgbvr8_el1", CPENC (2,0,C0,C8,4), 0),
4542 SR_CORE ("dbgbvr9_el1", CPENC (2,0,C0,C9,4), 0),
4543 SR_CORE ("dbgbvr10_el1", CPENC (2,0,C0,C10,4), 0),
4544 SR_CORE ("dbgbvr11_el1", CPENC (2,0,C0,C11,4), 0),
4545 SR_CORE ("dbgbvr12_el1", CPENC (2,0,C0,C12,4), 0),
4546 SR_CORE ("dbgbvr13_el1", CPENC (2,0,C0,C13,4), 0),
4547 SR_CORE ("dbgbvr14_el1", CPENC (2,0,C0,C14,4), 0),
4548 SR_CORE ("dbgbvr15_el1", CPENC (2,0,C0,C15,4), 0),
4549 SR_CORE ("dbgbcr0_el1", CPENC (2,0,C0,C0,5), 0),
4550 SR_CORE ("dbgbcr1_el1", CPENC (2,0,C0,C1,5), 0),
4551 SR_CORE ("dbgbcr2_el1", CPENC (2,0,C0,C2,5), 0),
4552 SR_CORE ("dbgbcr3_el1", CPENC (2,0,C0,C3,5), 0),
4553 SR_CORE ("dbgbcr4_el1", CPENC (2,0,C0,C4,5), 0),
4554 SR_CORE ("dbgbcr5_el1", CPENC (2,0,C0,C5,5), 0),
4555 SR_CORE ("dbgbcr6_el1", CPENC (2,0,C0,C6,5), 0),
4556 SR_CORE ("dbgbcr7_el1", CPENC (2,0,C0,C7,5), 0),
4557 SR_CORE ("dbgbcr8_el1", CPENC (2,0,C0,C8,5), 0),
4558 SR_CORE ("dbgbcr9_el1", CPENC (2,0,C0,C9,5), 0),
4559 SR_CORE ("dbgbcr10_el1", CPENC (2,0,C0,C10,5), 0),
4560 SR_CORE ("dbgbcr11_el1", CPENC (2,0,C0,C11,5), 0),
4561 SR_CORE ("dbgbcr12_el1", CPENC (2,0,C0,C12,5), 0),
4562 SR_CORE ("dbgbcr13_el1", CPENC (2,0,C0,C13,5), 0),
4563 SR_CORE ("dbgbcr14_el1", CPENC (2,0,C0,C14,5), 0),
4564 SR_CORE ("dbgbcr15_el1", CPENC (2,0,C0,C15,5), 0),
4565 SR_CORE ("dbgwvr0_el1", CPENC (2,0,C0,C0,6), 0),
4566 SR_CORE ("dbgwvr1_el1", CPENC (2,0,C0,C1,6), 0),
4567 SR_CORE ("dbgwvr2_el1", CPENC (2,0,C0,C2,6), 0),
4568 SR_CORE ("dbgwvr3_el1", CPENC (2,0,C0,C3,6), 0),
4569 SR_CORE ("dbgwvr4_el1", CPENC (2,0,C0,C4,6), 0),
4570 SR_CORE ("dbgwvr5_el1", CPENC (2,0,C0,C5,6), 0),
4571 SR_CORE ("dbgwvr6_el1", CPENC (2,0,C0,C6,6), 0),
4572 SR_CORE ("dbgwvr7_el1", CPENC (2,0,C0,C7,6), 0),
4573 SR_CORE ("dbgwvr8_el1", CPENC (2,0,C0,C8,6), 0),
4574 SR_CORE ("dbgwvr9_el1", CPENC (2,0,C0,C9,6), 0),
4575 SR_CORE ("dbgwvr10_el1", CPENC (2,0,C0,C10,6), 0),
4576 SR_CORE ("dbgwvr11_el1", CPENC (2,0,C0,C11,6), 0),
4577 SR_CORE ("dbgwvr12_el1", CPENC (2,0,C0,C12,6), 0),
4578 SR_CORE ("dbgwvr13_el1", CPENC (2,0,C0,C13,6), 0),
4579 SR_CORE ("dbgwvr14_el1", CPENC (2,0,C0,C14,6), 0),
4580 SR_CORE ("dbgwvr15_el1", CPENC (2,0,C0,C15,6), 0),
4581 SR_CORE ("dbgwcr0_el1", CPENC (2,0,C0,C0,7), 0),
4582 SR_CORE ("dbgwcr1_el1", CPENC (2,0,C0,C1,7), 0),
4583 SR_CORE ("dbgwcr2_el1", CPENC (2,0,C0,C2,7), 0),
4584 SR_CORE ("dbgwcr3_el1", CPENC (2,0,C0,C3,7), 0),
4585 SR_CORE ("dbgwcr4_el1", CPENC (2,0,C0,C4,7), 0),
4586 SR_CORE ("dbgwcr5_el1", CPENC (2,0,C0,C5,7), 0),
4587 SR_CORE ("dbgwcr6_el1", CPENC (2,0,C0,C6,7), 0),
4588 SR_CORE ("dbgwcr7_el1", CPENC (2,0,C0,C7,7), 0),
4589 SR_CORE ("dbgwcr8_el1", CPENC (2,0,C0,C8,7), 0),
4590 SR_CORE ("dbgwcr9_el1", CPENC (2,0,C0,C9,7), 0),
4591 SR_CORE ("dbgwcr10_el1", CPENC (2,0,C0,C10,7), 0),
4592 SR_CORE ("dbgwcr11_el1", CPENC (2,0,C0,C11,7), 0),
4593 SR_CORE ("dbgwcr12_el1", CPENC (2,0,C0,C12,7), 0),
4594 SR_CORE ("dbgwcr13_el1", CPENC (2,0,C0,C13,7), 0),
4595 SR_CORE ("dbgwcr14_el1", CPENC (2,0,C0,C14,7), 0),
4596 SR_CORE ("dbgwcr15_el1", CPENC (2,0,C0,C15,7), 0),
4597 SR_CORE ("mdrar_el1", CPENC (2,0,C1,C0,0), F_REG_READ),
4598 SR_CORE ("oslar_el1", CPENC (2,0,C1,C0,4), F_REG_WRITE),
4599 SR_CORE ("oslsr_el1", CPENC (2,0,C1,C1,4), F_REG_READ),
4600 SR_CORE ("osdlr_el1", CPENC (2,0,C1,C3,4), 0),
4601 SR_CORE ("dbgprcr_el1", CPENC (2,0,C1,C4,4), 0),
4602 SR_CORE ("dbgclaimset_el1", CPENC (2,0,C7,C8,6), 0),
4603 SR_CORE ("dbgclaimclr_el1", CPENC (2,0,C7,C9,6), 0),
4604 SR_CORE ("dbgauthstatus_el1", CPENC (2,0,C7,C14,6), F_REG_READ),
4605 SR_PROFILE ("pmblimitr_el1", CPENC (3,0,C9,C10,0), 0),
4606 SR_PROFILE ("pmbptr_el1", CPENC (3,0,C9,C10,1), 0),
4607 SR_PROFILE ("pmbsr_el1", CPENC (3,0,C9,C10,3), 0),
4608 SR_PROFILE ("pmbidr_el1", CPENC (3,0,C9,C10,7), F_REG_READ),
4609 SR_PROFILE ("pmscr_el1", CPENC (3,0,C9,C9,0), 0),
4610 SR_PROFILE ("pmsicr_el1", CPENC (3,0,C9,C9,2), 0),
4611 SR_PROFILE ("pmsirr_el1", CPENC (3,0,C9,C9,3), 0),
4612 SR_PROFILE ("pmsfcr_el1", CPENC (3,0,C9,C9,4), 0),
4613 SR_PROFILE ("pmsevfr_el1", CPENC (3,0,C9,C9,5), 0),
4614 SR_PROFILE ("pmslatfr_el1", CPENC (3,0,C9,C9,6), 0),
4615 SR_PROFILE ("pmsidr_el1", CPENC (3,0,C9,C9,7), F_REG_READ),
4616 SR_PROFILE ("pmscr_el2", CPENC (3,4,C9,C9,0), 0),
4617 SR_PROFILE ("pmscr_el12", CPENC (3,5,C9,C9,0), 0),
4618 SR_CORE ("pmcr_el0", CPENC (3,3,C9,C12,0), 0),
4619 SR_CORE ("pmcntenset_el0", CPENC (3,3,C9,C12,1), 0),
4620 SR_CORE ("pmcntenclr_el0", CPENC (3,3,C9,C12,2), 0),
4621 SR_CORE ("pmovsclr_el0", CPENC (3,3,C9,C12,3), 0),
4622 SR_CORE ("pmswinc_el0", CPENC (3,3,C9,C12,4), F_REG_WRITE),
4623 SR_CORE ("pmselr_el0", CPENC (3,3,C9,C12,5), 0),
4624 SR_CORE ("pmceid0_el0", CPENC (3,3,C9,C12,6), F_REG_READ),
4625 SR_CORE ("pmceid1_el0", CPENC (3,3,C9,C12,7), F_REG_READ),
4626 SR_CORE ("pmccntr_el0", CPENC (3,3,C9,C13,0), 0),
4627 SR_CORE ("pmxevtyper_el0", CPENC (3,3,C9,C13,1), 0),
4628 SR_CORE ("pmxevcntr_el0", CPENC (3,3,C9,C13,2), 0),
4629 SR_CORE ("pmuserenr_el0", CPENC (3,3,C9,C14,0), 0),
4630 SR_CORE ("pmintenset_el1", CPENC (3,0,C9,C14,1), 0),
4631 SR_CORE ("pmintenclr_el1", CPENC (3,0,C9,C14,2), 0),
4632 SR_CORE ("pmovsset_el0", CPENC (3,3,C9,C14,3), 0),
4633 SR_CORE ("pmevcntr0_el0", CPENC (3,3,C14,C8,0), 0),
4634 SR_CORE ("pmevcntr1_el0", CPENC (3,3,C14,C8,1), 0),
4635 SR_CORE ("pmevcntr2_el0", CPENC (3,3,C14,C8,2), 0),
4636 SR_CORE ("pmevcntr3_el0", CPENC (3,3,C14,C8,3), 0),
4637 SR_CORE ("pmevcntr4_el0", CPENC (3,3,C14,C8,4), 0),
4638 SR_CORE ("pmevcntr5_el0", CPENC (3,3,C14,C8,5), 0),
4639 SR_CORE ("pmevcntr6_el0", CPENC (3,3,C14,C8,6), 0),
4640 SR_CORE ("pmevcntr7_el0", CPENC (3,3,C14,C8,7), 0),
4641 SR_CORE ("pmevcntr8_el0", CPENC (3,3,C14,C9,0), 0),
4642 SR_CORE ("pmevcntr9_el0", CPENC (3,3,C14,C9,1), 0),
4643 SR_CORE ("pmevcntr10_el0", CPENC (3,3,C14,C9,2), 0),
4644 SR_CORE ("pmevcntr11_el0", CPENC (3,3,C14,C9,3), 0),
4645 SR_CORE ("pmevcntr12_el0", CPENC (3,3,C14,C9,4), 0),
4646 SR_CORE ("pmevcntr13_el0", CPENC (3,3,C14,C9,5), 0),
4647 SR_CORE ("pmevcntr14_el0", CPENC (3,3,C14,C9,6), 0),
4648 SR_CORE ("pmevcntr15_el0", CPENC (3,3,C14,C9,7), 0),
4649 SR_CORE ("pmevcntr16_el0", CPENC (3,3,C14,C10,0), 0),
4650 SR_CORE ("pmevcntr17_el0", CPENC (3,3,C14,C10,1), 0),
4651 SR_CORE ("pmevcntr18_el0", CPENC (3,3,C14,C10,2), 0),
4652 SR_CORE ("pmevcntr19_el0", CPENC (3,3,C14,C10,3), 0),
4653 SR_CORE ("pmevcntr20_el0", CPENC (3,3,C14,C10,4), 0),
4654 SR_CORE ("pmevcntr21_el0", CPENC (3,3,C14,C10,5), 0),
4655 SR_CORE ("pmevcntr22_el0", CPENC (3,3,C14,C10,6), 0),
4656 SR_CORE ("pmevcntr23_el0", CPENC (3,3,C14,C10,7), 0),
4657 SR_CORE ("pmevcntr24_el0", CPENC (3,3,C14,C11,0), 0),
4658 SR_CORE ("pmevcntr25_el0", CPENC (3,3,C14,C11,1), 0),
4659 SR_CORE ("pmevcntr26_el0", CPENC (3,3,C14,C11,2), 0),
4660 SR_CORE ("pmevcntr27_el0", CPENC (3,3,C14,C11,3), 0),
4661 SR_CORE ("pmevcntr28_el0", CPENC (3,3,C14,C11,4), 0),
4662 SR_CORE ("pmevcntr29_el0", CPENC (3,3,C14,C11,5), 0),
4663 SR_CORE ("pmevcntr30_el0", CPENC (3,3,C14,C11,6), 0),
4664 SR_CORE ("pmevtyper0_el0", CPENC (3,3,C14,C12,0), 0),
4665 SR_CORE ("pmevtyper1_el0", CPENC (3,3,C14,C12,1), 0),
4666 SR_CORE ("pmevtyper2_el0", CPENC (3,3,C14,C12,2), 0),
4667 SR_CORE ("pmevtyper3_el0", CPENC (3,3,C14,C12,3), 0),
4668 SR_CORE ("pmevtyper4_el0", CPENC (3,3,C14,C12,4), 0),
4669 SR_CORE ("pmevtyper5_el0", CPENC (3,3,C14,C12,5), 0),
4670 SR_CORE ("pmevtyper6_el0", CPENC (3,3,C14,C12,6), 0),
4671 SR_CORE ("pmevtyper7_el0", CPENC (3,3,C14,C12,7), 0),
4672 SR_CORE ("pmevtyper8_el0", CPENC (3,3,C14,C13,0), 0),
4673 SR_CORE ("pmevtyper9_el0", CPENC (3,3,C14,C13,1), 0),
4674 SR_CORE ("pmevtyper10_el0", CPENC (3,3,C14,C13,2), 0),
4675 SR_CORE ("pmevtyper11_el0", CPENC (3,3,C14,C13,3), 0),
4676 SR_CORE ("pmevtyper12_el0", CPENC (3,3,C14,C13,4), 0),
4677 SR_CORE ("pmevtyper13_el0", CPENC (3,3,C14,C13,5), 0),
4678 SR_CORE ("pmevtyper14_el0", CPENC (3,3,C14,C13,6), 0),
4679 SR_CORE ("pmevtyper15_el0", CPENC (3,3,C14,C13,7), 0),
4680 SR_CORE ("pmevtyper16_el0", CPENC (3,3,C14,C14,0), 0),
4681 SR_CORE ("pmevtyper17_el0", CPENC (3,3,C14,C14,1), 0),
4682 SR_CORE ("pmevtyper18_el0", CPENC (3,3,C14,C14,2), 0),
4683 SR_CORE ("pmevtyper19_el0", CPENC (3,3,C14,C14,3), 0),
4684 SR_CORE ("pmevtyper20_el0", CPENC (3,3,C14,C14,4), 0),
4685 SR_CORE ("pmevtyper21_el0", CPENC (3,3,C14,C14,5), 0),
4686 SR_CORE ("pmevtyper22_el0", CPENC (3,3,C14,C14,6), 0),
4687 SR_CORE ("pmevtyper23_el0", CPENC (3,3,C14,C14,7), 0),
4688 SR_CORE ("pmevtyper24_el0", CPENC (3,3,C14,C15,0), 0),
4689 SR_CORE ("pmevtyper25_el0", CPENC (3,3,C14,C15,1), 0),
4690 SR_CORE ("pmevtyper26_el0", CPENC (3,3,C14,C15,2), 0),
4691 SR_CORE ("pmevtyper27_el0", CPENC (3,3,C14,C15,3), 0),
4692 SR_CORE ("pmevtyper28_el0", CPENC (3,3,C14,C15,4), 0),
4693 SR_CORE ("pmevtyper29_el0", CPENC (3,3,C14,C15,5), 0),
4694 SR_CORE ("pmevtyper30_el0", CPENC (3,3,C14,C15,6), 0),
4695 SR_CORE ("pmccfiltr_el0", CPENC (3,3,C14,C15,7), 0),
4696
4697 SR_V8_4 ("dit", CPEN_ (3,C2,5), 0),
4698 SR_V8_4 ("trfcr_el1", CPENC (3,0,C1,C2,1), 0),
4699 SR_V8_4 ("pmmir_el1", CPENC (3,0,C9,C14,6), F_REG_READ),
4700 SR_V8_4 ("trfcr_el2", CPENC (3,4,C1,C2,1), 0),
4701 SR_V8_4 ("vstcr_el2", CPENC (3,4,C2,C6,2), 0),
4702 SR_V8_4_A ("vsttbr_el2", CPENC (3,4,C2,C6,0), 0),
4703 SR_V8_4 ("cnthvs_tval_el2", CPENC (3,4,C14,C4,0), 0),
4704 SR_V8_4 ("cnthvs_cval_el2", CPENC (3,4,C14,C4,2), 0),
4705 SR_V8_4 ("cnthvs_ctl_el2", CPENC (3,4,C14,C4,1), 0),
4706 SR_V8_4 ("cnthps_tval_el2", CPENC (3,4,C14,C5,0), 0),
4707 SR_V8_4 ("cnthps_cval_el2", CPENC (3,4,C14,C5,2), 0),
4708 SR_V8_4 ("cnthps_ctl_el2", CPENC (3,4,C14,C5,1), 0),
4709 SR_V8_4 ("sder32_el2", CPENC (3,4,C1,C3,1), 0),
4710 SR_V8_4 ("vncr_el2", CPENC (3,4,C2,C2,0), 0),
4711 SR_V8_4 ("trfcr_el12", CPENC (3,5,C1,C2,1), 0),
4712
4713 SR_CORE ("mpam0_el1", CPENC (3,0,C10,C5,1), 0),
4714 SR_CORE ("mpam1_el1", CPENC (3,0,C10,C5,0), 0),
4715 SR_CORE ("mpam1_el12", CPENC (3,5,C10,C5,0), 0),
4716 SR_CORE ("mpam2_el2", CPENC (3,4,C10,C5,0), 0),
4717 SR_CORE ("mpam3_el3", CPENC (3,6,C10,C5,0), 0),
4718 SR_CORE ("mpamhcr_el2", CPENC (3,4,C10,C4,0), 0),
4719 SR_CORE ("mpamidr_el1", CPENC (3,0,C10,C4,4), F_REG_READ),
4720 SR_CORE ("mpamvpm0_el2", CPENC (3,4,C10,C6,0), 0),
4721 SR_CORE ("mpamvpm1_el2", CPENC (3,4,C10,C6,1), 0),
4722 SR_CORE ("mpamvpm2_el2", CPENC (3,4,C10,C6,2), 0),
4723 SR_CORE ("mpamvpm3_el2", CPENC (3,4,C10,C6,3), 0),
4724 SR_CORE ("mpamvpm4_el2", CPENC (3,4,C10,C6,4), 0),
4725 SR_CORE ("mpamvpm5_el2", CPENC (3,4,C10,C6,5), 0),
4726 SR_CORE ("mpamvpm6_el2", CPENC (3,4,C10,C6,6), 0),
4727 SR_CORE ("mpamvpm7_el2", CPENC (3,4,C10,C6,7), 0),
4728 SR_CORE ("mpamvpmv_el2", CPENC (3,4,C10,C4,1), 0),
4729
4730 SR_V8_R ("mpuir_el1", CPENC (3,0,C0,C0,4), F_REG_READ),
4731 SR_V8_R ("mpuir_el2", CPENC (3,4,C0,C0,4), F_REG_READ),
4732 SR_V8_R ("prbar_el1", CPENC (3,0,C6,C8,0), 0),
4733 SR_V8_R ("prbar_el2", CPENC (3,4,C6,C8,0), 0),
4734
4735 #define ENC_BARLAR(x,n,lar) \
4736 CPENC (3, (x-1) << 2, C6, 8 | (n >> 1), ((n & 1) << 2) | lar)
4737
4738 #define PRBARn_ELx(x,n) SR_V8_R ("prbar" #n "_el" #x, ENC_BARLAR (x,n,0), 0)
4739 #define PRLARn_ELx(x,n) SR_V8_R ("prlar" #n "_el" #x, ENC_BARLAR (x,n,1), 0)
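/* For example, SR_EXPAND_EL12 (PRBARn_ELx) below yields, among others,
   PRBARn_ELx (1,2), i.e. SR_V8_R ("prbar2_el1", ENC_BARLAR (1,2,0), 0),
   where ENC_BARLAR (1,2,0) == CPENC (3,0,C6,9,0).  */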
4740
4741 SR_EXPAND_EL12 (PRBARn_ELx)
4742 SR_V8_R ("prenr_el1", CPENC (3,0,C6,C1,1), 0),
4743 SR_V8_R ("prenr_el2", CPENC (3,4,C6,C1,1), 0),
4744 SR_V8_R ("prlar_el1", CPENC (3,0,C6,C8,1), 0),
4745 SR_V8_R ("prlar_el2", CPENC (3,4,C6,C8,1), 0),
4746 SR_EXPAND_EL12 (PRLARn_ELx)
4747 SR_V8_R ("prselr_el1", CPENC (3,0,C6,C2,1), 0),
4748 SR_V8_R ("prselr_el2", CPENC (3,4,C6,C2,1), 0),
4749 SR_V8_R ("vsctlr_el2", CPENC (3,4,C2,C0,0), 0),
4750
4751 SR_CORE("trbbaser_el1", CPENC (3,0,C9,C11,2), 0),
4752 SR_CORE("trbidr_el1", CPENC (3,0,C9,C11,7), F_REG_READ),
4753 SR_CORE("trblimitr_el1", CPENC (3,0,C9,C11,0), 0),
4754 SR_CORE("trbmar_el1", CPENC (3,0,C9,C11,4), 0),
4755 SR_CORE("trbptr_el1", CPENC (3,0,C9,C11,1), 0),
4756 SR_CORE("trbsr_el1", CPENC (3,0,C9,C11,3), 0),
4757 SR_CORE("trbtrg_el1", CPENC (3,0,C9,C11,6), 0),
4758
4759 SR_CORE ("trcauthstatus", CPENC (2,1,C7,C14,6), F_REG_READ),
4760 SR_CORE ("trccidr0", CPENC (2,1,C7,C12,7), F_REG_READ),
4761 SR_CORE ("trccidr1", CPENC (2,1,C7,C13,7), F_REG_READ),
4762 SR_CORE ("trccidr2", CPENC (2,1,C7,C14,7), F_REG_READ),
4763 SR_CORE ("trccidr3", CPENC (2,1,C7,C15,7), F_REG_READ),
4764 SR_CORE ("trcdevaff0", CPENC (2,1,C7,C10,6), F_REG_READ),
4765 SR_CORE ("trcdevaff1", CPENC (2,1,C7,C11,6), F_REG_READ),
4766 SR_CORE ("trcdevarch", CPENC (2,1,C7,C15,6), F_REG_READ),
4767 SR_CORE ("trcdevid", CPENC (2,1,C7,C2,7), F_REG_READ),
4768 SR_CORE ("trcdevtype", CPENC (2,1,C7,C3,7), F_REG_READ),
4769 SR_CORE ("trcidr0", CPENC (2,1,C0,C8,7), F_REG_READ),
4770 SR_CORE ("trcidr1", CPENC (2,1,C0,C9,7), F_REG_READ),
4771 SR_CORE ("trcidr2", CPENC (2,1,C0,C10,7), F_REG_READ),
4772 SR_CORE ("trcidr3", CPENC (2,1,C0,C11,7), F_REG_READ),
4773 SR_CORE ("trcidr4", CPENC (2,1,C0,C12,7), F_REG_READ),
4774 SR_CORE ("trcidr5", CPENC (2,1,C0,C13,7), F_REG_READ),
4775 SR_CORE ("trcidr6", CPENC (2,1,C0,C14,7), F_REG_READ),
4776 SR_CORE ("trcidr7", CPENC (2,1,C0,C15,7), F_REG_READ),
4777 SR_CORE ("trcidr8", CPENC (2,1,C0,C0,6), F_REG_READ),
4778 SR_CORE ("trcidr9", CPENC (2,1,C0,C1,6), F_REG_READ),
4779 SR_CORE ("trcidr10", CPENC (2,1,C0,C2,6), F_REG_READ),
4780 SR_CORE ("trcidr11", CPENC (2,1,C0,C3,6), F_REG_READ),
4781 SR_CORE ("trcidr12", CPENC (2,1,C0,C4,6), F_REG_READ),
4782 SR_CORE ("trcidr13", CPENC (2,1,C0,C5,6), F_REG_READ),
4783 SR_CORE ("trclsr", CPENC (2,1,C7,C13,6), F_REG_READ),
4784 SR_CORE ("trcoslsr", CPENC (2,1,C1,C1,4), F_REG_READ),
4785 SR_CORE ("trcpdsr", CPENC (2,1,C1,C5,4), F_REG_READ),
4786 SR_CORE ("trcpidr0", CPENC (2,1,C7,C8,7), F_REG_READ),
4787 SR_CORE ("trcpidr1", CPENC (2,1,C7,C9,7), F_REG_READ),
4788 SR_CORE ("trcpidr2", CPENC (2,1,C7,C10,7), F_REG_READ),
4789 SR_CORE ("trcpidr3", CPENC (2,1,C7,C11,7), F_REG_READ),
4790 SR_CORE ("trcpidr4", CPENC (2,1,C7,C4,7), F_REG_READ),
4791 SR_CORE ("trcpidr5", CPENC (2,1,C7,C5,7), F_REG_READ),
4792 SR_CORE ("trcpidr6", CPENC (2,1,C7,C6,7), F_REG_READ),
4793 SR_CORE ("trcpidr7", CPENC (2,1,C7,C7,7), F_REG_READ),
4794 SR_CORE ("trcstatr", CPENC (2,1,C0,C3,0), F_REG_READ),
4795 SR_CORE ("trcacatr0", CPENC (2,1,C2,C0,2), 0),
4796 SR_CORE ("trcacatr1", CPENC (2,1,C2,C2,2), 0),
4797 SR_CORE ("trcacatr2", CPENC (2,1,C2,C4,2), 0),
4798 SR_CORE ("trcacatr3", CPENC (2,1,C2,C6,2), 0),
4799 SR_CORE ("trcacatr4", CPENC (2,1,C2,C8,2), 0),
4800 SR_CORE ("trcacatr5", CPENC (2,1,C2,C10,2), 0),
4801 SR_CORE ("trcacatr6", CPENC (2,1,C2,C12,2), 0),
4802 SR_CORE ("trcacatr7", CPENC (2,1,C2,C14,2), 0),
4803 SR_CORE ("trcacatr8", CPENC (2,1,C2,C0,3), 0),
4804 SR_CORE ("trcacatr9", CPENC (2,1,C2,C2,3), 0),
4805 SR_CORE ("trcacatr10", CPENC (2,1,C2,C4,3), 0),
4806 SR_CORE ("trcacatr11", CPENC (2,1,C2,C6,3), 0),
4807 SR_CORE ("trcacatr12", CPENC (2,1,C2,C8,3), 0),
4808 SR_CORE ("trcacatr13", CPENC (2,1,C2,C10,3), 0),
4809 SR_CORE ("trcacatr14", CPENC (2,1,C2,C12,3), 0),
4810 SR_CORE ("trcacatr15", CPENC (2,1,C2,C14,3), 0),
4811 SR_CORE ("trcacvr0", CPENC (2,1,C2,C0,0), 0),
4812 SR_CORE ("trcacvr1", CPENC (2,1,C2,C2,0), 0),
4813 SR_CORE ("trcacvr2", CPENC (2,1,C2,C4,0), 0),
4814 SR_CORE ("trcacvr3", CPENC (2,1,C2,C6,0), 0),
4815 SR_CORE ("trcacvr4", CPENC (2,1,C2,C8,0), 0),
4816 SR_CORE ("trcacvr5", CPENC (2,1,C2,C10,0), 0),
4817 SR_CORE ("trcacvr6", CPENC (2,1,C2,C12,0), 0),
4818 SR_CORE ("trcacvr7", CPENC (2,1,C2,C14,0), 0),
4819 SR_CORE ("trcacvr8", CPENC (2,1,C2,C0,1), 0),
4820 SR_CORE ("trcacvr9", CPENC (2,1,C2,C2,1), 0),
4821 SR_CORE ("trcacvr10", CPENC (2,1,C2,C4,1), 0),
4822 SR_CORE ("trcacvr11", CPENC (2,1,C2,C6,1), 0),
4823 SR_CORE ("trcacvr12", CPENC (2,1,C2,C8,1), 0),
4824 SR_CORE ("trcacvr13", CPENC (2,1,C2,C10,1), 0),
4825 SR_CORE ("trcacvr14", CPENC (2,1,C2,C12,1), 0),
4826 SR_CORE ("trcacvr15", CPENC (2,1,C2,C14,1), 0),
4827 SR_CORE ("trcauxctlr", CPENC (2,1,C0,C6,0), 0),
4828 SR_CORE ("trcbbctlr", CPENC (2,1,C0,C15,0), 0),
4829 SR_CORE ("trcccctlr", CPENC (2,1,C0,C14,0), 0),
4830 SR_CORE ("trccidcctlr0", CPENC (2,1,C3,C0,2), 0),
4831 SR_CORE ("trccidcctlr1", CPENC (2,1,C3,C1,2), 0),
4832 SR_CORE ("trccidcvr0", CPENC (2,1,C3,C0,0), 0),
4833 SR_CORE ("trccidcvr1", CPENC (2,1,C3,C2,0), 0),
4834 SR_CORE ("trccidcvr2", CPENC (2,1,C3,C4,0), 0),
4835 SR_CORE ("trccidcvr3", CPENC (2,1,C3,C6,0), 0),
4836 SR_CORE ("trccidcvr4", CPENC (2,1,C3,C8,0), 0),
4837 SR_CORE ("trccidcvr5", CPENC (2,1,C3,C10,0), 0),
4838 SR_CORE ("trccidcvr6", CPENC (2,1,C3,C12,0), 0),
4839 SR_CORE ("trccidcvr7", CPENC (2,1,C3,C14,0), 0),
4840 SR_CORE ("trcclaimclr", CPENC (2,1,C7,C9,6), 0),
4841 SR_CORE ("trcclaimset", CPENC (2,1,C7,C8,6), 0),
4842 SR_CORE ("trccntctlr0", CPENC (2,1,C0,C4,5), 0),
4843 SR_CORE ("trccntctlr1", CPENC (2,1,C0,C5,5), 0),
4844 SR_CORE ("trccntctlr2", CPENC (2,1,C0,C6,5), 0),
4845 SR_CORE ("trccntctlr3", CPENC (2,1,C0,C7,5), 0),
4846 SR_CORE ("trccntrldvr0", CPENC (2,1,C0,C0,5), 0),
4847 SR_CORE ("trccntrldvr1", CPENC (2,1,C0,C1,5), 0),
4848 SR_CORE ("trccntrldvr2", CPENC (2,1,C0,C2,5), 0),
4849 SR_CORE ("trccntrldvr3", CPENC (2,1,C0,C3,5), 0),
4850 SR_CORE ("trccntvr0", CPENC (2,1,C0,C8,5), 0),
4851 SR_CORE ("trccntvr1", CPENC (2,1,C0,C9,5), 0),
4852 SR_CORE ("trccntvr2", CPENC (2,1,C0,C10,5), 0),
4853 SR_CORE ("trccntvr3", CPENC (2,1,C0,C11,5), 0),
4854 SR_CORE ("trcconfigr", CPENC (2,1,C0,C4,0), 0),
4855 SR_CORE ("trcdvcmr0", CPENC (2,1,C2,C0,6), 0),
4856 SR_CORE ("trcdvcmr1", CPENC (2,1,C2,C4,6), 0),
4857 SR_CORE ("trcdvcmr2", CPENC (2,1,C2,C8,6), 0),
4858 SR_CORE ("trcdvcmr3", CPENC (2,1,C2,C12,6), 0),
4859 SR_CORE ("trcdvcmr4", CPENC (2,1,C2,C0,7), 0),
4860 SR_CORE ("trcdvcmr5", CPENC (2,1,C2,C4,7), 0),
4861 SR_CORE ("trcdvcmr6", CPENC (2,1,C2,C8,7), 0),
4862 SR_CORE ("trcdvcmr7", CPENC (2,1,C2,C12,7), 0),
4863 SR_CORE ("trcdvcvr0", CPENC (2,1,C2,C0,4), 0),
4864 SR_CORE ("trcdvcvr1", CPENC (2,1,C2,C4,4), 0),
4865 SR_CORE ("trcdvcvr2", CPENC (2,1,C2,C8,4), 0),
4866 SR_CORE ("trcdvcvr3", CPENC (2,1,C2,C12,4), 0),
4867 SR_CORE ("trcdvcvr4", CPENC (2,1,C2,C0,5), 0),
4868 SR_CORE ("trcdvcvr5", CPENC (2,1,C2,C4,5), 0),
4869 SR_CORE ("trcdvcvr6", CPENC (2,1,C2,C8,5), 0),
4870 SR_CORE ("trcdvcvr7", CPENC (2,1,C2,C12,5), 0),
4871 SR_CORE ("trceventctl0r", CPENC (2,1,C0,C8,0), 0),
4872 SR_CORE ("trceventctl1r", CPENC (2,1,C0,C9,0), 0),
4873 SR_CORE ("trcextinselr0", CPENC (2,1,C0,C8,4), 0),
4874 SR_CORE ("trcextinselr", CPENC (2,1,C0,C8,4), 0),
4875 SR_CORE ("trcextinselr1", CPENC (2,1,C0,C9,4), 0),
4876 SR_CORE ("trcextinselr2", CPENC (2,1,C0,C10,4), 0),
4877 SR_CORE ("trcextinselr3", CPENC (2,1,C0,C11,4), 0),
4878 SR_CORE ("trcimspec0", CPENC (2,1,C0,C0,7), 0),
4879 SR_CORE ("trcimspec1", CPENC (2,1,C0,C1,7), 0),
4880 SR_CORE ("trcimspec2", CPENC (2,1,C0,C2,7), 0),
4881 SR_CORE ("trcimspec3", CPENC (2,1,C0,C3,7), 0),
4882 SR_CORE ("trcimspec4", CPENC (2,1,C0,C4,7), 0),
4883 SR_CORE ("trcimspec5", CPENC (2,1,C0,C5,7), 0),
4884 SR_CORE ("trcimspec6", CPENC (2,1,C0,C6,7), 0),
4885 SR_CORE ("trcimspec7", CPENC (2,1,C0,C7,7), 0),
4886 SR_CORE ("trcitctrl", CPENC (2,1,C7,C0,4), 0),
4887 SR_CORE ("trcpdcr", CPENC (2,1,C1,C4,4), 0),
4888 SR_CORE ("trcprgctlr", CPENC (2,1,C0,C1,0), 0),
4889 SR_CORE ("trcprocselr", CPENC (2,1,C0,C2,0), 0),
4890 SR_CORE ("trcqctlr", CPENC (2,1,C0,C1,1), 0),
4891 SR_CORE ("trcrsr", CPENC (2,1,C0,C10,0), 0),
4892 SR_CORE ("trcrsctlr2", CPENC (2,1,C1,C2,0), 0),
4893 SR_CORE ("trcrsctlr3", CPENC (2,1,C1,C3,0), 0),
4894 SR_CORE ("trcrsctlr4", CPENC (2,1,C1,C4,0), 0),
4895 SR_CORE ("trcrsctlr5", CPENC (2,1,C1,C5,0), 0),
4896 SR_CORE ("trcrsctlr6", CPENC (2,1,C1,C6,0), 0),
4897 SR_CORE ("trcrsctlr7", CPENC (2,1,C1,C7,0), 0),
4898 SR_CORE ("trcrsctlr8", CPENC (2,1,C1,C8,0), 0),
4899 SR_CORE ("trcrsctlr9", CPENC (2,1,C1,C9,0), 0),
4900 SR_CORE ("trcrsctlr10", CPENC (2,1,C1,C10,0), 0),
4901 SR_CORE ("trcrsctlr11", CPENC (2,1,C1,C11,0), 0),
4902 SR_CORE ("trcrsctlr12", CPENC (2,1,C1,C12,0), 0),
4903 SR_CORE ("trcrsctlr13", CPENC (2,1,C1,C13,0), 0),
4904 SR_CORE ("trcrsctlr14", CPENC (2,1,C1,C14,0), 0),
4905 SR_CORE ("trcrsctlr15", CPENC (2,1,C1,C15,0), 0),
4906 SR_CORE ("trcrsctlr16", CPENC (2,1,C1,C0,1), 0),
4907 SR_CORE ("trcrsctlr17", CPENC (2,1,C1,C1,1), 0),
4908 SR_CORE ("trcrsctlr18", CPENC (2,1,C1,C2,1), 0),
4909 SR_CORE ("trcrsctlr19", CPENC (2,1,C1,C3,1), 0),
4910 SR_CORE ("trcrsctlr20", CPENC (2,1,C1,C4,1), 0),
4911 SR_CORE ("trcrsctlr21", CPENC (2,1,C1,C5,1), 0),
4912 SR_CORE ("trcrsctlr22", CPENC (2,1,C1,C6,1), 0),
4913 SR_CORE ("trcrsctlr23", CPENC (2,1,C1,C7,1), 0),
4914 SR_CORE ("trcrsctlr24", CPENC (2,1,C1,C8,1), 0),
4915 SR_CORE ("trcrsctlr25", CPENC (2,1,C1,C9,1), 0),
4916 SR_CORE ("trcrsctlr26", CPENC (2,1,C1,C10,1), 0),
4917 SR_CORE ("trcrsctlr27", CPENC (2,1,C1,C11,1), 0),
4918 SR_CORE ("trcrsctlr28", CPENC (2,1,C1,C12,1), 0),
4919 SR_CORE ("trcrsctlr29", CPENC (2,1,C1,C13,1), 0),
4920 SR_CORE ("trcrsctlr30", CPENC (2,1,C1,C14,1), 0),
4921 SR_CORE ("trcrsctlr31", CPENC (2,1,C1,C15,1), 0),
4922 SR_CORE ("trcseqevr0", CPENC (2,1,C0,C0,4), 0),
4923 SR_CORE ("trcseqevr1", CPENC (2,1,C0,C1,4), 0),
4924 SR_CORE ("trcseqevr2", CPENC (2,1,C0,C2,4), 0),
4925 SR_CORE ("trcseqrstevr", CPENC (2,1,C0,C6,4), 0),
4926 SR_CORE ("trcseqstr", CPENC (2,1,C0,C7,4), 0),
4927 SR_CORE ("trcssccr0", CPENC (2,1,C1,C0,2), 0),
4928 SR_CORE ("trcssccr1", CPENC (2,1,C1,C1,2), 0),
4929 SR_CORE ("trcssccr2", CPENC (2,1,C1,C2,2), 0),
4930 SR_CORE ("trcssccr3", CPENC (2,1,C1,C3,2), 0),
4931 SR_CORE ("trcssccr4", CPENC (2,1,C1,C4,2), 0),
4932 SR_CORE ("trcssccr5", CPENC (2,1,C1,C5,2), 0),
4933 SR_CORE ("trcssccr6", CPENC (2,1,C1,C6,2), 0),
4934 SR_CORE ("trcssccr7", CPENC (2,1,C1,C7,2), 0),
4935 SR_CORE ("trcsscsr0", CPENC (2,1,C1,C8,2), 0),
4936 SR_CORE ("trcsscsr1", CPENC (2,1,C1,C9,2), 0),
4937 SR_CORE ("trcsscsr2", CPENC (2,1,C1,C10,2), 0),
4938 SR_CORE ("trcsscsr3", CPENC (2,1,C1,C11,2), 0),
4939 SR_CORE ("trcsscsr4", CPENC (2,1,C1,C12,2), 0),
4940 SR_CORE ("trcsscsr5", CPENC (2,1,C1,C13,2), 0),
4941 SR_CORE ("trcsscsr6", CPENC (2,1,C1,C14,2), 0),
4942 SR_CORE ("trcsscsr7", CPENC (2,1,C1,C15,2), 0),
4943 SR_CORE ("trcsspcicr0", CPENC (2,1,C1,C0,3), 0),
4944 SR_CORE ("trcsspcicr1", CPENC (2,1,C1,C1,3), 0),
4945 SR_CORE ("trcsspcicr2", CPENC (2,1,C1,C2,3), 0),
4946 SR_CORE ("trcsspcicr3", CPENC (2,1,C1,C3,3), 0),
4947 SR_CORE ("trcsspcicr4", CPENC (2,1,C1,C4,3), 0),
4948 SR_CORE ("trcsspcicr5", CPENC (2,1,C1,C5,3), 0),
4949 SR_CORE ("trcsspcicr6", CPENC (2,1,C1,C6,3), 0),
4950 SR_CORE ("trcsspcicr7", CPENC (2,1,C1,C7,3), 0),
4951 SR_CORE ("trcstallctlr", CPENC (2,1,C0,C11,0), 0),
4952 SR_CORE ("trcsyncpr", CPENC (2,1,C0,C13,0), 0),
4953 SR_CORE ("trctraceidr", CPENC (2,1,C0,C0,1), 0),
4954 SR_CORE ("trctsctlr", CPENC (2,1,C0,C12,0), 0),
4955 SR_CORE ("trcvdarcctlr", CPENC (2,1,C0,C10,2), 0),
4956 SR_CORE ("trcvdctlr", CPENC (2,1,C0,C8,2), 0),
4957 SR_CORE ("trcvdsacctlr", CPENC (2,1,C0,C9,2), 0),
4958 SR_CORE ("trcvictlr", CPENC (2,1,C0,C0,2), 0),
4959 SR_CORE ("trcviiectlr", CPENC (2,1,C0,C1,2), 0),
4960 SR_CORE ("trcvipcssctlr", CPENC (2,1,C0,C3,2), 0),
4961 SR_CORE ("trcvissctlr", CPENC (2,1,C0,C2,2), 0),
4962 SR_CORE ("trcvmidcctlr0", CPENC (2,1,C3,C2,2), 0),
4963 SR_CORE ("trcvmidcctlr1", CPENC (2,1,C3,C3,2), 0),
4964 SR_CORE ("trcvmidcvr0", CPENC (2,1,C3,C0,1), 0),
4965 SR_CORE ("trcvmidcvr1", CPENC (2,1,C3,C2,1), 0),
4966 SR_CORE ("trcvmidcvr2", CPENC (2,1,C3,C4,1), 0),
4967 SR_CORE ("trcvmidcvr3", CPENC (2,1,C3,C6,1), 0),
4968 SR_CORE ("trcvmidcvr4", CPENC (2,1,C3,C8,1), 0),
4969 SR_CORE ("trcvmidcvr5", CPENC (2,1,C3,C10,1), 0),
4970 SR_CORE ("trcvmidcvr6", CPENC (2,1,C3,C12,1), 0),
4971 SR_CORE ("trcvmidcvr7", CPENC (2,1,C3,C14,1), 0),
4972 SR_CORE ("trclar", CPENC (2,1,C7,C12,6), F_REG_WRITE),
4973 SR_CORE ("trcoslar", CPENC (2,1,C1,C0,4), F_REG_WRITE),
4974
4975 SR_CORE ("csrcr_el0", CPENC (2,3,C8,C0,0), 0),
4976 SR_CORE ("csrptr_el0", CPENC (2,3,C8,C0,1), 0),
4977 SR_CORE ("csridr_el0", CPENC (2,3,C8,C0,2), F_REG_READ),
4978 SR_CORE ("csrptridx_el0", CPENC (2,3,C8,C0,3), F_REG_READ),
4979 SR_CORE ("csrcr_el1", CPENC (2,0,C8,C0,0), 0),
4980 SR_CORE ("csrcr_el12", CPENC (2,5,C8,C0,0), 0),
4981 SR_CORE ("csrptr_el1", CPENC (2,0,C8,C0,1), 0),
4982 SR_CORE ("csrptr_el12", CPENC (2,5,C8,C0,1), 0),
4983 SR_CORE ("csrptridx_el1", CPENC (2,0,C8,C0,3), F_REG_READ),
4984 SR_CORE ("csrcr_el2", CPENC (2,4,C8,C0,0), 0),
4985 SR_CORE ("csrptr_el2", CPENC (2,4,C8,C0,1), 0),
4986 SR_CORE ("csrptridx_el2", CPENC (2,4,C8,C0,3), F_REG_READ),
4987
4988 SR_LOR ("lorid_el1", CPENC (3,0,C10,C4,7), F_REG_READ),
4989 SR_LOR ("lorc_el1", CPENC (3,0,C10,C4,3), 0),
4990 SR_LOR ("lorea_el1", CPENC (3,0,C10,C4,1), 0),
4991 SR_LOR ("lorn_el1", CPENC (3,0,C10,C4,2), 0),
4992 SR_LOR ("lorsa_el1", CPENC (3,0,C10,C4,0), 0),
4993
4994 SR_CORE ("icc_ctlr_el3", CPENC (3,6,C12,C12,4), 0),
4995 SR_CORE ("icc_sre_el1", CPENC (3,0,C12,C12,5), 0),
4996 SR_CORE ("icc_sre_el2", CPENC (3,4,C12,C9,5), 0),
4997 SR_CORE ("icc_sre_el3", CPENC (3,6,C12,C12,5), 0),
4998 SR_CORE ("ich_vtr_el2", CPENC (3,4,C12,C11,1), F_REG_READ),
4999
5000 SR_CORE ("brbcr_el1", CPENC (2,1,C9,C0,0), 0),
5001 SR_CORE ("brbcr_el12", CPENC (2,5,C9,C0,0), 0),
5002 SR_CORE ("brbfcr_el1", CPENC (2,1,C9,C0,1), 0),
5003 SR_CORE ("brbts_el1", CPENC (2,1,C9,C0,2), 0),
5004 SR_CORE ("brbinfinj_el1", CPENC (2,1,C9,C1,0), 0),
5005 SR_CORE ("brbsrcinj_el1", CPENC (2,1,C9,C1,1), 0),
5006 SR_CORE ("brbtgtinj_el1", CPENC (2,1,C9,C1,2), 0),
5007 SR_CORE ("brbidr0_el1", CPENC (2,1,C9,C2,0), F_REG_READ),
5008 SR_CORE ("brbcr_el2", CPENC (2,4,C9,C0,0), 0),
5009 SR_CORE ("brbsrc0_el1", CPENC (2,1,C8,C0,1), F_REG_READ),
5010 SR_CORE ("brbsrc1_el1", CPENC (2,1,C8,C1,1), F_REG_READ),
5011 SR_CORE ("brbsrc2_el1", CPENC (2,1,C8,C2,1), F_REG_READ),
5012 SR_CORE ("brbsrc3_el1", CPENC (2,1,C8,C3,1), F_REG_READ),
5013 SR_CORE ("brbsrc4_el1", CPENC (2,1,C8,C4,1), F_REG_READ),
5014 SR_CORE ("brbsrc5_el1", CPENC (2,1,C8,C5,1), F_REG_READ),
5015 SR_CORE ("brbsrc6_el1", CPENC (2,1,C8,C6,1), F_REG_READ),
5016 SR_CORE ("brbsrc7_el1", CPENC (2,1,C8,C7,1), F_REG_READ),
5017 SR_CORE ("brbsrc8_el1", CPENC (2,1,C8,C8,1), F_REG_READ),
5018 SR_CORE ("brbsrc9_el1", CPENC (2,1,C8,C9,1), F_REG_READ),
5019 SR_CORE ("brbsrc10_el1", CPENC (2,1,C8,C10,1), F_REG_READ),
5020 SR_CORE ("brbsrc11_el1", CPENC (2,1,C8,C11,1), F_REG_READ),
5021 SR_CORE ("brbsrc12_el1", CPENC (2,1,C8,C12,1), F_REG_READ),
5022 SR_CORE ("brbsrc13_el1", CPENC (2,1,C8,C13,1), F_REG_READ),
5023 SR_CORE ("brbsrc14_el1", CPENC (2,1,C8,C14,1), F_REG_READ),
5024 SR_CORE ("brbsrc15_el1", CPENC (2,1,C8,C15,1), F_REG_READ),
5025 SR_CORE ("brbsrc16_el1", CPENC (2,1,C8,C0,5), F_REG_READ),
5026 SR_CORE ("brbsrc17_el1", CPENC (2,1,C8,C1,5), F_REG_READ),
5027 SR_CORE ("brbsrc18_el1", CPENC (2,1,C8,C2,5), F_REG_READ),
5028 SR_CORE ("brbsrc19_el1", CPENC (2,1,C8,C3,5), F_REG_READ),
5029 SR_CORE ("brbsrc20_el1", CPENC (2,1,C8,C4,5), F_REG_READ),
5030 SR_CORE ("brbsrc21_el1", CPENC (2,1,C8,C5,5), F_REG_READ),
5031 SR_CORE ("brbsrc22_el1", CPENC (2,1,C8,C6,5), F_REG_READ),
5032 SR_CORE ("brbsrc23_el1", CPENC (2,1,C8,C7,5), F_REG_READ),
5033 SR_CORE ("brbsrc24_el1", CPENC (2,1,C8,C8,5), F_REG_READ),
5034 SR_CORE ("brbsrc25_el1", CPENC (2,1,C8,C9,5), F_REG_READ),
5035 SR_CORE ("brbsrc26_el1", CPENC (2,1,C8,C10,5), F_REG_READ),
5036 SR_CORE ("brbsrc27_el1", CPENC (2,1,C8,C11,5), F_REG_READ),
5037 SR_CORE ("brbsrc28_el1", CPENC (2,1,C8,C12,5), F_REG_READ),
5038 SR_CORE ("brbsrc29_el1", CPENC (2,1,C8,C13,5), F_REG_READ),
5039 SR_CORE ("brbsrc30_el1", CPENC (2,1,C8,C14,5), F_REG_READ),
5040 SR_CORE ("brbsrc31_el1", CPENC (2,1,C8,C15,5), F_REG_READ),
5041 SR_CORE ("brbtgt0_el1", CPENC (2,1,C8,C0,2), F_REG_READ),
5042 SR_CORE ("brbtgt1_el1", CPENC (2,1,C8,C1,2), F_REG_READ),
5043 SR_CORE ("brbtgt2_el1", CPENC (2,1,C8,C2,2), F_REG_READ),
5044 SR_CORE ("brbtgt3_el1", CPENC (2,1,C8,C3,2), F_REG_READ),
5045 SR_CORE ("brbtgt4_el1", CPENC (2,1,C8,C4,2), F_REG_READ),
5046 SR_CORE ("brbtgt5_el1", CPENC (2,1,C8,C5,2), F_REG_READ),
5047 SR_CORE ("brbtgt6_el1", CPENC (2,1,C8,C6,2), F_REG_READ),
5048 SR_CORE ("brbtgt7_el1", CPENC (2,1,C8,C7,2), F_REG_READ),
5049 SR_CORE ("brbtgt8_el1", CPENC (2,1,C8,C8,2), F_REG_READ),
5050 SR_CORE ("brbtgt9_el1", CPENC (2,1,C8,C9,2), F_REG_READ),
5051 SR_CORE ("brbtgt10_el1", CPENC (2,1,C8,C10,2), F_REG_READ),
5052 SR_CORE ("brbtgt11_el1", CPENC (2,1,C8,C11,2), F_REG_READ),
5053 SR_CORE ("brbtgt12_el1", CPENC (2,1,C8,C12,2), F_REG_READ),
5054 SR_CORE ("brbtgt13_el1", CPENC (2,1,C8,C13,2), F_REG_READ),
5055 SR_CORE ("brbtgt14_el1", CPENC (2,1,C8,C14,2), F_REG_READ),
5056 SR_CORE ("brbtgt15_el1", CPENC (2,1,C8,C15,2), F_REG_READ),
5057 SR_CORE ("brbtgt16_el1", CPENC (2,1,C8,C0,6), F_REG_READ),
5058 SR_CORE ("brbtgt17_el1", CPENC (2,1,C8,C1,6), F_REG_READ),
5059 SR_CORE ("brbtgt18_el1", CPENC (2,1,C8,C2,6), F_REG_READ),
5060 SR_CORE ("brbtgt19_el1", CPENC (2,1,C8,C3,6), F_REG_READ),
5061 SR_CORE ("brbtgt20_el1", CPENC (2,1,C8,C4,6), F_REG_READ),
5062 SR_CORE ("brbtgt21_el1", CPENC (2,1,C8,C5,6), F_REG_READ),
5063 SR_CORE ("brbtgt22_el1", CPENC (2,1,C8,C6,6), F_REG_READ),
5064 SR_CORE ("brbtgt23_el1", CPENC (2,1,C8,C7,6), F_REG_READ),
5065 SR_CORE ("brbtgt24_el1", CPENC (2,1,C8,C8,6), F_REG_READ),
5066 SR_CORE ("brbtgt25_el1", CPENC (2,1,C8,C9,6), F_REG_READ),
5067 SR_CORE ("brbtgt26_el1", CPENC (2,1,C8,C10,6), F_REG_READ),
5068 SR_CORE ("brbtgt27_el1", CPENC (2,1,C8,C11,6), F_REG_READ),
5069 SR_CORE ("brbtgt28_el1", CPENC (2,1,C8,C12,6), F_REG_READ),
5070 SR_CORE ("brbtgt29_el1", CPENC (2,1,C8,C13,6), F_REG_READ),
5071 SR_CORE ("brbtgt30_el1", CPENC (2,1,C8,C14,6), F_REG_READ),
5072 SR_CORE ("brbtgt31_el1", CPENC (2,1,C8,C15,6), F_REG_READ),
5073 SR_CORE ("brbinf0_el1", CPENC (2,1,C8,C0,0), F_REG_READ),
5074 SR_CORE ("brbinf1_el1", CPENC (2,1,C8,C1,0), F_REG_READ),
5075 SR_CORE ("brbinf2_el1", CPENC (2,1,C8,C2,0), F_REG_READ),
5076 SR_CORE ("brbinf3_el1", CPENC (2,1,C8,C3,0), F_REG_READ),
5077 SR_CORE ("brbinf4_el1", CPENC (2,1,C8,C4,0), F_REG_READ),
5078 SR_CORE ("brbinf5_el1", CPENC (2,1,C8,C5,0), F_REG_READ),
5079 SR_CORE ("brbinf6_el1", CPENC (2,1,C8,C6,0), F_REG_READ),
5080 SR_CORE ("brbinf7_el1", CPENC (2,1,C8,C7,0), F_REG_READ),
5081 SR_CORE ("brbinf8_el1", CPENC (2,1,C8,C8,0), F_REG_READ),
5082 SR_CORE ("brbinf9_el1", CPENC (2,1,C8,C9,0), F_REG_READ),
5083 SR_CORE ("brbinf10_el1", CPENC (2,1,C8,C10,0), F_REG_READ),
5084 SR_CORE ("brbinf11_el1", CPENC (2,1,C8,C11,0), F_REG_READ),
5085 SR_CORE ("brbinf12_el1", CPENC (2,1,C8,C12,0), F_REG_READ),
5086 SR_CORE ("brbinf13_el1", CPENC (2,1,C8,C13,0), F_REG_READ),
5087 SR_CORE ("brbinf14_el1", CPENC (2,1,C8,C14,0), F_REG_READ),
5088 SR_CORE ("brbinf15_el1", CPENC (2,1,C8,C15,0), F_REG_READ),
5089 SR_CORE ("brbinf16_el1", CPENC (2,1,C8,C0,4), F_REG_READ),
5090 SR_CORE ("brbinf17_el1", CPENC (2,1,C8,C1,4), F_REG_READ),
5091 SR_CORE ("brbinf18_el1", CPENC (2,1,C8,C2,4), F_REG_READ),
5092 SR_CORE ("brbinf19_el1", CPENC (2,1,C8,C3,4), F_REG_READ),
5093 SR_CORE ("brbinf20_el1", CPENC (2,1,C8,C4,4), F_REG_READ),
5094 SR_CORE ("brbinf21_el1", CPENC (2,1,C8,C5,4), F_REG_READ),
5095 SR_CORE ("brbinf22_el1", CPENC (2,1,C8,C6,4), F_REG_READ),
5096 SR_CORE ("brbinf23_el1", CPENC (2,1,C8,C7,4), F_REG_READ),
5097 SR_CORE ("brbinf24_el1", CPENC (2,1,C8,C8,4), F_REG_READ),
5098 SR_CORE ("brbinf25_el1", CPENC (2,1,C8,C9,4), F_REG_READ),
5099 SR_CORE ("brbinf26_el1", CPENC (2,1,C8,C10,4), F_REG_READ),
5100 SR_CORE ("brbinf27_el1", CPENC (2,1,C8,C11,4), F_REG_READ),
5101 SR_CORE ("brbinf28_el1", CPENC (2,1,C8,C12,4), F_REG_READ),
5102 SR_CORE ("brbinf29_el1", CPENC (2,1,C8,C13,4), F_REG_READ),
5103 SR_CORE ("brbinf30_el1", CPENC (2,1,C8,C14,4), F_REG_READ),
5104 SR_CORE ("brbinf31_el1", CPENC (2,1,C8,C15,4), F_REG_READ),
5105
5106 SR_CORE ("accdata_el1", CPENC (3,0,C13,C0,5), 0),
5107
5108 SR_CORE ("mfar_el3", CPENC (3,6,C6,C0,5), 0),
5109 SR_CORE ("gpccr_el3", CPENC (3,6,C2,C1,6), 0),
5110 SR_CORE ("gptbr_el3", CPENC (3,6,C2,C1,4), 0),
5111
5112 SR_CORE ("mecidr_el2", CPENC (3,4,C10,C8,7), F_REG_READ),
5113 SR_CORE ("mecid_p0_el2", CPENC (3,4,C10,C8,0), 0),
5114 SR_CORE ("mecid_a0_el2", CPENC (3,4,C10,C8,1), 0),
5115 SR_CORE ("mecid_p1_el2", CPENC (3,4,C10,C8,2), 0),
5116 SR_CORE ("mecid_a1_el2", CPENC (3,4,C10,C8,3), 0),
5117 SR_CORE ("vmecid_p_el2", CPENC (3,4,C10,C9,0), 0),
5118 SR_CORE ("vmecid_a_el2", CPENC (3,4,C10,C9,1), 0),
5119 SR_CORE ("mecid_rl_a_el3",CPENC (3,6,C10,C10,1), 0),
5120
5121 SR_SME ("svcr", CPENC (3,3,C4,C2,2), 0),
5122 SR_SME ("id_aa64smfr0_el1", CPENC (3,0,C0,C4,5), F_REG_READ),
5123 SR_SME ("smcr_el1", CPENC (3,0,C1,C2,6), 0),
5124 SR_SME ("smcr_el12", CPENC (3,5,C1,C2,6), 0),
5125 SR_SME ("smcr_el2", CPENC (3,4,C1,C2,6), 0),
5126 SR_SME ("smcr_el3", CPENC (3,6,C1,C2,6), 0),
5127 SR_SME ("smpri_el1", CPENC (3,0,C1,C2,4), 0),
5128 SR_SME ("smprimap_el2", CPENC (3,4,C1,C2,5), 0),
5129 SR_SME ("smidr_el1", CPENC (3,1,C0,C0,6), F_REG_READ),
5130 SR_SME ("tpidr2_el0", CPENC (3,3,C13,C0,5), 0),
5131 SR_SME ("mpamsm_el1", CPENC (3,0,C10,C5,3), 0),
5132
5133 SR_AMU ("amcr_el0", CPENC (3,3,C13,C2,0), 0),
5134 SR_AMU ("amcfgr_el0", CPENC (3,3,C13,C2,1), F_REG_READ),
5135 SR_AMU ("amcgcr_el0", CPENC (3,3,C13,C2,2), F_REG_READ),
5136 SR_AMU ("amuserenr_el0", CPENC (3,3,C13,C2,3), 0),
5137 SR_AMU ("amcntenclr0_el0", CPENC (3,3,C13,C2,4), 0),
5138 SR_AMU ("amcntenset0_el0", CPENC (3,3,C13,C2,5), 0),
5139 SR_AMU ("amcntenclr1_el0", CPENC (3,3,C13,C3,0), 0),
5140 SR_AMU ("amcntenset1_el0", CPENC (3,3,C13,C3,1), 0),
5141 SR_AMU ("amevcntr00_el0", CPENC (3,3,C13,C4,0), 0),
5142 SR_AMU ("amevcntr01_el0", CPENC (3,3,C13,C4,1), 0),
5143 SR_AMU ("amevcntr02_el0", CPENC (3,3,C13,C4,2), 0),
5144 SR_AMU ("amevcntr03_el0", CPENC (3,3,C13,C4,3), 0),
5145 SR_AMU ("amevtyper00_el0", CPENC (3,3,C13,C6,0), F_REG_READ),
5146 SR_AMU ("amevtyper01_el0", CPENC (3,3,C13,C6,1), F_REG_READ),
5147 SR_AMU ("amevtyper02_el0", CPENC (3,3,C13,C6,2), F_REG_READ),
5148 SR_AMU ("amevtyper03_el0", CPENC (3,3,C13,C6,3), F_REG_READ),
5149 SR_AMU ("amevcntr10_el0", CPENC (3,3,C13,C12,0), 0),
5150 SR_AMU ("amevcntr11_el0", CPENC (3,3,C13,C12,1), 0),
5151 SR_AMU ("amevcntr12_el0", CPENC (3,3,C13,C12,2), 0),
5152 SR_AMU ("amevcntr13_el0", CPENC (3,3,C13,C12,3), 0),
5153 SR_AMU ("amevcntr14_el0", CPENC (3,3,C13,C12,4), 0),
5154 SR_AMU ("amevcntr15_el0", CPENC (3,3,C13,C12,5), 0),
5155 SR_AMU ("amevcntr16_el0", CPENC (3,3,C13,C12,6), 0),
5156 SR_AMU ("amevcntr17_el0", CPENC (3,3,C13,C12,7), 0),
5157 SR_AMU ("amevcntr18_el0", CPENC (3,3,C13,C13,0), 0),
5158 SR_AMU ("amevcntr19_el0", CPENC (3,3,C13,C13,1), 0),
5159 SR_AMU ("amevcntr110_el0", CPENC (3,3,C13,C13,2), 0),
5160 SR_AMU ("amevcntr111_el0", CPENC (3,3,C13,C13,3), 0),
5161 SR_AMU ("amevcntr112_el0", CPENC (3,3,C13,C13,4), 0),
5162 SR_AMU ("amevcntr113_el0", CPENC (3,3,C13,C13,5), 0),
5163 SR_AMU ("amevcntr114_el0", CPENC (3,3,C13,C13,6), 0),
5164 SR_AMU ("amevcntr115_el0", CPENC (3,3,C13,C13,7), 0),
5165 SR_AMU ("amevtyper10_el0", CPENC (3,3,C13,C14,0), 0),
5166 SR_AMU ("amevtyper11_el0", CPENC (3,3,C13,C14,1), 0),
5167 SR_AMU ("amevtyper12_el0", CPENC (3,3,C13,C14,2), 0),
5168 SR_AMU ("amevtyper13_el0", CPENC (3,3,C13,C14,3), 0),
5169 SR_AMU ("amevtyper14_el0", CPENC (3,3,C13,C14,4), 0),
5170 SR_AMU ("amevtyper15_el0", CPENC (3,3,C13,C14,5), 0),
5171 SR_AMU ("amevtyper16_el0", CPENC (3,3,C13,C14,6), 0),
5172 SR_AMU ("amevtyper17_el0", CPENC (3,3,C13,C14,7), 0),
5173 SR_AMU ("amevtyper18_el0", CPENC (3,3,C13,C15,0), 0),
5174 SR_AMU ("amevtyper19_el0", CPENC (3,3,C13,C15,1), 0),
5175 SR_AMU ("amevtyper110_el0", CPENC (3,3,C13,C15,2), 0),
5176 SR_AMU ("amevtyper111_el0", CPENC (3,3,C13,C15,3), 0),
5177 SR_AMU ("amevtyper112_el0", CPENC (3,3,C13,C15,4), 0),
5178 SR_AMU ("amevtyper113_el0", CPENC (3,3,C13,C15,5), 0),
5179 SR_AMU ("amevtyper114_el0", CPENC (3,3,C13,C15,6), 0),
5180 SR_AMU ("amevtyper115_el0", CPENC (3,3,C13,C15,7), 0),
5181
5182 SR_GIC ("icc_pmr_el1", CPENC (3,0,C4,C6,0), 0),
5183 SR_GIC ("icc_iar0_el1", CPENC (3,0,C12,C8,0), F_REG_READ),
5184 SR_GIC ("icc_eoir0_el1", CPENC (3,0,C12,C8,1), F_REG_WRITE),
5185 SR_GIC ("icc_hppir0_el1", CPENC (3,0,C12,C8,2), F_REG_READ),
5186 SR_GIC ("icc_bpr0_el1", CPENC (3,0,C12,C8,3), 0),
5187 SR_GIC ("icc_ap0r0_el1", CPENC (3,0,C12,C8,4), 0),
5188 SR_GIC ("icc_ap0r1_el1", CPENC (3,0,C12,C8,5), 0),
5189 SR_GIC ("icc_ap0r2_el1", CPENC (3,0,C12,C8,6), 0),
5190 SR_GIC ("icc_ap0r3_el1", CPENC (3,0,C12,C8,7), 0),
5191 SR_GIC ("icc_ap1r0_el1", CPENC (3,0,C12,C9,0), 0),
5192 SR_GIC ("icc_ap1r1_el1", CPENC (3,0,C12,C9,1), 0),
5193 SR_GIC ("icc_ap1r2_el1", CPENC (3,0,C12,C9,2), 0),
5194 SR_GIC ("icc_ap1r3_el1", CPENC (3,0,C12,C9,3), 0),
5195 SR_GIC ("icc_dir_el1", CPENC (3,0,C12,C11,1), F_REG_WRITE),
5196 SR_GIC ("icc_rpr_el1", CPENC (3,0,C12,C11,3), F_REG_READ),
5197 SR_GIC ("icc_sgi1r_el1", CPENC (3,0,C12,C11,5), F_REG_WRITE),
5198 SR_GIC ("icc_asgi1r_el1", CPENC (3,0,C12,C11,6), F_REG_WRITE),
5199 SR_GIC ("icc_sgi0r_el1", CPENC (3,0,C12,C11,7), F_REG_WRITE),
5200 SR_GIC ("icc_iar1_el1", CPENC (3,0,C12,C12,0), F_REG_READ),
5201 SR_GIC ("icc_eoir1_el1", CPENC (3,0,C12,C12,1), F_REG_WRITE),
5202 SR_GIC ("icc_hppir1_el1", CPENC (3,0,C12,C12,2), F_REG_READ),
5203 SR_GIC ("icc_bpr1_el1", CPENC (3,0,C12,C12,3), 0),
5204 SR_GIC ("icc_ctlr_el1", CPENC (3,0,C12,C12,4), 0),
5205 SR_GIC ("icc_igrpen0_el1", CPENC (3,0,C12,C12,6), 0),
5206 SR_GIC ("icc_igrpen1_el1", CPENC (3,0,C12,C12,7), 0),
5207 SR_GIC ("ich_ap0r0_el2", CPENC (3,4,C12,C8,0), 0),
5208 SR_GIC ("ich_ap0r1_el2", CPENC (3,4,C12,C8,1), 0),
5209 SR_GIC ("ich_ap0r2_el2", CPENC (3,4,C12,C8,2), 0),
5210 SR_GIC ("ich_ap0r3_el2", CPENC (3,4,C12,C8,3), 0),
5211 SR_GIC ("ich_ap1r0_el2", CPENC (3,4,C12,C9,0), 0),
5212 SR_GIC ("ich_ap1r1_el2", CPENC (3,4,C12,C9,1), 0),
5213 SR_GIC ("ich_ap1r2_el2", CPENC (3,4,C12,C9,2), 0),
5214 SR_GIC ("ich_ap1r3_el2", CPENC (3,4,C12,C9,3), 0),
5215 SR_GIC ("ich_hcr_el2", CPENC (3,4,C12,C11,0), 0),
5216 SR_GIC ("ich_misr_el2", CPENC (3,4,C12,C11,2), F_REG_READ),
5217 SR_GIC ("ich_eisr_el2", CPENC (3,4,C12,C11,3), F_REG_READ),
5218 SR_GIC ("ich_elrsr_el2", CPENC (3,4,C12,C11,5), F_REG_READ),
5219 SR_GIC ("ich_vmcr_el2", CPENC (3,4,C12,C11,7), 0),
5220 SR_GIC ("ich_lr0_el2", CPENC (3,4,C12,C12,0), 0),
5221 SR_GIC ("ich_lr1_el2", CPENC (3,4,C12,C12,1), 0),
5222 SR_GIC ("ich_lr2_el2", CPENC (3,4,C12,C12,2), 0),
5223 SR_GIC ("ich_lr3_el2", CPENC (3,4,C12,C12,3), 0),
5224 SR_GIC ("ich_lr4_el2", CPENC (3,4,C12,C12,4), 0),
5225 SR_GIC ("ich_lr5_el2", CPENC (3,4,C12,C12,5), 0),
5226 SR_GIC ("ich_lr6_el2", CPENC (3,4,C12,C12,6), 0),
5227 SR_GIC ("ich_lr7_el2", CPENC (3,4,C12,C12,7), 0),
5228 SR_GIC ("ich_lr8_el2", CPENC (3,4,C12,C13,0), 0),
5229 SR_GIC ("ich_lr9_el2", CPENC (3,4,C12,C13,1), 0),
5230 SR_GIC ("ich_lr10_el2", CPENC (3,4,C12,C13,2), 0),
5231 SR_GIC ("ich_lr11_el2", CPENC (3,4,C12,C13,3), 0),
5232 SR_GIC ("ich_lr12_el2", CPENC (3,4,C12,C13,4), 0),
5233 SR_GIC ("ich_lr13_el2", CPENC (3,4,C12,C13,5), 0),
5234 SR_GIC ("ich_lr14_el2", CPENC (3,4,C12,C13,6), 0),
5235 SR_GIC ("ich_lr15_el2", CPENC (3,4,C12,C13,7), 0),
5236 SR_GIC ("icc_igrpen1_el3", CPENC (3,6,C12,C12,7), 0),
5237
5238 SR_V8_6 ("amcg1idr_el0", CPENC (3,3,C13,C2,6), F_REG_READ),
5239 SR_V8_6 ("cntpctss_el0", CPENC (3,3,C14,C0,5), F_REG_READ),
5240 SR_V8_6 ("cntvctss_el0", CPENC (3,3,C14,C0,6), F_REG_READ),
5241 SR_V8_6 ("hfgrtr_el2", CPENC (3,4,C1,C1,4), 0),
5242 SR_V8_6 ("hfgwtr_el2", CPENC (3,4,C1,C1,5), 0),
5243 SR_V8_6 ("hfgitr_el2", CPENC (3,4,C1,C1,6), 0),
5244 SR_V8_6 ("hdfgrtr_el2", CPENC (3,4,C3,C1,4), 0),
5245 SR_V8_6 ("hdfgwtr_el2", CPENC (3,4,C3,C1,5), 0),
5246 SR_V8_6 ("hafgrtr_el2", CPENC (3,4,C3,C1,6), 0),
5247 SR_V8_6 ("amevcntvoff00_el2", CPENC (3,4,C13,C8,0), 0),
5248 SR_V8_6 ("amevcntvoff01_el2", CPENC (3,4,C13,C8,1), 0),
5249 SR_V8_6 ("amevcntvoff02_el2", CPENC (3,4,C13,C8,2), 0),
5250 SR_V8_6 ("amevcntvoff03_el2", CPENC (3,4,C13,C8,3), 0),
5251 SR_V8_6 ("amevcntvoff04_el2", CPENC (3,4,C13,C8,4), 0),
5252 SR_V8_6 ("amevcntvoff05_el2", CPENC (3,4,C13,C8,5), 0),
5253 SR_V8_6 ("amevcntvoff06_el2", CPENC (3,4,C13,C8,6), 0),
5254 SR_V8_6 ("amevcntvoff07_el2", CPENC (3,4,C13,C8,7), 0),
5255 SR_V8_6 ("amevcntvoff08_el2", CPENC (3,4,C13,C9,0), 0),
5256 SR_V8_6 ("amevcntvoff09_el2", CPENC (3,4,C13,C9,1), 0),
5257 SR_V8_6 ("amevcntvoff010_el2", CPENC (3,4,C13,C9,2), 0),
5258 SR_V8_6 ("amevcntvoff011_el2", CPENC (3,4,C13,C9,3), 0),
5259 SR_V8_6 ("amevcntvoff012_el2", CPENC (3,4,C13,C9,4), 0),
5260 SR_V8_6 ("amevcntvoff013_el2", CPENC (3,4,C13,C9,5), 0),
5261 SR_V8_6 ("amevcntvoff014_el2", CPENC (3,4,C13,C9,6), 0),
5262 SR_V8_6 ("amevcntvoff015_el2", CPENC (3,4,C13,C9,7), 0),
5263 SR_V8_6 ("amevcntvoff10_el2", CPENC (3,4,C13,C10,0), 0),
5264 SR_V8_6 ("amevcntvoff11_el2", CPENC (3,4,C13,C10,1), 0),
5265 SR_V8_6 ("amevcntvoff12_el2", CPENC (3,4,C13,C10,2), 0),
5266 SR_V8_6 ("amevcntvoff13_el2", CPENC (3,4,C13,C10,3), 0),
5267 SR_V8_6 ("amevcntvoff14_el2", CPENC (3,4,C13,C10,4), 0),
5268 SR_V8_6 ("amevcntvoff15_el2", CPENC (3,4,C13,C10,5), 0),
5269 SR_V8_6 ("amevcntvoff16_el2", CPENC (3,4,C13,C10,6), 0),
5270 SR_V8_6 ("amevcntvoff17_el2", CPENC (3,4,C13,C10,7), 0),
5271 SR_V8_6 ("amevcntvoff18_el2", CPENC (3,4,C13,C11,0), 0),
5272 SR_V8_6 ("amevcntvoff19_el2", CPENC (3,4,C13,C11,1), 0),
5273 SR_V8_6 ("amevcntvoff110_el2", CPENC (3,4,C13,C11,2), 0),
5274 SR_V8_6 ("amevcntvoff111_el2", CPENC (3,4,C13,C11,3), 0),
5275 SR_V8_6 ("amevcntvoff112_el2", CPENC (3,4,C13,C11,4), 0),
5276 SR_V8_6 ("amevcntvoff113_el2", CPENC (3,4,C13,C11,5), 0),
5277 SR_V8_6 ("amevcntvoff114_el2", CPENC (3,4,C13,C11,6), 0),
5278 SR_V8_6 ("amevcntvoff115_el2", CPENC (3,4,C13,C11,7), 0),
5279 SR_V8_6 ("cntpoff_el2", CPENC (3,4,C14,C0,6), 0),
5280
5281 SR_V8_7 ("pmsnevfr_el1", CPENC (3,0,C9,C9,1), 0),
5282 SR_V8_7 ("hcrx_el2", CPENC (3,4,C1,C2,2), 0),
5283
5284 SR_V8_8 ("allint", CPENC (3,0,C4,C3,0), 0),
5285 SR_V8_8 ("icc_nmiar1_el1", CPENC (3,0,C12,C9,5), F_REG_READ),
5286
5287 { 0, CPENC (0,0,0,0,0), 0, 0 }
5288 };
5289
5290 bool
5291 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
5292 {
5293 return (reg_flags & F_DEPRECATED) != 0;
5294 }
5295
5296 /* The CPENC below is fairly misleading: the fields here are not in CPENC
5297    form.  They are in op1:op2 form, with op1 in the upper three bits and op2
5298    in the lower three.  The fields are encoded by ins_pstatefield, which just
5299    shifts the value by the width of the fields in a loop.  So if you CPENC
5300    them, only the first value will be set and the rest are masked out to 0.
5301    As an example, op1 = 3, op2 = 2: CPENC would produce a value of
5302    0b110000000001000000 (0x30040) while what you want is 0b011010 (0x1a). */
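/* A minimal worked sketch of the packing used below (illustrative only; the
   real encoding is done by ins_pstatefield):

     value = (op1 << 3) | op2;

   e.g. the "dit" entry is (0x3 << 3) | 0x2 == 0x1a; ins_pstatefield then
   copies the low three bits into the op2 field and the next three into op1. */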
5303 const aarch64_sys_reg aarch64_pstatefields [] =
5304 {
5305 SR_CORE ("spsel", 0x05, F_REG_MAX_VALUE (1)),
5306 SR_CORE ("daifset", 0x1e, F_REG_MAX_VALUE (15)),
5307 SR_CORE ("daifclr", 0x1f, F_REG_MAX_VALUE (15)),
5308 SR_PAN ("pan", 0x04, F_REG_MAX_VALUE (1)),
5309 SR_V8_2 ("uao", 0x03, F_REG_MAX_VALUE (1)),
5310 SR_SSBS ("ssbs", 0x19, F_REG_MAX_VALUE (1)),
5311 SR_V8_4 ("dit", 0x1a, F_REG_MAX_VALUE (1)),
5312 SR_MEMTAG ("tco", 0x1c, F_REG_MAX_VALUE (1)),
5313 SR_SME ("svcrsm", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x2,0x1)
5314 | F_REG_MAX_VALUE (1)),
5315 SR_SME ("svcrza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x4,0x1)
5316 | F_REG_MAX_VALUE (1)),
5317 SR_SME ("svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x6,0x1)
5318 | F_REG_MAX_VALUE (1)),
5319 SR_V8_8 ("allint", 0x08, F_REG_MAX_VALUE (1)),
5320 { 0, CPENC (0,0,0,0,0), 0, 0 },
5321 };
5322
5323 bool
5324 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
5325 const aarch64_sys_reg *reg)
5326 {
5327 if (!(reg->flags & F_ARCHEXT))
5328 return true;
5329
5330 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
5331 }
5332
5333 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
5334 {
5335 { "ialluis", CPENS(0,C7,C1,0), 0 },
5336 { "iallu", CPENS(0,C7,C5,0), 0 },
5337 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
5338 { 0, CPENS(0,0,0,0), 0 }
5339 };
5340
5341 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
5342 {
5343 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
5344 { "gva", CPENS (3, C7, C4, 3), F_HASXT | F_ARCHEXT },
5345 { "gzva", CPENS (3, C7, C4, 4), F_HASXT | F_ARCHEXT },
5346 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
5347 { "igvac", CPENS (0, C7, C6, 3), F_HASXT | F_ARCHEXT },
5348 { "igsw", CPENS (0, C7, C6, 4), F_HASXT | F_ARCHEXT },
5349 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
5350 { "igdvac", CPENS (0, C7, C6, 5), F_HASXT | F_ARCHEXT },
5351 { "igdsw", CPENS (0, C7, C6, 6), F_HASXT | F_ARCHEXT },
5352 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
5353 { "cgvac", CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
5354 { "cgdvac", CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
5355 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
5356 { "cgsw", CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
5357 { "cgdsw", CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
5358 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
5359 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
5360 { "cgvap", CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
5361 { "cgdvap", CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
5362 { "cvadp", CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
5363 { "cgvadp", CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
5364 { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
5365 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
5366 { "cigvac", CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
5367 { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
5368 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
5369 { "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
5370 { "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
5371 { "cipapa", CPENS (6, C7, C14, 1), F_HASXT },
5372 { "cigdpapa", CPENS (6, C7, C14, 5), F_HASXT },
5373 { 0, CPENS(0,0,0,0), 0 }
5374 };
5375
5376 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
5377 {
5378 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
5379 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
5380 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
5381 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
5382 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
5383 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
5384 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
5385 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
5386 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
5387 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
5388 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
5389 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
5390 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
5391 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
5392 { 0, CPENS(0,0,0,0), 0 }
5393 };
5394
5395 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
5396 {
5397 { "vmalle1", CPENS(0,C8,C7,0), 0 },
5398 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
5399 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
5400 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
5401 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
5402 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
5403 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
5404 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
5405 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
5406 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
5407 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
5408 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
5409 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
5410 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
5411 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
5412 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
5413 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
5414 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
5415 { "alle2", CPENS(4,C8,C7,0), 0 },
5416 { "alle2is", CPENS(4,C8,C3,0), 0 },
5417 { "alle1", CPENS(4,C8,C7,4), 0 },
5418 { "alle1is", CPENS(4,C8,C3,4), 0 },
5419 { "alle3", CPENS(6,C8,C7,0), 0 },
5420 { "alle3is", CPENS(6,C8,C3,0), 0 },
5421 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
5422 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
5423 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
5424 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
5425 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
5426 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
5427 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
5428 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
5429
5430 { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
5431 { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
5432 { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
5433 { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
5434 { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
5435 { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
5436 { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
5437 { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
5438 { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
5439 { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
5440 { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
5441 { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
5442 { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
5443 { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
5444 { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
5445 { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },
5446
5447 { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
5448 { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
5449 { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
5450 { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
5451 { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
5452 { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
5453 { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
5454 { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
5455 { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
5456 { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
5457 { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
5458 { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
5459 { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
5460 { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
5461 { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
5462 { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
5463 { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
5464 { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
5465 { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
5466 { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
5467 { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
5468 { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
5469 { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
5470 { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
5471 { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
5472 { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
5473 { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
5474 { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
5475 { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
5476 { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },
5477
5478 { "rpaos", CPENS (6, C8, C4, 3), F_HASXT },
5479 { "rpalos", CPENS (6, C8, C4, 7), F_HASXT },
5480 { "paallos", CPENS (6, C8, C1, 4), 0},
5481 { "paall", CPENS (6, C8, C7, 4), 0},
5482
5483 { 0, CPENS(0,0,0,0), 0 }
5484 };
5485
5486 const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
5487 {
5488   /* RCTX is somewhat unique in that it takes different values
5489      (op2) depending on the instruction in which it is used (cfp/dvp/cpp).
5490 Thus op2 is masked out and instead encoded directly in the
5491 aarch64_opcode_table entries for the respective instructions. */
5492 { "rctx", CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */
5493
5494 { 0, CPENS(0,0,0,0), 0 }
5495 };
5496
5497 bool
5498 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
5499 {
5500 return (sys_ins_reg->flags & F_HASXT) != 0;
5501 }
5502
5503 extern bool
5504 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
5505 const char *reg_name,
5506 aarch64_insn reg_value,
5507 uint32_t reg_flags,
5508 aarch64_feature_set reg_features)
5509 {
5510 /* Armv8-R has no EL3. */
5511 if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_R))
5512 {
5513 const char *suffix = strrchr (reg_name, '_');
5514 if (suffix && !strcmp (suffix, "_el3"))
5515 return false;
5516 }
5517
5518 if (!(reg_flags & F_ARCHEXT))
5519 return true;
5520
5521 if (reg_features
5522 && AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
5523 return true;
5524
5525 /* ARMv8.4 TLB instructions. */
5526 if ((reg_value == CPENS (0, C8, C1, 0)
5527 || reg_value == CPENS (0, C8, C1, 1)
5528 || reg_value == CPENS (0, C8, C1, 2)
5529 || reg_value == CPENS (0, C8, C1, 3)
5530 || reg_value == CPENS (0, C8, C1, 5)
5531 || reg_value == CPENS (0, C8, C1, 7)
5532 || reg_value == CPENS (4, C8, C4, 0)
5533 || reg_value == CPENS (4, C8, C4, 4)
5534 || reg_value == CPENS (4, C8, C1, 1)
5535 || reg_value == CPENS (4, C8, C1, 5)
5536 || reg_value == CPENS (4, C8, C1, 6)
5537 || reg_value == CPENS (6, C8, C1, 1)
5538 || reg_value == CPENS (6, C8, C1, 5)
5539 || reg_value == CPENS (4, C8, C1, 0)
5540 || reg_value == CPENS (4, C8, C1, 4)
5541 || reg_value == CPENS (6, C8, C1, 0)
5542 || reg_value == CPENS (0, C8, C6, 1)
5543 || reg_value == CPENS (0, C8, C6, 3)
5544 || reg_value == CPENS (0, C8, C6, 5)
5545 || reg_value == CPENS (0, C8, C6, 7)
5546 || reg_value == CPENS (0, C8, C2, 1)
5547 || reg_value == CPENS (0, C8, C2, 3)
5548 || reg_value == CPENS (0, C8, C2, 5)
5549 || reg_value == CPENS (0, C8, C2, 7)
5550 || reg_value == CPENS (0, C8, C5, 1)
5551 || reg_value == CPENS (0, C8, C5, 3)
5552 || reg_value == CPENS (0, C8, C5, 5)
5553 || reg_value == CPENS (0, C8, C5, 7)
5554 || reg_value == CPENS (4, C8, C0, 2)
5555 || reg_value == CPENS (4, C8, C0, 6)
5556 || reg_value == CPENS (4, C8, C4, 2)
5557 || reg_value == CPENS (4, C8, C4, 6)
5558 || reg_value == CPENS (4, C8, C4, 3)
5559 || reg_value == CPENS (4, C8, C4, 7)
5560 || reg_value == CPENS (4, C8, C6, 1)
5561 || reg_value == CPENS (4, C8, C6, 5)
5562 || reg_value == CPENS (4, C8, C2, 1)
5563 || reg_value == CPENS (4, C8, C2, 5)
5564 || reg_value == CPENS (4, C8, C5, 1)
5565 || reg_value == CPENS (4, C8, C5, 5)
5566 || reg_value == CPENS (6, C8, C6, 1)
5567 || reg_value == CPENS (6, C8, C6, 5)
5568 || reg_value == CPENS (6, C8, C2, 1)
5569 || reg_value == CPENS (6, C8, C2, 5)
5570 || reg_value == CPENS (6, C8, C5, 1)
5571 || reg_value == CPENS (6, C8, C5, 5))
5572 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
5573 return true;
5574
5575 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
5576 if (reg_value == CPENS (3, C7, C12, 1)
5577 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
5578 return true;
5579
5580 /* DC CVADP. Values are from aarch64_sys_regs_dc. */
5581 if (reg_value == CPENS (3, C7, C13, 1)
5582 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
5583 return true;
5584
5585 /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension. */
5586 if ((reg_value == CPENS (0, C7, C6, 3)
5587 || reg_value == CPENS (0, C7, C6, 4)
5588 || reg_value == CPENS (0, C7, C10, 4)
5589 || reg_value == CPENS (0, C7, C14, 4)
5590 || reg_value == CPENS (3, C7, C10, 3)
5591 || reg_value == CPENS (3, C7, C12, 3)
5592 || reg_value == CPENS (3, C7, C13, 3)
5593 || reg_value == CPENS (3, C7, C14, 3)
5594 || reg_value == CPENS (3, C7, C4, 3)
5595 || reg_value == CPENS (0, C7, C6, 5)
5596 || reg_value == CPENS (0, C7, C6, 6)
5597 || reg_value == CPENS (0, C7, C10, 6)
5598 || reg_value == CPENS (0, C7, C14, 6)
5599 || reg_value == CPENS (3, C7, C10, 5)
5600 || reg_value == CPENS (3, C7, C12, 5)
5601 || reg_value == CPENS (3, C7, C13, 5)
5602 || reg_value == CPENS (3, C7, C14, 5)
5603 || reg_value == CPENS (3, C7, C4, 4))
5604 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
5605 return true;
5606
5607 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
5608 if ((reg_value == CPENS (0, C7, C9, 0)
5609 || reg_value == CPENS (0, C7, C9, 1))
5610 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
5611 return true;
5612
5613 /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
5614 if (reg_value == CPENS (3, C7, C3, 0)
5615 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
5616 return true;
5617
5618 return false;
5619 }
5620
5621 #undef C0
5622 #undef C1
5623 #undef C2
5624 #undef C3
5625 #undef C4
5626 #undef C5
5627 #undef C6
5628 #undef C7
5629 #undef C8
5630 #undef C9
5631 #undef C10
5632 #undef C11
5633 #undef C12
5634 #undef C13
5635 #undef C14
5636 #undef C15
5637
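/* Extract bit number BT, or the inclusive bit-field [HI:LO], from the
   instruction word INSN.  */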
5638 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
5639 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
5640
5641 static enum err_type
5642 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5643 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5644 bool encoding ATTRIBUTE_UNUSED,
5645 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5646 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5647 {
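  /* Rt, Rn and Rt2 fields of the LDPSW encoding.  */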
5648 int t = BITS (insn, 4, 0);
5649 int n = BITS (insn, 9, 5);
5650 int t2 = BITS (insn, 14, 10);
5651
5652 if (BIT (insn, 23))
5653 {
5654 /* Write back enabled. */
5655 if ((t == n || t2 == n) && n != 31)
5656 return ERR_UND;
5657 }
5658
5659 if (BIT (insn, 22))
5660 {
5661 /* Load */
5662 if (t == t2)
5663 return ERR_UND;
5664 }
5665
5666 return ERR_OK;
5667 }
5668
5669 /* Verifier for vector-by-element instructions with three operands, where
5670    the condition `if sz:L == 11 then UNDEFINED` holds. */
5671
5672 static enum err_type
5673 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
5674 bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
5675 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5676 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5677 {
5678 const aarch64_insn undef_pattern = 0x3;
5679 aarch64_insn value;
5680
5681 assert (inst->opcode);
5682 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
5683 value = encoding ? inst->value : insn;
5684 assert (value);
5685
5686 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
5687 return ERR_UND;
5688
5689 return ERR_OK;
5690 }
5691
5692 /* Check an instruction that takes three register operands and that
5693 requires the register numbers to be distinct from one another. */
5694
5695 static enum err_type
5696 verify_three_different_regs (const struct aarch64_inst *inst,
5697 const aarch64_insn insn ATTRIBUTE_UNUSED,
5698 bfd_vma pc ATTRIBUTE_UNUSED,
5699 bool encoding ATTRIBUTE_UNUSED,
5700 aarch64_operand_error *mismatch_detail
5701 ATTRIBUTE_UNUSED,
5702 aarch64_instr_sequence *insn_sequence
5703 ATTRIBUTE_UNUSED)
5704 {
5705 int rd, rs, rn;
5706
5707 rd = inst->operands[0].reg.regno;
5708 rs = inst->operands[1].reg.regno;
5709 rn = inst->operands[2].reg.regno;
5710 if (rd == rs || rd == rn || rs == rn)
5711 {
5712 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5713 mismatch_detail->error
5714 = _("the three register operands must be distinct from one another");
5715 mismatch_detail->index = -1;
5716 return ERR_UND;
5717 }
5718
5719 return ERR_OK;
5720 }
5721
5722 /* Add INST to the end of INSN_SEQUENCE. */
5723
5724 static void
5725 add_insn_to_sequence (const struct aarch64_inst *inst,
5726 aarch64_instr_sequence *insn_sequence)
5727 {
5728 insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
5729 }
5730
5731 /* Initialize the instruction sequence INSN_SEQUENCE with the instruction INST.
5732    If INST is NULL, the given insn_sequence is cleared and no new sequence is
5733    started. */
5734
5735 void
5736 init_insn_sequence (const struct aarch64_inst *inst,
5737 aarch64_instr_sequence *insn_sequence)
5738 {
5739 int num_req_entries = 0;
5740
5741 if (insn_sequence->instr)
5742 {
5743 XDELETE (insn_sequence->instr);
5744 insn_sequence->instr = NULL;
5745 }
5746
5747 /* Handle all the cases here. May need to think of something smarter than
5748 a giant if/else chain if this grows. At that time, a lookup table may be
5749 best. */
5750 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
5751 num_req_entries = 1;
5752 if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
5753 num_req_entries = 2;
5754
5755 insn_sequence->num_added_insns = 0;
5756 insn_sequence->num_allocated_insns = num_req_entries;
5757
5758 if (num_req_entries != 0)
5759 {
5760 insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
5761 add_insn_to_sequence (inst, insn_sequence);
5762 }
5763 }
5764
5765 /* Subroutine of verify_constraints. Check whether the instruction
5766 is part of a MOPS P/M/E sequence and, if so, whether sequencing
5767 expectations are met. Return true if the check passes, otherwise
5768 describe the problem in MISMATCH_DETAIL.
5769
5770 IS_NEW_SECTION is true if INST is assumed to start a new section.
5771 The other arguments are as for verify_constraints. */
5772
5773 static bool
5774 verify_mops_pme_sequence (const struct aarch64_inst *inst,
5775 bool is_new_section,
5776 aarch64_operand_error *mismatch_detail,
5777 aarch64_instr_sequence *insn_sequence)
5778 {
5779 const struct aarch64_opcode *opcode;
5780 const struct aarch64_inst *prev_insn;
5781 int i;
5782
5783 opcode = inst->opcode;
5784 if (insn_sequence->instr)
5785 prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
5786 else
5787 prev_insn = NULL;
5788
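  /* The P, M and E variants of a MOPS sequence are assumed to occupy
     consecutive entries in aarch64_opcode_table, so the opcode that should
     follow PREV_INSN is prev_insn->opcode + 1 and the one that should precede
     INST is opcode - 1.  */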
5789 if (prev_insn
5790 && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
5791 && prev_insn->opcode != opcode - 1)
5792 {
5793 mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
5794 mismatch_detail->error = NULL;
5795 mismatch_detail->index = -1;
5796 mismatch_detail->data[0].s = prev_insn->opcode[1].name;
5797 mismatch_detail->data[1].s = prev_insn->opcode->name;
5798 mismatch_detail->non_fatal = true;
5799 return false;
5800 }
5801
5802 if (opcode->constraints & C_SCAN_MOPS_PME)
5803 {
5804 if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
5805 {
5806 mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
5807 mismatch_detail->error = NULL;
5808 mismatch_detail->index = -1;
5809 mismatch_detail->data[0].s = opcode->name;
5810 mismatch_detail->data[1].s = opcode[-1].name;
5811 mismatch_detail->non_fatal = true;
5812 return false;
5813 }
5814
5815 for (i = 0; i < 3; ++i)
5816 /* There's no specific requirement for the data register to be
5817 the same between consecutive SET* instructions. */
5818 if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
5819 || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
5820 || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
5821 && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
5822 {
5823 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5824 if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
5825 mismatch_detail->error = _("destination register differs from "
5826 "preceding instruction");
5827 else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
5828 mismatch_detail->error = _("source register differs from "
5829 "preceding instruction");
5830 else
5831 mismatch_detail->error = _("size register differs from "
5832 "preceding instruction");
5833 mismatch_detail->index = i;
5834 mismatch_detail->non_fatal = true;
5835 return false;
5836 }
5837 }
5838
5839 return true;
5840 }
5841
5842 /* This function verifies that the instruction INST adheres to its specified
5843 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
5844 returned and MISMATCH_DETAIL contains the reason why verification failed.
5845
5846 The function is called both during assembly and disassembly. If assembling
5847    then ENCODING will be TRUE, else FALSE.  If disassembling, PC will be set
5848    and will contain the PC of the current instruction w.r.t. the section.
5849
5850    If not ENCODING (i.e. disassembling) and PC=0 then you are at the start of a
5851    section.  The constraints are verified against the given state insn_sequence,
5852    which is updated as it transitions through the verification. */
5853
5854 enum err_type
5855 verify_constraints (const struct aarch64_inst *inst,
5856 const aarch64_insn insn ATTRIBUTE_UNUSED,
5857 bfd_vma pc,
5858 bool encoding,
5859 aarch64_operand_error *mismatch_detail,
5860 aarch64_instr_sequence *insn_sequence)
5861 {
5862 assert (inst);
5863 assert (inst->opcode);
5864
5865 const struct aarch64_opcode *opcode = inst->opcode;
5866 if (!opcode->constraints && !insn_sequence->instr)
5867 return ERR_OK;
5868
5869 assert (insn_sequence);
5870
5871 enum err_type res = ERR_OK;
5872
5873 /* This instruction puts a constraint on the insn_sequence. */
5874 if (opcode->flags & F_SCAN)
5875 {
5876 if (insn_sequence->instr)
5877 {
5878 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5879 mismatch_detail->error = _("instruction opens new dependency "
5880 "sequence without ending previous one");
5881 mismatch_detail->index = -1;
5882 mismatch_detail->non_fatal = true;
5883 res = ERR_VFI;
5884 }
5885
5886 init_insn_sequence (inst, insn_sequence);
5887 return res;
5888 }
5889
5890 bool is_new_section = (!encoding && pc == 0);
5891 if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
5892 insn_sequence))
5893 {
5894 res = ERR_VFI;
5895 if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
5896 init_insn_sequence (NULL, insn_sequence);
5897 }
5898
5899 /* Verify constraints on an existing sequence. */
5900 if (insn_sequence->instr)
5901 {
5902 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
5903 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
5904 closed a previous one that we should have. */
5905 if (is_new_section && res == ERR_OK)
5906 {
5907 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5908 mismatch_detail->error = _("previous `movprfx' sequence not closed");
5909 mismatch_detail->index = -1;
5910 mismatch_detail->non_fatal = true;
5911 res = ERR_VFI;
5912 /* Reset the sequence. */
5913 init_insn_sequence (NULL, insn_sequence);
5914 return res;
5915 }
5916
5917 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
5918 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
5919 {
5920 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5921 instruction for better error messages. */
5922 if (!opcode->avariant
5923 || !(*opcode->avariant &
5924 (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
5925 {
5926 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5927 mismatch_detail->error = _("SVE instruction expected after "
5928 "`movprfx'");
5929 mismatch_detail->index = -1;
5930 mismatch_detail->non_fatal = true;
5931 res = ERR_VFI;
5932 goto done;
5933 }
5934
5935 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5936 instruction that is allowed to be used with a MOVPRFX. */
5937 if (!(opcode->constraints & C_SCAN_MOVPRFX))
5938 {
5939 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5940 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
5941 "expected");
5942 mismatch_detail->index = -1;
5943 mismatch_detail->non_fatal = true;
5944 res = ERR_VFI;
5945 goto done;
5946 }
5947
5948 /* Next check for usage of the predicate register. */
5949 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
5950 aarch64_opnd_info blk_pred, inst_pred;
5951 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
5952 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
5953 bool predicated = false;
5954 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
5955
5956 /* Determine if the movprfx instruction used is predicated or not. */
5957 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
5958 {
5959 predicated = true;
5960 blk_pred = insn_sequence->instr->operands[1];
5961 }
5962
5963 unsigned char max_elem_size = 0;
5964 unsigned char current_elem_size;
5965 int num_op_used = 0, last_op_usage = 0;
5966 int i, inst_pred_idx = -1;
5967 int num_ops = aarch64_num_of_operands (opcode);
5968 for (i = 0; i < num_ops; i++)
5969 {
5970 aarch64_opnd_info inst_op = inst->operands[i];
5971 switch (inst_op.type)
5972 {
5973 case AARCH64_OPND_SVE_Zd:
5974 case AARCH64_OPND_SVE_Zm_5:
5975 case AARCH64_OPND_SVE_Zm_16:
5976 case AARCH64_OPND_SVE_Zn:
5977 case AARCH64_OPND_SVE_Zt:
5978 case AARCH64_OPND_SVE_Vm:
5979 case AARCH64_OPND_SVE_Vn:
5980 case AARCH64_OPND_Va:
5981 case AARCH64_OPND_Vn:
5982 case AARCH64_OPND_Vm:
5983 case AARCH64_OPND_Sn:
5984 case AARCH64_OPND_Sm:
5985 if (inst_op.reg.regno == blk_dest.reg.regno)
5986 {
5987 num_op_used++;
5988 last_op_usage = i;
5989 }
5990 current_elem_size
5991 = aarch64_get_qualifier_esize (inst_op.qualifier);
5992 if (current_elem_size > max_elem_size)
5993 max_elem_size = current_elem_size;
5994 break;
5995 case AARCH64_OPND_SVE_Pd:
5996 case AARCH64_OPND_SVE_Pg3:
5997 case AARCH64_OPND_SVE_Pg4_5:
5998 case AARCH64_OPND_SVE_Pg4_10:
5999 case AARCH64_OPND_SVE_Pg4_16:
6000 case AARCH64_OPND_SVE_Pm:
6001 case AARCH64_OPND_SVE_Pn:
6002 case AARCH64_OPND_SVE_Pt:
6003 case AARCH64_OPND_SME_Pm:
6004 inst_pred = inst_op;
6005 inst_pred_idx = i;
6006 break;
6007 default:
6008 break;
6009 }
6010 }
6011
6012 assert (max_elem_size != 0);
6013 aarch64_opnd_info inst_dest = inst->operands[0];
6014 /* Determine the size that should be used to compare against the
6015 movprfx size. */
6016 current_elem_size
6017 = opcode->constraints & C_MAX_ELEM
6018 ? max_elem_size
6019 : aarch64_get_qualifier_esize (inst_dest.qualifier);
6020
6021 /* If movprfx is predicated do some extra checks. */
6022 if (predicated)
6023 {
6024 /* The instruction must be predicated. */
6025 if (inst_pred_idx < 0)
6026 {
6027 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6028 mismatch_detail->error = _("predicated instruction expected "
6029 "after `movprfx'");
6030 mismatch_detail->index = -1;
6031 mismatch_detail->non_fatal = true;
6032 res = ERR_VFI;
6033 goto done;
6034 }
6035
6036 /* The instruction must have a merging predicate. */
6037 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
6038 {
6039 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6040 mismatch_detail->error = _("merging predicate expected due "
6041 "to preceding `movprfx'");
6042 mismatch_detail->index = inst_pred_idx;
6043 mismatch_detail->non_fatal = true;
6044 res = ERR_VFI;
6045 goto done;
6046 }
6047
6048 	      /* The same predicate register must be used in the instruction. */
6049 if (blk_pred.reg.regno != inst_pred.reg.regno)
6050 {
6051 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6052 mismatch_detail->error = _("predicate register differs "
6053 "from that in preceding "
6054 "`movprfx'");
6055 mismatch_detail->index = inst_pred_idx;
6056 mismatch_detail->non_fatal = true;
6057 res = ERR_VFI;
6058 goto done;
6059 }
6060 }
6061
6062 /* Destructive operations by definition must allow one usage of the
6063 same register. */
6064 int allowed_usage
6065 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
6066
6067 /* Operand is not used at all. */
6068 if (num_op_used == 0)
6069 {
6070 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6071 mismatch_detail->error = _("output register of preceding "
6072 "`movprfx' not used in current "
6073 "instruction");
6074 mismatch_detail->index = 0;
6075 mismatch_detail->non_fatal = true;
6076 res = ERR_VFI;
6077 goto done;
6078 }
6079
6080 /* We now know it's used, now determine exactly where it's used. */
6081 if (blk_dest.reg.regno != inst_dest.reg.regno)
6082 {
6083 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6084 mismatch_detail->error = _("output register of preceding "
6085 "`movprfx' expected as output");
6086 mismatch_detail->index = 0;
6087 mismatch_detail->non_fatal = true;
6088 res = ERR_VFI;
6089 goto done;
6090 }
6091
6092 /* Operand used more than allowed for the specific opcode type. */
6093 if (num_op_used > allowed_usage)
6094 {
6095 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6096 mismatch_detail->error = _("output register of preceding "
6097 "`movprfx' used as input");
6098 mismatch_detail->index = last_op_usage;
6099 mismatch_detail->non_fatal = true;
6100 res = ERR_VFI;
6101 goto done;
6102 }
6103
6104 	  /* Now the only thing left is the qualifier checks.  The register
6105 	     must have the same maximum element size. */
6106 if (inst_dest.qualifier
6107 && blk_dest.qualifier
6108 && current_elem_size
6109 != aarch64_get_qualifier_esize (blk_dest.qualifier))
6110 {
6111 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6112 mismatch_detail->error = _("register size not compatible with "
6113 "previous `movprfx'");
6114 mismatch_detail->index = 0;
6115 mismatch_detail->non_fatal = true;
6116 res = ERR_VFI;
6117 goto done;
6118 }
6119 }
6120
6121 done:
6122 if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
6123 /* We've checked the last instruction in the sequence and so
6124 don't need the sequence any more. */
6125 init_insn_sequence (NULL, insn_sequence);
6126 else
6127 add_insn_to_sequence (inst, insn_sequence);
6128 }
6129
6130 return res;
6131 }
6132
6133
6134 /* Return true if VALUE cannot be moved into an SVE register using DUP
6135 (with any element size, not just ESIZE) and if using DUPM would
6136 therefore be OK. ESIZE is the number of bytes in the immediate. */
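/* Two illustrative cases, worked by hand rather than taken from the sources:
   0x3f3f3f3f3f3f3f3f with ESIZE 8 folds down to the single byte 0x3f, which
   DUP can materialise, so the function returns false; 0x1234 with ESIZE 2 is
   neither a signed 8-bit immediate nor a multiple of 256, so DUP cannot
   produce it and the function returns true.  */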
6137
6138 bool
6139 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
6140 {
6141 int64_t svalue = uvalue;
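  /* Mask of all bits above the low ESIZE bytes.  The shift is split into two
     halves so that esize == 8 does not shift a 64-bit value by 64 bits, which
     would be undefined behaviour.  */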
6142 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
6143
6144 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
6145 return false;
6146 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
6147 {
6148 svalue = (int32_t) uvalue;
6149 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
6150 {
6151 svalue = (int16_t) uvalue;
6152 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
6153 return false;
6154 }
6155 }
6156 if ((svalue & 0xff) == 0)
6157 svalue /= 256;
6158 return svalue < -128 || svalue >= 128;
6159 }
6160
6161 /* Return true if a CPU with the AARCH64_FEATURE_* bits in CPU_VARIANT
6162 supports the instruction described by INST. */
6163
6164 bool
6165 aarch64_cpu_supports_inst_p (uint64_t cpu_variant, aarch64_inst *inst)
6166 {
6167 if (!inst->opcode->avariant
6168 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *inst->opcode->avariant))
6169 return false;
6170
6171 return true;
6172 }
6173
6174 /* Include the opcode description table as well as the operand description
6175 table. */
6176 #define VERIFIER(x) verify_##x
6177 #include "aarch64-tbl.h"