1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2022 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
34 #ifdef DEBUG_AARCH64
35 int debug_dump = false;
36 #endif /* DEBUG_AARCH64 */
37
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
41 /* 0-7. */
42 "pow2",
43 "vl1",
44 "vl2",
45 "vl3",
46 "vl4",
47 "vl5",
48 "vl6",
49 "vl7",
50 /* 8-15. */
51 "vl8",
52 "vl16",
53 "vl32",
54 "vl64",
55 "vl128",
56 "vl256",
57 0,
58 0,
59 /* 16-23. */
60 0,
61 0,
62 0,
63 0,
64 0,
65 0,
66 0,
67 0,
68 /* 24-31. */
69 0,
70 0,
71 0,
72 0,
73 0,
74 "mul4",
75 "mul3",
76 "all"
77 };
78
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
82 /* 0-7. */
83 "pldl1keep",
84 "pldl1strm",
85 "pldl2keep",
86 "pldl2strm",
87 "pldl3keep",
88 "pldl3strm",
89 0,
90 0,
91 /* 8-15. */
92 "pstl1keep",
93 "pstl1strm",
94 "pstl2keep",
95 "pstl2strm",
96 "pstl3keep",
97 "pstl3strm",
98 0,
99 0
100 };
101
102 /* Helper functions to determine which operand is used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bool
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return (qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q);
110 }
111
112 static inline bool
113 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
114 {
115 return (qualifier >= AARCH64_OPND_QLF_S_B
116 && qualifier <= AARCH64_OPND_QLF_S_Q);
117 }
118
119 enum data_pattern
120 {
121 DP_UNKNOWN,
122 DP_VECTOR_3SAME,
123 DP_VECTOR_LONG,
124 DP_VECTOR_WIDE,
125 DP_VECTOR_ACROSS_LANES,
126 };
127
128 static const char significant_operand_index [] =
129 {
130 0, /* DP_UNKNOWN, by default using operand 0. */
131 0, /* DP_VECTOR_3SAME */
132 1, /* DP_VECTOR_LONG */
133 2, /* DP_VECTOR_WIDE */
134 1, /* DP_VECTOR_ACROSS_LANES */
135 };
136
137 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
138 the data pattern.
139 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
140 corresponds to one of a sequence of operands. */
141
142 static enum data_pattern
143 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
144 {
145 if (vector_qualifier_p (qualifiers[0]))
146 {
147 /* e.g. v.4s, v.4s, v.4s
148 or v.4h, v.4h, v.h[3]. */
149 if (qualifiers[0] == qualifiers[1]
150 && vector_qualifier_p (qualifiers[2])
151 && (aarch64_get_qualifier_esize (qualifiers[0])
152 == aarch64_get_qualifier_esize (qualifiers[1]))
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[2])))
155 return DP_VECTOR_3SAME;
156 /* e.g. v.8h, v.8b, v.8b.
157 or v.4s, v.4h, v.h[2].
158 or v.8h, v.16b. */
159 if (vector_qualifier_p (qualifiers[1])
160 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
161 && (aarch64_get_qualifier_esize (qualifiers[0])
162 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
163 return DP_VECTOR_LONG;
164 /* e.g. v.8h, v.8h, v.8b. */
165 if (qualifiers[0] == qualifiers[1]
166 && vector_qualifier_p (qualifiers[2])
167 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
168 && (aarch64_get_qualifier_esize (qualifiers[0])
169 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[1])))
172 return DP_VECTOR_WIDE;
173 }
174 else if (fp_qualifier_p (qualifiers[0]))
175 {
176 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
177 if (vector_qualifier_p (qualifiers[1])
178 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
179 return DP_VECTOR_ACROSS_LANES;
180 }
181
182 return DP_UNKNOWN;
183 }
184
185 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
186 the AdvSIMD instructions. */
187 /* N.B. it is possible to do some optimization that avoids calling
188 get_data_pattern each time we need to select an operand. We could
189 either cache the calculated result or statically generate the data;
190 however, it is not obvious that the optimization would bring significant
191 benefit. */
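/* For example, a widening instruction such as UADDW Vd.8H, Vn.8H, Vm.8B
   matches DP_VECTOR_WIDE above, so operand 2 (the narrower Vm.8B operand)
   is the one selected to carry the size:Q information. */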
192
193 int
194 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
195 {
196 return
197 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
198 }
199 \f
200 /* Instruction bit-fields.
201 Keep synced with 'enum aarch64_field_kind'. */
202 const aarch64_field fields[] =
203 {
204 { 0, 0 }, /* NIL. */
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 10, 4 }, /* imm4_3: in addg/subg instructions. */
247 { 5, 4 }, /* imm4_5: in SME instructions. */
248 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
249 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
250 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
251 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
252 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
253 { 5, 14 }, /* imm14: in test bit and branch instructions. */
254 { 5, 16 }, /* imm16: in exception instructions. */
255 { 0, 16 }, /* imm16_2: in udf instruction. */
256 { 0, 26 }, /* imm26: in unconditional branch instructions. */
257 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
258 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
259 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
260 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
261 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
262 { 22, 1 }, /* N: in logical (immediate) instructions. */
263 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
264 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
265 { 31, 1 }, /* sf: in integer data processing instructions. */
266 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
267 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
268 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
269 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
270 { 31, 1 }, /* b5: in the test bit and branch instructions. */
271 { 19, 5 }, /* b40: in the test bit and branch instructions. */
272 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
273 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
274 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
275 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
276 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
277 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
278 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
279 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
280 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
281 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
282 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
283 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
284 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
285 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
286 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
287 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
288 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
289 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
290 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
291 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
292 { 0, 5 }, /* SVE_Zd: SVE vector register, bits [4,0]. */
293 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
294 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
295 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
296 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
297 { 5, 1 }, /* SVE_i1: single-bit immediate. */
298 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
299 { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
300 { 19, 2 }, /* SVE_i3h2: two high bits of 3-bit immediate, bits [20,19]. */
301 { 20, 1 }, /* SVE_i2h: high bit of 2-bit immediate, bit 20. */
302 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
303 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
304 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
305 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
306 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
307 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
308 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
309 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
310 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
311 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
312 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
313 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
314 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
315 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
316 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
317 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
318 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
319 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
320 { 30, 1 }, /* SVE_sz2: 1-bit element size select. */
321 { 16, 4 }, /* SVE_tsz: triangular size select. */
322 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
323 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
324 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
325 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
326 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
327 { 0, 2 }, /* SME ZAda tile ZA0-ZA3. */
328 { 0, 3 }, /* SME ZAda tile ZA0-ZA7. */
329 { 22, 2 }, /* SME_size_10: size<1>, size<0> class field, [23:22]. */
330 { 16, 1 }, /* SME_Q: Q class bit, bit 16. */
331 { 15, 1 }, /* SME_V: (horizontal / vertical tiles), bit 15. */
332 { 13, 2 }, /* SME_Rv: vector select register W12-W15, bits [14:13]. */
333 { 13, 3 }, /* SME Pm second source scalable predicate register P0-P7. */
334 { 0, 8 }, /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0]. */
335 { 16, 2 }, /* SME_Rm: index base register W12-W15 [17:16]. */
336 { 23, 1 }, /* SME_i1: immediate field, bit 23. */
337 { 22, 1 }, /* SME_tszh: immediate and qualifier field, bit 22. */
338 { 18, 3 }, /* SME_tshl: immediate and qualifier field, bits [20:18]. */
339 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
340 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
341 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
342 { 12, 2 }, /* SM3: Indexed element SM3 2 bits index immediate. */
343 { 22, 1 }, /* sz: 1-bit element size select. */
344 { 10, 2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>. */
345 };
346
347 enum aarch64_operand_class
348 aarch64_get_operand_class (enum aarch64_opnd type)
349 {
350 return aarch64_operands[type].op_class;
351 }
352
353 const char *
354 aarch64_get_operand_name (enum aarch64_opnd type)
355 {
356 return aarch64_operands[type].name;
357 }
358
359 /* Get operand description string.
360 This is usually used for diagnostic purposes. */
361 const char *
362 aarch64_get_operand_desc (enum aarch64_opnd type)
363 {
364 return aarch64_operands[type].desc;
365 }
366
367 /* Table of all conditional affixes. */
368 const aarch64_cond aarch64_conds[16] =
369 {
370 {{"eq", "none"}, 0x0},
371 {{"ne", "any"}, 0x1},
372 {{"cs", "hs", "nlast"}, 0x2},
373 {{"cc", "lo", "ul", "last"}, 0x3},
374 {{"mi", "first"}, 0x4},
375 {{"pl", "nfrst"}, 0x5},
376 {{"vs"}, 0x6},
377 {{"vc"}, 0x7},
378 {{"hi", "pmore"}, 0x8},
379 {{"ls", "plast"}, 0x9},
380 {{"ge", "tcont"}, 0xa},
381 {{"lt", "tstop"}, 0xb},
382 {{"gt"}, 0xc},
383 {{"le"}, 0xd},
384 {{"al"}, 0xe},
385 {{"nv"}, 0xf},
386 };
387
388 const aarch64_cond *
389 get_cond_from_value (aarch64_insn value)
390 {
391 assert (value < 16);
392 return &aarch64_conds[(unsigned int) value];
393 }
394
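/* Return the condition that tests the opposite of COND; the table above is
   arranged so that flipping the low bit of the value inverts the condition,
   e.g. EQ (0x0) <-> NE (0x1) and GE (0xa) <-> LT (0xb). */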
395 const aarch64_cond *
396 get_inverted_cond (const aarch64_cond *cond)
397 {
398 return &aarch64_conds[cond->value ^ 0x1];
399 }
400
401 /* Table describing the operand extension/shifting operators; indexed by
402 enum aarch64_modifier_kind.
403
404 The value column provides the most common values for encoding modifiers,
405 which enables table-driven encoding/decoding for the modifiers. */
406 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
407 {
408 {"none", 0x0},
409 {"msl", 0x0},
410 {"ror", 0x3},
411 {"asr", 0x2},
412 {"lsr", 0x1},
413 {"lsl", 0x0},
414 {"uxtb", 0x0},
415 {"uxth", 0x1},
416 {"uxtw", 0x2},
417 {"uxtx", 0x3},
418 {"sxtb", 0x4},
419 {"sxth", 0x5},
420 {"sxtw", 0x6},
421 {"sxtx", 0x7},
422 {"mul", 0x0},
423 {"mul vl", 0x0},
424 {NULL, 0},
425 };
426
427 enum aarch64_modifier_kind
428 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
429 {
430 return desc - aarch64_operand_modifiers;
431 }
432
433 aarch64_insn
434 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
435 {
436 return aarch64_operand_modifiers[kind].value;
437 }
438
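/* Map an encoded modifier VALUE back to its enumerator. With EXTEND_P the
   value indexes the extend operators starting at UXTB (e.g. 0x2 yields
   AARCH64_MOD_UXTW); otherwise it counts back from LSL through the shift
   operators (e.g. 0x2 yields AARCH64_MOD_ASR), matching the value column of
   aarch64_operand_modifiers above. */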
439 enum aarch64_modifier_kind
440 aarch64_get_operand_modifier_from_value (aarch64_insn value,
441 bool extend_p)
442 {
443 if (extend_p)
444 return AARCH64_MOD_UXTB + value;
445 else
446 return AARCH64_MOD_LSL - value;
447 }
448
449 bool
450 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
451 {
452 return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
453 }
454
455 static inline bool
456 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
457 {
458 return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
459 }
460
461 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
462 {
463 { "#0x00", 0x0 },
464 { "oshld", 0x1 },
465 { "oshst", 0x2 },
466 { "osh", 0x3 },
467 { "#0x04", 0x4 },
468 { "nshld", 0x5 },
469 { "nshst", 0x6 },
470 { "nsh", 0x7 },
471 { "#0x08", 0x8 },
472 { "ishld", 0x9 },
473 { "ishst", 0xa },
474 { "ish", 0xb },
475 { "#0x0c", 0xc },
476 { "ld", 0xd },
477 { "st", 0xe },
478 { "sy", 0xf },
479 };
480
481 const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
482 { /* CRm<3:2> #imm */
483 { "oshnxs", 16 }, /* 00 16 */
484 { "nshnxs", 20 }, /* 01 20 */
485 { "ishnxs", 24 }, /* 10 24 */
486 { "synxs", 28 }, /* 11 28 */
487 };
488
489 /* Table describing the operands supported by the aliases of the HINT
490 instruction.
491
492 The name column is the operand that is accepted for the alias. The value
493 column is the hint number of the alias. The list of operands is terminated
494 by NULL in the name column. */
495
496 const struct aarch64_name_value_pair aarch64_hint_options[] =
497 {
498 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
499 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
500 { "csync", HINT_OPD_CSYNC }, /* PSB CSYNC. */
501 { "c", HINT_OPD_C }, /* BTI C. */
502 { "j", HINT_OPD_J }, /* BTI J. */
503 { "jc", HINT_OPD_JC }, /* BTI JC. */
504 { NULL, HINT_OPD_NULL },
505 };
506
507 /* op -> op: load = 0 instruction = 1 store = 2
508 l -> level: 1-3
509 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
510 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
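/* For example, B (0, 1, 0) is 0x00 ("pldl1keep") and B (2, 3, 1) is 0x15
   ("pstl3strm"). */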
511 const struct aarch64_name_value_pair aarch64_prfops[32] =
512 {
513 { "pldl1keep", B(0, 1, 0) },
514 { "pldl1strm", B(0, 1, 1) },
515 { "pldl2keep", B(0, 2, 0) },
516 { "pldl2strm", B(0, 2, 1) },
517 { "pldl3keep", B(0, 3, 0) },
518 { "pldl3strm", B(0, 3, 1) },
519 { NULL, 0x06 },
520 { NULL, 0x07 },
521 { "plil1keep", B(1, 1, 0) },
522 { "plil1strm", B(1, 1, 1) },
523 { "plil2keep", B(1, 2, 0) },
524 { "plil2strm", B(1, 2, 1) },
525 { "plil3keep", B(1, 3, 0) },
526 { "plil3strm", B(1, 3, 1) },
527 { NULL, 0x0e },
528 { NULL, 0x0f },
529 { "pstl1keep", B(2, 1, 0) },
530 { "pstl1strm", B(2, 1, 1) },
531 { "pstl2keep", B(2, 2, 0) },
532 { "pstl2strm", B(2, 2, 1) },
533 { "pstl3keep", B(2, 3, 0) },
534 { "pstl3strm", B(2, 3, 1) },
535 { NULL, 0x16 },
536 { NULL, 0x17 },
537 { NULL, 0x18 },
538 { NULL, 0x19 },
539 { NULL, 0x1a },
540 { NULL, 0x1b },
541 { NULL, 0x1c },
542 { NULL, 0x1d },
543 { NULL, 0x1e },
544 { NULL, 0x1f },
545 };
546 #undef B
547 \f
548 /* Utilities on value constraint. */
549
550 static inline int
551 value_in_range_p (int64_t value, int low, int high)
552 {
553 return (value >= low && value <= high) ? 1 : 0;
554 }
555
556 /* Return true if VALUE is a multiple of ALIGN. */
557 static inline int
558 value_aligned_p (int64_t value, int align)
559 {
560 return (value % align) == 0;
561 }
562
563 /* Return non-zero if signed VALUE can be represented in a field of WIDTH bits. */
564 static inline int
565 value_fit_signed_field_p (int64_t value, unsigned width)
566 {
567 assert (width < 32);
568 if (width < sizeof (value) * 8)
569 {
570 int64_t lim = (uint64_t) 1 << (width - 1);
571 if (value >= -lim && value < lim)
572 return 1;
573 }
574 return 0;
575 }
576
577 /* Return non-zero if VALUE fits in an unsigned field of WIDTH bits. */
578 static inline int
579 value_fit_unsigned_field_p (int64_t value, unsigned width)
580 {
581 assert (width < 32);
582 if (width < sizeof (value) * 8)
583 {
584 int64_t lim = (uint64_t) 1 << width;
585 if (value >= 0 && value < lim)
586 return 1;
587 }
588 return 0;
589 }
590
591 /* Return 1 if OPERAND is SP or WSP. */
592 int
593 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
594 {
595 return ((aarch64_get_operand_class (operand->type)
596 == AARCH64_OPND_CLASS_INT_REG)
597 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
598 && operand->reg.regno == 31);
599 }
600
601 /* Return 1 if OPERAND is XZR or WZR. */
602 int
603 aarch64_zero_register_p (const aarch64_opnd_info *operand)
604 {
605 return ((aarch64_get_operand_class (operand->type)
606 == AARCH64_OPND_CLASS_INT_REG)
607 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
608 && operand->reg.regno == 31);
609 }
610
611 /* Return true if the operand *OPERAND, which has the operand code
612 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
613 qualified by the qualifier TARGET. */
614
615 static inline int
616 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
617 aarch64_opnd_qualifier_t target)
618 {
619 switch (operand->qualifier)
620 {
621 case AARCH64_OPND_QLF_W:
622 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
623 return 1;
624 break;
625 case AARCH64_OPND_QLF_X:
626 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
627 return 1;
628 break;
629 case AARCH64_OPND_QLF_WSP:
630 if (target == AARCH64_OPND_QLF_W
631 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
632 return 1;
633 break;
634 case AARCH64_OPND_QLF_SP:
635 if (target == AARCH64_OPND_QLF_X
636 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
637 return 1;
638 break;
639 default:
640 break;
641 }
642
643 return 0;
644 }
645
646 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
647 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
648
649 Return NIL if more than one expected qualifier is found. */
650
651 aarch64_opnd_qualifier_t
652 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
653 int idx,
654 const aarch64_opnd_qualifier_t known_qlf,
655 int known_idx)
656 {
657 int i, saved_i;
658
659 /* Special case.
660
661 When the known qualifier is NIL, we have to assume that there is only
662 one qualifier sequence in the *QSEQ_LIST and return the corresponding
663 qualifier directly. One scenario is that for instruction
664 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
665 which has only one possible valid qualifier sequence
666 NIL, S_D
667 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
668 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
669
670 Because the qualifier NIL has dual roles in the qualifier sequence:
671 it can mean no qualifier for the operand, or the qualifier sequence is
672 not in use (when all qualifiers in the sequence are NILs), we have to
673 handle this special case here. */
674 if (known_qlf == AARCH64_OPND_NIL)
675 {
676 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
677 return qseq_list[0][idx];
678 }
679
680 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
681 {
682 if (qseq_list[i][known_idx] == known_qlf)
683 {
684 if (saved_i != -1)
685 /* More than one sequence is found to have KNOWN_QLF at
686 KNOWN_IDX. */
687 return AARCH64_OPND_NIL;
688 saved_i = i;
689 }
690 }
691
692 return qseq_list[saved_i][idx];
693 }
694
695 enum operand_qualifier_kind
696 {
697 OQK_NIL,
698 OQK_OPD_VARIANT,
699 OQK_VALUE_IN_RANGE,
700 OQK_MISC,
701 };
702
703 /* Operand qualifier description. */
704 struct operand_qualifier_data
705 {
706 /* The usage of the three data fields depends on the qualifier kind. */
707 int data0;
708 int data1;
709 int data2;
710 /* Description. */
711 const char *desc;
712 /* Kind. */
713 enum operand_qualifier_kind kind;
714 };
715
716 /* Indexed by the operand qualifier enumerators. */
717 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
718 {
719 {0, 0, 0, "NIL", OQK_NIL},
720
721 /* Operand variant qualifiers.
722 First 3 fields:
723 element size, number of elements and common value for encoding. */
724
725 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
726 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
727 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
728 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
729
730 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
731 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
732 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
733 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
734 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
735 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
736 {4, 1, 0x0, "2h", OQK_OPD_VARIANT},
737
738 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
739 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
740 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
741 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
742 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
743 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
744 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
745 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
746 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
747 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
748 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
749
750 {0, 0, 0, "z", OQK_OPD_VARIANT},
751 {0, 0, 0, "m", OQK_OPD_VARIANT},
752
753 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
754 {16, 0, 0, "tag", OQK_OPD_VARIANT},
755
756 /* Qualifiers constraining the value range.
757 First 3 fields:
758 Lower bound, higher bound, unused. */
759
760 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
761 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
762 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
763 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
764 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
765 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
766 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
767
768 /* Qualifiers for miscellaneous purposes.
769 First 3 fields:
770 unused, unused and unused. */
771
772 {0, 0, 0, "lsl", 0},
773 {0, 0, 0, "msl", 0},
774
775 {0, 0, 0, "retrieving", 0},
776 };
777
778 static inline bool
779 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
780 {
781 return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
782 }
783
784 static inline bool
785 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
786 {
787 return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
788 }
789
790 const char*
791 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
792 {
793 return aarch64_opnd_qualifiers[qualifier].desc;
794 }
795
796 /* Given an operand qualifier, return the expected data element size
797 of a qualified operand. */
798 unsigned char
799 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
800 {
801 assert (operand_variant_qualifier_p (qualifier));
802 return aarch64_opnd_qualifiers[qualifier].data0;
803 }
804
805 unsigned char
806 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
807 {
808 assert (operand_variant_qualifier_p (qualifier));
809 return aarch64_opnd_qualifiers[qualifier].data1;
810 }
811
812 aarch64_insn
813 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
814 {
815 assert (operand_variant_qualifier_p (qualifier));
816 return aarch64_opnd_qualifiers[qualifier].data2;
817 }
818
819 static int
820 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
821 {
822 assert (qualifier_value_in_range_constraint_p (qualifier));
823 return aarch64_opnd_qualifiers[qualifier].data0;
824 }
825
826 static int
827 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
828 {
829 assert (qualifier_value_in_range_constraint_p (qualifier));
830 return aarch64_opnd_qualifiers[qualifier].data1;
831 }
832
833 #ifdef DEBUG_AARCH64
834 void
835 aarch64_verbose (const char *str, ...)
836 {
837 va_list ap;
838 va_start (ap, str);
839 printf ("#### ");
840 vprintf (str, ap);
841 printf ("\n");
842 va_end (ap);
843 }
844
845 static inline void
846 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
847 {
848 int i;
849 printf ("#### \t");
850 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
851 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
852 printf ("\n");
853 }
854
855 static void
856 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
857 const aarch64_opnd_qualifier_t *qualifier)
858 {
859 int i;
860 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
861
862 aarch64_verbose ("dump_match_qualifiers:");
863 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
864 curr[i] = opnd[i].qualifier;
865 dump_qualifier_sequence (curr);
866 aarch64_verbose ("against");
867 dump_qualifier_sequence (qualifier);
868 }
869 #endif /* DEBUG_AARCH64 */
870
871 /* Check whether the instruction described by OPCODE is destructive, based
872 on how its register operands are reused. It does not recognize unary
873 destructive instructions. */
874 bool
875 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
876 {
877 int i = 0;
878 const enum aarch64_opnd *opnds = opcode->operands;
879
880 if (opnds[0] == AARCH64_OPND_NIL)
881 return false;
882
883 while (opnds[++i] != AARCH64_OPND_NIL)
884 if (opnds[i] == opnds[0])
885 return true;
886
887 return false;
888 }
889
890 /* TODO: improve this; we could keep the number of operands in an extra
891 field rather than calculating it every time. */
892
893 int
894 aarch64_num_of_operands (const aarch64_opcode *opcode)
895 {
896 int i = 0;
897 const enum aarch64_opnd *opnds = opcode->operands;
898 while (opnds[i++] != AARCH64_OPND_NIL)
899 ;
900 --i;
901 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
902 return i;
903 }
904
905 /* Find the best-matched qualifier sequence in *QUALIFIERS_LIST for INST.
906 If it succeeds, fill *RET with the found sequence and return 1; otherwise return 0.
907
908 N.B. on entry, it is very likely that only some operands in *INST
909 have had their qualifiers established.
910
911 If STOP_AT is not -1, the function will only try to match
912 the qualifier sequence for operands before and including the operand
913 of index STOP_AT; and on success *RET will only be filled with the first
914 (STOP_AT+1) qualifiers.
915
916 A couple of examples of the matching algorithm:
917
918 X,W,NIL should match
919 X,W,NIL
920
921 NIL,NIL should match
922 X ,NIL
923
924 Apart from serving the main encoding routine, this can also be called
925 during or after the operand decoding. */
926
927 int
928 aarch64_find_best_match (const aarch64_inst *inst,
929 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
930 int stop_at, aarch64_opnd_qualifier_t *ret)
931 {
932 int found = 0;
933 int i, num_opnds;
934 const aarch64_opnd_qualifier_t *qualifiers;
935
936 num_opnds = aarch64_num_of_operands (inst->opcode);
937 if (num_opnds == 0)
938 {
939 DEBUG_TRACE ("SUCCEED: no operand");
940 return 1;
941 }
942
943 if (stop_at < 0 || stop_at >= num_opnds)
944 stop_at = num_opnds - 1;
945
946 /* For each pattern. */
947 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
948 {
949 int j;
950 qualifiers = *qualifiers_list;
951
952 /* Start as positive. */
953 found = 1;
954
955 DEBUG_TRACE ("%d", i);
956 #ifdef DEBUG_AARCH64
957 if (debug_dump)
958 dump_match_qualifiers (inst->operands, qualifiers);
959 #endif
960
961 /* Most opcodes have far fewer patterns in the list.
962 The first NIL qualifier indicates the end of the list. */
963 if (empty_qualifier_sequence_p (qualifiers))
964 {
965 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
966 if (i)
967 found = 0;
968 break;
969 }
970
971 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
972 {
973 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
974 {
975 /* Either the operand does not have a qualifier, or the qualifier
976 for the operand needs to be deduced from the qualifier
977 sequence.
978 In the latter case, any constraint checking related to
979 the obtained qualifier should be done later in
980 operand_general_constraint_met_p. */
981 continue;
982 }
983 else if (*qualifiers != inst->operands[j].qualifier)
984 {
985 /* Unless the target qualifier can also qualify the operand
986 (which has already had a non-nil qualifier), non-equal
987 qualifiers are generally un-matched. */
988 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
989 continue;
990 else
991 {
992 found = 0;
993 break;
994 }
995 }
996 else
997 continue; /* Equal qualifiers are certainly matched. */
998 }
999
1000 /* Qualifiers established. */
1001 if (found == 1)
1002 break;
1003 }
1004
1005 if (found == 1)
1006 {
1007 /* Fill the result in *RET. */
1008 int j;
1009 qualifiers = *qualifiers_list;
1010
1011 DEBUG_TRACE ("complete qualifiers using list %d", i);
1012 #ifdef DEBUG_AARCH64
1013 if (debug_dump)
1014 dump_qualifier_sequence (qualifiers);
1015 #endif
1016
1017 for (j = 0; j <= stop_at; ++j, ++qualifiers)
1018 ret[j] = *qualifiers;
1019 for (; j < AARCH64_MAX_OPND_NUM; ++j)
1020 ret[j] = AARCH64_OPND_QLF_NIL;
1021
1022 DEBUG_TRACE ("SUCCESS");
1023 return 1;
1024 }
1025
1026 DEBUG_TRACE ("FAIL");
1027 return 0;
1028 }
1029
1030 /* Operand qualifier matching and resolving.
1031
1032 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1033 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1034
1035 If UPDATE_P, update the qualifier(s) in *INST after the matching
1036 succeeds. */
1037
1038 static int
1039 match_operands_qualifier (aarch64_inst *inst, bool update_p)
1040 {
1041 int i, nops;
1042 aarch64_opnd_qualifier_seq_t qualifiers;
1043
1044 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1045 qualifiers))
1046 {
1047 DEBUG_TRACE ("matching FAIL");
1048 return 0;
1049 }
1050
1051 if (inst->opcode->flags & F_STRICT)
1052 {
1053 /* Require an exact qualifier match, even for NIL qualifiers. */
1054 nops = aarch64_num_of_operands (inst->opcode);
1055 for (i = 0; i < nops; ++i)
1056 if (inst->operands[i].qualifier != qualifiers[i])
1057 return false;
1058 }
1059
1060 /* Update the qualifiers. */
1061 if (update_p)
1062 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1063 {
1064 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1065 break;
1066 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1067 "update %s with %s for operand %d",
1068 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1069 aarch64_get_qualifier_name (qualifiers[i]), i);
1070 inst->operands[i].qualifier = qualifiers[i];
1071 }
1072
1073 DEBUG_TRACE ("matching SUCCESS");
1074 return 1;
1075 }
1076
1077 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1078 register by MOVZ.
1079
1080 IS32 indicates whether VALUE is a 32-bit immediate or not.
1081 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1082 amount will be returned in *SHIFT_AMOUNT. */
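/* For example, 0x12340000 is accepted and *SHIFT_AMOUNT is set to 16
   (MOVZ with LSL #16), whereas 0x12340001 is rejected because its non-zero
   bits do not fit in a single shifted 16-bit chunk. */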
1083
1084 bool
1085 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1086 {
1087 int amount;
1088
1089 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1090
1091 if (is32)
1092 {
1093 /* Allow all zeros or all ones in top 32-bits, so that
1094 32-bit constant expressions like ~0x80000000 are
1095 permitted. */
1096 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1097 /* Immediate out of range. */
1098 return false;
1099 value &= 0xffffffff;
1100 }
1101
1102 /* first, try movz then movn */
1103 amount = -1;
1104 if ((value & ((uint64_t) 0xffff << 0)) == value)
1105 amount = 0;
1106 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1107 amount = 16;
1108 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1109 amount = 32;
1110 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1111 amount = 48;
1112
1113 if (amount == -1)
1114 {
1115 DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1116 return false;
1117 }
1118
1119 if (shift_amount != NULL)
1120 *shift_amount = amount;
1121
1122 DEBUG_TRACE ("exit true with amount %d", amount);
1123
1124 return true;
1125 }
1126
1127 /* Build the accepted values for immediate logical SIMD instructions.
1128
1129 The standard encodings of the immediate value are:
1130 N imms immr SIMD size R S
1131 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1132 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1133 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1134 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1135 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1136 0 11110s 00000r 2 UInt(r) UInt(s)
1137 where the all-ones value of S is reserved.
1138
1139 Let's call E the SIMD size.
1140
1141 The immediate value is: S+1 bits '1' rotated to the right by R.
1142
1143 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1144 (remember S != E - 1). */
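/* For example, with E = 32, S = 3 and R = 1 the element is four consecutive
   ones rotated right by one, i.e. 0x80000007; replicated to 64 bits this
   gives 0x8000000780000007, whose standard encoding is N = 0,
   immr = 0b000001, imms = 0b000011. */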
1145
1146 #define TOTAL_IMM_NB 5334
1147
1148 typedef struct
1149 {
1150 uint64_t imm;
1151 aarch64_insn encoding;
1152 } simd_imm_encoding;
1153
1154 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1155
1156 static int
1157 simd_imm_encoding_cmp(const void *i1, const void *i2)
1158 {
1159 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1160 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1161
1162 if (imm1->imm < imm2->imm)
1163 return -1;
1164 if (imm1->imm > imm2->imm)
1165 return +1;
1166 return 0;
1167 }
1168
1169 /* immediate bitfield standard encoding
1170 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1171 1 ssssss rrrrrr 64 rrrrrr ssssss
1172 0 0sssss 0rrrrr 32 rrrrr sssss
1173 0 10ssss 00rrrr 16 rrrr ssss
1174 0 110sss 000rrr 8 rrr sss
1175 0 1110ss 0000rr 4 rr ss
1176 0 11110s 00000r 2 r s */
1177 static inline int
1178 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1179 {
1180 return (is64 << 12) | (r << 6) | s;
1181 }
1182
1183 static void
1184 build_immediate_table (void)
1185 {
1186 uint32_t log_e, e, s, r, s_mask;
1187 uint64_t mask, imm;
1188 int nb_imms;
1189 int is64;
1190
1191 nb_imms = 0;
1192 for (log_e = 1; log_e <= 6; log_e++)
1193 {
1194 /* Get element size. */
1195 e = 1u << log_e;
1196 if (log_e == 6)
1197 {
1198 is64 = 1;
1199 mask = 0xffffffffffffffffull;
1200 s_mask = 0;
1201 }
1202 else
1203 {
1204 is64 = 0;
1205 mask = (1ull << e) - 1;
1206 /* log_e s_mask
1207 1 ((1 << 4) - 1) << 2 = 111100
1208 2 ((1 << 3) - 1) << 3 = 111000
1209 3 ((1 << 2) - 1) << 4 = 110000
1210 4 ((1 << 1) - 1) << 5 = 100000
1211 5 ((1 << 0) - 1) << 6 = 000000 */
1212 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1213 }
1214 for (s = 0; s < e - 1; s++)
1215 for (r = 0; r < e; r++)
1216 {
1217 /* s+1 consecutive bits to 1 (s < 63) */
1218 imm = (1ull << (s + 1)) - 1;
1219 /* rotate right by r */
1220 if (r != 0)
1221 imm = (imm >> r) | ((imm << (e - r)) & mask);
1222 /* replicate the constant depending on SIMD size */
1223 switch (log_e)
1224 {
1225 case 1: imm = (imm << 2) | imm;
1226 /* Fall through. */
1227 case 2: imm = (imm << 4) | imm;
1228 /* Fall through. */
1229 case 3: imm = (imm << 8) | imm;
1230 /* Fall through. */
1231 case 4: imm = (imm << 16) | imm;
1232 /* Fall through. */
1233 case 5: imm = (imm << 32) | imm;
1234 /* Fall through. */
1235 case 6: break;
1236 default: abort ();
1237 }
1238 simd_immediates[nb_imms].imm = imm;
1239 simd_immediates[nb_imms].encoding =
1240 encode_immediate_bitfield(is64, s | s_mask, r);
1241 nb_imms++;
1242 }
1243 }
1244 assert (nb_imms == TOTAL_IMM_NB);
1245 qsort(simd_immediates, nb_imms,
1246 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1247 }
1248
1249 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1250 be accepted by logical (immediate) instructions
1251 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1252
1253 ESIZE is the number of bytes in the decoded immediate value.
1254 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1255 VALUE will be returned in *ENCODING. */
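/* For example, with ESIZE == 8, 0x00ff00ff00ff00ff is a valid bitmask
   immediate (eight consecutive ones replicated in each 16-bit element),
   whereas 0x0123456789abcdef is not. */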
1256
1257 bool
1258 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1259 {
1260 simd_imm_encoding imm_enc;
1261 const simd_imm_encoding *imm_encoding;
1262 static bool initialized = false;
1263 uint64_t upper;
1264 int i;
1265
1266 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1267 value, esize);
1268
1269 if (!initialized)
1270 {
1271 build_immediate_table ();
1272 initialized = true;
1273 }
1274
1275 /* Allow all zeros or all ones in top bits, so that
1276 constant expressions like ~1 are permitted. */
1277 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1278 if ((value & ~upper) != value && (value | upper) != value)
1279 return false;
1280
1281 /* Replicate to a full 64-bit value. */
1282 value &= ~upper;
1283 for (i = esize * 8; i < 64; i *= 2)
1284 value |= (value << i);
1285
1286 imm_enc.imm = value;
1287 imm_encoding = (const simd_imm_encoding *)
1288 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1289 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1290 if (imm_encoding == NULL)
1291 {
1292 DEBUG_TRACE ("exit with false");
1293 return false;
1294 }
1295 if (encoding != NULL)
1296 *encoding = imm_encoding->encoding;
1297 DEBUG_TRACE ("exit with true");
1298 return true;
1299 }
1300
1301 /* If 64-bit immediate IMM is in the format of
1302 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1303 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1304 of value "abcdefgh". Otherwise return -1. */
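/* For example, 0xff00ff0000ff00ff is shrunk to 0xa5 (binary 10100101),
   while 0xff00ff0000ff00fe yields -1. */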
1305 int
1306 aarch64_shrink_expanded_imm8 (uint64_t imm)
1307 {
1308 int i, ret;
1309 uint32_t byte;
1310
1311 ret = 0;
1312 for (i = 0; i < 8; i++)
1313 {
1314 byte = (imm >> (8 * i)) & 0xff;
1315 if (byte == 0xff)
1316 ret |= 1 << i;
1317 else if (byte != 0x00)
1318 return -1;
1319 }
1320 return ret;
1321 }
1322
1323 /* Utility inline functions for operand_general_constraint_met_p. */
1324
1325 static inline void
1326 set_error (aarch64_operand_error *mismatch_detail,
1327 enum aarch64_operand_error_kind kind, int idx,
1328 const char* error)
1329 {
1330 if (mismatch_detail == NULL)
1331 return;
1332 mismatch_detail->kind = kind;
1333 mismatch_detail->index = idx;
1334 mismatch_detail->error = error;
1335 }
1336
1337 static inline void
1338 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1339 const char* error)
1340 {
1341 if (mismatch_detail == NULL)
1342 return;
1343 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1344 }
1345
1346 static inline void
1347 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1348 int idx, int lower_bound, int upper_bound,
1349 const char* error)
1350 {
1351 if (mismatch_detail == NULL)
1352 return;
1353 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1354 mismatch_detail->data[0].i = lower_bound;
1355 mismatch_detail->data[1].i = upper_bound;
1356 }
1357
1358 static inline void
1359 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1360 int idx, int lower_bound, int upper_bound)
1361 {
1362 if (mismatch_detail == NULL)
1363 return;
1364 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1365 _("immediate value"));
1366 }
1367
1368 static inline void
1369 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1370 int idx, int lower_bound, int upper_bound)
1371 {
1372 if (mismatch_detail == NULL)
1373 return;
1374 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1375 _("immediate offset"));
1376 }
1377
1378 static inline void
1379 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1380 int idx, int lower_bound, int upper_bound)
1381 {
1382 if (mismatch_detail == NULL)
1383 return;
1384 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1385 _("register number"));
1386 }
1387
1388 static inline void
1389 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1390 int idx, int lower_bound, int upper_bound)
1391 {
1392 if (mismatch_detail == NULL)
1393 return;
1394 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1395 _("register element index"));
1396 }
1397
1398 static inline void
1399 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1400 int idx, int lower_bound, int upper_bound)
1401 {
1402 if (mismatch_detail == NULL)
1403 return;
1404 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1405 _("shift amount"));
1406 }
1407
1408 /* Report that the MUL modifier in operand IDX should be in the range
1409 [LOWER_BOUND, UPPER_BOUND]. */
1410 static inline void
1411 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1412 int idx, int lower_bound, int upper_bound)
1413 {
1414 if (mismatch_detail == NULL)
1415 return;
1416 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1417 _("multiplier"));
1418 }
1419
1420 static inline void
1421 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1422 int alignment)
1423 {
1424 if (mismatch_detail == NULL)
1425 return;
1426 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1427 mismatch_detail->data[0].i = alignment;
1428 }
1429
1430 static inline void
1431 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1432 int expected_num)
1433 {
1434 if (mismatch_detail == NULL)
1435 return;
1436 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1437 mismatch_detail->data[0].i = expected_num;
1438 }
1439
1440 static inline void
1441 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1442 const char* error)
1443 {
1444 if (mismatch_detail == NULL)
1445 return;
1446 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1447 }
1448
1449 /* General constraint checking based on operand code.
1450
1451 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1452 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1453
1454 This function has to be called after the qualifiers for all operands
1455 have been resolved.
1456
1457 The mismatch error message is returned in *MISMATCH_DETAIL upon request,
1458 i.e. when MISMATCH_DETAIL is non-NULL. This avoids generating error
1459 messages during disassembly, where they are not wanted. We avoid the
1460 dynamic construction of strings of error messages
1461 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1462 use a combination of error code, static string and some integer data to
1463 represent an error. */
1464
1465 static int
1466 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1467 enum aarch64_opnd type,
1468 const aarch64_opcode *opcode,
1469 aarch64_operand_error *mismatch_detail)
1470 {
1471 unsigned num, modifiers, shift;
1472 unsigned char size;
1473 int64_t imm, min_value, max_value;
1474 uint64_t uvalue, mask;
1475 const aarch64_opnd_info *opnd = opnds + idx;
1476 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1477 int i;
1478
1479 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1480
1481 switch (aarch64_operands[type].op_class)
1482 {
1483 case AARCH64_OPND_CLASS_INT_REG:
1484 /* Check pair reg constraints for cas* instructions. */
1485 if (type == AARCH64_OPND_PAIRREG)
1486 {
1487 assert (idx == 1 || idx == 3);
1488 if (opnds[idx - 1].reg.regno % 2 != 0)
1489 {
1490 set_syntax_error (mismatch_detail, idx - 1,
1491 _("reg pair must start from even reg"));
1492 return 0;
1493 }
1494 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1495 {
1496 set_syntax_error (mismatch_detail, idx,
1497 _("reg pair must be contiguous"));
1498 return 0;
1499 }
1500 break;
1501 }
1502
1503 /* <Xt> may be optional in some IC and TLBI instructions. */
1504 if (type == AARCH64_OPND_Rt_SYS)
1505 {
1506 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1507 == AARCH64_OPND_CLASS_SYSTEM));
1508 if (opnds[1].present
1509 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1510 {
1511 set_other_error (mismatch_detail, idx, _("extraneous register"));
1512 return 0;
1513 }
1514 if (!opnds[1].present
1515 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1516 {
1517 set_other_error (mismatch_detail, idx, _("missing register"));
1518 return 0;
1519 }
1520 }
1521 switch (qualifier)
1522 {
1523 case AARCH64_OPND_QLF_WSP:
1524 case AARCH64_OPND_QLF_SP:
1525 if (!aarch64_stack_pointer_p (opnd))
1526 {
1527 set_other_error (mismatch_detail, idx,
1528 _("stack pointer register expected"));
1529 return 0;
1530 }
1531 break;
1532 default:
1533 break;
1534 }
1535 break;
1536
1537 case AARCH64_OPND_CLASS_SVE_REG:
1538 switch (type)
1539 {
1540 case AARCH64_OPND_SVE_Zm3_INDEX:
1541 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1542 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1543 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1544 case AARCH64_OPND_SVE_Zm4_INDEX:
1545 size = get_operand_fields_width (get_operand_from_code (type));
1546 shift = get_operand_specific_data (&aarch64_operands[type]);
1547 mask = (1 << shift) - 1;
1548 if (opnd->reg.regno > mask)
1549 {
1550 assert (mask == 7 || mask == 15);
1551 set_other_error (mismatch_detail, idx,
1552 mask == 15
1553 ? _("z0-z15 expected")
1554 : _("z0-z7 expected"));
1555 return 0;
1556 }
1557 mask = (1u << (size - shift)) - 1;
1558 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1559 {
1560 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1561 return 0;
1562 }
1563 break;
1564
1565 case AARCH64_OPND_SVE_Zn_INDEX:
1566 size = aarch64_get_qualifier_esize (opnd->qualifier);
1567 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1568 {
1569 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1570 0, 64 / size - 1);
1571 return 0;
1572 }
1573 break;
1574
1575 case AARCH64_OPND_SVE_ZnxN:
1576 case AARCH64_OPND_SVE_ZtxN:
1577 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1578 {
1579 set_other_error (mismatch_detail, idx,
1580 _("invalid register list"));
1581 return 0;
1582 }
1583 break;
1584
1585 default:
1586 break;
1587 }
1588 break;
1589
1590 case AARCH64_OPND_CLASS_PRED_REG:
1591 if (opnd->reg.regno >= 8
1592 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1593 {
1594 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1595 return 0;
1596 }
1597 break;
1598
1599 case AARCH64_OPND_CLASS_COND:
1600 if (type == AARCH64_OPND_COND1
1601 && (opnds[idx].cond->value & 0xe) == 0xe)
1602 {
1603 /* Don't allow AL or NV. */
1604 set_syntax_error (mismatch_detail, idx, NULL);
1605 }
1606 break;
1607
1608 case AARCH64_OPND_CLASS_ADDRESS:
1609 /* Check writeback. */
1610 switch (opcode->iclass)
1611 {
1612 case ldst_pos:
1613 case ldst_unscaled:
1614 case ldstnapair_offs:
1615 case ldstpair_off:
1616 case ldst_unpriv:
1617 if (opnd->addr.writeback == 1)
1618 {
1619 set_syntax_error (mismatch_detail, idx,
1620 _("unexpected address writeback"));
1621 return 0;
1622 }
1623 break;
1624 case ldst_imm10:
1625 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1626 {
1627 set_syntax_error (mismatch_detail, idx,
1628 _("unexpected address writeback"));
1629 return 0;
1630 }
1631 break;
1632 case ldst_imm9:
1633 case ldstpair_indexed:
1634 case asisdlsep:
1635 case asisdlsop:
1636 if (opnd->addr.writeback == 0)
1637 {
1638 set_syntax_error (mismatch_detail, idx,
1639 _("address writeback expected"));
1640 return 0;
1641 }
1642 break;
1643 default:
1644 assert (opnd->addr.writeback == 0);
1645 break;
1646 }
1647 switch (type)
1648 {
1649 case AARCH64_OPND_ADDR_SIMM7:
1650 /* Scaled signed 7-bit immediate offset. */
1651 /* Get the size of the data element that is accessed, which may be
1652 different from that of the source register size,
1653 e.g. in strb/ldrb. */
1654 size = aarch64_get_qualifier_esize (opnd->qualifier);
1655 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1656 {
1657 set_offset_out_of_range_error (mismatch_detail, idx,
1658 -64 * size, 63 * size);
1659 return 0;
1660 }
1661 if (!value_aligned_p (opnd->addr.offset.imm, size))
1662 {
1663 set_unaligned_error (mismatch_detail, idx, size);
1664 return 0;
1665 }
1666 break;
1667 case AARCH64_OPND_ADDR_OFFSET:
1668 case AARCH64_OPND_ADDR_SIMM9:
1669 /* Unscaled signed 9-bit immediate offset. */
1670 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1671 {
1672 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1673 return 0;
1674 }
1675 break;
1676
1677 case AARCH64_OPND_ADDR_SIMM9_2:
1678 /* Unscaled signed 9-bit immediate offset, which has to be negative
1679 or unaligned. */
1680 size = aarch64_get_qualifier_esize (qualifier);
1681 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1682 && !value_aligned_p (opnd->addr.offset.imm, size))
1683 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1684 return 1;
1685 set_other_error (mismatch_detail, idx,
1686 _("negative or unaligned offset expected"));
1687 return 0;
1688
1689 case AARCH64_OPND_ADDR_SIMM10:
1690 /* Scaled signed 10-bit immediate offset. */
1691 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1692 {
1693 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1694 return 0;
1695 }
1696 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1697 {
1698 set_unaligned_error (mismatch_detail, idx, 8);
1699 return 0;
1700 }
1701 break;
1702
1703 case AARCH64_OPND_ADDR_SIMM11:
1704 /* Signed 11-bit immediate offset (multiple of 16). */
1705 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1706 {
1707 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1708 return 0;
1709 }
1710
1711 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1712 {
1713 set_unaligned_error (mismatch_detail, idx, 16);
1714 return 0;
1715 }
1716 break;
1717
1718 case AARCH64_OPND_ADDR_SIMM13:
1719 /* Signed 13-bit immediate offset (multiple of 16). */
1720 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1721 {
1722 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1723 return 0;
1724 }
1725
1726 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1727 {
1728 set_unaligned_error (mismatch_detail, idx, 16);
1729 return 0;
1730 }
1731 break;
1732
1733 case AARCH64_OPND_SIMD_ADDR_POST:
1734 /* AdvSIMD load/store multiple structures, post-index. */
1735 assert (idx == 1);
1736 if (opnd->addr.offset.is_reg)
1737 {
1738 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1739 return 1;
1740 else
1741 {
1742 set_other_error (mismatch_detail, idx,
1743 _("invalid register offset"));
1744 return 0;
1745 }
1746 }
1747 else
1748 {
1749 const aarch64_opnd_info *prev = &opnds[idx-1];
1750 unsigned num_bytes; /* total number of bytes transferred. */
1751 /* The opcode dependent area stores the number of elements in
1752 each structure to be loaded/stored. */
1753 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1754 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1755 /* Special handling of loading a single structure to all lanes. */
1756 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1757 * aarch64_get_qualifier_esize (prev->qualifier);
1758 else
1759 num_bytes = prev->reglist.num_regs
1760 * aarch64_get_qualifier_esize (prev->qualifier)
1761 * aarch64_get_qualifier_nelem (prev->qualifier);
1762 if ((int) num_bytes != opnd->addr.offset.imm)
1763 {
1764 set_other_error (mismatch_detail, idx,
1765 _("invalid post-increment amount"));
1766 return 0;
1767 }
1768 }
1769 break;
1770
1771 case AARCH64_OPND_ADDR_REGOFF:
1772 /* Get the size of the data element that is accessed, which may be
1773 different from that of the source register size,
1774 e.g. in strb/ldrb. */
1775 size = aarch64_get_qualifier_esize (opnd->qualifier);
1776 /* It is either no shift or shift by the binary logarithm of SIZE. */
1777 if (opnd->shifter.amount != 0
1778 && opnd->shifter.amount != (int)get_logsz (size))
1779 {
1780 set_other_error (mismatch_detail, idx,
1781 _("invalid shift amount"));
1782 return 0;
1783 }
1784 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1785 operators. */
1786 switch (opnd->shifter.kind)
1787 {
1788 case AARCH64_MOD_UXTW:
1789 case AARCH64_MOD_LSL:
1790 case AARCH64_MOD_SXTW:
1791 case AARCH64_MOD_SXTX: break;
1792 default:
1793 set_other_error (mismatch_detail, idx,
1794 _("invalid extend/shift operator"));
1795 return 0;
1796 }
1797 break;
1798
1799 case AARCH64_OPND_ADDR_UIMM12:
1800 imm = opnd->addr.offset.imm;
1801 /* Get the size of the data element that is accessed, which may be
1802 different from that of the source register size,
1803 e.g. in strb/ldrb. */
1804 size = aarch64_get_qualifier_esize (qualifier);
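          /* For example, with a 64-bit transfer (SIZE == 8) the accepted
             offsets are 0 to 32760 (4095 * 8) in steps of 8.  */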
1805 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1806 {
1807 set_offset_out_of_range_error (mismatch_detail, idx,
1808 0, 4095 * size);
1809 return 0;
1810 }
1811 if (!value_aligned_p (opnd->addr.offset.imm, size))
1812 {
1813 set_unaligned_error (mismatch_detail, idx, size);
1814 return 0;
1815 }
1816 break;
1817
1818 case AARCH64_OPND_ADDR_PCREL14:
1819 case AARCH64_OPND_ADDR_PCREL19:
1820 case AARCH64_OPND_ADDR_PCREL21:
1821 case AARCH64_OPND_ADDR_PCREL26:
1822 imm = opnd->imm.value;
1823 if (operand_need_shift_by_two (get_operand_from_code (type)))
1824 {
1825             /* The offset value in a PC-relative branch instruction is always
1826                4-byte aligned and is encoded without the lowest 2 bits.  */
1827 if (!value_aligned_p (imm, 4))
1828 {
1829 set_unaligned_error (mismatch_detail, idx, 4);
1830 return 0;
1831 }
1832 /* Right shift by 2 so that we can carry out the following check
1833 canonically. */
1834 imm >>= 2;
1835 }
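          /* For example, AARCH64_OPND_ADDR_PCREL19 (LDR literal, conditional
             branches) has a 19-bit field, so after the shift above the offset
             must fit in 19 signed bits, i.e. a byte range of roughly
             +/- 1 MiB.  */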
1836 size = get_operand_fields_width (get_operand_from_code (type));
1837 if (!value_fit_signed_field_p (imm, size))
1838 {
1839 set_other_error (mismatch_detail, idx,
1840 _("immediate out of range"));
1841 return 0;
1842 }
1843 break;
1844
1845 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
1846 if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
1847 {
1848 set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
1849 return 0;
1850 }
1851 break;
1852
1853 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1854 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1855 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1856 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1857 min_value = -8;
1858 max_value = 7;
1859 sve_imm_offset_vl:
1860 assert (!opnd->addr.offset.is_reg);
1861 assert (opnd->addr.preind);
1862 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1863 min_value *= num;
1864 max_value *= num;
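          /* For example, for AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, NUM would be 2
             (assuming the operand-specific data holds the multiplier minus
             one, as the computation above suggests), giving an accepted
             offset range of [-16, 14] in steps of 2, written as
             "#imm, mul vl".  */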
1865 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1866 || (opnd->shifter.operator_present
1867 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1868 {
1869 set_other_error (mismatch_detail, idx,
1870 _("invalid addressing mode"));
1871 return 0;
1872 }
1873 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1874 {
1875 set_offset_out_of_range_error (mismatch_detail, idx,
1876 min_value, max_value);
1877 return 0;
1878 }
1879 if (!value_aligned_p (opnd->addr.offset.imm, num))
1880 {
1881 set_unaligned_error (mismatch_detail, idx, num);
1882 return 0;
1883 }
1884 break;
1885
1886 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1887 min_value = -32;
1888 max_value = 31;
1889 goto sve_imm_offset_vl;
1890
1891 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1892 min_value = -256;
1893 max_value = 255;
1894 goto sve_imm_offset_vl;
1895
1896 case AARCH64_OPND_SVE_ADDR_RI_U6:
1897 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1898 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1899 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1900 min_value = 0;
1901 max_value = 63;
1902 sve_imm_offset:
1903 assert (!opnd->addr.offset.is_reg);
1904 assert (opnd->addr.preind);
1905 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1906 min_value *= num;
1907 max_value *= num;
1908 if (opnd->shifter.operator_present
1909 || opnd->shifter.amount_present)
1910 {
1911 set_other_error (mismatch_detail, idx,
1912 _("invalid addressing mode"));
1913 return 0;
1914 }
1915 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1916 {
1917 set_offset_out_of_range_error (mismatch_detail, idx,
1918 min_value, max_value);
1919 return 0;
1920 }
1921 if (!value_aligned_p (opnd->addr.offset.imm, num))
1922 {
1923 set_unaligned_error (mismatch_detail, idx, num);
1924 return 0;
1925 }
1926 break;
1927
1928 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1929 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
1930 min_value = -8;
1931 max_value = 7;
1932 goto sve_imm_offset;
1933
1934 case AARCH64_OPND_SVE_ADDR_ZX:
1935 /* Everything is already ensured by parse_operands or
1936 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
1937 argument type). */
1938 assert (opnd->addr.offset.is_reg);
1939 assert (opnd->addr.preind);
1940 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
1941 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
1942 assert (opnd->shifter.operator_present == 0);
1943 break;
1944
1945 case AARCH64_OPND_SVE_ADDR_R:
1946 case AARCH64_OPND_SVE_ADDR_RR:
1947 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1948 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1949 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1950 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
1951 case AARCH64_OPND_SVE_ADDR_RX:
1952 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1953 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1954 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1955 case AARCH64_OPND_SVE_ADDR_RZ:
1956 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1957 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1958 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1959 modifiers = 1 << AARCH64_MOD_LSL;
1960 sve_rr_operand:
1961 assert (opnd->addr.offset.is_reg);
1962 assert (opnd->addr.preind);
1963 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1964 && opnd->addr.offset.regno == 31)
1965 {
1966 set_other_error (mismatch_detail, idx,
1967 _("index register xzr is not allowed"));
1968 return 0;
1969 }
1970 if (((1 << opnd->shifter.kind) & modifiers) == 0
1971 || (opnd->shifter.amount
1972 != get_operand_specific_data (&aarch64_operands[type])))
1973 {
1974 set_other_error (mismatch_detail, idx,
1975 _("invalid addressing mode"));
1976 return 0;
1977 }
1978 break;
1979
1980 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1981 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1982 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1983 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1984 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1985 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1986 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1987 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1988 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1989 goto sve_rr_operand;
1990
1991 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1992 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1993 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1994 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1995 min_value = 0;
1996 max_value = 31;
1997 goto sve_imm_offset;
1998
1999 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
2000 modifiers = 1 << AARCH64_MOD_LSL;
2001 sve_zz_operand:
2002 assert (opnd->addr.offset.is_reg);
2003 assert (opnd->addr.preind);
2004 if (((1 << opnd->shifter.kind) & modifiers) == 0
2005 || opnd->shifter.amount < 0
2006 || opnd->shifter.amount > 3)
2007 {
2008 set_other_error (mismatch_detail, idx,
2009 _("invalid addressing mode"));
2010 return 0;
2011 }
2012 break;
2013
2014 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
2015 modifiers = (1 << AARCH64_MOD_SXTW);
2016 goto sve_zz_operand;
2017
2018 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
2019 modifiers = 1 << AARCH64_MOD_UXTW;
2020 goto sve_zz_operand;
2021
2022 default:
2023 break;
2024 }
2025 break;
2026
2027 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2028 if (type == AARCH64_OPND_LEt)
2029 {
2030 /* Get the upper bound for the element index. */
2031 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2032 if (!value_in_range_p (opnd->reglist.index, 0, num))
2033 {
2034 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2035 return 0;
2036 }
2037 }
2038 /* The opcode dependent area stores the number of elements in
2039 each structure to be loaded/stored. */
2040 num = get_opcode_dependent_value (opcode);
2041 switch (type)
2042 {
2043 case AARCH64_OPND_LVt:
2044 assert (num >= 1 && num <= 4);
2045           /* Unless the instruction is LD1/ST1, the number of registers should
2046              be equal to that of the structure elements.  */
2047 if (num != 1 && opnd->reglist.num_regs != num)
2048 {
2049 set_reg_list_error (mismatch_detail, idx, num);
2050 return 0;
2051 }
2052 break;
2053 case AARCH64_OPND_LVt_AL:
2054 case AARCH64_OPND_LEt:
2055 assert (num >= 1 && num <= 4);
2056 /* The number of registers should be equal to that of the structure
2057 elements. */
2058 if (opnd->reglist.num_regs != num)
2059 {
2060 set_reg_list_error (mismatch_detail, idx, num);
2061 return 0;
2062 }
2063 break;
2064 default:
2065 break;
2066 }
2067 break;
2068
2069 case AARCH64_OPND_CLASS_IMMEDIATE:
2070 /* Constraint check on immediate operand. */
2071 imm = opnd->imm.value;
2072 /* E.g. imm_0_31 constrains value to be 0..31. */
2073 if (qualifier_value_in_range_constraint_p (qualifier)
2074 && !value_in_range_p (imm, get_lower_bound (qualifier),
2075 get_upper_bound (qualifier)))
2076 {
2077 set_imm_out_of_range_error (mismatch_detail, idx,
2078 get_lower_bound (qualifier),
2079 get_upper_bound (qualifier));
2080 return 0;
2081 }
2082
2083 switch (type)
2084 {
2085 case AARCH64_OPND_AIMM:
2086 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2087 {
2088 set_other_error (mismatch_detail, idx,
2089 _("invalid shift operator"));
2090 return 0;
2091 }
2092 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2093 {
2094 set_other_error (mismatch_detail, idx,
2095 _("shift amount must be 0 or 12"));
2096 return 0;
2097 }
2098 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2099 {
2100 set_other_error (mismatch_detail, idx,
2101 _("immediate out of range"));
2102 return 0;
2103 }
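          /* Thus, for example, "add x0, x1, #0xabc, lsl #12" passes the three
             checks above, while a shift amount of #4 or a value wider than
             12 bits is rejected.  */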
2104 break;
2105
2106 case AARCH64_OPND_HALF:
2107 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2108 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2109 {
2110 set_other_error (mismatch_detail, idx,
2111 _("invalid shift operator"));
2112 return 0;
2113 }
2114 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2115 if (!value_aligned_p (opnd->shifter.amount, 16))
2116 {
2117 set_other_error (mismatch_detail, idx,
2118 _("shift amount must be a multiple of 16"));
2119 return 0;
2120 }
2121 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2122 {
2123 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2124 0, size * 8 - 16);
2125 return 0;
2126 }
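          /* For example, with an X-form destination the accepted shift
             amounts are 0, 16, 32 and 48, while a W-form destination only
             allows 0 and 16.  */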
2127 if (opnd->imm.value < 0)
2128 {
2129 set_other_error (mismatch_detail, idx,
2130 _("negative immediate value not allowed"));
2131 return 0;
2132 }
2133 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2134 {
2135 set_other_error (mismatch_detail, idx,
2136 _("immediate out of range"));
2137 return 0;
2138 }
2139 break;
2140
2141 case AARCH64_OPND_IMM_MOV:
2142 {
2143 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2144 imm = opnd->imm.value;
2145 assert (idx == 1);
2146 switch (opcode->op)
2147 {
2148 case OP_MOV_IMM_WIDEN:
2149 imm = ~imm;
2150 /* Fall through. */
2151 case OP_MOV_IMM_WIDE:
2152 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2153 {
2154 set_other_error (mismatch_detail, idx,
2155 _("immediate out of range"));
2156 return 0;
2157 }
2158 break;
2159 case OP_MOV_IMM_LOG:
2160 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2161 {
2162 set_other_error (mismatch_detail, idx,
2163 _("immediate out of range"));
2164 return 0;
2165 }
2166 break;
2167 default:
2168 assert (0);
2169 return 0;
2170 }
2171 }
2172 break;
2173
2174 case AARCH64_OPND_NZCV:
2175 case AARCH64_OPND_CCMP_IMM:
2176 case AARCH64_OPND_EXCEPTION:
2177 case AARCH64_OPND_UNDEFINED:
2178 case AARCH64_OPND_TME_UIMM16:
2179 case AARCH64_OPND_UIMM4:
2180 case AARCH64_OPND_UIMM4_ADDG:
2181 case AARCH64_OPND_UIMM7:
2182 case AARCH64_OPND_UIMM3_OP1:
2183 case AARCH64_OPND_UIMM3_OP2:
2184 case AARCH64_OPND_SVE_UIMM3:
2185 case AARCH64_OPND_SVE_UIMM7:
2186 case AARCH64_OPND_SVE_UIMM8:
2187 case AARCH64_OPND_SVE_UIMM8_53:
2188 size = get_operand_fields_width (get_operand_from_code (type));
2189 assert (size < 32);
2190 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2191 {
2192 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2193 (1u << size) - 1);
2194 return 0;
2195 }
2196 break;
2197
2198 case AARCH64_OPND_UIMM10:
2199       /* Scaled unsigned 10-bit immediate offset.  */
2200 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2201 {
2202 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2203 return 0;
2204 }
2205
2206 if (!value_aligned_p (opnd->imm.value, 16))
2207 {
2208 set_unaligned_error (mismatch_detail, idx, 16);
2209 return 0;
2210 }
2211 break;
2212
2213 case AARCH64_OPND_SIMM5:
2214 case AARCH64_OPND_SVE_SIMM5:
2215 case AARCH64_OPND_SVE_SIMM5B:
2216 case AARCH64_OPND_SVE_SIMM6:
2217 case AARCH64_OPND_SVE_SIMM8:
2218 size = get_operand_fields_width (get_operand_from_code (type));
2219 assert (size < 32);
2220 if (!value_fit_signed_field_p (opnd->imm.value, size))
2221 {
2222 set_imm_out_of_range_error (mismatch_detail, idx,
2223 -(1 << (size - 1)),
2224 (1 << (size - 1)) - 1);
2225 return 0;
2226 }
2227 break;
2228
2229 case AARCH64_OPND_WIDTH:
2230 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2231 && opnds[0].type == AARCH64_OPND_Rd);
2232 size = get_upper_bound (qualifier);
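          /* For example, with a 32-bit destination "ubfx w0, w1, #4, #28" is
             accepted because lsb 4 + width 28 does not exceed the bound of
             32, whereas a width of 29 would be rejected below.  */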
2233 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2234 /* lsb+width <= reg.size */
2235 {
2236 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2237 size - opnds[idx-1].imm.value);
2238 return 0;
2239 }
2240 break;
2241
2242 case AARCH64_OPND_LIMM:
2243 case AARCH64_OPND_SVE_LIMM:
2244 {
2245 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2246 uint64_t uimm = opnd->imm.value;
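            /* For example, #0x00ff00ff00ff00ff is a valid logical immediate
               (0x00ff replicated in 16-bit elements); for BIC the value is
               complemented first, so it is the inverted immediate that must
               satisfy the test below.  */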
2247 if (opcode->op == OP_BIC)
2248 uimm = ~uimm;
2249 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2250 {
2251 set_other_error (mismatch_detail, idx,
2252 _("immediate out of range"));
2253 return 0;
2254 }
2255 }
2256 break;
2257
2258 case AARCH64_OPND_IMM0:
2259 case AARCH64_OPND_FPIMM0:
2260 if (opnd->imm.value != 0)
2261 {
2262 set_other_error (mismatch_detail, idx,
2263 _("immediate zero expected"));
2264 return 0;
2265 }
2266 break;
2267
2268 case AARCH64_OPND_IMM_ROT1:
2269 case AARCH64_OPND_IMM_ROT2:
2270 case AARCH64_OPND_SVE_IMM_ROT2:
2271 if (opnd->imm.value != 0
2272 && opnd->imm.value != 90
2273 && opnd->imm.value != 180
2274 && opnd->imm.value != 270)
2275 {
2276 set_other_error (mismatch_detail, idx,
2277 _("rotate expected to be 0, 90, 180 or 270"));
2278 return 0;
2279 }
2280 break;
2281
2282 case AARCH64_OPND_IMM_ROT3:
2283 case AARCH64_OPND_SVE_IMM_ROT1:
2284 case AARCH64_OPND_SVE_IMM_ROT3:
2285 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2286 {
2287 set_other_error (mismatch_detail, idx,
2288 _("rotate expected to be 90 or 270"));
2289 return 0;
2290 }
2291 break;
2292
2293 case AARCH64_OPND_SHLL_IMM:
2294 assert (idx == 2);
2295 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2296 if (opnd->imm.value != size)
2297 {
2298 set_other_error (mismatch_detail, idx,
2299 _("invalid shift amount"));
2300 return 0;
2301 }
2302 break;
2303
2304 case AARCH64_OPND_IMM_VLSL:
2305 size = aarch64_get_qualifier_esize (qualifier);
2306 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2307 {
2308 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2309 size * 8 - 1);
2310 return 0;
2311 }
2312 break;
2313
2314 case AARCH64_OPND_IMM_VLSR:
2315 size = aarch64_get_qualifier_esize (qualifier);
2316 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2317 {
2318 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2319 return 0;
2320 }
2321 break;
2322
2323 case AARCH64_OPND_SIMD_IMM:
2324 case AARCH64_OPND_SIMD_IMM_SFT:
2325 /* Qualifier check. */
2326 switch (qualifier)
2327 {
2328 case AARCH64_OPND_QLF_LSL:
2329 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2330 {
2331 set_other_error (mismatch_detail, idx,
2332 _("invalid shift operator"));
2333 return 0;
2334 }
2335 break;
2336 case AARCH64_OPND_QLF_MSL:
2337 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2338 {
2339 set_other_error (mismatch_detail, idx,
2340 _("invalid shift operator"));
2341 return 0;
2342 }
2343 break;
2344 case AARCH64_OPND_QLF_NIL:
2345 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2346 {
2347 set_other_error (mismatch_detail, idx,
2348 _("shift is not permitted"));
2349 return 0;
2350 }
2351 break;
2352 default:
2353 assert (0);
2354 return 0;
2355 }
2356 /* Is the immediate valid? */
2357 assert (idx == 1);
2358 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2359 {
2360 /* uimm8 or simm8 */
2361 if (!value_in_range_p (opnd->imm.value, -128, 255))
2362 {
2363 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2364 return 0;
2365 }
2366 }
2367 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2368 {
2369 /* uimm64 is not
2370 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2371 ffffffffgggggggghhhhhhhh'. */
2372 set_other_error (mismatch_detail, idx,
2373 _("invalid value for immediate"));
2374 return 0;
2375 }
2376 /* Is the shift amount valid? */
2377 switch (opnd->shifter.kind)
2378 {
2379 case AARCH64_MOD_LSL:
2380 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2381 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2382 {
2383 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2384 (size - 1) * 8);
2385 return 0;
2386 }
2387 if (!value_aligned_p (opnd->shifter.amount, 8))
2388 {
2389 set_unaligned_error (mismatch_detail, idx, 8);
2390 return 0;
2391 }
2392 break;
2393 case AARCH64_MOD_MSL:
2394             /* Only 8 and 16 are valid shift amounts.  */
2395 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2396 {
2397 set_other_error (mismatch_detail, idx,
2398                                _("shift amount must be 8 or 16"));
2399 return 0;
2400 }
2401 break;
2402 default:
2403 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2404 {
2405 set_other_error (mismatch_detail, idx,
2406 _("invalid shift operator"));
2407 return 0;
2408 }
2409 break;
2410 }
2411 break;
2412
2413 case AARCH64_OPND_FPIMM:
2414 case AARCH64_OPND_SIMD_FPIMM:
2415 case AARCH64_OPND_SVE_FPIMM8:
2416 if (opnd->imm.is_fp == 0)
2417 {
2418 set_other_error (mismatch_detail, idx,
2419 _("floating-point immediate expected"));
2420 return 0;
2421 }
2422 /* The value is expected to be an 8-bit floating-point constant with
2423 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2424 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2425 instruction). */
2426 if (!value_in_range_p (opnd->imm.value, 0, 255))
2427 {
2428 set_other_error (mismatch_detail, idx,
2429 _("immediate out of range"));
2430 return 0;
2431 }
2432 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2433 {
2434 set_other_error (mismatch_detail, idx,
2435 _("invalid shift operator"));
2436 return 0;
2437 }
2438 break;
2439
2440 case AARCH64_OPND_SVE_AIMM:
2441 min_value = 0;
2442 sve_aimm:
2443 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2444 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2445 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2446 uvalue = opnd->imm.value;
2447 shift = opnd->shifter.amount;
2448 if (size == 1)
2449 {
2450 if (shift != 0)
2451 {
2452 set_other_error (mismatch_detail, idx,
2453 _("no shift amount allowed for"
2454 " 8-bit constants"));
2455 return 0;
2456 }
2457 }
2458 else
2459 {
2460 if (shift != 0 && shift != 8)
2461 {
2462 set_other_error (mismatch_detail, idx,
2463 _("shift amount must be 0 or 8"));
2464 return 0;
2465 }
2466 if (shift == 0 && (uvalue & 0xff) == 0)
2467 {
2468 shift = 8;
2469 uvalue = (int64_t) uvalue / 256;
2470 }
2471 }
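          /* For example, for a .h operand (SIZE == 2) an unshifted value of
             0x1200 is folded by the code above into 0x12 with an implied
             LSL #8, which then passes the range checks below.  */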
2472 mask >>= shift;
2473 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2474 {
2475 set_other_error (mismatch_detail, idx,
2476 _("immediate too big for element size"));
2477 return 0;
2478 }
2479 uvalue = (uvalue - min_value) & mask;
2480 if (uvalue > 0xff)
2481 {
2482 set_other_error (mismatch_detail, idx,
2483 _("invalid arithmetic immediate"));
2484 return 0;
2485 }
2486 break;
2487
2488 case AARCH64_OPND_SVE_ASIMM:
2489 min_value = -128;
2490 goto sve_aimm;
2491
2492 case AARCH64_OPND_SVE_I1_HALF_ONE:
2493 assert (opnd->imm.is_fp);
2494 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2495 {
2496 set_other_error (mismatch_detail, idx,
2497 _("floating-point value must be 0.5 or 1.0"));
2498 return 0;
2499 }
2500 break;
2501
2502 case AARCH64_OPND_SVE_I1_HALF_TWO:
2503 assert (opnd->imm.is_fp);
2504 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2505 {
2506 set_other_error (mismatch_detail, idx,
2507 _("floating-point value must be 0.5 or 2.0"));
2508 return 0;
2509 }
2510 break;
2511
2512 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2513 assert (opnd->imm.is_fp);
2514 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2515 {
2516 set_other_error (mismatch_detail, idx,
2517 _("floating-point value must be 0.0 or 1.0"));
2518 return 0;
2519 }
2520 break;
2521
2522 case AARCH64_OPND_SVE_INV_LIMM:
2523 {
2524 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2525 uint64_t uimm = ~opnd->imm.value;
2526 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2527 {
2528 set_other_error (mismatch_detail, idx,
2529 _("immediate out of range"));
2530 return 0;
2531 }
2532 }
2533 break;
2534
2535 case AARCH64_OPND_SVE_LIMM_MOV:
2536 {
2537 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2538 uint64_t uimm = opnd->imm.value;
2539 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2540 {
2541 set_other_error (mismatch_detail, idx,
2542 _("immediate out of range"));
2543 return 0;
2544 }
2545 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2546 {
2547 set_other_error (mismatch_detail, idx,
2548 _("invalid replicated MOV immediate"));
2549 return 0;
2550 }
2551 }
2552 break;
2553
2554 case AARCH64_OPND_SVE_PATTERN_SCALED:
2555 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2556 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2557 {
2558 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2559 return 0;
2560 }
2561 break;
2562
2563 case AARCH64_OPND_SVE_SHLIMM_PRED:
2564 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2565 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2566 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2567 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2568 {
2569 set_imm_out_of_range_error (mismatch_detail, idx,
2570 0, 8 * size - 1);
2571 return 0;
2572 }
2573 break;
2574
2575 case AARCH64_OPND_SVE_SHRIMM_PRED:
2576 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2577 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2578 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2579 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2580 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2581 {
2582 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2583 return 0;
2584 }
2585 break;
2586
2587 default:
2588 break;
2589 }
2590 break;
2591
2592 case AARCH64_OPND_CLASS_SYSTEM:
2593 switch (type)
2594 {
2595 case AARCH64_OPND_PSTATEFIELD:
2596 for (i = 0; aarch64_pstatefields[i].name; ++i)
2597 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2598 break;
2599 assert (aarch64_pstatefields[i].name);
2600 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2601 max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
2602 if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
2603 {
2604 set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
2605 return 0;
2606 }
2607 break;
2608 default:
2609 break;
2610 }
2611 break;
2612
2613 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2614 /* Get the upper bound for the element index. */
2615 if (opcode->op == OP_FCMLA_ELEM)
2616 /* FCMLA index range depends on the vector size of other operands
2617          and is halved because complex numbers take two elements.  */
2618 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2619 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2620 else
2621 num = 16;
2622 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2623 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2624
2625 /* Index out-of-range. */
2626 if (!value_in_range_p (opnd->reglane.index, 0, num))
2627 {
2628 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2629 return 0;
2630 }
2631 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2632 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2633 number is encoded in "size:M:Rm":
2634 size <Vm>
2635 00 RESERVED
2636 01 0:Rm
2637 10 M:Rm
2638 11 RESERVED */
2639 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2640 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2641 {
2642 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2643 return 0;
2644 }
2645 break;
2646
2647 case AARCH64_OPND_CLASS_MODIFIED_REG:
2648 assert (idx == 1 || idx == 2);
2649 switch (type)
2650 {
2651 case AARCH64_OPND_Rm_EXT:
2652 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2653 && opnd->shifter.kind != AARCH64_MOD_LSL)
2654 {
2655 set_other_error (mismatch_detail, idx,
2656 _("extend operator expected"));
2657 return 0;
2658 }
2659 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2660 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2661 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2662 case. */
2663 if (!aarch64_stack_pointer_p (opnds + 0)
2664 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2665 {
2666 if (!opnd->shifter.operator_present)
2667 {
2668 set_other_error (mismatch_detail, idx,
2669 _("missing extend operator"));
2670 return 0;
2671 }
2672 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2673 {
2674 set_other_error (mismatch_detail, idx,
2675 _("'LSL' operator not allowed"));
2676 return 0;
2677 }
2678 }
2679 assert (opnd->shifter.operator_present /* Default to LSL. */
2680 || opnd->shifter.kind == AARCH64_MOD_LSL);
2681 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2682 {
2683 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2684 return 0;
2685 }
2686 /* In the 64-bit form, the final register operand is written as Wm
2687 for all but the (possibly omitted) UXTX/LSL and SXTX
2688 operators.
2689 N.B. GAS allows X register to be used with any operator as a
2690 programming convenience. */
2691 if (qualifier == AARCH64_OPND_QLF_X
2692 && opnd->shifter.kind != AARCH64_MOD_LSL
2693 && opnd->shifter.kind != AARCH64_MOD_UXTX
2694 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2695 {
2696 set_other_error (mismatch_detail, idx, _("W register expected"));
2697 return 0;
2698 }
2699 break;
2700
2701 case AARCH64_OPND_Rm_SFT:
2702 /* ROR is not available to the shifted register operand in
2703 arithmetic instructions. */
2704 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2705 {
2706 set_other_error (mismatch_detail, idx,
2707 _("shift operator expected"));
2708 return 0;
2709 }
2710 if (opnd->shifter.kind == AARCH64_MOD_ROR
2711 && opcode->iclass != log_shift)
2712 {
2713 set_other_error (mismatch_detail, idx,
2714 _("'ROR' operator not allowed"));
2715 return 0;
2716 }
2717 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2718 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2719 {
2720 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2721 return 0;
2722 }
2723 break;
2724
2725 default:
2726 break;
2727 }
2728 break;
2729
2730 default:
2731 break;
2732 }
2733
2734 return 1;
2735 }
2736
2737  /* Main entry point for operand constraint checking.
2738
2739     Return 1 if the operands of *INST meet the constraints applied by the operand
2740     codes and operand qualifiers; otherwise return 0 and, if MISMATCH_DETAIL is
2741     not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
2742     adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2743     with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts a non-NIL
2744     error kind when it is notified that an instruction does not pass the check).
2745
2746 Un-determined operand qualifiers may get established during the process. */
2747
2748 int
2749 aarch64_match_operands_constraint (aarch64_inst *inst,
2750 aarch64_operand_error *mismatch_detail)
2751 {
2752 int i;
2753
2754 DEBUG_TRACE ("enter");
2755
2756 i = inst->opcode->tied_operand;
2757
2758 if (i > 0)
2759 {
2760 /* Check for tied_operands with specific opcode iclass. */
2761 switch (inst->opcode->iclass)
2762 {
2763 /* For SME LDR and STR instructions #imm must have the same numerical
2764 value for both operands.
2765 */
2766 case sme_ldr:
2767 case sme_str:
2768 assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array);
2769 assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
2770 if (inst->operands[0].za_tile_vector.index.imm
2771 != inst->operands[1].addr.offset.imm)
2772 {
2773 if (mismatch_detail)
2774 {
2775 mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
2776 mismatch_detail->index = i;
2777 }
2778 return 0;
2779 }
2780 break;
2781
2782 default:
2783 /* Check for cases where a source register needs to be the same as the
2784 destination register. Do this before matching qualifiers since if
2785 an instruction has both invalid tying and invalid qualifiers,
2786 the error about qualifiers would suggest several alternative
2787 instructions that also have invalid tying. */
2788 if (inst->operands[0].reg.regno
2789 != inst->operands[i].reg.regno)
2790 {
2791 if (mismatch_detail)
2792 {
2793 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2794 mismatch_detail->index = i;
2795 mismatch_detail->error = NULL;
2796 }
2797 return 0;
2798 }
2799 break;
2800 }
2801 }
2802
2803    /* Match operands' qualifiers.
2804       *INST has already had qualifiers established for some, if not all, of
2805       its operands; we need to find out whether these established
2806       qualifiers match one of the qualifier sequences in
2807       INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
2808       the corresponding qualifier in such a sequence.
2809       Only basic operand constraint checking is done here; the more thorough
2810       constraint checking will be carried out by operand_general_constraint_met_p,
2811       which has to be called after this in order to get all of the operands'
2812       qualifiers established.  */
2813 if (match_operands_qualifier (inst, true /* update_p */) == 0)
2814 {
2815 DEBUG_TRACE ("FAIL on operand qualifier matching");
2816 if (mismatch_detail)
2817 {
2818          /* Return an error type to indicate that it is a qualifier
2819             matching failure; we don't care about which operand as there
2820             is enough information in the opcode table to reproduce it.  */
2821 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2822 mismatch_detail->index = -1;
2823 mismatch_detail->error = NULL;
2824 }
2825 return 0;
2826 }
2827
2828 /* Match operands' constraint. */
2829 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2830 {
2831 enum aarch64_opnd type = inst->opcode->operands[i];
2832 if (type == AARCH64_OPND_NIL)
2833 break;
2834 if (inst->operands[i].skip)
2835 {
2836 DEBUG_TRACE ("skip the incomplete operand %d", i);
2837 continue;
2838 }
2839 if (operand_general_constraint_met_p (inst->operands, i, type,
2840 inst->opcode, mismatch_detail) == 0)
2841 {
2842 DEBUG_TRACE ("FAIL on operand %d", i);
2843 return 0;
2844 }
2845 }
2846
2847 DEBUG_TRACE ("PASS");
2848
2849 return 1;
2850 }
2851
2852 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2853 Also updates the TYPE of each INST->OPERANDS with the corresponding
2854 value of OPCODE->OPERANDS.
2855
2856     Note that some operand qualifiers may need to be manually cleared by
2857     the caller before it further calls aarch64_opcode_encode; doing
2858     this helps the qualifier matching facilities work
2859     properly.  */
2860
2861 const aarch64_opcode*
2862 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2863 {
2864 int i;
2865 const aarch64_opcode *old = inst->opcode;
2866
2867 inst->opcode = opcode;
2868
2869 /* Update the operand types. */
2870 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2871 {
2872 inst->operands[i].type = opcode->operands[i];
2873 if (opcode->operands[i] == AARCH64_OPND_NIL)
2874 break;
2875 }
2876
2877 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2878
2879 return old;
2880 }
2881
2882 int
2883 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2884 {
2885 int i;
2886 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2887 if (operands[i] == operand)
2888 return i;
2889 else if (operands[i] == AARCH64_OPND_NIL)
2890 break;
2891 return -1;
2892 }
2893 \f
2894 /* R0...R30, followed by FOR31. */
2895 #define BANK(R, FOR31) \
2896 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
2897 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2898 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2899 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
2900 /* [0][0] 32-bit integer regs with sp Wn
2901 [0][1] 64-bit integer regs with sp Xn sf=1
2902 [1][0] 32-bit integer regs with #0 Wn
2903 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2904 static const char *int_reg[2][2][32] = {
2905 #define R32(X) "w" #X
2906 #define R64(X) "x" #X
2907 { BANK (R32, "wsp"), BANK (R64, "sp") },
2908 { BANK (R32, "wzr"), BANK (R64, "xzr") }
2909 #undef R64
2910 #undef R32
2911 };
2912
2913 /* Names of the SVE vector registers, first with .S suffixes,
2914 then with .D suffixes. */
2915
2916 static const char *sve_reg[2][32] = {
2917 #define ZS(X) "z" #X ".s"
2918 #define ZD(X) "z" #X ".d"
2919 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2920 #undef ZD
2921 #undef ZS
2922 };
2923 #undef BANK
2924
2925 /* Return the integer register name.
2926     If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg.  */
2927
2928 static inline const char *
2929 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2930 {
2931 const int has_zr = sp_reg_p ? 0 : 1;
2932 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2933 return int_reg[has_zr][is_64][regno];
2934 }
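/* For example, get_int_reg_name (31, AARCH64_OPND_QLF_X, 1) returns "sp",
   while passing 0 for SP_REG_P returns "xzr"; the corresponding W-qualified
   names are "wsp" and "wzr".  */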
2935
2936 /* Like get_int_reg_name, but IS_64 is always 1. */
2937
2938 static inline const char *
2939 get_64bit_int_reg_name (int regno, int sp_reg_p)
2940 {
2941 const int has_zr = sp_reg_p ? 0 : 1;
2942 return int_reg[has_zr][1][regno];
2943 }
2944
2945 /* Get the name of the integer offset register in OPND, using the shift type
2946 to decide whether it's a word or doubleword. */
2947
2948 static inline const char *
2949 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2950 {
2951 switch (opnd->shifter.kind)
2952 {
2953 case AARCH64_MOD_UXTW:
2954 case AARCH64_MOD_SXTW:
2955 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2956
2957 case AARCH64_MOD_LSL:
2958 case AARCH64_MOD_SXTX:
2959 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2960
2961 default:
2962 abort ();
2963 }
2964 }
2965
2966 /* Get the name of the SVE vector offset register in OPND, using the operand
2967 qualifier to decide whether the suffix should be .S or .D. */
2968
2969 static inline const char *
2970 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2971 {
2972 assert (qualifier == AARCH64_OPND_QLF_S_S
2973 || qualifier == AARCH64_OPND_QLF_S_D);
2974 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2975 }
2976
2977 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2978
2979 typedef union
2980 {
2981 uint64_t i;
2982 double d;
2983 } double_conv_t;
2984
2985 typedef union
2986 {
2987 uint32_t i;
2988 float f;
2989 } single_conv_t;
2990
2991 typedef union
2992 {
2993 uint32_t i;
2994 float f;
2995 } half_conv_t;
2996
2997 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2998 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2999 (depending on the type of the instruction). IMM8 will be expanded to a
3000 single-precision floating-point value (SIZE == 4) or a double-precision
3001 floating-point value (SIZE == 8). A half-precision floating-point value
3002 (SIZE == 2) is expanded to a single-precision floating-point value. The
3003 expanded value is returned. */
3004
3005 static uint64_t
3006 expand_fp_imm (int size, uint32_t imm8)
3007 {
3008 uint64_t imm = 0;
3009 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
3010
3011 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
3012 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
3013 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
3014 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
3015 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
3016 if (size == 8)
3017 {
3018 imm = (imm8_7 << (63-32)) /* imm8<7> */
3019          | ((imm8_6 ^ 1) << (62-32))   /* NOT(imm8<6>) */
3020 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
3021 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
3022 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
3023 imm <<= 32;
3024 }
3025 else if (size == 4 || size == 2)
3026 {
3027 imm = (imm8_7 << 31) /* imm8<7> */
3028 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
3029 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
3030 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
3031 }
3032 else
3033 {
3034 /* An unsupported size. */
3035 assert (0);
3036 }
3037
3038 return imm;
3039 }
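/* For example, expand_fp_imm (4, 0x70) returns 0x3f800000, the
   single-precision encoding of 1.0: imm8<7> is 0, NOT(imm8<6>) is 0,
   Replicate(imm8<6>,4) is 0b1111 and the fraction bits are all zero.  */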
3040
3041 /* Return a string based on FMT with the register style applied. */
3042
3043 static const char *
3044 style_reg (struct aarch64_styler *styler, const char *fmt, ...)
3045 {
3046 const char *txt;
3047 va_list ap;
3048
3049 va_start (ap, fmt);
3050 txt = styler->apply_style (styler, dis_style_register, fmt, ap);
3051 va_end (ap);
3052
3053 return txt;
3054 }
3055
3056 /* Return a string based on FMT with the immediate style applied. */
3057
3058 static const char *
3059 style_imm (struct aarch64_styler *styler, const char *fmt, ...)
3060 {
3061 const char *txt;
3062 va_list ap;
3063
3064 va_start (ap, fmt);
3065 txt = styler->apply_style (styler, dis_style_immediate, fmt, ap);
3066 va_end (ap);
3067
3068 return txt;
3069 }
3070
3071 /* Return a string based on FMT with the sub-mnemonic style applied. */
3072
3073 static const char *
3074 style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...)
3075 {
3076 const char *txt;
3077 va_list ap;
3078
3079 va_start (ap, fmt);
3080 txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap);
3081 va_end (ap);
3082
3083 return txt;
3084 }
3085
3086 /* Return a string based on FMT with the address style applied. */
3087
3088 static const char *
3089 style_addr (struct aarch64_styler *styler, const char *fmt, ...)
3090 {
3091 const char *txt;
3092 va_list ap;
3093
3094 va_start (ap, fmt);
3095 txt = styler->apply_style (styler, dis_style_address, fmt, ap);
3096 va_end (ap);
3097
3098 return txt;
3099 }
3100
3101 /* Produce the string representation of the register list operand *OPND
3102     in the buffer pointed to by BUF of size SIZE.  PREFIX is the part of
3103 the register name that comes before the register number, such as "v". */
3104 static void
3105 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
3106 const char *prefix, struct aarch64_styler *styler)
3107 {
3108 const int num_regs = opnd->reglist.num_regs;
3109 const int first_reg = opnd->reglist.first_regno;
3110 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
3111 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
3112 char tb[16]; /* Temporary buffer. */
3113
3114 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
3115 assert (num_regs >= 1 && num_regs <= 4);
3116
3117 /* Prepare the index if any. */
3118 if (opnd->reglist.has_index)
3119 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3120 snprintf (tb, sizeof (tb), "[%s]",
3121 style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100)));
3122 else
3123 tb[0] = '\0';
3124
3125 /* The hyphenated form is preferred for disassembly if there are
3126 more than two registers in the list, and the register numbers
3127 are monotonically increasing in increments of one. */
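  /* For example, a four-register list starting at v28 is printed in the
     hyphenated form {v28.4s-v31.4s}, whereas a wrapping two-register list
     starting at v31 is printed in the expanded form {v31.8b, v0.8b}.  */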
3128 if (num_regs > 2 && last_reg > first_reg)
3129 snprintf (buf, size, "{%s-%s}%s",
3130 style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name),
3131 style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb);
3132 else
3133 {
3134 const int reg0 = first_reg;
3135 const int reg1 = (first_reg + 1) & 0x1f;
3136 const int reg2 = (first_reg + 2) & 0x1f;
3137 const int reg3 = (first_reg + 3) & 0x1f;
3138
3139 switch (num_regs)
3140 {
3141 case 1:
3142 snprintf (buf, size, "{%s}%s",
3143 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3144 tb);
3145 break;
3146 case 2:
3147 snprintf (buf, size, "{%s, %s}%s",
3148 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3149 style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
3150 tb);
3151 break;
3152 case 3:
3153 snprintf (buf, size, "{%s, %s, %s}%s",
3154 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3155 style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
3156 style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
3157 tb);
3158 break;
3159 case 4:
3160 snprintf (buf, size, "{%s, %s, %s, %s}%s",
3161 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3162 style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
3163 style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
3164 style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name),
3165 tb);
3166 break;
3167 }
3168 }
3169 }
3170
3171 /* Print the register+immediate address in OPND to BUF, which has SIZE
3172 characters. BASE is the name of the base register. */
3173
3174 static void
3175 print_immediate_offset_address (char *buf, size_t size,
3176 const aarch64_opnd_info *opnd,
3177 const char *base,
3178 struct aarch64_styler *styler)
3179 {
3180 if (opnd->addr.writeback)
3181 {
3182 if (opnd->addr.preind)
3183 {
3184 if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3185 snprintf (buf, size, "[%s]!", style_reg (styler, base));
3186 else
3187 snprintf (buf, size, "[%s, %s]!",
3188 style_reg (styler, base),
3189 style_imm (styler, "#%d", opnd->addr.offset.imm));
3190 }
3191 else
3192 snprintf (buf, size, "[%s], %s",
3193 style_reg (styler, base),
3194 style_imm (styler, "#%d", opnd->addr.offset.imm));
3195 }
3196 else
3197 {
3198 if (opnd->shifter.operator_present)
3199 {
3200 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3201 snprintf (buf, size, "[%s, %s, %s]",
3202 style_reg (styler, base),
3203 style_imm (styler, "#%d", opnd->addr.offset.imm),
3204 style_sub_mnem (styler, "mul vl"));
3205 }
3206 else if (opnd->addr.offset.imm)
3207 snprintf (buf, size, "[%s, %s]",
3208 style_reg (styler, base),
3209 style_imm (styler, "#%d", opnd->addr.offset.imm));
3210 else
3211 snprintf (buf, size, "[%s]", style_reg (styler, base));
3212 }
3213 }
3214
3215 /* Produce the string representation of the register offset address operand
3216     *OPND in the buffer pointed to by BUF of size SIZE.  BASE and OFFSET are
3217 the names of the base and offset registers. */
3218 static void
3219 print_register_offset_address (char *buf, size_t size,
3220 const aarch64_opnd_info *opnd,
3221 const char *base, const char *offset,
3222 struct aarch64_styler *styler)
3223 {
3224 char tb[32]; /* Temporary buffer. */
3225 bool print_extend_p = true;
3226 bool print_amount_p = true;
3227 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3228
3229 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3230 || !opnd->shifter.amount_present))
3231 {
3232       /* Don't print the shift/extend amount when the amount is zero and
3233          it is not the special case of an 8-bit load/store instruction.  */
3234 print_amount_p = false;
3235 /* Likewise, no need to print the shift operator LSL in such a
3236 situation. */
3237 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3238 print_extend_p = false;
3239 }
3240
3241 /* Prepare for the extend/shift. */
3242 if (print_extend_p)
3243 {
3244 if (print_amount_p)
3245 snprintf (tb, sizeof (tb), ", %s %s",
3246 style_sub_mnem (styler, shift_name),
3247 style_imm (styler, "#%" PRIi64,
3248 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3249 (opnd->shifter.amount % 100)));
3250 else
3251 snprintf (tb, sizeof (tb), ", %s",
3252 style_sub_mnem (styler, shift_name));
3253 }
3254 else
3255 tb[0] = '\0';
3256
3257 snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base),
3258 style_reg (styler, offset), tb);
3259 }
3260
3261 /* Print ZA tiles from imm8 in ZERO instruction.
3262
3263 The preferred disassembly of this instruction uses the shortest list of tile
3264 names that represent the encoded immediate mask.
3265
3266 For example:
3267 * An all-ones immediate is disassembled as {ZA}.
3268 * An all-zeros immediate is disassembled as an empty list { }.
3269 */
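/* A further example: an immediate of 0x55 covers exactly the za0.h entry and
   is printed as {za0.h}, while 0x33 is covered by za0.s and za1.s and is
   printed as {za0.s, za1.s}.  */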
3270 static void
3271 print_sme_za_list (char *buf, size_t size, int mask,
3272 struct aarch64_styler *styler)
3273 {
3274 const char* zan[] = { "za", "za0.h", "za1.h", "za0.s",
3275 "za1.s", "za2.s", "za3.s", "za0.d",
3276 "za1.d", "za2.d", "za3.d", "za4.d",
3277 "za5.d", "za6.d", "za7.d", " " };
3278 const int zan_v[] = { 0xff, 0x55, 0xaa, 0x11,
3279 0x22, 0x44, 0x88, 0x01,
3280 0x02, 0x04, 0x08, 0x10,
3281 0x20, 0x40, 0x80, 0x00 };
3282 int i, k;
3283 const int ZAN_SIZE = sizeof(zan) / sizeof(zan[0]);
3284
3285 k = snprintf (buf, size, "{");
3286 for (i = 0; i < ZAN_SIZE; i++)
3287 {
3288 if ((mask & zan_v[i]) == zan_v[i])
3289 {
3290 mask &= ~zan_v[i];
3291 if (k > 1)
3292 k += snprintf (buf + k, size - k, ", ");
3293
3294 k += snprintf (buf + k, size - k, "%s", style_reg (styler, zan[i]));
3295 }
3296 if (mask == 0)
3297 break;
3298 }
3299 snprintf (buf + k, size - k, "}");
3300 }
3301
3302 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3303 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3304 PC, PCREL_P and ADDRESS are used to pass in and return information about
3305 the PC-relative address calculation, where the PC value is passed in
3306     PC.  If the operand is PC-relative, *PCREL_P (if PCREL_P is non-NULL)
3307 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3308 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3309
3310 The function serves both the disassembler and the assembler diagnostics
3311 issuer, which is the reason why it lives in this file. */
3312
3313 void
3314 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3315 const aarch64_opcode *opcode,
3316 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3317 bfd_vma *address, char** notes,
3318 char *comment, size_t comment_size,
3319 aarch64_feature_set features,
3320 struct aarch64_styler *styler)
3321 {
3322 unsigned int i, num_conds;
3323 const char *name = NULL;
3324 const aarch64_opnd_info *opnd = opnds + idx;
3325 enum aarch64_modifier_kind kind;
3326 uint64_t addr, enum_value;
3327
3328 if (comment != NULL)
3329 {
3330 assert (comment_size > 0);
3331 comment[0] = '\0';
3332 }
3333 else
3334 assert (comment_size == 0);
3335
3336 buf[0] = '\0';
3337 if (pcrel_p)
3338 *pcrel_p = 0;
3339
3340 switch (opnd->type)
3341 {
3342 case AARCH64_OPND_Rd:
3343 case AARCH64_OPND_Rn:
3344 case AARCH64_OPND_Rm:
3345 case AARCH64_OPND_Rt:
3346 case AARCH64_OPND_Rt2:
3347 case AARCH64_OPND_Rs:
3348 case AARCH64_OPND_Ra:
3349 case AARCH64_OPND_Rt_LS64:
3350 case AARCH64_OPND_Rt_SYS:
3351 case AARCH64_OPND_PAIRREG:
3352 case AARCH64_OPND_SVE_Rm:
3353 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3354 the <ic_op>, therefore we use opnd->present to override the
3355 generic optional-ness information. */
3356 if (opnd->type == AARCH64_OPND_Rt_SYS)
3357 {
3358 if (!opnd->present)
3359 break;
3360 }
3361 /* Omit the operand, e.g. RET. */
3362 else if (optional_operand_p (opcode, idx)
3363 && (opnd->reg.regno
3364 == get_optional_operand_default_value (opcode)))
3365 break;
3366 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3367 || opnd->qualifier == AARCH64_OPND_QLF_X);
3368 snprintf (buf, size, "%s",
3369 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3370 opnd->qualifier, 0)));
3371 break;
3372
3373 case AARCH64_OPND_Rd_SP:
3374 case AARCH64_OPND_Rn_SP:
3375 case AARCH64_OPND_Rt_SP:
3376 case AARCH64_OPND_SVE_Rn_SP:
3377 case AARCH64_OPND_Rm_SP:
3378 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3379 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3380 || opnd->qualifier == AARCH64_OPND_QLF_X
3381 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3382 snprintf (buf, size, "%s",
3383 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3384 opnd->qualifier, 1)));
3385 break;
3386
3387 case AARCH64_OPND_Rm_EXT:
3388 kind = opnd->shifter.kind;
3389 assert (idx == 1 || idx == 2);
3390 if ((aarch64_stack_pointer_p (opnds)
3391 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3392 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3393 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3394 && kind == AARCH64_MOD_UXTW)
3395 || (opnd->qualifier == AARCH64_OPND_QLF_X
3396 && kind == AARCH64_MOD_UXTX)))
3397 {
3398 /* 'LSL' is the preferred form in this case. */
3399 kind = AARCH64_MOD_LSL;
3400 if (opnd->shifter.amount == 0)
3401 {
3402 /* Shifter omitted. */
3403 snprintf (buf, size, "%s",
3404 style_reg (styler,
3405 get_int_reg_name (opnd->reg.regno,
3406 opnd->qualifier, 0)));
3407 break;
3408 }
3409 }
3410 if (opnd->shifter.amount)
3411 snprintf (buf, size, "%s, %s %s",
3412 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3413 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name),
3414 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3415 else
3416 snprintf (buf, size, "%s, %s",
3417 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3418 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name));
3419 break;
3420
3421 case AARCH64_OPND_Rm_SFT:
3422 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3423 || opnd->qualifier == AARCH64_OPND_QLF_X);
3424 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3425 snprintf (buf, size, "%s",
3426 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3427 opnd->qualifier, 0)));
3428 else
3429 snprintf (buf, size, "%s, %s %s",
3430 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3431 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
3432 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3433 break;
3434
3435 case AARCH64_OPND_Fd:
3436 case AARCH64_OPND_Fn:
3437 case AARCH64_OPND_Fm:
3438 case AARCH64_OPND_Fa:
3439 case AARCH64_OPND_Ft:
3440 case AARCH64_OPND_Ft2:
3441 case AARCH64_OPND_Sd:
3442 case AARCH64_OPND_Sn:
3443 case AARCH64_OPND_Sm:
3444 case AARCH64_OPND_SVE_VZn:
3445 case AARCH64_OPND_SVE_Vd:
3446 case AARCH64_OPND_SVE_Vm:
3447 case AARCH64_OPND_SVE_Vn:
3448 snprintf (buf, size, "%s",
3449 style_reg (styler, "%s%d",
3450 aarch64_get_qualifier_name (opnd->qualifier),
3451 opnd->reg.regno));
3452 break;
3453
3454 case AARCH64_OPND_Va:
3455 case AARCH64_OPND_Vd:
3456 case AARCH64_OPND_Vn:
3457 case AARCH64_OPND_Vm:
3458 snprintf (buf, size, "%s",
3459 style_reg (styler, "v%d.%s", opnd->reg.regno,
3460 aarch64_get_qualifier_name (opnd->qualifier)));
3461 break;
3462
3463 case AARCH64_OPND_Ed:
3464 case AARCH64_OPND_En:
3465 case AARCH64_OPND_Em:
3466 case AARCH64_OPND_Em16:
3467 case AARCH64_OPND_SM3_IMM2:
3468 snprintf (buf, size, "%s[%s]",
3469 style_reg (styler, "v%d.%s", opnd->reglane.regno,
3470 aarch64_get_qualifier_name (opnd->qualifier)),
3471 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3472 break;
3473
3474 case AARCH64_OPND_VdD1:
3475 case AARCH64_OPND_VnD1:
3476 snprintf (buf, size, "%s[%s]",
3477 style_reg (styler, "v%d.d", opnd->reg.regno),
3478 style_imm (styler, "1"));
3479 break;
3480
3481 case AARCH64_OPND_LVn:
3482 case AARCH64_OPND_LVt:
3483 case AARCH64_OPND_LVt_AL:
3484 case AARCH64_OPND_LEt:
3485 print_register_list (buf, size, opnd, "v", styler);
3486 break;
3487
3488 case AARCH64_OPND_SVE_Pd:
3489 case AARCH64_OPND_SVE_Pg3:
3490 case AARCH64_OPND_SVE_Pg4_5:
3491 case AARCH64_OPND_SVE_Pg4_10:
3492 case AARCH64_OPND_SVE_Pg4_16:
3493 case AARCH64_OPND_SVE_Pm:
3494 case AARCH64_OPND_SVE_Pn:
3495 case AARCH64_OPND_SVE_Pt:
3496 case AARCH64_OPND_SME_Pm:
3497 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3498 snprintf (buf, size, "%s",
3499 style_reg (styler, "p%d", opnd->reg.regno));
3500 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3501 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3502 snprintf (buf, size, "%s",
3503 style_reg (styler, "p%d/%s", opnd->reg.regno,
3504 aarch64_get_qualifier_name (opnd->qualifier)));
3505 else
3506 snprintf (buf, size, "%s",
3507 style_reg (styler, "p%d.%s", opnd->reg.regno,
3508 aarch64_get_qualifier_name (opnd->qualifier)));
3509 break;
3510
3511 case AARCH64_OPND_SVE_Za_5:
3512 case AARCH64_OPND_SVE_Za_16:
3513 case AARCH64_OPND_SVE_Zd:
3514 case AARCH64_OPND_SVE_Zm_5:
3515 case AARCH64_OPND_SVE_Zm_16:
3516 case AARCH64_OPND_SVE_Zn:
3517 case AARCH64_OPND_SVE_Zt:
3518 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3519 snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
3520 else
3521 snprintf (buf, size, "%s",
3522 style_reg (styler, "z%d.%s", opnd->reg.regno,
3523 aarch64_get_qualifier_name (opnd->qualifier)));
3524 break;
3525
3526 case AARCH64_OPND_SVE_ZnxN:
3527 case AARCH64_OPND_SVE_ZtxN:
3528 print_register_list (buf, size, opnd, "z", styler);
3529 break;
3530
3531 case AARCH64_OPND_SVE_Zm3_INDEX:
3532 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3533 case AARCH64_OPND_SVE_Zm3_11_INDEX:
3534 case AARCH64_OPND_SVE_Zm4_11_INDEX:
3535 case AARCH64_OPND_SVE_Zm4_INDEX:
3536 case AARCH64_OPND_SVE_Zn_INDEX:
3537 snprintf (buf, size, "%s[%s]",
3538 style_reg (styler, "z%d.%s", opnd->reglane.regno,
3539 aarch64_get_qualifier_name (opnd->qualifier)),
3540 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3541 break;
3542
3543 case AARCH64_OPND_SME_ZAda_2b:
3544 case AARCH64_OPND_SME_ZAda_3b:
3545 snprintf (buf, size, "%s",
3546 style_reg (styler, "za%d.%s", opnd->reg.regno,
3547 aarch64_get_qualifier_name (opnd->qualifier)));
3548 break;
3549
3550 case AARCH64_OPND_SME_ZA_HV_idx_src:
3551 case AARCH64_OPND_SME_ZA_HV_idx_dest:
3552 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
3553 snprintf (buf, size, "%s%s[%s, %s]%s",
3554 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
3555 style_reg (styler, "za%d%c.%s",
3556 opnd->za_tile_vector.regno,
3557 opnd->za_tile_vector.v == 1 ? 'v' : 'h',
3558 aarch64_get_qualifier_name (opnd->qualifier)),
3559 style_reg (styler, "w%d", opnd->za_tile_vector.index.regno),
3560 style_imm (styler, "%d", opnd->za_tile_vector.index.imm),
3561 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
3562 break;
3563
3564 case AARCH64_OPND_SME_list_of_64bit_tiles:
3565 print_sme_za_list (buf, size, opnd->reg.regno, styler);
3566 break;
3567
3568 case AARCH64_OPND_SME_ZA_array:
3569 snprintf (buf, size, "%s[%s, %s]",
3570 style_reg (styler, "za"),
3571 style_reg (styler, "w%d", opnd->za_tile_vector.index.regno),
3572 style_imm (styler, "%d", opnd->za_tile_vector.index.imm));
3573 break;
3574
3575 case AARCH64_OPND_SME_SM_ZA:
3576 snprintf (buf, size, "%s",
3577 style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za"));
3578 break;
3579
3580 case AARCH64_OPND_SME_PnT_Wm_imm:
3581 snprintf (buf, size, "%s[%s, %s]",
3582 style_reg (styler, "p%d.%s", opnd->za_tile_vector.regno,
3583 aarch64_get_qualifier_name (opnd->qualifier)),
3584 style_reg (styler, "w%d", opnd->za_tile_vector.index.regno),
3585 style_imm (styler, "%d", opnd->za_tile_vector.index.imm));
3586 break;
3587
3588 case AARCH64_OPND_CRn:
3589 case AARCH64_OPND_CRm:
3590 snprintf (buf, size, "%s",
3591 style_reg (styler, "C%" PRIi64, opnd->imm.value));
3592 break;
3593
3594 case AARCH64_OPND_IDX:
3595 case AARCH64_OPND_MASK:
3596 case AARCH64_OPND_IMM:
3597 case AARCH64_OPND_IMM_2:
3598 case AARCH64_OPND_WIDTH:
3599 case AARCH64_OPND_UIMM3_OP1:
3600 case AARCH64_OPND_UIMM3_OP2:
3601 case AARCH64_OPND_BIT_NUM:
3602 case AARCH64_OPND_IMM_VLSL:
3603 case AARCH64_OPND_IMM_VLSR:
3604 case AARCH64_OPND_SHLL_IMM:
3605 case AARCH64_OPND_IMM0:
3606 case AARCH64_OPND_IMMR:
3607 case AARCH64_OPND_IMMS:
3608 case AARCH64_OPND_UNDEFINED:
3609 case AARCH64_OPND_FBITS:
3610 case AARCH64_OPND_TME_UIMM16:
3611 case AARCH64_OPND_SIMM5:
3612 case AARCH64_OPND_SVE_SHLIMM_PRED:
3613 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3614 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3615 case AARCH64_OPND_SVE_SHRIMM_PRED:
3616 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3617 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3618 case AARCH64_OPND_SVE_SIMM5:
3619 case AARCH64_OPND_SVE_SIMM5B:
3620 case AARCH64_OPND_SVE_SIMM6:
3621 case AARCH64_OPND_SVE_SIMM8:
3622 case AARCH64_OPND_SVE_UIMM3:
3623 case AARCH64_OPND_SVE_UIMM7:
3624 case AARCH64_OPND_SVE_UIMM8:
3625 case AARCH64_OPND_SVE_UIMM8_53:
3626 case AARCH64_OPND_IMM_ROT1:
3627 case AARCH64_OPND_IMM_ROT2:
3628 case AARCH64_OPND_IMM_ROT3:
3629 case AARCH64_OPND_SVE_IMM_ROT1:
3630 case AARCH64_OPND_SVE_IMM_ROT2:
3631 case AARCH64_OPND_SVE_IMM_ROT3:
3632 snprintf (buf, size, "%s",
3633 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3634 break;
3635
3636 case AARCH64_OPND_SVE_I1_HALF_ONE:
3637 case AARCH64_OPND_SVE_I1_HALF_TWO:
3638 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3639 {
3640 single_conv_t c;
3641 c.i = opnd->imm.value;
3642 snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f));
3643 break;
3644 }
3645
3646 case AARCH64_OPND_SVE_PATTERN:
3647 if (optional_operand_p (opcode, idx)
3648 && opnd->imm.value == get_optional_operand_default_value (opcode))
3649 break;
3650 enum_value = opnd->imm.value;
3651 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3652 if (aarch64_sve_pattern_array[enum_value])
3653 snprintf (buf, size, "%s",
3654 style_reg (styler, aarch64_sve_pattern_array[enum_value]));
3655 else
3656 snprintf (buf, size, "%s",
3657 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3658 break;
3659
3660 case AARCH64_OPND_SVE_PATTERN_SCALED:
3661 if (optional_operand_p (opcode, idx)
3662 && !opnd->shifter.operator_present
3663 && opnd->imm.value == get_optional_operand_default_value (opcode))
3664 break;
3665 enum_value = opnd->imm.value;
3666 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3667 if (aarch64_sve_pattern_array[opnd->imm.value])
3668 snprintf (buf, size, "%s",
3669 style_reg (styler,
3670 aarch64_sve_pattern_array[opnd->imm.value]));
3671 else
3672 snprintf (buf, size, "%s",
3673 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3674 if (opnd->shifter.operator_present)
3675 {
3676 size_t len = strlen (buf);
3677 const char *shift_name
3678 = aarch64_operand_modifiers[opnd->shifter.kind].name;
3679 snprintf (buf + len, size - len, ", %s %s",
3680 style_sub_mnem (styler, shift_name),
3681 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3682 }
3683 break;
3684
3685 case AARCH64_OPND_SVE_PRFOP:
3686 enum_value = opnd->imm.value;
3687 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3688 if (aarch64_sve_prfop_array[enum_value])
3689 snprintf (buf, size, "%s",
3690 style_reg (styler, aarch64_sve_prfop_array[enum_value]));
3691 else
3692 snprintf (buf, size, "%s",
3693 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3694 break;
3695
3696 case AARCH64_OPND_IMM_MOV:
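      /* Print the value in hex and record the decimal form in the comment
         buffer for callers that display it alongside the instruction.  */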
3697 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3698 {
3699 case 4: /* e.g. MOV Wd, #<imm32>. */
3700 {
3701 int imm32 = opnd->imm.value;
3702 snprintf (buf, size, "%s",
3703 style_imm (styler, "#0x%-20x", imm32));
3704 snprintf (comment, comment_size, "#%d", imm32);
3705 }
3706 break;
3707 case 8: /* e.g. MOV Xd, #<imm64>. */
3708 snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64,
3709 opnd->imm.value));
3710 snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
3711 break;
3712 default:
3713 snprintf (buf, size, "<invalid>");
3714 break;
3715 }
3716 break;
3717
3718 case AARCH64_OPND_FPIMM0:
3719 snprintf (buf, size, "%s", style_imm (styler, "#0.0"));
3720 break;
3721
3722 case AARCH64_OPND_LIMM:
3723 case AARCH64_OPND_AIMM:
3724 case AARCH64_OPND_HALF:
3725 case AARCH64_OPND_SVE_INV_LIMM:
3726 case AARCH64_OPND_SVE_LIMM:
3727 case AARCH64_OPND_SVE_LIMM_MOV:
3728 if (opnd->shifter.amount)
3729 snprintf (buf, size, "%s, %s %s",
3730 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
3731 style_sub_mnem (styler, "lsl"),
3732 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3733 else
3734 snprintf (buf, size, "%s",
3735 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
3736 break;
3737
3738 case AARCH64_OPND_SIMD_IMM:
3739 case AARCH64_OPND_SIMD_IMM_SFT:
3740 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3741 || opnd->shifter.kind == AARCH64_MOD_NONE)
3742 snprintf (buf, size, "%s",
3743 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
3744 else
3745 snprintf (buf, size, "%s, %s %s",
3746 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
3747 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
3748 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3749 break;
3750
3751 case AARCH64_OPND_SVE_AIMM:
3752 case AARCH64_OPND_SVE_ASIMM:
3753 if (opnd->shifter.amount)
3754 snprintf (buf, size, "%s, %s %s",
3755 style_imm (styler, "#%" PRIi64, opnd->imm.value),
3756 style_sub_mnem (styler, "lsl"),
3757 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3758 else
3759 snprintf (buf, size, "%s",
3760 style_imm (styler, "#%" PRIi64, opnd->imm.value));
3761 break;
3762
3763 case AARCH64_OPND_FPIMM:
3764 case AARCH64_OPND_SIMD_FPIMM:
3765 case AARCH64_OPND_SVE_FPIMM8:
3766 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3767 {
3768 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3769 {
3770 half_conv_t c;
3771 c.i = expand_fp_imm (2, opnd->imm.value);
3772 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
3773 }
3774 break;
3775 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3776 {
3777 single_conv_t c;
3778 c.i = expand_fp_imm (4, opnd->imm.value);
3779 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
3780 }
3781 break;
3782 case 8: /* e.g. FMOV <Dd>, #<imm>. */
3783 {
3784 double_conv_t c;
3785 c.i = expand_fp_imm (8, opnd->imm.value);
3786 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d));
3787 }
3788 break;
3789 default:
3790 snprintf (buf, size, "<invalid>");
3791 break;
3792 }
3793 break;
3794
3795 case AARCH64_OPND_CCMP_IMM:
3796 case AARCH64_OPND_NZCV:
3797 case AARCH64_OPND_EXCEPTION:
3798 case AARCH64_OPND_UIMM4:
3799 case AARCH64_OPND_UIMM4_ADDG:
3800 case AARCH64_OPND_UIMM7:
3801 case AARCH64_OPND_UIMM10:
3802 if (optional_operand_p (opcode, idx)
3803 && (opnd->imm.value ==
3804 (int64_t) get_optional_operand_default_value (opcode)))
3805 /* Omit the operand, e.g. DCPS1. */
3806 break;
3807 snprintf (buf, size, "%s",
3808 style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value));
3809 break;
3810
3811 case AARCH64_OPND_COND:
3812 case AARCH64_OPND_COND1:
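      /* Print the primary name of the condition and list any alternative
         names in the comment buffer.  */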
3813 snprintf (buf, size, "%s",
3814 style_sub_mnem (styler, opnd->cond->names[0]));
3815 num_conds = ARRAY_SIZE (opnd->cond->names);
3816 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3817 {
3818 size_t len = comment != NULL ? strlen (comment) : 0;
3819 if (i == 1)
3820 snprintf (comment + len, comment_size - len, "%s = %s",
3821 opnd->cond->names[0], opnd->cond->names[i]);
3822 else
3823 snprintf (comment + len, comment_size - len, ", %s",
3824 opnd->cond->names[i]);
3825 }
3826 break;
3827
3828 case AARCH64_OPND_ADDR_ADRP:
3829 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3830 + opnd->imm.value;
3831 if (pcrel_p)
3832 *pcrel_p = 1;
3833 if (address)
3834 *address = addr;
3835 /* This is not strictly necessary during disassembly, as print_address_func
3836 in the disassemble_info will take care of printing the address. But some
3837 other callers may still be interested in getting the string in *STR,
3838 so do the snprintf here regardless. */
3839 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
3840 break;
3841
3842 case AARCH64_OPND_ADDR_PCREL14:
3843 case AARCH64_OPND_ADDR_PCREL19:
3844 case AARCH64_OPND_ADDR_PCREL21:
3845 case AARCH64_OPND_ADDR_PCREL26:
3846 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3847 if (pcrel_p)
3848 *pcrel_p = 1;
3849 if (address)
3850 *address = addr;
3851 /* This is not strictly necessary during disassembly, as print_address_func
3852 in the disassemble_info will take care of printing the address. But some
3853 other callers may still be interested in getting the string in *STR,
3854 so do the snprintf here regardless. */
3855 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
3856 break;
3857
3858 case AARCH64_OPND_ADDR_SIMPLE:
3859 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3860 case AARCH64_OPND_SIMD_ADDR_POST:
3861 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3862 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3863 {
3864 if (opnd->addr.offset.is_reg)
3865 snprintf (buf, size, "[%s], %s",
3866 style_reg (styler, name),
3867 style_reg (styler, "x%d", opnd->addr.offset.regno));
3868 else
3869 snprintf (buf, size, "[%s], %s",
3870 style_reg (styler, name),
3871 style_imm (styler, "#%d", opnd->addr.offset.imm));
3872 }
3873 else
3874 snprintf (buf, size, "[%s]", style_reg (styler, name));
3875 break;
3876
3877 case AARCH64_OPND_ADDR_REGOFF:
3878 case AARCH64_OPND_SVE_ADDR_R:
3879 case AARCH64_OPND_SVE_ADDR_RR:
3880 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3881 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3882 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3883 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
3884 case AARCH64_OPND_SVE_ADDR_RX:
3885 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3886 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3887 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3888 print_register_offset_address
3889 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3890 get_offset_int_reg_name (opnd), styler);
3891 break;
3892
3893 case AARCH64_OPND_SVE_ADDR_ZX:
3894 print_register_offset_address
3895 (buf, size, opnd,
3896 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3897 get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler);
3898 break;
3899
3900 case AARCH64_OPND_SVE_ADDR_RZ:
3901 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3902 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3903 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3904 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3905 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3906 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3907 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3908 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3909 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3910 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3911 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3912 print_register_offset_address
3913 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3914 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
3915 styler);
3916 break;
3917
3918 case AARCH64_OPND_ADDR_SIMM7:
3919 case AARCH64_OPND_ADDR_SIMM9:
3920 case AARCH64_OPND_ADDR_SIMM9_2:
3921 case AARCH64_OPND_ADDR_SIMM10:
3922 case AARCH64_OPND_ADDR_SIMM11:
3923 case AARCH64_OPND_ADDR_SIMM13:
3924 case AARCH64_OPND_ADDR_OFFSET:
3925 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
3926 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3927 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
3928 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3929 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3930 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3931 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3932 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3933 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3934 case AARCH64_OPND_SVE_ADDR_RI_U6:
3935 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3936 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3937 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3938 print_immediate_offset_address
3939 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3940 styler);
3941 break;
3942
3943 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3944 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3945 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3946 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3947 print_immediate_offset_address
3948 (buf, size, opnd,
3949 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3950 styler);
3951 break;
3952
3953 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3954 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3955 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3956 print_register_offset_address
3957 (buf, size, opnd,
3958 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3959 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
3960 styler);
3961 break;
3962
3963 case AARCH64_OPND_ADDR_UIMM12:
3964 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3965 if (opnd->addr.offset.imm)
3966 snprintf (buf, size, "[%s, %s]",
3967 style_reg (styler, name),
3968 style_imm (styler, "#%d", opnd->addr.offset.imm));
3969 else
3970 snprintf (buf, size, "[%s]", style_reg (styler, name));
3971 break;
3972
3973 case AARCH64_OPND_SYSREG:
3974 for (i = 0; aarch64_sys_regs[i].name; ++i)
3975 {
3976 const aarch64_sys_reg *sr = aarch64_sys_regs + i;
3977
3978 bool exact_match
3979 = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
3980 || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
3981 && AARCH64_CPU_HAS_FEATURE (features, sr->features);
3982
3983 /* Try to find an exact match, but if that fails, return the first
3984 partial match that was found. */
3985 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3986 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
3987 && (name == NULL || exact_match))
3988 {
3989 name = aarch64_sys_regs[i].name;
3990 if (exact_match)
3991 {
3992 if (notes)
3993 *notes = NULL;
3994 break;
3995 }
3996
3997 /* If we didn't match exactly, the presence of a flag indicates what
3998 we didn't want for this instruction. E.g. if F_REG_READ is set,
3999 we were looking for a register that can be written to. See
4000 aarch64_ext_sysreg. */
4001 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
4002 *notes = _("reading from a write-only register");
4003 else if (aarch64_sys_regs[i].flags & F_REG_READ)
4004 *notes = _("writing to a read-only register");
4005 }
4006 }
4007
4008 if (name)
4009 snprintf (buf, size, "%s", style_reg (styler, name));
4010 else
4011 {
4012 /* Implementation defined system register. */
4013 unsigned int value = opnd->sysreg.value;
4014 snprintf (buf, size, "%s",
4015 style_reg (styler, "s%u_%u_c%u_c%u_%u",
4016 (value >> 14) & 0x3, (value >> 11) & 0x7,
4017 (value >> 7) & 0xf, (value >> 3) & 0xf,
4018 value & 0x7));
4019 }
4020 break;
4021
4022 case AARCH64_OPND_PSTATEFIELD:
4023 for (i = 0; aarch64_pstatefields[i].name; ++i)
4024 if (aarch64_pstatefields[i].value == opnd->pstatefield)
4025 {
4026 /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
4027 SVCRZA and SVCRSMZA. */
4028 uint32_t flags = aarch64_pstatefields[i].flags;
4029 if (flags & F_REG_IN_CRM
4030 && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
4031 != PSTATE_DECODE_CRM (flags)))
4032 continue;
4033 break;
4034 }
4035 assert (aarch64_pstatefields[i].name);
4036 snprintf (buf, size, "%s",
4037 style_reg (styler, aarch64_pstatefields[i].name));
4038 break;
4039
4040 case AARCH64_OPND_SYSREG_AT:
4041 case AARCH64_OPND_SYSREG_DC:
4042 case AARCH64_OPND_SYSREG_IC:
4043 case AARCH64_OPND_SYSREG_TLBI:
4044 case AARCH64_OPND_SYSREG_SR:
4045 snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name));
4046 break;
4047
4048 case AARCH64_OPND_BARRIER:
4049 case AARCH64_OPND_BARRIER_DSB_NXS:
4050 {
4051 if (opnd->barrier->name[0] == '#')
4052 snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name));
4053 else
4054 snprintf (buf, size, "%s",
4055 style_sub_mnem (styler, opnd->barrier->name));
4056 }
4057 break;
4058
4059 case AARCH64_OPND_BARRIER_ISB:
4060 /* Operand can be omitted, e.g. in a plain ISB. */
4061 if (! optional_operand_p (opcode, idx)
4062 || (opnd->barrier->value
4063 != get_optional_operand_default_value (opcode)))
4064 snprintf (buf, size, "%s",
4065 style_imm (styler, "#0x%x", opnd->barrier->value));
4066 break;
4067
4068 case AARCH64_OPND_PRFOP:
4069 if (opnd->prfop->name != NULL)
4070 snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name));
4071 else
4072 snprintf (buf, size, "%s", style_imm (styler, "#0x%02x",
4073 opnd->prfop->value));
4074 break;
4075
4076 case AARCH64_OPND_BARRIER_PSB:
4077 snprintf (buf, size, "%s", style_sub_mnem (styler, "csync"));
4078 break;
4079
4080 case AARCH64_OPND_BTI_TARGET:
4081 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
4082 snprintf (buf, size, "%s",
4083 style_sub_mnem (styler, opnd->hint_option->name));
4084 break;
4085
4086 case AARCH64_OPND_MOPS_ADDR_Rd:
4087 case AARCH64_OPND_MOPS_ADDR_Rs:
4088 snprintf (buf, size, "[%s]!",
4089 style_reg (styler,
4090 get_int_reg_name (opnd->reg.regno,
4091 AARCH64_OPND_QLF_X, 0)));
4092 break;
4093
4094 case AARCH64_OPND_MOPS_WB_Rn:
4095 snprintf (buf, size, "%s!",
4096 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4097 AARCH64_OPND_QLF_X, 0)));
4098 break;
4099
4100 default:
4101 snprintf (buf, size, "<invalid>");
4102 break;
4103 }
4104 }
4105 \f
4106 #define CPENC(op0,op1,crn,crm,op2) \
4107 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
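 /* After the final shift the encoding occupies bits [15:0]: op0 in [15:14],
    op1 in [13:11], CRn in [10:7], CRm in [6:3] and op2 in [2:0], matching the
    field extraction used above for implementation-defined system registers.
    For example, CPENC (3,3,C13,C0,2) (tpidr_el0) is 0xde82.  */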
4108 /* For 3.9.3 Instructions for Accessing Special Purpose Registers. */
4109 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
4110 /* For 3.9.10 System Instructions. */
4111 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
4112
4113 #define C0 0
4114 #define C1 1
4115 #define C2 2
4116 #define C3 3
4117 #define C4 4
4118 #define C5 5
4119 #define C6 6
4120 #define C7 7
4121 #define C8 8
4122 #define C9 9
4123 #define C10 10
4124 #define C11 11
4125 #define C12 12
4126 #define C13 13
4127 #define C14 14
4128 #define C15 15
4129
4130 #define SYSREG(name, encoding, flags, features) \
4131 { name, encoding, flags, features }
4132
4133 #define SR_CORE(n,e,f) SYSREG (n,e,f,0)
4134
4135 #define SR_FEAT(n,e,f,feat) \
4136 SYSREG ((n), (e), (f) | F_ARCHEXT, AARCH64_FEATURE_##feat)
4137
4138 #define SR_FEAT2(n,e,f,fe1,fe2) \
4139 SYSREG ((n), (e), (f) | F_ARCHEXT, \
4140 AARCH64_FEATURE_##fe1 | AARCH64_FEATURE_##fe2)
4141
4142 #define SR_V8_1_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_1)
4143 #define SR_V8_4_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_4)
4144
4145 #define SR_V8_A(n,e,f) SR_FEAT (n,e,f,V8_A)
4146 #define SR_V8_R(n,e,f) SR_FEAT (n,e,f,V8_R)
4147 #define SR_V8_1(n,e,f) SR_FEAT (n,e,f,V8_1)
4148 #define SR_V8_2(n,e,f) SR_FEAT (n,e,f,V8_2)
4149 #define SR_V8_3(n,e,f) SR_FEAT (n,e,f,V8_3)
4150 #define SR_V8_4(n,e,f) SR_FEAT (n,e,f,V8_4)
4151 #define SR_V8_6(n,e,f) SR_FEAT (n,e,f,V8_6)
4152 #define SR_V8_7(n,e,f) SR_FEAT (n,e,f,V8_7)
4153 #define SR_V8_8(n,e,f) SR_FEAT (n,e,f,V8_8)
4154 /* Has no separate libopcodes feature flag, but separated out for clarity. */
4155 #define SR_GIC(n,e,f) SR_CORE (n,e,f)
4156 /* Has no separate libopcodes feature flag, but separated out for clarity. */
4157 #define SR_AMU(n,e,f) SR_FEAT (n,e,f,V8_4)
4158 #define SR_LOR(n,e,f) SR_FEAT (n,e,f,LOR)
4159 #define SR_PAN(n,e,f) SR_FEAT (n,e,f,PAN)
4160 #define SR_RAS(n,e,f) SR_FEAT (n,e,f,RAS)
4161 #define SR_RNG(n,e,f) SR_FEAT (n,e,f,RNG)
4162 #define SR_SME(n,e,f) SR_FEAT (n,e,f,SME)
4163 #define SR_SSBS(n,e,f) SR_FEAT (n,e,f,SSBS)
4164 #define SR_SVE(n,e,f) SR_FEAT (n,e,f,SVE)
4165 #define SR_ID_PFR2(n,e,f) SR_FEAT (n,e,f,ID_PFR2)
4166 #define SR_PROFILE(n,e,f) SR_FEAT (n,e,f,PROFILE)
4167 #define SR_MEMTAG(n,e,f) SR_FEAT (n,e,f,MEMTAG)
4168 #define SR_SCXTNUM(n,e,f) SR_FEAT (n,e,f,SCXTNUM)
4169
4170 #define SR_EXPAND_ELx(f,x) \
4171 f (x, 1), \
4172 f (x, 2), \
4173 f (x, 3), \
4174 f (x, 4), \
4175 f (x, 5), \
4176 f (x, 6), \
4177 f (x, 7), \
4178 f (x, 8), \
4179 f (x, 9), \
4180 f (x, 10), \
4181 f (x, 11), \
4182 f (x, 12), \
4183 f (x, 13), \
4184 f (x, 14), \
4185 f (x, 15),
4186
4187 #define SR_EXPAND_EL12(f) \
4188 SR_EXPAND_ELx (f,1) \
4189 SR_EXPAND_ELx (f,2)
4190
4191 /* TODO: there is one more issue that needs to be resolved:
4192 1. handle cpu-implementation-defined system registers.
4193
4194 Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
4195 respectively. If neither of these is set then the register is read-write. */
4196 const aarch64_sys_reg aarch64_sys_regs [] =
4197 {
4198 SR_CORE ("spsr_el1", CPEN_ (0,C0,0), 0), /* = spsr_svc. */
4199 SR_V8_1 ("spsr_el12", CPEN_ (5,C0,0), 0),
4200 SR_CORE ("elr_el1", CPEN_ (0,C0,1), 0),
4201 SR_V8_1 ("elr_el12", CPEN_ (5,C0,1), 0),
4202 SR_CORE ("sp_el0", CPEN_ (0,C1,0), 0),
4203 SR_CORE ("spsel", CPEN_ (0,C2,0), 0),
4204 SR_CORE ("daif", CPEN_ (3,C2,1), 0),
4205 SR_CORE ("currentel", CPEN_ (0,C2,2), F_REG_READ),
4206 SR_PAN ("pan", CPEN_ (0,C2,3), 0),
4207 SR_V8_2 ("uao", CPEN_ (0,C2,4), 0),
4208 SR_CORE ("nzcv", CPEN_ (3,C2,0), 0),
4209 SR_SSBS ("ssbs", CPEN_ (3,C2,6), 0),
4210 SR_CORE ("fpcr", CPEN_ (3,C4,0), 0),
4211 SR_CORE ("fpsr", CPEN_ (3,C4,1), 0),
4212 SR_CORE ("dspsr_el0", CPEN_ (3,C5,0), 0),
4213 SR_CORE ("dlr_el0", CPEN_ (3,C5,1), 0),
4214 SR_CORE ("spsr_el2", CPEN_ (4,C0,0), 0), /* = spsr_hyp. */
4215 SR_CORE ("elr_el2", CPEN_ (4,C0,1), 0),
4216 SR_CORE ("sp_el1", CPEN_ (4,C1,0), 0),
4217 SR_CORE ("spsr_irq", CPEN_ (4,C3,0), 0),
4218 SR_CORE ("spsr_abt", CPEN_ (4,C3,1), 0),
4219 SR_CORE ("spsr_und", CPEN_ (4,C3,2), 0),
4220 SR_CORE ("spsr_fiq", CPEN_ (4,C3,3), 0),
4221 SR_CORE ("spsr_el3", CPEN_ (6,C0,0), 0),
4222 SR_CORE ("elr_el3", CPEN_ (6,C0,1), 0),
4223 SR_CORE ("sp_el2", CPEN_ (6,C1,0), 0),
4224 SR_CORE ("spsr_svc", CPEN_ (0,C0,0), F_DEPRECATED), /* = spsr_el1. */
4225 SR_CORE ("spsr_hyp", CPEN_ (4,C0,0), F_DEPRECATED), /* = spsr_el2. */
4226 SR_CORE ("midr_el1", CPENC (3,0,C0,C0,0), F_REG_READ),
4227 SR_CORE ("ctr_el0", CPENC (3,3,C0,C0,1), F_REG_READ),
4228 SR_CORE ("mpidr_el1", CPENC (3,0,C0,C0,5), F_REG_READ),
4229 SR_CORE ("revidr_el1", CPENC (3,0,C0,C0,6), F_REG_READ),
4230 SR_CORE ("aidr_el1", CPENC (3,1,C0,C0,7), F_REG_READ),
4231 SR_CORE ("dczid_el0", CPENC (3,3,C0,C0,7), F_REG_READ),
4232 SR_CORE ("id_dfr0_el1", CPENC (3,0,C0,C1,2), F_REG_READ),
4233 SR_CORE ("id_dfr1_el1", CPENC (3,0,C0,C3,5), F_REG_READ),
4234 SR_CORE ("id_pfr0_el1", CPENC (3,0,C0,C1,0), F_REG_READ),
4235 SR_CORE ("id_pfr1_el1", CPENC (3,0,C0,C1,1), F_REG_READ),
4236 SR_ID_PFR2 ("id_pfr2_el1", CPENC (3,0,C0,C3,4), F_REG_READ),
4237 SR_CORE ("id_afr0_el1", CPENC (3,0,C0,C1,3), F_REG_READ),
4238 SR_CORE ("id_mmfr0_el1", CPENC (3,0,C0,C1,4), F_REG_READ),
4239 SR_CORE ("id_mmfr1_el1", CPENC (3,0,C0,C1,5), F_REG_READ),
4240 SR_CORE ("id_mmfr2_el1", CPENC (3,0,C0,C1,6), F_REG_READ),
4241 SR_CORE ("id_mmfr3_el1", CPENC (3,0,C0,C1,7), F_REG_READ),
4242 SR_CORE ("id_mmfr4_el1", CPENC (3,0,C0,C2,6), F_REG_READ),
4243 SR_CORE ("id_mmfr5_el1", CPENC (3,0,C0,C3,6), F_REG_READ),
4244 SR_CORE ("id_isar0_el1", CPENC (3,0,C0,C2,0), F_REG_READ),
4245 SR_CORE ("id_isar1_el1", CPENC (3,0,C0,C2,1), F_REG_READ),
4246 SR_CORE ("id_isar2_el1", CPENC (3,0,C0,C2,2), F_REG_READ),
4247 SR_CORE ("id_isar3_el1", CPENC (3,0,C0,C2,3), F_REG_READ),
4248 SR_CORE ("id_isar4_el1", CPENC (3,0,C0,C2,4), F_REG_READ),
4249 SR_CORE ("id_isar5_el1", CPENC (3,0,C0,C2,5), F_REG_READ),
4250 SR_CORE ("id_isar6_el1", CPENC (3,0,C0,C2,7), F_REG_READ),
4251 SR_CORE ("mvfr0_el1", CPENC (3,0,C0,C3,0), F_REG_READ),
4252 SR_CORE ("mvfr1_el1", CPENC (3,0,C0,C3,1), F_REG_READ),
4253 SR_CORE ("mvfr2_el1", CPENC (3,0,C0,C3,2), F_REG_READ),
4254 SR_CORE ("ccsidr_el1", CPENC (3,1,C0,C0,0), F_REG_READ),
4255 SR_V8_3 ("ccsidr2_el1", CPENC (3,1,C0,C0,2), F_REG_READ),
4256 SR_CORE ("id_aa64pfr0_el1", CPENC (3,0,C0,C4,0), F_REG_READ),
4257 SR_CORE ("id_aa64pfr1_el1", CPENC (3,0,C0,C4,1), F_REG_READ),
4258 SR_CORE ("id_aa64dfr0_el1", CPENC (3,0,C0,C5,0), F_REG_READ),
4259 SR_CORE ("id_aa64dfr1_el1", CPENC (3,0,C0,C5,1), F_REG_READ),
4260 SR_CORE ("id_aa64isar0_el1", CPENC (3,0,C0,C6,0), F_REG_READ),
4261 SR_CORE ("id_aa64isar1_el1", CPENC (3,0,C0,C6,1), F_REG_READ),
4262 SR_CORE ("id_aa64isar2_el1", CPENC (3,0,C0,C6,2), F_REG_READ),
4263 SR_CORE ("id_aa64mmfr0_el1", CPENC (3,0,C0,C7,0), F_REG_READ),
4264 SR_CORE ("id_aa64mmfr1_el1", CPENC (3,0,C0,C7,1), F_REG_READ),
4265 SR_CORE ("id_aa64mmfr2_el1", CPENC (3,0,C0,C7,2), F_REG_READ),
4266 SR_CORE ("id_aa64afr0_el1", CPENC (3,0,C0,C5,4), F_REG_READ),
4267 SR_CORE ("id_aa64afr1_el1", CPENC (3,0,C0,C5,5), F_REG_READ),
4268 SR_SVE ("id_aa64zfr0_el1", CPENC (3,0,C0,C4,4), F_REG_READ),
4269 SR_CORE ("clidr_el1", CPENC (3,1,C0,C0,1), F_REG_READ),
4270 SR_CORE ("csselr_el1", CPENC (3,2,C0,C0,0), 0),
4271 SR_CORE ("vpidr_el2", CPENC (3,4,C0,C0,0), 0),
4272 SR_CORE ("vmpidr_el2", CPENC (3,4,C0,C0,5), 0),
4273 SR_CORE ("sctlr_el1", CPENC (3,0,C1,C0,0), 0),
4274 SR_CORE ("sctlr_el2", CPENC (3,4,C1,C0,0), 0),
4275 SR_CORE ("sctlr_el3", CPENC (3,6,C1,C0,0), 0),
4276 SR_V8_1 ("sctlr_el12", CPENC (3,5,C1,C0,0), 0),
4277 SR_CORE ("actlr_el1", CPENC (3,0,C1,C0,1), 0),
4278 SR_CORE ("actlr_el2", CPENC (3,4,C1,C0,1), 0),
4279 SR_CORE ("actlr_el3", CPENC (3,6,C1,C0,1), 0),
4280 SR_CORE ("cpacr_el1", CPENC (3,0,C1,C0,2), 0),
4281 SR_V8_1 ("cpacr_el12", CPENC (3,5,C1,C0,2), 0),
4282 SR_CORE ("cptr_el2", CPENC (3,4,C1,C1,2), 0),
4283 SR_CORE ("cptr_el3", CPENC (3,6,C1,C1,2), 0),
4284 SR_CORE ("scr_el3", CPENC (3,6,C1,C1,0), 0),
4285 SR_CORE ("hcr_el2", CPENC (3,4,C1,C1,0), 0),
4286 SR_CORE ("mdcr_el2", CPENC (3,4,C1,C1,1), 0),
4287 SR_CORE ("mdcr_el3", CPENC (3,6,C1,C3,1), 0),
4288 SR_CORE ("hstr_el2", CPENC (3,4,C1,C1,3), 0),
4289 SR_CORE ("hacr_el2", CPENC (3,4,C1,C1,7), 0),
4290 SR_SVE ("zcr_el1", CPENC (3,0,C1,C2,0), 0),
4291 SR_SVE ("zcr_el12", CPENC (3,5,C1,C2,0), 0),
4292 SR_SVE ("zcr_el2", CPENC (3,4,C1,C2,0), 0),
4293 SR_SVE ("zcr_el3", CPENC (3,6,C1,C2,0), 0),
4294 SR_CORE ("ttbr0_el1", CPENC (3,0,C2,C0,0), 0),
4295 SR_CORE ("ttbr1_el1", CPENC (3,0,C2,C0,1), 0),
4296 SR_V8_A ("ttbr0_el2", CPENC (3,4,C2,C0,0), 0),
4297 SR_V8_1_A ("ttbr1_el2", CPENC (3,4,C2,C0,1), 0),
4298 SR_CORE ("ttbr0_el3", CPENC (3,6,C2,C0,0), 0),
4299 SR_V8_1 ("ttbr0_el12", CPENC (3,5,C2,C0,0), 0),
4300 SR_V8_1 ("ttbr1_el12", CPENC (3,5,C2,C0,1), 0),
4301 SR_V8_A ("vttbr_el2", CPENC (3,4,C2,C1,0), 0),
4302 SR_CORE ("tcr_el1", CPENC (3,0,C2,C0,2), 0),
4303 SR_CORE ("tcr_el2", CPENC (3,4,C2,C0,2), 0),
4304 SR_CORE ("tcr_el3", CPENC (3,6,C2,C0,2), 0),
4305 SR_V8_1 ("tcr_el12", CPENC (3,5,C2,C0,2), 0),
4306 SR_CORE ("vtcr_el2", CPENC (3,4,C2,C1,2), 0),
4307 SR_V8_3 ("apiakeylo_el1", CPENC (3,0,C2,C1,0), 0),
4308 SR_V8_3 ("apiakeyhi_el1", CPENC (3,0,C2,C1,1), 0),
4309 SR_V8_3 ("apibkeylo_el1", CPENC (3,0,C2,C1,2), 0),
4310 SR_V8_3 ("apibkeyhi_el1", CPENC (3,0,C2,C1,3), 0),
4311 SR_V8_3 ("apdakeylo_el1", CPENC (3,0,C2,C2,0), 0),
4312 SR_V8_3 ("apdakeyhi_el1", CPENC (3,0,C2,C2,1), 0),
4313 SR_V8_3 ("apdbkeylo_el1", CPENC (3,0,C2,C2,2), 0),
4314 SR_V8_3 ("apdbkeyhi_el1", CPENC (3,0,C2,C2,3), 0),
4315 SR_V8_3 ("apgakeylo_el1", CPENC (3,0,C2,C3,0), 0),
4316 SR_V8_3 ("apgakeyhi_el1", CPENC (3,0,C2,C3,1), 0),
4317 SR_CORE ("afsr0_el1", CPENC (3,0,C5,C1,0), 0),
4318 SR_CORE ("afsr1_el1", CPENC (3,0,C5,C1,1), 0),
4319 SR_CORE ("afsr0_el2", CPENC (3,4,C5,C1,0), 0),
4320 SR_CORE ("afsr1_el2", CPENC (3,4,C5,C1,1), 0),
4321 SR_CORE ("afsr0_el3", CPENC (3,6,C5,C1,0), 0),
4322 SR_V8_1 ("afsr0_el12", CPENC (3,5,C5,C1,0), 0),
4323 SR_CORE ("afsr1_el3", CPENC (3,6,C5,C1,1), 0),
4324 SR_V8_1 ("afsr1_el12", CPENC (3,5,C5,C1,1), 0),
4325 SR_CORE ("esr_el1", CPENC (3,0,C5,C2,0), 0),
4326 SR_CORE ("esr_el2", CPENC (3,4,C5,C2,0), 0),
4327 SR_CORE ("esr_el3", CPENC (3,6,C5,C2,0), 0),
4328 SR_V8_1 ("esr_el12", CPENC (3,5,C5,C2,0), 0),
4329 SR_RAS ("vsesr_el2", CPENC (3,4,C5,C2,3), 0),
4330 SR_CORE ("fpexc32_el2", CPENC (3,4,C5,C3,0), 0),
4331 SR_RAS ("erridr_el1", CPENC (3,0,C5,C3,0), F_REG_READ),
4332 SR_RAS ("errselr_el1", CPENC (3,0,C5,C3,1), 0),
4333 SR_RAS ("erxfr_el1", CPENC (3,0,C5,C4,0), F_REG_READ),
4334 SR_RAS ("erxctlr_el1", CPENC (3,0,C5,C4,1), 0),
4335 SR_RAS ("erxstatus_el1", CPENC (3,0,C5,C4,2), 0),
4336 SR_RAS ("erxaddr_el1", CPENC (3,0,C5,C4,3), 0),
4337 SR_RAS ("erxmisc0_el1", CPENC (3,0,C5,C5,0), 0),
4338 SR_RAS ("erxmisc1_el1", CPENC (3,0,C5,C5,1), 0),
4339 SR_RAS ("erxmisc2_el1", CPENC (3,0,C5,C5,2), 0),
4340 SR_RAS ("erxmisc3_el1", CPENC (3,0,C5,C5,3), 0),
4341 SR_RAS ("erxpfgcdn_el1", CPENC (3,0,C5,C4,6), 0),
4342 SR_RAS ("erxpfgctl_el1", CPENC (3,0,C5,C4,5), 0),
4343 SR_RAS ("erxpfgf_el1", CPENC (3,0,C5,C4,4), F_REG_READ),
4344 SR_CORE ("far_el1", CPENC (3,0,C6,C0,0), 0),
4345 SR_CORE ("far_el2", CPENC (3,4,C6,C0,0), 0),
4346 SR_CORE ("far_el3", CPENC (3,6,C6,C0,0), 0),
4347 SR_V8_1 ("far_el12", CPENC (3,5,C6,C0,0), 0),
4348 SR_CORE ("hpfar_el2", CPENC (3,4,C6,C0,4), 0),
4349 SR_CORE ("par_el1", CPENC (3,0,C7,C4,0), 0),
4350 SR_CORE ("mair_el1", CPENC (3,0,C10,C2,0), 0),
4351 SR_CORE ("mair_el2", CPENC (3,4,C10,C2,0), 0),
4352 SR_CORE ("mair_el3", CPENC (3,6,C10,C2,0), 0),
4353 SR_V8_1 ("mair_el12", CPENC (3,5,C10,C2,0), 0),
4354 SR_CORE ("amair_el1", CPENC (3,0,C10,C3,0), 0),
4355 SR_CORE ("amair_el2", CPENC (3,4,C10,C3,0), 0),
4356 SR_CORE ("amair_el3", CPENC (3,6,C10,C3,0), 0),
4357 SR_V8_1 ("amair_el12", CPENC (3,5,C10,C3,0), 0),
4358 SR_CORE ("vbar_el1", CPENC (3,0,C12,C0,0), 0),
4359 SR_CORE ("vbar_el2", CPENC (3,4,C12,C0,0), 0),
4360 SR_CORE ("vbar_el3", CPENC (3,6,C12,C0,0), 0),
4361 SR_V8_1 ("vbar_el12", CPENC (3,5,C12,C0,0), 0),
4362 SR_CORE ("rvbar_el1", CPENC (3,0,C12,C0,1), F_REG_READ),
4363 SR_CORE ("rvbar_el2", CPENC (3,4,C12,C0,1), F_REG_READ),
4364 SR_CORE ("rvbar_el3", CPENC (3,6,C12,C0,1), F_REG_READ),
4365 SR_CORE ("rmr_el1", CPENC (3,0,C12,C0,2), 0),
4366 SR_CORE ("rmr_el2", CPENC (3,4,C12,C0,2), 0),
4367 SR_CORE ("rmr_el3", CPENC (3,6,C12,C0,2), 0),
4368 SR_CORE ("isr_el1", CPENC (3,0,C12,C1,0), F_REG_READ),
4369 SR_RAS ("disr_el1", CPENC (3,0,C12,C1,1), 0),
4370 SR_RAS ("vdisr_el2", CPENC (3,4,C12,C1,1), 0),
4371 SR_CORE ("contextidr_el1", CPENC (3,0,C13,C0,1), 0),
4372 SR_V8_1 ("contextidr_el2", CPENC (3,4,C13,C0,1), 0),
4373 SR_V8_1 ("contextidr_el12", CPENC (3,5,C13,C0,1), 0),
4374 SR_RNG ("rndr", CPENC (3,3,C2,C4,0), F_REG_READ),
4375 SR_RNG ("rndrrs", CPENC (3,3,C2,C4,1), F_REG_READ),
4376 SR_MEMTAG ("tco", CPENC (3,3,C4,C2,7), 0),
4377 SR_MEMTAG ("tfsre0_el1", CPENC (3,0,C5,C6,1), 0),
4378 SR_MEMTAG ("tfsr_el1", CPENC (3,0,C5,C6,0), 0),
4379 SR_MEMTAG ("tfsr_el2", CPENC (3,4,C5,C6,0), 0),
4380 SR_MEMTAG ("tfsr_el3", CPENC (3,6,C5,C6,0), 0),
4381 SR_MEMTAG ("tfsr_el12", CPENC (3,5,C5,C6,0), 0),
4382 SR_MEMTAG ("rgsr_el1", CPENC (3,0,C1,C0,5), 0),
4383 SR_MEMTAG ("gcr_el1", CPENC (3,0,C1,C0,6), 0),
4384 SR_MEMTAG ("gmid_el1", CPENC (3,1,C0,C0,4), F_REG_READ),
4385 SR_CORE ("tpidr_el0", CPENC (3,3,C13,C0,2), 0),
4386 SR_CORE ("tpidrro_el0", CPENC (3,3,C13,C0,3), 0),
4387 SR_CORE ("tpidr_el1", CPENC (3,0,C13,C0,4), 0),
4388 SR_CORE ("tpidr_el2", CPENC (3,4,C13,C0,2), 0),
4389 SR_CORE ("tpidr_el3", CPENC (3,6,C13,C0,2), 0),
4390 SR_SCXTNUM ("scxtnum_el0", CPENC (3,3,C13,C0,7), 0),
4391 SR_SCXTNUM ("scxtnum_el1", CPENC (3,0,C13,C0,7), 0),
4392 SR_SCXTNUM ("scxtnum_el2", CPENC (3,4,C13,C0,7), 0),
4393 SR_SCXTNUM ("scxtnum_el12", CPENC (3,5,C13,C0,7), 0),
4394 SR_SCXTNUM ("scxtnum_el3", CPENC (3,6,C13,C0,7), 0),
4395 SR_CORE ("teecr32_el1", CPENC (2,2,C0,C0,0), 0), /* See section 3.9.7.1. */
4396 SR_CORE ("cntfrq_el0", CPENC (3,3,C14,C0,0), 0),
4397 SR_CORE ("cntpct_el0", CPENC (3,3,C14,C0,1), F_REG_READ),
4398 SR_CORE ("cntvct_el0", CPENC (3,3,C14,C0,2), F_REG_READ),
4399 SR_CORE ("cntvoff_el2", CPENC (3,4,C14,C0,3), 0),
4400 SR_CORE ("cntkctl_el1", CPENC (3,0,C14,C1,0), 0),
4401 SR_V8_1 ("cntkctl_el12", CPENC (3,5,C14,C1,0), 0),
4402 SR_CORE ("cnthctl_el2", CPENC (3,4,C14,C1,0), 0),
4403 SR_CORE ("cntp_tval_el0", CPENC (3,3,C14,C2,0), 0),
4404 SR_V8_1 ("cntp_tval_el02", CPENC (3,5,C14,C2,0), 0),
4405 SR_CORE ("cntp_ctl_el0", CPENC (3,3,C14,C2,1), 0),
4406 SR_V8_1 ("cntp_ctl_el02", CPENC (3,5,C14,C2,1), 0),
4407 SR_CORE ("cntp_cval_el0", CPENC (3,3,C14,C2,2), 0),
4408 SR_V8_1 ("cntp_cval_el02", CPENC (3,5,C14,C2,2), 0),
4409 SR_CORE ("cntv_tval_el0", CPENC (3,3,C14,C3,0), 0),
4410 SR_V8_1 ("cntv_tval_el02", CPENC (3,5,C14,C3,0), 0),
4411 SR_CORE ("cntv_ctl_el0", CPENC (3,3,C14,C3,1), 0),
4412 SR_V8_1 ("cntv_ctl_el02", CPENC (3,5,C14,C3,1), 0),
4413 SR_CORE ("cntv_cval_el0", CPENC (3,3,C14,C3,2), 0),
4414 SR_V8_1 ("cntv_cval_el02", CPENC (3,5,C14,C3,2), 0),
4415 SR_CORE ("cnthp_tval_el2", CPENC (3,4,C14,C2,0), 0),
4416 SR_CORE ("cnthp_ctl_el2", CPENC (3,4,C14,C2,1), 0),
4417 SR_CORE ("cnthp_cval_el2", CPENC (3,4,C14,C2,2), 0),
4418 SR_CORE ("cntps_tval_el1", CPENC (3,7,C14,C2,0), 0),
4419 SR_CORE ("cntps_ctl_el1", CPENC (3,7,C14,C2,1), 0),
4420 SR_CORE ("cntps_cval_el1", CPENC (3,7,C14,C2,2), 0),
4421 SR_V8_1 ("cnthv_tval_el2", CPENC (3,4,C14,C3,0), 0),
4422 SR_V8_1 ("cnthv_ctl_el2", CPENC (3,4,C14,C3,1), 0),
4423 SR_V8_1 ("cnthv_cval_el2", CPENC (3,4,C14,C3,2), 0),
4424 SR_CORE ("dacr32_el2", CPENC (3,4,C3,C0,0), 0),
4425 SR_CORE ("ifsr32_el2", CPENC (3,4,C5,C0,1), 0),
4426 SR_CORE ("teehbr32_el1", CPENC (2,2,C1,C0,0), 0),
4427 SR_CORE ("sder32_el3", CPENC (3,6,C1,C1,1), 0),
4428 SR_CORE ("mdscr_el1", CPENC (2,0,C0,C2,2), 0),
4429 SR_CORE ("mdccsr_el0", CPENC (2,3,C0,C1,0), F_REG_READ),
4430 SR_CORE ("mdccint_el1", CPENC (2,0,C0,C2,0), 0),
4431 SR_CORE ("dbgdtr_el0", CPENC (2,3,C0,C4,0), 0),
4432 SR_CORE ("dbgdtrrx_el0", CPENC (2,3,C0,C5,0), F_REG_READ),
4433 SR_CORE ("dbgdtrtx_el0", CPENC (2,3,C0,C5,0), F_REG_WRITE),
4434 SR_CORE ("osdtrrx_el1", CPENC (2,0,C0,C0,2), 0),
4435 SR_CORE ("osdtrtx_el1", CPENC (2,0,C0,C3,2), 0),
4436 SR_CORE ("oseccr_el1", CPENC (2,0,C0,C6,2), 0),
4437 SR_CORE ("dbgvcr32_el2", CPENC (2,4,C0,C7,0), 0),
4438 SR_CORE ("dbgbvr0_el1", CPENC (2,0,C0,C0,4), 0),
4439 SR_CORE ("dbgbvr1_el1", CPENC (2,0,C0,C1,4), 0),
4440 SR_CORE ("dbgbvr2_el1", CPENC (2,0,C0,C2,4), 0),
4441 SR_CORE ("dbgbvr3_el1", CPENC (2,0,C0,C3,4), 0),
4442 SR_CORE ("dbgbvr4_el1", CPENC (2,0,C0,C4,4), 0),
4443 SR_CORE ("dbgbvr5_el1", CPENC (2,0,C0,C5,4), 0),
4444 SR_CORE ("dbgbvr6_el1", CPENC (2,0,C0,C6,4), 0),
4445 SR_CORE ("dbgbvr7_el1", CPENC (2,0,C0,C7,4), 0),
4446 SR_CORE ("dbgbvr8_el1", CPENC (2,0,C0,C8,4), 0),
4447 SR_CORE ("dbgbvr9_el1", CPENC (2,0,C0,C9,4), 0),
4448 SR_CORE ("dbgbvr10_el1", CPENC (2,0,C0,C10,4), 0),
4449 SR_CORE ("dbgbvr11_el1", CPENC (2,0,C0,C11,4), 0),
4450 SR_CORE ("dbgbvr12_el1", CPENC (2,0,C0,C12,4), 0),
4451 SR_CORE ("dbgbvr13_el1", CPENC (2,0,C0,C13,4), 0),
4452 SR_CORE ("dbgbvr14_el1", CPENC (2,0,C0,C14,4), 0),
4453 SR_CORE ("dbgbvr15_el1", CPENC (2,0,C0,C15,4), 0),
4454 SR_CORE ("dbgbcr0_el1", CPENC (2,0,C0,C0,5), 0),
4455 SR_CORE ("dbgbcr1_el1", CPENC (2,0,C0,C1,5), 0),
4456 SR_CORE ("dbgbcr2_el1", CPENC (2,0,C0,C2,5), 0),
4457 SR_CORE ("dbgbcr3_el1", CPENC (2,0,C0,C3,5), 0),
4458 SR_CORE ("dbgbcr4_el1", CPENC (2,0,C0,C4,5), 0),
4459 SR_CORE ("dbgbcr5_el1", CPENC (2,0,C0,C5,5), 0),
4460 SR_CORE ("dbgbcr6_el1", CPENC (2,0,C0,C6,5), 0),
4461 SR_CORE ("dbgbcr7_el1", CPENC (2,0,C0,C7,5), 0),
4462 SR_CORE ("dbgbcr8_el1", CPENC (2,0,C0,C8,5), 0),
4463 SR_CORE ("dbgbcr9_el1", CPENC (2,0,C0,C9,5), 0),
4464 SR_CORE ("dbgbcr10_el1", CPENC (2,0,C0,C10,5), 0),
4465 SR_CORE ("dbgbcr11_el1", CPENC (2,0,C0,C11,5), 0),
4466 SR_CORE ("dbgbcr12_el1", CPENC (2,0,C0,C12,5), 0),
4467 SR_CORE ("dbgbcr13_el1", CPENC (2,0,C0,C13,5), 0),
4468 SR_CORE ("dbgbcr14_el1", CPENC (2,0,C0,C14,5), 0),
4469 SR_CORE ("dbgbcr15_el1", CPENC (2,0,C0,C15,5), 0),
4470 SR_CORE ("dbgwvr0_el1", CPENC (2,0,C0,C0,6), 0),
4471 SR_CORE ("dbgwvr1_el1", CPENC (2,0,C0,C1,6), 0),
4472 SR_CORE ("dbgwvr2_el1", CPENC (2,0,C0,C2,6), 0),
4473 SR_CORE ("dbgwvr3_el1", CPENC (2,0,C0,C3,6), 0),
4474 SR_CORE ("dbgwvr4_el1", CPENC (2,0,C0,C4,6), 0),
4475 SR_CORE ("dbgwvr5_el1", CPENC (2,0,C0,C5,6), 0),
4476 SR_CORE ("dbgwvr6_el1", CPENC (2,0,C0,C6,6), 0),
4477 SR_CORE ("dbgwvr7_el1", CPENC (2,0,C0,C7,6), 0),
4478 SR_CORE ("dbgwvr8_el1", CPENC (2,0,C0,C8,6), 0),
4479 SR_CORE ("dbgwvr9_el1", CPENC (2,0,C0,C9,6), 0),
4480 SR_CORE ("dbgwvr10_el1", CPENC (2,0,C0,C10,6), 0),
4481 SR_CORE ("dbgwvr11_el1", CPENC (2,0,C0,C11,6), 0),
4482 SR_CORE ("dbgwvr12_el1", CPENC (2,0,C0,C12,6), 0),
4483 SR_CORE ("dbgwvr13_el1", CPENC (2,0,C0,C13,6), 0),
4484 SR_CORE ("dbgwvr14_el1", CPENC (2,0,C0,C14,6), 0),
4485 SR_CORE ("dbgwvr15_el1", CPENC (2,0,C0,C15,6), 0),
4486 SR_CORE ("dbgwcr0_el1", CPENC (2,0,C0,C0,7), 0),
4487 SR_CORE ("dbgwcr1_el1", CPENC (2,0,C0,C1,7), 0),
4488 SR_CORE ("dbgwcr2_el1", CPENC (2,0,C0,C2,7), 0),
4489 SR_CORE ("dbgwcr3_el1", CPENC (2,0,C0,C3,7), 0),
4490 SR_CORE ("dbgwcr4_el1", CPENC (2,0,C0,C4,7), 0),
4491 SR_CORE ("dbgwcr5_el1", CPENC (2,0,C0,C5,7), 0),
4492 SR_CORE ("dbgwcr6_el1", CPENC (2,0,C0,C6,7), 0),
4493 SR_CORE ("dbgwcr7_el1", CPENC (2,0,C0,C7,7), 0),
4494 SR_CORE ("dbgwcr8_el1", CPENC (2,0,C0,C8,7), 0),
4495 SR_CORE ("dbgwcr9_el1", CPENC (2,0,C0,C9,7), 0),
4496 SR_CORE ("dbgwcr10_el1", CPENC (2,0,C0,C10,7), 0),
4497 SR_CORE ("dbgwcr11_el1", CPENC (2,0,C0,C11,7), 0),
4498 SR_CORE ("dbgwcr12_el1", CPENC (2,0,C0,C12,7), 0),
4499 SR_CORE ("dbgwcr13_el1", CPENC (2,0,C0,C13,7), 0),
4500 SR_CORE ("dbgwcr14_el1", CPENC (2,0,C0,C14,7), 0),
4501 SR_CORE ("dbgwcr15_el1", CPENC (2,0,C0,C15,7), 0),
4502 SR_CORE ("mdrar_el1", CPENC (2,0,C1,C0,0), F_REG_READ),
4503 SR_CORE ("oslar_el1", CPENC (2,0,C1,C0,4), F_REG_WRITE),
4504 SR_CORE ("oslsr_el1", CPENC (2,0,C1,C1,4), F_REG_READ),
4505 SR_CORE ("osdlr_el1", CPENC (2,0,C1,C3,4), 0),
4506 SR_CORE ("dbgprcr_el1", CPENC (2,0,C1,C4,4), 0),
4507 SR_CORE ("dbgclaimset_el1", CPENC (2,0,C7,C8,6), 0),
4508 SR_CORE ("dbgclaimclr_el1", CPENC (2,0,C7,C9,6), 0),
4509 SR_CORE ("dbgauthstatus_el1", CPENC (2,0,C7,C14,6), F_REG_READ),
4510 SR_PROFILE ("pmblimitr_el1", CPENC (3,0,C9,C10,0), 0),
4511 SR_PROFILE ("pmbptr_el1", CPENC (3,0,C9,C10,1), 0),
4512 SR_PROFILE ("pmbsr_el1", CPENC (3,0,C9,C10,3), 0),
4513 SR_PROFILE ("pmbidr_el1", CPENC (3,0,C9,C10,7), F_REG_READ),
4514 SR_PROFILE ("pmscr_el1", CPENC (3,0,C9,C9,0), 0),
4515 SR_PROFILE ("pmsicr_el1", CPENC (3,0,C9,C9,2), 0),
4516 SR_PROFILE ("pmsirr_el1", CPENC (3,0,C9,C9,3), 0),
4517 SR_PROFILE ("pmsfcr_el1", CPENC (3,0,C9,C9,4), 0),
4518 SR_PROFILE ("pmsevfr_el1", CPENC (3,0,C9,C9,5), 0),
4519 SR_PROFILE ("pmslatfr_el1", CPENC (3,0,C9,C9,6), 0),
4520 SR_PROFILE ("pmsidr_el1", CPENC (3,0,C9,C9,7), F_REG_READ),
4521 SR_PROFILE ("pmscr_el2", CPENC (3,4,C9,C9,0), 0),
4522 SR_PROFILE ("pmscr_el12", CPENC (3,5,C9,C9,0), 0),
4523 SR_CORE ("pmcr_el0", CPENC (3,3,C9,C12,0), 0),
4524 SR_CORE ("pmcntenset_el0", CPENC (3,3,C9,C12,1), 0),
4525 SR_CORE ("pmcntenclr_el0", CPENC (3,3,C9,C12,2), 0),
4526 SR_CORE ("pmovsclr_el0", CPENC (3,3,C9,C12,3), 0),
4527 SR_CORE ("pmswinc_el0", CPENC (3,3,C9,C12,4), F_REG_WRITE),
4528 SR_CORE ("pmselr_el0", CPENC (3,3,C9,C12,5), 0),
4529 SR_CORE ("pmceid0_el0", CPENC (3,3,C9,C12,6), F_REG_READ),
4530 SR_CORE ("pmceid1_el0", CPENC (3,3,C9,C12,7), F_REG_READ),
4531 SR_CORE ("pmccntr_el0", CPENC (3,3,C9,C13,0), 0),
4532 SR_CORE ("pmxevtyper_el0", CPENC (3,3,C9,C13,1), 0),
4533 SR_CORE ("pmxevcntr_el0", CPENC (3,3,C9,C13,2), 0),
4534 SR_CORE ("pmuserenr_el0", CPENC (3,3,C9,C14,0), 0),
4535 SR_CORE ("pmintenset_el1", CPENC (3,0,C9,C14,1), 0),
4536 SR_CORE ("pmintenclr_el1", CPENC (3,0,C9,C14,2), 0),
4537 SR_CORE ("pmovsset_el0", CPENC (3,3,C9,C14,3), 0),
4538 SR_CORE ("pmevcntr0_el0", CPENC (3,3,C14,C8,0), 0),
4539 SR_CORE ("pmevcntr1_el0", CPENC (3,3,C14,C8,1), 0),
4540 SR_CORE ("pmevcntr2_el0", CPENC (3,3,C14,C8,2), 0),
4541 SR_CORE ("pmevcntr3_el0", CPENC (3,3,C14,C8,3), 0),
4542 SR_CORE ("pmevcntr4_el0", CPENC (3,3,C14,C8,4), 0),
4543 SR_CORE ("pmevcntr5_el0", CPENC (3,3,C14,C8,5), 0),
4544 SR_CORE ("pmevcntr6_el0", CPENC (3,3,C14,C8,6), 0),
4545 SR_CORE ("pmevcntr7_el0", CPENC (3,3,C14,C8,7), 0),
4546 SR_CORE ("pmevcntr8_el0", CPENC (3,3,C14,C9,0), 0),
4547 SR_CORE ("pmevcntr9_el0", CPENC (3,3,C14,C9,1), 0),
4548 SR_CORE ("pmevcntr10_el0", CPENC (3,3,C14,C9,2), 0),
4549 SR_CORE ("pmevcntr11_el0", CPENC (3,3,C14,C9,3), 0),
4550 SR_CORE ("pmevcntr12_el0", CPENC (3,3,C14,C9,4), 0),
4551 SR_CORE ("pmevcntr13_el0", CPENC (3,3,C14,C9,5), 0),
4552 SR_CORE ("pmevcntr14_el0", CPENC (3,3,C14,C9,6), 0),
4553 SR_CORE ("pmevcntr15_el0", CPENC (3,3,C14,C9,7), 0),
4554 SR_CORE ("pmevcntr16_el0", CPENC (3,3,C14,C10,0), 0),
4555 SR_CORE ("pmevcntr17_el0", CPENC (3,3,C14,C10,1), 0),
4556 SR_CORE ("pmevcntr18_el0", CPENC (3,3,C14,C10,2), 0),
4557 SR_CORE ("pmevcntr19_el0", CPENC (3,3,C14,C10,3), 0),
4558 SR_CORE ("pmevcntr20_el0", CPENC (3,3,C14,C10,4), 0),
4559 SR_CORE ("pmevcntr21_el0", CPENC (3,3,C14,C10,5), 0),
4560 SR_CORE ("pmevcntr22_el0", CPENC (3,3,C14,C10,6), 0),
4561 SR_CORE ("pmevcntr23_el0", CPENC (3,3,C14,C10,7), 0),
4562 SR_CORE ("pmevcntr24_el0", CPENC (3,3,C14,C11,0), 0),
4563 SR_CORE ("pmevcntr25_el0", CPENC (3,3,C14,C11,1), 0),
4564 SR_CORE ("pmevcntr26_el0", CPENC (3,3,C14,C11,2), 0),
4565 SR_CORE ("pmevcntr27_el0", CPENC (3,3,C14,C11,3), 0),
4566 SR_CORE ("pmevcntr28_el0", CPENC (3,3,C14,C11,4), 0),
4567 SR_CORE ("pmevcntr29_el0", CPENC (3,3,C14,C11,5), 0),
4568 SR_CORE ("pmevcntr30_el0", CPENC (3,3,C14,C11,6), 0),
4569 SR_CORE ("pmevtyper0_el0", CPENC (3,3,C14,C12,0), 0),
4570 SR_CORE ("pmevtyper1_el0", CPENC (3,3,C14,C12,1), 0),
4571 SR_CORE ("pmevtyper2_el0", CPENC (3,3,C14,C12,2), 0),
4572 SR_CORE ("pmevtyper3_el0", CPENC (3,3,C14,C12,3), 0),
4573 SR_CORE ("pmevtyper4_el0", CPENC (3,3,C14,C12,4), 0),
4574 SR_CORE ("pmevtyper5_el0", CPENC (3,3,C14,C12,5), 0),
4575 SR_CORE ("pmevtyper6_el0", CPENC (3,3,C14,C12,6), 0),
4576 SR_CORE ("pmevtyper7_el0", CPENC (3,3,C14,C12,7), 0),
4577 SR_CORE ("pmevtyper8_el0", CPENC (3,3,C14,C13,0), 0),
4578 SR_CORE ("pmevtyper9_el0", CPENC (3,3,C14,C13,1), 0),
4579 SR_CORE ("pmevtyper10_el0", CPENC (3,3,C14,C13,2), 0),
4580 SR_CORE ("pmevtyper11_el0", CPENC (3,3,C14,C13,3), 0),
4581 SR_CORE ("pmevtyper12_el0", CPENC (3,3,C14,C13,4), 0),
4582 SR_CORE ("pmevtyper13_el0", CPENC (3,3,C14,C13,5), 0),
4583 SR_CORE ("pmevtyper14_el0", CPENC (3,3,C14,C13,6), 0),
4584 SR_CORE ("pmevtyper15_el0", CPENC (3,3,C14,C13,7), 0),
4585 SR_CORE ("pmevtyper16_el0", CPENC (3,3,C14,C14,0), 0),
4586 SR_CORE ("pmevtyper17_el0", CPENC (3,3,C14,C14,1), 0),
4587 SR_CORE ("pmevtyper18_el0", CPENC (3,3,C14,C14,2), 0),
4588 SR_CORE ("pmevtyper19_el0", CPENC (3,3,C14,C14,3), 0),
4589 SR_CORE ("pmevtyper20_el0", CPENC (3,3,C14,C14,4), 0),
4590 SR_CORE ("pmevtyper21_el0", CPENC (3,3,C14,C14,5), 0),
4591 SR_CORE ("pmevtyper22_el0", CPENC (3,3,C14,C14,6), 0),
4592 SR_CORE ("pmevtyper23_el0", CPENC (3,3,C14,C14,7), 0),
4593 SR_CORE ("pmevtyper24_el0", CPENC (3,3,C14,C15,0), 0),
4594 SR_CORE ("pmevtyper25_el0", CPENC (3,3,C14,C15,1), 0),
4595 SR_CORE ("pmevtyper26_el0", CPENC (3,3,C14,C15,2), 0),
4596 SR_CORE ("pmevtyper27_el0", CPENC (3,3,C14,C15,3), 0),
4597 SR_CORE ("pmevtyper28_el0", CPENC (3,3,C14,C15,4), 0),
4598 SR_CORE ("pmevtyper29_el0", CPENC (3,3,C14,C15,5), 0),
4599 SR_CORE ("pmevtyper30_el0", CPENC (3,3,C14,C15,6), 0),
4600 SR_CORE ("pmccfiltr_el0", CPENC (3,3,C14,C15,7), 0),
4601
4602 SR_V8_4 ("dit", CPEN_ (3,C2,5), 0),
4603 SR_V8_4 ("trfcr_el1", CPENC (3,0,C1,C2,1), 0),
4604 SR_V8_4 ("pmmir_el1", CPENC (3,0,C9,C14,6), F_REG_READ),
4605 SR_V8_4 ("trfcr_el2", CPENC (3,4,C1,C2,1), 0),
4606 SR_V8_4 ("vstcr_el2", CPENC (3,4,C2,C6,2), 0),
4607 SR_V8_4_A ("vsttbr_el2", CPENC (3,4,C2,C6,0), 0),
4608 SR_V8_4 ("cnthvs_tval_el2", CPENC (3,4,C14,C4,0), 0),
4609 SR_V8_4 ("cnthvs_cval_el2", CPENC (3,4,C14,C4,2), 0),
4610 SR_V8_4 ("cnthvs_ctl_el2", CPENC (3,4,C14,C4,1), 0),
4611 SR_V8_4 ("cnthps_tval_el2", CPENC (3,4,C14,C5,0), 0),
4612 SR_V8_4 ("cnthps_cval_el2", CPENC (3,4,C14,C5,2), 0),
4613 SR_V8_4 ("cnthps_ctl_el2", CPENC (3,4,C14,C5,1), 0),
4614 SR_V8_4 ("sder32_el2", CPENC (3,4,C1,C3,1), 0),
4615 SR_V8_4 ("vncr_el2", CPENC (3,4,C2,C2,0), 0),
4616 SR_V8_4 ("trfcr_el12", CPENC (3,5,C1,C2,1), 0),
4617
4618 SR_CORE ("mpam0_el1", CPENC (3,0,C10,C5,1), 0),
4619 SR_CORE ("mpam1_el1", CPENC (3,0,C10,C5,0), 0),
4620 SR_CORE ("mpam1_el12", CPENC (3,5,C10,C5,0), 0),
4621 SR_CORE ("mpam2_el2", CPENC (3,4,C10,C5,0), 0),
4622 SR_CORE ("mpam3_el3", CPENC (3,6,C10,C5,0), 0),
4623 SR_CORE ("mpamhcr_el2", CPENC (3,4,C10,C4,0), 0),
4624 SR_CORE ("mpamidr_el1", CPENC (3,0,C10,C4,4), F_REG_READ),
4625 SR_CORE ("mpamvpm0_el2", CPENC (3,4,C10,C6,0), 0),
4626 SR_CORE ("mpamvpm1_el2", CPENC (3,4,C10,C6,1), 0),
4627 SR_CORE ("mpamvpm2_el2", CPENC (3,4,C10,C6,2), 0),
4628 SR_CORE ("mpamvpm3_el2", CPENC (3,4,C10,C6,3), 0),
4629 SR_CORE ("mpamvpm4_el2", CPENC (3,4,C10,C6,4), 0),
4630 SR_CORE ("mpamvpm5_el2", CPENC (3,4,C10,C6,5), 0),
4631 SR_CORE ("mpamvpm6_el2", CPENC (3,4,C10,C6,6), 0),
4632 SR_CORE ("mpamvpm7_el2", CPENC (3,4,C10,C6,7), 0),
4633 SR_CORE ("mpamvpmv_el2", CPENC (3,4,C10,C4,1), 0),
4634
4635 SR_V8_R ("mpuir_el1", CPENC (3,0,C0,C0,4), F_REG_READ),
4636 SR_V8_R ("mpuir_el2", CPENC (3,4,C0,C0,4), F_REG_READ),
4637 SR_V8_R ("prbar_el1", CPENC (3,0,C6,C8,0), 0),
4638 SR_V8_R ("prbar_el2", CPENC (3,4,C6,C8,0), 0),
4639
4640 #define ENC_BARLAR(x,n,lar) \
4641 CPENC (3, (x-1) << 2, C6, 8 | (n >> 1), ((n & 1) << 2) | lar)
4642
4643 #define PRBARn_ELx(x,n) SR_V8_R ("prbar" #n "_el" #x, ENC_BARLAR (x,n,0), 0)
4644 #define PRLARn_ELx(x,n) SR_V8_R ("prlar" #n "_el" #x, ENC_BARLAR (x,n,1), 0)
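 /* ENC_BARLAR packs an Armv8-R protection-region number into the encoding:
    op1 is (x-1) << 2 (0 for EL1, 4 for EL2), CRm is 8 | (n >> 1), and op2
    holds the low bit of n together with the PRBAR (lar == 0) / PRLAR
    (lar == 1) selector.  SR_EXPAND_EL12 below instantiates regions 1-15
    for both EL1 and EL2.  */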
4645
4646 SR_EXPAND_EL12 (PRBARn_ELx)
4647 SR_V8_R ("prenr_el1", CPENC (3,0,C6,C1,1), 0),
4648 SR_V8_R ("prenr_el2", CPENC (3,4,C6,C1,1), 0),
4649 SR_V8_R ("prlar_el1", CPENC (3,0,C6,C8,1), 0),
4650 SR_V8_R ("prlar_el2", CPENC (3,4,C6,C8,1), 0),
4651 SR_EXPAND_EL12 (PRLARn_ELx)
4652 SR_V8_R ("prselr_el1", CPENC (3,0,C6,C2,1), 0),
4653 SR_V8_R ("prselr_el2", CPENC (3,4,C6,C2,1), 0),
4654 SR_V8_R ("vsctlr_el2", CPENC (3,4,C2,C0,0), 0),
4655
4656 SR_CORE ("trbbaser_el1", CPENC (3,0,C9,C11,2), 0),
4657 SR_CORE ("trbidr_el1", CPENC (3,0,C9,C11,7), F_REG_READ),
4658 SR_CORE ("trblimitr_el1", CPENC (3,0,C9,C11,0), 0),
4659 SR_CORE ("trbmar_el1", CPENC (3,0,C9,C11,4), 0),
4660 SR_CORE ("trbptr_el1", CPENC (3,0,C9,C11,1), 0),
4661 SR_CORE ("trbsr_el1", CPENC (3,0,C9,C11,3), 0),
4662 SR_CORE ("trbtrg_el1", CPENC (3,0,C9,C11,6), 0),
4663
4664 SR_CORE ("trcauthstatus", CPENC (2,1,C7,C14,6), F_REG_READ),
4665 SR_CORE ("trccidr0", CPENC (2,1,C7,C12,7), F_REG_READ),
4666 SR_CORE ("trccidr1", CPENC (2,1,C7,C13,7), F_REG_READ),
4667 SR_CORE ("trccidr2", CPENC (2,1,C7,C14,7), F_REG_READ),
4668 SR_CORE ("trccidr3", CPENC (2,1,C7,C15,7), F_REG_READ),
4669 SR_CORE ("trcdevaff0", CPENC (2,1,C7,C10,6), F_REG_READ),
4670 SR_CORE ("trcdevaff1", CPENC (2,1,C7,C11,6), F_REG_READ),
4671 SR_CORE ("trcdevarch", CPENC (2,1,C7,C15,6), F_REG_READ),
4672 SR_CORE ("trcdevid", CPENC (2,1,C7,C2,7), F_REG_READ),
4673 SR_CORE ("trcdevtype", CPENC (2,1,C7,C3,7), F_REG_READ),
4674 SR_CORE ("trcidr0", CPENC (2,1,C0,C8,7), F_REG_READ),
4675 SR_CORE ("trcidr1", CPENC (2,1,C0,C9,7), F_REG_READ),
4676 SR_CORE ("trcidr2", CPENC (2,1,C0,C10,7), F_REG_READ),
4677 SR_CORE ("trcidr3", CPENC (2,1,C0,C11,7), F_REG_READ),
4678 SR_CORE ("trcidr4", CPENC (2,1,C0,C12,7), F_REG_READ),
4679 SR_CORE ("trcidr5", CPENC (2,1,C0,C13,7), F_REG_READ),
4680 SR_CORE ("trcidr6", CPENC (2,1,C0,C14,7), F_REG_READ),
4681 SR_CORE ("trcidr7", CPENC (2,1,C0,C15,7), F_REG_READ),
4682 SR_CORE ("trcidr8", CPENC (2,1,C0,C0,6), F_REG_READ),
4683 SR_CORE ("trcidr9", CPENC (2,1,C0,C1,6), F_REG_READ),
4684 SR_CORE ("trcidr10", CPENC (2,1,C0,C2,6), F_REG_READ),
4685 SR_CORE ("trcidr11", CPENC (2,1,C0,C3,6), F_REG_READ),
4686 SR_CORE ("trcidr12", CPENC (2,1,C0,C4,6), F_REG_READ),
4687 SR_CORE ("trcidr13", CPENC (2,1,C0,C5,6), F_REG_READ),
4688 SR_CORE ("trclsr", CPENC (2,1,C7,C13,6), F_REG_READ),
4689 SR_CORE ("trcoslsr", CPENC (2,1,C1,C1,4), F_REG_READ),
4690 SR_CORE ("trcpdsr", CPENC (2,1,C1,C5,4), F_REG_READ),
4691 SR_CORE ("trcpidr0", CPENC (2,1,C7,C8,7), F_REG_READ),
4692 SR_CORE ("trcpidr1", CPENC (2,1,C7,C9,7), F_REG_READ),
4693 SR_CORE ("trcpidr2", CPENC (2,1,C7,C10,7), F_REG_READ),
4694 SR_CORE ("trcpidr3", CPENC (2,1,C7,C11,7), F_REG_READ),
4695 SR_CORE ("trcpidr4", CPENC (2,1,C7,C4,7), F_REG_READ),
4696 SR_CORE ("trcpidr5", CPENC (2,1,C7,C5,7), F_REG_READ),
4697 SR_CORE ("trcpidr6", CPENC (2,1,C7,C6,7), F_REG_READ),
4698 SR_CORE ("trcpidr7", CPENC (2,1,C7,C7,7), F_REG_READ),
4699 SR_CORE ("trcstatr", CPENC (2,1,C0,C3,0), F_REG_READ),
4700 SR_CORE ("trcacatr0", CPENC (2,1,C2,C0,2), 0),
4701 SR_CORE ("trcacatr1", CPENC (2,1,C2,C2,2), 0),
4702 SR_CORE ("trcacatr2", CPENC (2,1,C2,C4,2), 0),
4703 SR_CORE ("trcacatr3", CPENC (2,1,C2,C6,2), 0),
4704 SR_CORE ("trcacatr4", CPENC (2,1,C2,C8,2), 0),
4705 SR_CORE ("trcacatr5", CPENC (2,1,C2,C10,2), 0),
4706 SR_CORE ("trcacatr6", CPENC (2,1,C2,C12,2), 0),
4707 SR_CORE ("trcacatr7", CPENC (2,1,C2,C14,2), 0),
4708 SR_CORE ("trcacatr8", CPENC (2,1,C2,C0,3), 0),
4709 SR_CORE ("trcacatr9", CPENC (2,1,C2,C2,3), 0),
4710 SR_CORE ("trcacatr10", CPENC (2,1,C2,C4,3), 0),
4711 SR_CORE ("trcacatr11", CPENC (2,1,C2,C6,3), 0),
4712 SR_CORE ("trcacatr12", CPENC (2,1,C2,C8,3), 0),
4713 SR_CORE ("trcacatr13", CPENC (2,1,C2,C10,3), 0),
4714 SR_CORE ("trcacatr14", CPENC (2,1,C2,C12,3), 0),
4715 SR_CORE ("trcacatr15", CPENC (2,1,C2,C14,3), 0),
4716 SR_CORE ("trcacvr0", CPENC (2,1,C2,C0,0), 0),
4717 SR_CORE ("trcacvr1", CPENC (2,1,C2,C2,0), 0),
4718 SR_CORE ("trcacvr2", CPENC (2,1,C2,C4,0), 0),
4719 SR_CORE ("trcacvr3", CPENC (2,1,C2,C6,0), 0),
4720 SR_CORE ("trcacvr4", CPENC (2,1,C2,C8,0), 0),
4721 SR_CORE ("trcacvr5", CPENC (2,1,C2,C10,0), 0),
4722 SR_CORE ("trcacvr6", CPENC (2,1,C2,C12,0), 0),
4723 SR_CORE ("trcacvr7", CPENC (2,1,C2,C14,0), 0),
4724 SR_CORE ("trcacvr8", CPENC (2,1,C2,C0,1), 0),
4725 SR_CORE ("trcacvr9", CPENC (2,1,C2,C2,1), 0),
4726 SR_CORE ("trcacvr10", CPENC (2,1,C2,C4,1), 0),
4727 SR_CORE ("trcacvr11", CPENC (2,1,C2,C6,1), 0),
4728 SR_CORE ("trcacvr12", CPENC (2,1,C2,C8,1), 0),
4729 SR_CORE ("trcacvr13", CPENC (2,1,C2,C10,1), 0),
4730 SR_CORE ("trcacvr14", CPENC (2,1,C2,C12,1), 0),
4731 SR_CORE ("trcacvr15", CPENC (2,1,C2,C14,1), 0),
4732 SR_CORE ("trcauxctlr", CPENC (2,1,C0,C6,0), 0),
4733 SR_CORE ("trcbbctlr", CPENC (2,1,C0,C15,0), 0),
4734 SR_CORE ("trcccctlr", CPENC (2,1,C0,C14,0), 0),
4735 SR_CORE ("trccidcctlr0", CPENC (2,1,C3,C0,2), 0),
4736 SR_CORE ("trccidcctlr1", CPENC (2,1,C3,C1,2), 0),
4737 SR_CORE ("trccidcvr0", CPENC (2,1,C3,C0,0), 0),
4738 SR_CORE ("trccidcvr1", CPENC (2,1,C3,C2,0), 0),
4739 SR_CORE ("trccidcvr2", CPENC (2,1,C3,C4,0), 0),
4740 SR_CORE ("trccidcvr3", CPENC (2,1,C3,C6,0), 0),
4741 SR_CORE ("trccidcvr4", CPENC (2,1,C3,C8,0), 0),
4742 SR_CORE ("trccidcvr5", CPENC (2,1,C3,C10,0), 0),
4743 SR_CORE ("trccidcvr6", CPENC (2,1,C3,C12,0), 0),
4744 SR_CORE ("trccidcvr7", CPENC (2,1,C3,C14,0), 0),
4745 SR_CORE ("trcclaimclr", CPENC (2,1,C7,C9,6), 0),
4746 SR_CORE ("trcclaimset", CPENC (2,1,C7,C8,6), 0),
4747 SR_CORE ("trccntctlr0", CPENC (2,1,C0,C4,5), 0),
4748 SR_CORE ("trccntctlr1", CPENC (2,1,C0,C5,5), 0),
4749 SR_CORE ("trccntctlr2", CPENC (2,1,C0,C6,5), 0),
4750 SR_CORE ("trccntctlr3", CPENC (2,1,C0,C7,5), 0),
4751 SR_CORE ("trccntrldvr0", CPENC (2,1,C0,C0,5), 0),
4752 SR_CORE ("trccntrldvr1", CPENC (2,1,C0,C1,5), 0),
4753 SR_CORE ("trccntrldvr2", CPENC (2,1,C0,C2,5), 0),
4754 SR_CORE ("trccntrldvr3", CPENC (2,1,C0,C3,5), 0),
4755 SR_CORE ("trccntvr0", CPENC (2,1,C0,C8,5), 0),
4756 SR_CORE ("trccntvr1", CPENC (2,1,C0,C9,5), 0),
4757 SR_CORE ("trccntvr2", CPENC (2,1,C0,C10,5), 0),
4758 SR_CORE ("trccntvr3", CPENC (2,1,C0,C11,5), 0),
4759 SR_CORE ("trcconfigr", CPENC (2,1,C0,C4,0), 0),
4760 SR_CORE ("trcdvcmr0", CPENC (2,1,C2,C0,6), 0),
4761 SR_CORE ("trcdvcmr1", CPENC (2,1,C2,C4,6), 0),
4762 SR_CORE ("trcdvcmr2", CPENC (2,1,C2,C8,6), 0),
4763 SR_CORE ("trcdvcmr3", CPENC (2,1,C2,C12,6), 0),
4764 SR_CORE ("trcdvcmr4", CPENC (2,1,C2,C0,7), 0),
4765 SR_CORE ("trcdvcmr5", CPENC (2,1,C2,C4,7), 0),
4766 SR_CORE ("trcdvcmr6", CPENC (2,1,C2,C8,7), 0),
4767 SR_CORE ("trcdvcmr7", CPENC (2,1,C2,C12,7), 0),
4768 SR_CORE ("trcdvcvr0", CPENC (2,1,C2,C0,4), 0),
4769 SR_CORE ("trcdvcvr1", CPENC (2,1,C2,C4,4), 0),
4770 SR_CORE ("trcdvcvr2", CPENC (2,1,C2,C8,4), 0),
4771 SR_CORE ("trcdvcvr3", CPENC (2,1,C2,C12,4), 0),
4772 SR_CORE ("trcdvcvr4", CPENC (2,1,C2,C0,5), 0),
4773 SR_CORE ("trcdvcvr5", CPENC (2,1,C2,C4,5), 0),
4774 SR_CORE ("trcdvcvr6", CPENC (2,1,C2,C8,5), 0),
4775 SR_CORE ("trcdvcvr7", CPENC (2,1,C2,C12,5), 0),
4776 SR_CORE ("trceventctl0r", CPENC (2,1,C0,C8,0), 0),
4777 SR_CORE ("trceventctl1r", CPENC (2,1,C0,C9,0), 0),
4778 SR_CORE ("trcextinselr0", CPENC (2,1,C0,C8,4), 0),
4779 SR_CORE ("trcextinselr", CPENC (2,1,C0,C8,4), 0),
4780 SR_CORE ("trcextinselr1", CPENC (2,1,C0,C9,4), 0),
4781 SR_CORE ("trcextinselr2", CPENC (2,1,C0,C10,4), 0),
4782 SR_CORE ("trcextinselr3", CPENC (2,1,C0,C11,4), 0),
4783 SR_CORE ("trcimspec0", CPENC (2,1,C0,C0,7), 0),
4784 SR_CORE ("trcimspec1", CPENC (2,1,C0,C1,7), 0),
4785 SR_CORE ("trcimspec2", CPENC (2,1,C0,C2,7), 0),
4786 SR_CORE ("trcimspec3", CPENC (2,1,C0,C3,7), 0),
4787 SR_CORE ("trcimspec4", CPENC (2,1,C0,C4,7), 0),
4788 SR_CORE ("trcimspec5", CPENC (2,1,C0,C5,7), 0),
4789 SR_CORE ("trcimspec6", CPENC (2,1,C0,C6,7), 0),
4790 SR_CORE ("trcimspec7", CPENC (2,1,C0,C7,7), 0),
4791 SR_CORE ("trcitctrl", CPENC (2,1,C7,C0,4), 0),
4792 SR_CORE ("trcpdcr", CPENC (2,1,C1,C4,4), 0),
4793 SR_CORE ("trcprgctlr", CPENC (2,1,C0,C1,0), 0),
4794 SR_CORE ("trcprocselr", CPENC (2,1,C0,C2,0), 0),
4795 SR_CORE ("trcqctlr", CPENC (2,1,C0,C1,1), 0),
4796 SR_CORE ("trcrsr", CPENC (2,1,C0,C10,0), 0),
4797 SR_CORE ("trcrsctlr2", CPENC (2,1,C1,C2,0), 0),
4798 SR_CORE ("trcrsctlr3", CPENC (2,1,C1,C3,0), 0),
4799 SR_CORE ("trcrsctlr4", CPENC (2,1,C1,C4,0), 0),
4800 SR_CORE ("trcrsctlr5", CPENC (2,1,C1,C5,0), 0),
4801 SR_CORE ("trcrsctlr6", CPENC (2,1,C1,C6,0), 0),
4802 SR_CORE ("trcrsctlr7", CPENC (2,1,C1,C7,0), 0),
4803 SR_CORE ("trcrsctlr8", CPENC (2,1,C1,C8,0), 0),
4804 SR_CORE ("trcrsctlr9", CPENC (2,1,C1,C9,0), 0),
4805 SR_CORE ("trcrsctlr10", CPENC (2,1,C1,C10,0), 0),
4806 SR_CORE ("trcrsctlr11", CPENC (2,1,C1,C11,0), 0),
4807 SR_CORE ("trcrsctlr12", CPENC (2,1,C1,C12,0), 0),
4808 SR_CORE ("trcrsctlr13", CPENC (2,1,C1,C13,0), 0),
4809 SR_CORE ("trcrsctlr14", CPENC (2,1,C1,C14,0), 0),
4810 SR_CORE ("trcrsctlr15", CPENC (2,1,C1,C15,0), 0),
4811 SR_CORE ("trcrsctlr16", CPENC (2,1,C1,C0,1), 0),
4812 SR_CORE ("trcrsctlr17", CPENC (2,1,C1,C1,1), 0),
4813 SR_CORE ("trcrsctlr18", CPENC (2,1,C1,C2,1), 0),
4814 SR_CORE ("trcrsctlr19", CPENC (2,1,C1,C3,1), 0),
4815 SR_CORE ("trcrsctlr20", CPENC (2,1,C1,C4,1), 0),
4816 SR_CORE ("trcrsctlr21", CPENC (2,1,C1,C5,1), 0),
4817 SR_CORE ("trcrsctlr22", CPENC (2,1,C1,C6,1), 0),
4818 SR_CORE ("trcrsctlr23", CPENC (2,1,C1,C7,1), 0),
4819 SR_CORE ("trcrsctlr24", CPENC (2,1,C1,C8,1), 0),
4820 SR_CORE ("trcrsctlr25", CPENC (2,1,C1,C9,1), 0),
4821 SR_CORE ("trcrsctlr26", CPENC (2,1,C1,C10,1), 0),
4822 SR_CORE ("trcrsctlr27", CPENC (2,1,C1,C11,1), 0),
4823 SR_CORE ("trcrsctlr28", CPENC (2,1,C1,C12,1), 0),
4824 SR_CORE ("trcrsctlr29", CPENC (2,1,C1,C13,1), 0),
4825 SR_CORE ("trcrsctlr30", CPENC (2,1,C1,C14,1), 0),
4826 SR_CORE ("trcrsctlr31", CPENC (2,1,C1,C15,1), 0),
4827 SR_CORE ("trcseqevr0", CPENC (2,1,C0,C0,4), 0),
4828 SR_CORE ("trcseqevr1", CPENC (2,1,C0,C1,4), 0),
4829 SR_CORE ("trcseqevr2", CPENC (2,1,C0,C2,4), 0),
4830 SR_CORE ("trcseqrstevr", CPENC (2,1,C0,C6,4), 0),
4831 SR_CORE ("trcseqstr", CPENC (2,1,C0,C7,4), 0),
4832 SR_CORE ("trcssccr0", CPENC (2,1,C1,C0,2), 0),
4833 SR_CORE ("trcssccr1", CPENC (2,1,C1,C1,2), 0),
4834 SR_CORE ("trcssccr2", CPENC (2,1,C1,C2,2), 0),
4835 SR_CORE ("trcssccr3", CPENC (2,1,C1,C3,2), 0),
4836 SR_CORE ("trcssccr4", CPENC (2,1,C1,C4,2), 0),
4837 SR_CORE ("trcssccr5", CPENC (2,1,C1,C5,2), 0),
4838 SR_CORE ("trcssccr6", CPENC (2,1,C1,C6,2), 0),
4839 SR_CORE ("trcssccr7", CPENC (2,1,C1,C7,2), 0),
4840 SR_CORE ("trcsscsr0", CPENC (2,1,C1,C8,2), 0),
4841 SR_CORE ("trcsscsr1", CPENC (2,1,C1,C9,2), 0),
4842 SR_CORE ("trcsscsr2", CPENC (2,1,C1,C10,2), 0),
4843 SR_CORE ("trcsscsr3", CPENC (2,1,C1,C11,2), 0),
4844 SR_CORE ("trcsscsr4", CPENC (2,1,C1,C12,2), 0),
4845 SR_CORE ("trcsscsr5", CPENC (2,1,C1,C13,2), 0),
4846 SR_CORE ("trcsscsr6", CPENC (2,1,C1,C14,2), 0),
4847 SR_CORE ("trcsscsr7", CPENC (2,1,C1,C15,2), 0),
4848 SR_CORE ("trcsspcicr0", CPENC (2,1,C1,C0,3), 0),
4849 SR_CORE ("trcsspcicr1", CPENC (2,1,C1,C1,3), 0),
4850 SR_CORE ("trcsspcicr2", CPENC (2,1,C1,C2,3), 0),
4851 SR_CORE ("trcsspcicr3", CPENC (2,1,C1,C3,3), 0),
4852 SR_CORE ("trcsspcicr4", CPENC (2,1,C1,C4,3), 0),
4853 SR_CORE ("trcsspcicr5", CPENC (2,1,C1,C5,3), 0),
4854 SR_CORE ("trcsspcicr6", CPENC (2,1,C1,C6,3), 0),
4855 SR_CORE ("trcsspcicr7", CPENC (2,1,C1,C7,3), 0),
4856 SR_CORE ("trcstallctlr", CPENC (2,1,C0,C11,0), 0),
4857 SR_CORE ("trcsyncpr", CPENC (2,1,C0,C13,0), 0),
4858 SR_CORE ("trctraceidr", CPENC (2,1,C0,C0,1), 0),
4859 SR_CORE ("trctsctlr", CPENC (2,1,C0,C12,0), 0),
4860 SR_CORE ("trcvdarcctlr", CPENC (2,1,C0,C10,2), 0),
4861 SR_CORE ("trcvdctlr", CPENC (2,1,C0,C8,2), 0),
4862 SR_CORE ("trcvdsacctlr", CPENC (2,1,C0,C9,2), 0),
4863 SR_CORE ("trcvictlr", CPENC (2,1,C0,C0,2), 0),
4864 SR_CORE ("trcviiectlr", CPENC (2,1,C0,C1,2), 0),
4865 SR_CORE ("trcvipcssctlr", CPENC (2,1,C0,C3,2), 0),
4866 SR_CORE ("trcvissctlr", CPENC (2,1,C0,C2,2), 0),
4867 SR_CORE ("trcvmidcctlr0", CPENC (2,1,C3,C2,2), 0),
4868 SR_CORE ("trcvmidcctlr1", CPENC (2,1,C3,C3,2), 0),
4869 SR_CORE ("trcvmidcvr0", CPENC (2,1,C3,C0,1), 0),
4870 SR_CORE ("trcvmidcvr1", CPENC (2,1,C3,C2,1), 0),
4871 SR_CORE ("trcvmidcvr2", CPENC (2,1,C3,C4,1), 0),
4872 SR_CORE ("trcvmidcvr3", CPENC (2,1,C3,C6,1), 0),
4873 SR_CORE ("trcvmidcvr4", CPENC (2,1,C3,C8,1), 0),
4874 SR_CORE ("trcvmidcvr5", CPENC (2,1,C3,C10,1), 0),
4875 SR_CORE ("trcvmidcvr6", CPENC (2,1,C3,C12,1), 0),
4876 SR_CORE ("trcvmidcvr7", CPENC (2,1,C3,C14,1), 0),
4877 SR_CORE ("trclar", CPENC (2,1,C7,C12,6), F_REG_WRITE),
4878 SR_CORE ("trcoslar", CPENC (2,1,C1,C0,4), F_REG_WRITE),
4879
4880 SR_CORE ("csrcr_el0", CPENC (2,3,C8,C0,0), 0),
4881 SR_CORE ("csrptr_el0", CPENC (2,3,C8,C0,1), 0),
4882 SR_CORE ("csridr_el0", CPENC (2,3,C8,C0,2), F_REG_READ),
4883 SR_CORE ("csrptridx_el0", CPENC (2,3,C8,C0,3), F_REG_READ),
4884 SR_CORE ("csrcr_el1", CPENC (2,0,C8,C0,0), 0),
4885 SR_CORE ("csrcr_el12", CPENC (2,5,C8,C0,0), 0),
4886 SR_CORE ("csrptr_el1", CPENC (2,0,C8,C0,1), 0),
4887 SR_CORE ("csrptr_el12", CPENC (2,5,C8,C0,1), 0),
4888 SR_CORE ("csrptridx_el1", CPENC (2,0,C8,C0,3), F_REG_READ),
4889 SR_CORE ("csrcr_el2", CPENC (2,4,C8,C0,0), 0),
4890 SR_CORE ("csrptr_el2", CPENC (2,4,C8,C0,1), 0),
4891 SR_CORE ("csrptridx_el2", CPENC (2,4,C8,C0,3), F_REG_READ),
4892
4893 SR_LOR ("lorid_el1", CPENC (3,0,C10,C4,7), F_REG_READ),
4894 SR_LOR ("lorc_el1", CPENC (3,0,C10,C4,3), 0),
4895 SR_LOR ("lorea_el1", CPENC (3,0,C10,C4,1), 0),
4896 SR_LOR ("lorn_el1", CPENC (3,0,C10,C4,2), 0),
4897 SR_LOR ("lorsa_el1", CPENC (3,0,C10,C4,0), 0),
4898
4899 SR_CORE ("icc_ctlr_el3", CPENC (3,6,C12,C12,4), 0),
4900 SR_CORE ("icc_sre_el1", CPENC (3,0,C12,C12,5), 0),
4901 SR_CORE ("icc_sre_el2", CPENC (3,4,C12,C9,5), 0),
4902 SR_CORE ("icc_sre_el3", CPENC (3,6,C12,C12,5), 0),
4903 SR_CORE ("ich_vtr_el2", CPENC (3,4,C12,C11,1), F_REG_READ),
4904
4905 SR_CORE ("brbcr_el1", CPENC (2,1,C9,C0,0), 0),
4906 SR_CORE ("brbcr_el12", CPENC (2,5,C9,C0,0), 0),
4907 SR_CORE ("brbfcr_el1", CPENC (2,1,C9,C0,1), 0),
4908 SR_CORE ("brbts_el1", CPENC (2,1,C9,C0,2), 0),
4909 SR_CORE ("brbinfinj_el1", CPENC (2,1,C9,C1,0), 0),
4910 SR_CORE ("brbsrcinj_el1", CPENC (2,1,C9,C1,1), 0),
4911 SR_CORE ("brbtgtinj_el1", CPENC (2,1,C9,C1,2), 0),
4912 SR_CORE ("brbidr0_el1", CPENC (2,1,C9,C2,0), F_REG_READ),
4913 SR_CORE ("brbcr_el2", CPENC (2,4,C9,C0,0), 0),
4914 SR_CORE ("brbsrc0_el1", CPENC (2,1,C8,C0,1), F_REG_READ),
4915 SR_CORE ("brbsrc1_el1", CPENC (2,1,C8,C1,1), F_REG_READ),
4916 SR_CORE ("brbsrc2_el1", CPENC (2,1,C8,C2,1), F_REG_READ),
4917 SR_CORE ("brbsrc3_el1", CPENC (2,1,C8,C3,1), F_REG_READ),
4918 SR_CORE ("brbsrc4_el1", CPENC (2,1,C8,C4,1), F_REG_READ),
4919 SR_CORE ("brbsrc5_el1", CPENC (2,1,C8,C5,1), F_REG_READ),
4920 SR_CORE ("brbsrc6_el1", CPENC (2,1,C8,C6,1), F_REG_READ),
4921 SR_CORE ("brbsrc7_el1", CPENC (2,1,C8,C7,1), F_REG_READ),
4922 SR_CORE ("brbsrc8_el1", CPENC (2,1,C8,C8,1), F_REG_READ),
4923 SR_CORE ("brbsrc9_el1", CPENC (2,1,C8,C9,1), F_REG_READ),
4924 SR_CORE ("brbsrc10_el1", CPENC (2,1,C8,C10,1), F_REG_READ),
4925 SR_CORE ("brbsrc11_el1", CPENC (2,1,C8,C11,1), F_REG_READ),
4926 SR_CORE ("brbsrc12_el1", CPENC (2,1,C8,C12,1), F_REG_READ),
4927 SR_CORE ("brbsrc13_el1", CPENC (2,1,C8,C13,1), F_REG_READ),
4928 SR_CORE ("brbsrc14_el1", CPENC (2,1,C8,C14,1), F_REG_READ),
4929 SR_CORE ("brbsrc15_el1", CPENC (2,1,C8,C15,1), F_REG_READ),
4930 SR_CORE ("brbsrc16_el1", CPENC (2,1,C8,C0,5), F_REG_READ),
4931 SR_CORE ("brbsrc17_el1", CPENC (2,1,C8,C1,5), F_REG_READ),
4932 SR_CORE ("brbsrc18_el1", CPENC (2,1,C8,C2,5), F_REG_READ),
4933 SR_CORE ("brbsrc19_el1", CPENC (2,1,C8,C3,5), F_REG_READ),
4934 SR_CORE ("brbsrc20_el1", CPENC (2,1,C8,C4,5), F_REG_READ),
4935 SR_CORE ("brbsrc21_el1", CPENC (2,1,C8,C5,5), F_REG_READ),
4936 SR_CORE ("brbsrc22_el1", CPENC (2,1,C8,C6,5), F_REG_READ),
4937 SR_CORE ("brbsrc23_el1", CPENC (2,1,C8,C7,5), F_REG_READ),
4938 SR_CORE ("brbsrc24_el1", CPENC (2,1,C8,C8,5), F_REG_READ),
4939 SR_CORE ("brbsrc25_el1", CPENC (2,1,C8,C9,5), F_REG_READ),
4940 SR_CORE ("brbsrc26_el1", CPENC (2,1,C8,C10,5), F_REG_READ),
4941 SR_CORE ("brbsrc27_el1", CPENC (2,1,C8,C11,5), F_REG_READ),
4942 SR_CORE ("brbsrc28_el1", CPENC (2,1,C8,C12,5), F_REG_READ),
4943 SR_CORE ("brbsrc29_el1", CPENC (2,1,C8,C13,5), F_REG_READ),
4944 SR_CORE ("brbsrc30_el1", CPENC (2,1,C8,C14,5), F_REG_READ),
4945 SR_CORE ("brbsrc31_el1", CPENC (2,1,C8,C15,5), F_REG_READ),
4946 SR_CORE ("brbtgt0_el1", CPENC (2,1,C8,C0,2), F_REG_READ),
4947 SR_CORE ("brbtgt1_el1", CPENC (2,1,C8,C1,2), F_REG_READ),
4948 SR_CORE ("brbtgt2_el1", CPENC (2,1,C8,C2,2), F_REG_READ),
4949 SR_CORE ("brbtgt3_el1", CPENC (2,1,C8,C3,2), F_REG_READ),
4950 SR_CORE ("brbtgt4_el1", CPENC (2,1,C8,C4,2), F_REG_READ),
4951 SR_CORE ("brbtgt5_el1", CPENC (2,1,C8,C5,2), F_REG_READ),
4952 SR_CORE ("brbtgt6_el1", CPENC (2,1,C8,C6,2), F_REG_READ),
4953 SR_CORE ("brbtgt7_el1", CPENC (2,1,C8,C7,2), F_REG_READ),
4954 SR_CORE ("brbtgt8_el1", CPENC (2,1,C8,C8,2), F_REG_READ),
4955 SR_CORE ("brbtgt9_el1", CPENC (2,1,C8,C9,2), F_REG_READ),
4956 SR_CORE ("brbtgt10_el1", CPENC (2,1,C8,C10,2), F_REG_READ),
4957 SR_CORE ("brbtgt11_el1", CPENC (2,1,C8,C11,2), F_REG_READ),
4958 SR_CORE ("brbtgt12_el1", CPENC (2,1,C8,C12,2), F_REG_READ),
4959 SR_CORE ("brbtgt13_el1", CPENC (2,1,C8,C13,2), F_REG_READ),
4960 SR_CORE ("brbtgt14_el1", CPENC (2,1,C8,C14,2), F_REG_READ),
4961 SR_CORE ("brbtgt15_el1", CPENC (2,1,C8,C15,2), F_REG_READ),
4962 SR_CORE ("brbtgt16_el1", CPENC (2,1,C8,C0,6), F_REG_READ),
4963 SR_CORE ("brbtgt17_el1", CPENC (2,1,C8,C1,6), F_REG_READ),
4964 SR_CORE ("brbtgt18_el1", CPENC (2,1,C8,C2,6), F_REG_READ),
4965 SR_CORE ("brbtgt19_el1", CPENC (2,1,C8,C3,6), F_REG_READ),
4966 SR_CORE ("brbtgt20_el1", CPENC (2,1,C8,C4,6), F_REG_READ),
4967 SR_CORE ("brbtgt21_el1", CPENC (2,1,C8,C5,6), F_REG_READ),
4968 SR_CORE ("brbtgt22_el1", CPENC (2,1,C8,C6,6), F_REG_READ),
4969 SR_CORE ("brbtgt23_el1", CPENC (2,1,C8,C7,6), F_REG_READ),
4970 SR_CORE ("brbtgt24_el1", CPENC (2,1,C8,C8,6), F_REG_READ),
4971 SR_CORE ("brbtgt25_el1", CPENC (2,1,C8,C9,6), F_REG_READ),
4972 SR_CORE ("brbtgt26_el1", CPENC (2,1,C8,C10,6), F_REG_READ),
4973 SR_CORE ("brbtgt27_el1", CPENC (2,1,C8,C11,6), F_REG_READ),
4974 SR_CORE ("brbtgt28_el1", CPENC (2,1,C8,C12,6), F_REG_READ),
4975 SR_CORE ("brbtgt29_el1", CPENC (2,1,C8,C13,6), F_REG_READ),
4976 SR_CORE ("brbtgt30_el1", CPENC (2,1,C8,C14,6), F_REG_READ),
4977 SR_CORE ("brbtgt31_el1", CPENC (2,1,C8,C15,6), F_REG_READ),
4978 SR_CORE ("brbinf0_el1", CPENC (2,1,C8,C0,0), F_REG_READ),
4979 SR_CORE ("brbinf1_el1", CPENC (2,1,C8,C1,0), F_REG_READ),
4980 SR_CORE ("brbinf2_el1", CPENC (2,1,C8,C2,0), F_REG_READ),
4981 SR_CORE ("brbinf3_el1", CPENC (2,1,C8,C3,0), F_REG_READ),
4982 SR_CORE ("brbinf4_el1", CPENC (2,1,C8,C4,0), F_REG_READ),
4983 SR_CORE ("brbinf5_el1", CPENC (2,1,C8,C5,0), F_REG_READ),
4984 SR_CORE ("brbinf6_el1", CPENC (2,1,C8,C6,0), F_REG_READ),
4985 SR_CORE ("brbinf7_el1", CPENC (2,1,C8,C7,0), F_REG_READ),
4986 SR_CORE ("brbinf8_el1", CPENC (2,1,C8,C8,0), F_REG_READ),
4987 SR_CORE ("brbinf9_el1", CPENC (2,1,C8,C9,0), F_REG_READ),
4988 SR_CORE ("brbinf10_el1", CPENC (2,1,C8,C10,0), F_REG_READ),
4989 SR_CORE ("brbinf11_el1", CPENC (2,1,C8,C11,0), F_REG_READ),
4990 SR_CORE ("brbinf12_el1", CPENC (2,1,C8,C12,0), F_REG_READ),
4991 SR_CORE ("brbinf13_el1", CPENC (2,1,C8,C13,0), F_REG_READ),
4992 SR_CORE ("brbinf14_el1", CPENC (2,1,C8,C14,0), F_REG_READ),
4993 SR_CORE ("brbinf15_el1", CPENC (2,1,C8,C15,0), F_REG_READ),
4994 SR_CORE ("brbinf16_el1", CPENC (2,1,C8,C0,4), F_REG_READ),
4995 SR_CORE ("brbinf17_el1", CPENC (2,1,C8,C1,4), F_REG_READ),
4996 SR_CORE ("brbinf18_el1", CPENC (2,1,C8,C2,4), F_REG_READ),
4997 SR_CORE ("brbinf19_el1", CPENC (2,1,C8,C3,4), F_REG_READ),
4998 SR_CORE ("brbinf20_el1", CPENC (2,1,C8,C4,4), F_REG_READ),
4999 SR_CORE ("brbinf21_el1", CPENC (2,1,C8,C5,4), F_REG_READ),
5000 SR_CORE ("brbinf22_el1", CPENC (2,1,C8,C6,4), F_REG_READ),
5001 SR_CORE ("brbinf23_el1", CPENC (2,1,C8,C7,4), F_REG_READ),
5002 SR_CORE ("brbinf24_el1", CPENC (2,1,C8,C8,4), F_REG_READ),
5003 SR_CORE ("brbinf25_el1", CPENC (2,1,C8,C9,4), F_REG_READ),
5004 SR_CORE ("brbinf26_el1", CPENC (2,1,C8,C10,4), F_REG_READ),
5005 SR_CORE ("brbinf27_el1", CPENC (2,1,C8,C11,4), F_REG_READ),
5006 SR_CORE ("brbinf28_el1", CPENC (2,1,C8,C12,4), F_REG_READ),
5007 SR_CORE ("brbinf29_el1", CPENC (2,1,C8,C13,4), F_REG_READ),
5008 SR_CORE ("brbinf30_el1", CPENC (2,1,C8,C14,4), F_REG_READ),
5009 SR_CORE ("brbinf31_el1", CPENC (2,1,C8,C15,4), F_REG_READ),
5010
5011 SR_CORE ("accdata_el1", CPENC (3,0,C13,C0,5), 0),
5012
5013 SR_CORE ("mfar_el3", CPENC (3,6,C6,C0,5), 0),
5014 SR_CORE ("gpccr_el3", CPENC (3,6,C2,C1,6), 0),
5015 SR_CORE ("gptbr_el3", CPENC (3,6,C2,C1,4), 0),
5016
5017 SR_SME ("svcr", CPENC (3,3,C4,C2,2), 0),
5018 SR_SME ("id_aa64smfr0_el1", CPENC (3,0,C0,C4,5), F_REG_READ),
5019 SR_SME ("smcr_el1", CPENC (3,0,C1,C2,6), 0),
5020 SR_SME ("smcr_el12", CPENC (3,5,C1,C2,6), 0),
5021 SR_SME ("smcr_el2", CPENC (3,4,C1,C2,6), 0),
5022 SR_SME ("smcr_el3", CPENC (3,6,C1,C2,6), 0),
5023 SR_SME ("smpri_el1", CPENC (3,0,C1,C2,4), 0),
5024 SR_SME ("smprimap_el2", CPENC (3,4,C1,C2,5), 0),
5025 SR_SME ("smidr_el1", CPENC (3,1,C0,C0,6), F_REG_READ),
5026 SR_SME ("tpidr2_el0", CPENC (3,3,C13,C0,5), 0),
5027 SR_SME ("mpamsm_el1", CPENC (3,0,C10,C5,3), 0),
5028
5029 SR_AMU ("amcr_el0", CPENC (3,3,C13,C2,0), 0),
5030 SR_AMU ("amcfgr_el0", CPENC (3,3,C13,C2,1), F_REG_READ),
5031 SR_AMU ("amcgcr_el0", CPENC (3,3,C13,C2,2), F_REG_READ),
5032 SR_AMU ("amuserenr_el0", CPENC (3,3,C13,C2,3), 0),
5033 SR_AMU ("amcntenclr0_el0", CPENC (3,3,C13,C2,4), 0),
5034 SR_AMU ("amcntenset0_el0", CPENC (3,3,C13,C2,5), 0),
5035 SR_AMU ("amcntenclr1_el0", CPENC (3,3,C13,C3,0), 0),
5036 SR_AMU ("amcntenset1_el0", CPENC (3,3,C13,C3,1), 0),
5037 SR_AMU ("amevcntr00_el0", CPENC (3,3,C13,C4,0), 0),
5038 SR_AMU ("amevcntr01_el0", CPENC (3,3,C13,C4,1), 0),
5039 SR_AMU ("amevcntr02_el0", CPENC (3,3,C13,C4,2), 0),
5040 SR_AMU ("amevcntr03_el0", CPENC (3,3,C13,C4,3), 0),
5041 SR_AMU ("amevtyper00_el0", CPENC (3,3,C13,C6,0), F_REG_READ),
5042 SR_AMU ("amevtyper01_el0", CPENC (3,3,C13,C6,1), F_REG_READ),
5043 SR_AMU ("amevtyper02_el0", CPENC (3,3,C13,C6,2), F_REG_READ),
5044 SR_AMU ("amevtyper03_el0", CPENC (3,3,C13,C6,3), F_REG_READ),
5045 SR_AMU ("amevcntr10_el0", CPENC (3,3,C13,C12,0), 0),
5046 SR_AMU ("amevcntr11_el0", CPENC (3,3,C13,C12,1), 0),
5047 SR_AMU ("amevcntr12_el0", CPENC (3,3,C13,C12,2), 0),
5048 SR_AMU ("amevcntr13_el0", CPENC (3,3,C13,C12,3), 0),
5049 SR_AMU ("amevcntr14_el0", CPENC (3,3,C13,C12,4), 0),
5050 SR_AMU ("amevcntr15_el0", CPENC (3,3,C13,C12,5), 0),
5051 SR_AMU ("amevcntr16_el0", CPENC (3,3,C13,C12,6), 0),
5052 SR_AMU ("amevcntr17_el0", CPENC (3,3,C13,C12,7), 0),
5053 SR_AMU ("amevcntr18_el0", CPENC (3,3,C13,C13,0), 0),
5054 SR_AMU ("amevcntr19_el0", CPENC (3,3,C13,C13,1), 0),
5055 SR_AMU ("amevcntr110_el0", CPENC (3,3,C13,C13,2), 0),
5056 SR_AMU ("amevcntr111_el0", CPENC (3,3,C13,C13,3), 0),
5057 SR_AMU ("amevcntr112_el0", CPENC (3,3,C13,C13,4), 0),
5058 SR_AMU ("amevcntr113_el0", CPENC (3,3,C13,C13,5), 0),
5059 SR_AMU ("amevcntr114_el0", CPENC (3,3,C13,C13,6), 0),
5060 SR_AMU ("amevcntr115_el0", CPENC (3,3,C13,C13,7), 0),
5061 SR_AMU ("amevtyper10_el0", CPENC (3,3,C13,C14,0), 0),
5062 SR_AMU ("amevtyper11_el0", CPENC (3,3,C13,C14,1), 0),
5063 SR_AMU ("amevtyper12_el0", CPENC (3,3,C13,C14,2), 0),
5064 SR_AMU ("amevtyper13_el0", CPENC (3,3,C13,C14,3), 0),
5065 SR_AMU ("amevtyper14_el0", CPENC (3,3,C13,C14,4), 0),
5066 SR_AMU ("amevtyper15_el0", CPENC (3,3,C13,C14,5), 0),
5067 SR_AMU ("amevtyper16_el0", CPENC (3,3,C13,C14,6), 0),
5068 SR_AMU ("amevtyper17_el0", CPENC (3,3,C13,C14,7), 0),
5069 SR_AMU ("amevtyper18_el0", CPENC (3,3,C13,C15,0), 0),
5070 SR_AMU ("amevtyper19_el0", CPENC (3,3,C13,C15,1), 0),
5071 SR_AMU ("amevtyper110_el0", CPENC (3,3,C13,C15,2), 0),
5072 SR_AMU ("amevtyper111_el0", CPENC (3,3,C13,C15,3), 0),
5073 SR_AMU ("amevtyper112_el0", CPENC (3,3,C13,C15,4), 0),
5074 SR_AMU ("amevtyper113_el0", CPENC (3,3,C13,C15,5), 0),
5075 SR_AMU ("amevtyper114_el0", CPENC (3,3,C13,C15,6), 0),
5076 SR_AMU ("amevtyper115_el0", CPENC (3,3,C13,C15,7), 0),
5077
5078 SR_GIC ("icc_pmr_el1", CPENC (3,0,C4,C6,0), 0),
5079 SR_GIC ("icc_iar0_el1", CPENC (3,0,C12,C8,0), F_REG_READ),
5080 SR_GIC ("icc_eoir0_el1", CPENC (3,0,C12,C8,1), F_REG_WRITE),
5081 SR_GIC ("icc_hppir0_el1", CPENC (3,0,C12,C8,2), F_REG_READ),
5082 SR_GIC ("icc_bpr0_el1", CPENC (3,0,C12,C8,3), 0),
5083 SR_GIC ("icc_ap0r0_el1", CPENC (3,0,C12,C8,4), 0),
5084 SR_GIC ("icc_ap0r1_el1", CPENC (3,0,C12,C8,5), 0),
5085 SR_GIC ("icc_ap0r2_el1", CPENC (3,0,C12,C8,6), 0),
5086 SR_GIC ("icc_ap0r3_el1", CPENC (3,0,C12,C8,7), 0),
5087 SR_GIC ("icc_ap1r0_el1", CPENC (3,0,C12,C9,0), 0),
5088 SR_GIC ("icc_ap1r1_el1", CPENC (3,0,C12,C9,1), 0),
5089 SR_GIC ("icc_ap1r2_el1", CPENC (3,0,C12,C9,2), 0),
5090 SR_GIC ("icc_ap1r3_el1", CPENC (3,0,C12,C9,3), 0),
5091 SR_GIC ("icc_dir_el1", CPENC (3,0,C12,C11,1), F_REG_WRITE),
5092 SR_GIC ("icc_rpr_el1", CPENC (3,0,C12,C11,3), F_REG_READ),
5093 SR_GIC ("icc_sgi1r_el1", CPENC (3,0,C12,C11,5), F_REG_WRITE),
5094 SR_GIC ("icc_asgi1r_el1", CPENC (3,0,C12,C11,6), F_REG_WRITE),
5095 SR_GIC ("icc_sgi0r_el1", CPENC (3,0,C12,C11,7), F_REG_WRITE),
5096 SR_GIC ("icc_iar1_el1", CPENC (3,0,C12,C12,0), F_REG_READ),
5097 SR_GIC ("icc_eoir1_el1", CPENC (3,0,C12,C12,1), F_REG_WRITE),
5098 SR_GIC ("icc_hppir1_el1", CPENC (3,0,C12,C12,2), F_REG_READ),
5099 SR_GIC ("icc_bpr1_el1", CPENC (3,0,C12,C12,3), 0),
5100 SR_GIC ("icc_ctlr_el1", CPENC (3,0,C12,C12,4), 0),
5101 SR_GIC ("icc_igrpen0_el1", CPENC (3,0,C12,C12,6), 0),
5102 SR_GIC ("icc_igrpen1_el1", CPENC (3,0,C12,C12,7), 0),
5103 SR_GIC ("ich_ap0r0_el2", CPENC (3,4,C12,C8,0), 0),
5104 SR_GIC ("ich_ap0r1_el2", CPENC (3,4,C12,C8,1), 0),
5105 SR_GIC ("ich_ap0r2_el2", CPENC (3,4,C12,C8,2), 0),
5106 SR_GIC ("ich_ap0r3_el2", CPENC (3,4,C12,C8,3), 0),
5107 SR_GIC ("ich_ap1r0_el2", CPENC (3,4,C12,C9,0), 0),
5108 SR_GIC ("ich_ap1r1_el2", CPENC (3,4,C12,C9,1), 0),
5109 SR_GIC ("ich_ap1r2_el2", CPENC (3,4,C12,C9,2), 0),
5110 SR_GIC ("ich_ap1r3_el2", CPENC (3,4,C12,C9,3), 0),
5111 SR_GIC ("ich_hcr_el2", CPENC (3,4,C12,C11,0), 0),
5112 SR_GIC ("ich_misr_el2", CPENC (3,4,C12,C11,2), F_REG_READ),
5113 SR_GIC ("ich_eisr_el2", CPENC (3,4,C12,C11,3), F_REG_READ),
5114 SR_GIC ("ich_elrsr_el2", CPENC (3,4,C12,C11,5), F_REG_READ),
5115 SR_GIC ("ich_vmcr_el2", CPENC (3,4,C12,C11,7), 0),
5116 SR_GIC ("ich_lr0_el2", CPENC (3,4,C12,C12,0), 0),
5117 SR_GIC ("ich_lr1_el2", CPENC (3,4,C12,C12,1), 0),
5118 SR_GIC ("ich_lr2_el2", CPENC (3,4,C12,C12,2), 0),
5119 SR_GIC ("ich_lr3_el2", CPENC (3,4,C12,C12,3), 0),
5120 SR_GIC ("ich_lr4_el2", CPENC (3,4,C12,C12,4), 0),
5121 SR_GIC ("ich_lr5_el2", CPENC (3,4,C12,C12,5), 0),
5122 SR_GIC ("ich_lr6_el2", CPENC (3,4,C12,C12,6), 0),
5123 SR_GIC ("ich_lr7_el2", CPENC (3,4,C12,C12,7), 0),
5124 SR_GIC ("ich_lr8_el2", CPENC (3,4,C12,C13,0), 0),
5125 SR_GIC ("ich_lr9_el2", CPENC (3,4,C12,C13,1), 0),
5126 SR_GIC ("ich_lr10_el2", CPENC (3,4,C12,C13,2), 0),
5127 SR_GIC ("ich_lr11_el2", CPENC (3,4,C12,C13,3), 0),
5128 SR_GIC ("ich_lr12_el2", CPENC (3,4,C12,C13,4), 0),
5129 SR_GIC ("ich_lr13_el2", CPENC (3,4,C12,C13,5), 0),
5130 SR_GIC ("ich_lr14_el2", CPENC (3,4,C12,C13,6), 0),
5131 SR_GIC ("ich_lr15_el2", CPENC (3,4,C12,C13,7), 0),
5132 SR_GIC ("icc_igrpen1_el3", CPENC (3,6,C12,C12,7), 0),
5133
5134 SR_V8_6 ("amcg1idr_el0", CPENC (3,3,C13,C2,6), F_REG_READ),
5135 SR_V8_6 ("cntpctss_el0", CPENC (3,3,C14,C0,5), F_REG_READ),
5136 SR_V8_6 ("cntvctss_el0", CPENC (3,3,C14,C0,6), F_REG_READ),
5137 SR_V8_6 ("hfgrtr_el2", CPENC (3,4,C1,C1,4), 0),
5138 SR_V8_6 ("hfgwtr_el2", CPENC (3,4,C1,C1,5), 0),
5139 SR_V8_6 ("hfgitr_el2", CPENC (3,4,C1,C1,6), 0),
5140 SR_V8_6 ("hdfgrtr_el2", CPENC (3,4,C3,C1,4), 0),
5141 SR_V8_6 ("hdfgwtr_el2", CPENC (3,4,C3,C1,5), 0),
5142 SR_V8_6 ("hafgrtr_el2", CPENC (3,4,C3,C1,6), 0),
5143 SR_V8_6 ("amevcntvoff00_el2", CPENC (3,4,C13,C8,0), 0),
5144 SR_V8_6 ("amevcntvoff01_el2", CPENC (3,4,C13,C8,1), 0),
5145 SR_V8_6 ("amevcntvoff02_el2", CPENC (3,4,C13,C8,2), 0),
5146 SR_V8_6 ("amevcntvoff03_el2", CPENC (3,4,C13,C8,3), 0),
5147 SR_V8_6 ("amevcntvoff04_el2", CPENC (3,4,C13,C8,4), 0),
5148 SR_V8_6 ("amevcntvoff05_el2", CPENC (3,4,C13,C8,5), 0),
5149 SR_V8_6 ("amevcntvoff06_el2", CPENC (3,4,C13,C8,6), 0),
5150 SR_V8_6 ("amevcntvoff07_el2", CPENC (3,4,C13,C8,7), 0),
5151 SR_V8_6 ("amevcntvoff08_el2", CPENC (3,4,C13,C9,0), 0),
5152 SR_V8_6 ("amevcntvoff09_el2", CPENC (3,4,C13,C9,1), 0),
5153 SR_V8_6 ("amevcntvoff010_el2", CPENC (3,4,C13,C9,2), 0),
5154 SR_V8_6 ("amevcntvoff011_el2", CPENC (3,4,C13,C9,3), 0),
5155 SR_V8_6 ("amevcntvoff012_el2", CPENC (3,4,C13,C9,4), 0),
5156 SR_V8_6 ("amevcntvoff013_el2", CPENC (3,4,C13,C9,5), 0),
5157 SR_V8_6 ("amevcntvoff014_el2", CPENC (3,4,C13,C9,6), 0),
5158 SR_V8_6 ("amevcntvoff015_el2", CPENC (3,4,C13,C9,7), 0),
5159 SR_V8_6 ("amevcntvoff10_el2", CPENC (3,4,C13,C10,0), 0),
5160 SR_V8_6 ("amevcntvoff11_el2", CPENC (3,4,C13,C10,1), 0),
5161 SR_V8_6 ("amevcntvoff12_el2", CPENC (3,4,C13,C10,2), 0),
5162 SR_V8_6 ("amevcntvoff13_el2", CPENC (3,4,C13,C10,3), 0),
5163 SR_V8_6 ("amevcntvoff14_el2", CPENC (3,4,C13,C10,4), 0),
5164 SR_V8_6 ("amevcntvoff15_el2", CPENC (3,4,C13,C10,5), 0),
5165 SR_V8_6 ("amevcntvoff16_el2", CPENC (3,4,C13,C10,6), 0),
5166 SR_V8_6 ("amevcntvoff17_el2", CPENC (3,4,C13,C10,7), 0),
5167 SR_V8_6 ("amevcntvoff18_el2", CPENC (3,4,C13,C11,0), 0),
5168 SR_V8_6 ("amevcntvoff19_el2", CPENC (3,4,C13,C11,1), 0),
5169 SR_V8_6 ("amevcntvoff110_el2", CPENC (3,4,C13,C11,2), 0),
5170 SR_V8_6 ("amevcntvoff111_el2", CPENC (3,4,C13,C11,3), 0),
5171 SR_V8_6 ("amevcntvoff112_el2", CPENC (3,4,C13,C11,4), 0),
5172 SR_V8_6 ("amevcntvoff113_el2", CPENC (3,4,C13,C11,5), 0),
5173 SR_V8_6 ("amevcntvoff114_el2", CPENC (3,4,C13,C11,6), 0),
5174 SR_V8_6 ("amevcntvoff115_el2", CPENC (3,4,C13,C11,7), 0),
5175 SR_V8_6 ("cntpoff_el2", CPENC (3,4,C14,C0,6), 0),
5176
5177 SR_V8_7 ("pmsnevfr_el1", CPENC (3,0,C9,C9,1), 0),
5178 SR_V8_7 ("hcrx_el2", CPENC (3,4,C1,C2,2), 0),
5179
5180 SR_V8_8 ("allint", CPENC (3,0,C4,C3,0), 0),
5181 SR_V8_8 ("icc_nmiar1_el1", CPENC (3,0,C12,C9,5), F_REG_READ),
5182
5183 { 0, CPENC (0,0,0,0,0), 0, 0 }
5184 };
5185
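/* Return TRUE if the system register described by REG_FLAGS is deprecated.  */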
5186 bool
5187 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
5188 {
5189 return (reg_flags & F_DEPRECATED) != 0;
5190 }
5191
5192 /* The CPENC below is fairly misleading: the fields
5193 here are not in CPENC form.  They are in op2op1 form.  The fields are encoded
5194 by ins_pstatefield, which just shifts the value by the width of the fields
5195 in a loop.  So if you CPENC them, only the first value will be set and the rest
5196 are masked out to 0.  For example, with op2 = 3 and op1 = 2, CPENC would produce
5197 a value of 0b110000000001000000 (0x30040) while what you want is
5198 0b011010 (0x1a). */
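/* For instance, MSR DAIFSet (op1 = 3, op2 = 6) is stored below simply as
   0x1e rather than as a CPENC value.  */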
5199 const aarch64_sys_reg aarch64_pstatefields [] =
5200 {
5201 SR_CORE ("spsel", 0x05, F_REG_MAX_VALUE (1)),
5202 SR_CORE ("daifset", 0x1e, F_REG_MAX_VALUE (15)),
5203 SR_CORE ("daifclr", 0x1f, F_REG_MAX_VALUE (15)),
5204 SR_PAN ("pan", 0x04, F_REG_MAX_VALUE (1)),
5205 SR_V8_2 ("uao", 0x03, F_REG_MAX_VALUE (1)),
5206 SR_SSBS ("ssbs", 0x19, F_REG_MAX_VALUE (1)),
5207 SR_V8_4 ("dit", 0x1a, F_REG_MAX_VALUE (1)),
5208 SR_MEMTAG ("tco", 0x1c, F_REG_MAX_VALUE (1)),
5209 SR_SME ("svcrsm", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x2,0x1)
5210 | F_REG_MAX_VALUE (1)),
5211 SR_SME ("svcrza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x4,0x1)
5212 | F_REG_MAX_VALUE (1)),
5213 SR_SME ("svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x6,0x1)
5214 | F_REG_MAX_VALUE (1)),
5215 SR_V8_8 ("allint", 0x08, F_REG_MAX_VALUE (1)),
5216 { 0, CPENC (0,0,0,0,0), 0, 0 },
5217 };
5218
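/* Return TRUE if the PSTATE field described by REG is supported when
   assembling for the feature set FEATURES.  Fields that are not tied to an
   architecture extension (no F_ARCHEXT) are always supported.  */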
5219 bool
5220 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
5221 const aarch64_sys_reg *reg)
5222 {
5223 if (!(reg->flags & F_ARCHEXT))
5224 return true;
5225
5226 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
5227 }
5228
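/* Operands for the IC (instruction cache maintenance) system instruction.  */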
5229 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
5230 {
5231 { "ialluis", CPENS(0,C7,C1,0), 0 },
5232 { "iallu", CPENS(0,C7,C5,0), 0 },
5233 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
5234 { 0, CPENS(0,0,0,0), 0 }
5235 };
5236
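/* Operands for the DC (data cache maintenance) system instruction.  */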
5237 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
5238 {
5239 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
5240 { "gva", CPENS (3, C7, C4, 3), F_HASXT | F_ARCHEXT },
5241 { "gzva", CPENS (3, C7, C4, 4), F_HASXT | F_ARCHEXT },
5242 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
5243 { "igvac", CPENS (0, C7, C6, 3), F_HASXT | F_ARCHEXT },
5244 { "igsw", CPENS (0, C7, C6, 4), F_HASXT | F_ARCHEXT },
5245 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
5246 { "igdvac", CPENS (0, C7, C6, 5), F_HASXT | F_ARCHEXT },
5247 { "igdsw", CPENS (0, C7, C6, 6), F_HASXT | F_ARCHEXT },
5248 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
5249 { "cgvac", CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
5250 { "cgdvac", CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
5251 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
5252 { "cgsw", CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
5253 { "cgdsw", CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
5254 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
5255 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
5256 { "cgvap", CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
5257 { "cgdvap", CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
5258 { "cvadp", CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
5259 { "cgvadp", CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
5260 { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
5261 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
5262 { "cigvac", CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
5263 { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
5264 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
5265 { "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
5266 { "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
5267 { "cipapa", CPENS (6, C7, C14, 1), F_HASXT },
5268 { "cigdpapa", CPENS (6, C7, C14, 5), F_HASXT },
5269 { 0, CPENS(0,0,0,0), 0 }
5270 };
5271
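/* Operands for the AT (address translation) system instruction.  */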
5272 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
5273 {
5274 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
5275 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
5276 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
5277 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
5278 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
5279 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
5280 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
5281 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
5282 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
5283 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
5284 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
5285 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
5286 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
5287 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
5288 { 0, CPENS(0,0,0,0), 0 }
5289 };
5290
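/* Operands for the TLBI (TLB invalidation) system instruction.  */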
5291 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
5292 {
5293 { "vmalle1", CPENS(0,C8,C7,0), 0 },
5294 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
5295 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
5296 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
5297 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
5298 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
5299 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
5300 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
5301 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
5302 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
5303 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
5304 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
5305 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
5306 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
5307 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
5308 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
5309 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
5310 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
5311 { "alle2", CPENS(4,C8,C7,0), 0 },
5312 { "alle2is", CPENS(4,C8,C3,0), 0 },
5313 { "alle1", CPENS(4,C8,C7,4), 0 },
5314 { "alle1is", CPENS(4,C8,C3,4), 0 },
5315 { "alle3", CPENS(6,C8,C7,0), 0 },
5316 { "alle3is", CPENS(6,C8,C3,0), 0 },
5317 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
5318 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
5319 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
5320 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
5321 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
5322 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
5323 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
5324 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
5325
5326 { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
5327 { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
5328 { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
5329 { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
5330 { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
5331 { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
5332 { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
5333 { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
5334 { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
5335 { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
5336 { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
5337 { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
5338 { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
5339 { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
5340 { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
5341 { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },
5342
5343 { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
5344 { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
5345 { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
5346 { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
5347 { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
5348 { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
5349 { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
5350 { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
5351 { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
5352 { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
5353 { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
5354 { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
5355 { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
5356 { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
5357 { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
5358 { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
5359 { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
5360 { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
5361 { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
5362 { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
5363 { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
5364 { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
5365 { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
5366 { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
5367 { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
5368 { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
5369 { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
5370 { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
5371 { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
5372 { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },
5373
5374 { "rpaos", CPENS (6, C8, C4, 3), F_HASXT },
5375 { "rpalos", CPENS (6, C8, C4, 7), F_HASXT },
5376 { "paallos", CPENS (6, C8, C1, 4), 0},
5377 { "paall", CPENS (6, C8, C7, 4), 0},
5378
5379 { 0, CPENS(0,0,0,0), 0 }
5380 };
5381
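/* Operands for the prediction restriction instructions (CFP/DVP/CPP RCTX).  */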
5382 const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
5383 {
5384 /* RCTX is somewhat unique in that it has a different op2 value
5385 depending on the instruction in which it is used (cfp/dvp/cpp).
5386 Thus op2 is masked out here and instead encoded directly in the
5387 aarch64_opcode_table entries for the respective instructions. */
5388 { "rctx", CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */
5389
5390 { 0, CPENS(0,0,0,0), 0 }
5391 };
5392
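/* Return TRUE if the system instruction operand SYS_INS_REG takes a
   general-purpose register argument <Xt>.  */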
5393 bool
5394 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
5395 {
5396 return (sys_ins_reg->flags & F_HASXT) != 0;
5397 }
5398
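/* Return TRUE if the system instruction operand named REG_NAME, with
   encoding REG_VALUE, flags REG_FLAGS and required features REG_FEATURES,
   is available when assembling for the feature set FEATURES.  */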
5399 extern bool
5400 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
5401 const char *reg_name,
5402 aarch64_insn reg_value,
5403 uint32_t reg_flags,
5404 aarch64_feature_set reg_features)
5405 {
5406 /* Armv8-R has no EL3. */
5407 if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_R))
5408 {
5409 const char *suffix = strrchr (reg_name, '_');
5410 if (suffix && !strcmp (suffix, "_el3"))
5411 return false;
5412 }
5413
5414 if (!(reg_flags & F_ARCHEXT))
5415 return true;
5416
5417 if (reg_features
5418 && AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
5419 return true;
5420
5421 /* ARMv8.4 TLB instructions. */
5422 if ((reg_value == CPENS (0, C8, C1, 0)
5423 || reg_value == CPENS (0, C8, C1, 1)
5424 || reg_value == CPENS (0, C8, C1, 2)
5425 || reg_value == CPENS (0, C8, C1, 3)
5426 || reg_value == CPENS (0, C8, C1, 5)
5427 || reg_value == CPENS (0, C8, C1, 7)
5428 || reg_value == CPENS (4, C8, C4, 0)
5429 || reg_value == CPENS (4, C8, C4, 4)
5430 || reg_value == CPENS (4, C8, C1, 1)
5431 || reg_value == CPENS (4, C8, C1, 5)
5432 || reg_value == CPENS (4, C8, C1, 6)
5433 || reg_value == CPENS (6, C8, C1, 1)
5434 || reg_value == CPENS (6, C8, C1, 5)
5435 || reg_value == CPENS (4, C8, C1, 0)
5436 || reg_value == CPENS (4, C8, C1, 4)
5437 || reg_value == CPENS (6, C8, C1, 0)
5438 || reg_value == CPENS (0, C8, C6, 1)
5439 || reg_value == CPENS (0, C8, C6, 3)
5440 || reg_value == CPENS (0, C8, C6, 5)
5441 || reg_value == CPENS (0, C8, C6, 7)
5442 || reg_value == CPENS (0, C8, C2, 1)
5443 || reg_value == CPENS (0, C8, C2, 3)
5444 || reg_value == CPENS (0, C8, C2, 5)
5445 || reg_value == CPENS (0, C8, C2, 7)
5446 || reg_value == CPENS (0, C8, C5, 1)
5447 || reg_value == CPENS (0, C8, C5, 3)
5448 || reg_value == CPENS (0, C8, C5, 5)
5449 || reg_value == CPENS (0, C8, C5, 7)
5450 || reg_value == CPENS (4, C8, C0, 2)
5451 || reg_value == CPENS (4, C8, C0, 6)
5452 || reg_value == CPENS (4, C8, C4, 2)
5453 || reg_value == CPENS (4, C8, C4, 6)
5454 || reg_value == CPENS (4, C8, C4, 3)
5455 || reg_value == CPENS (4, C8, C4, 7)
5456 || reg_value == CPENS (4, C8, C6, 1)
5457 || reg_value == CPENS (4, C8, C6, 5)
5458 || reg_value == CPENS (4, C8, C2, 1)
5459 || reg_value == CPENS (4, C8, C2, 5)
5460 || reg_value == CPENS (4, C8, C5, 1)
5461 || reg_value == CPENS (4, C8, C5, 5)
5462 || reg_value == CPENS (6, C8, C6, 1)
5463 || reg_value == CPENS (6, C8, C6, 5)
5464 || reg_value == CPENS (6, C8, C2, 1)
5465 || reg_value == CPENS (6, C8, C2, 5)
5466 || reg_value == CPENS (6, C8, C5, 1)
5467 || reg_value == CPENS (6, C8, C5, 5))
5468 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
5469 return true;
5470
5471 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
5472 if (reg_value == CPENS (3, C7, C12, 1)
5473 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
5474 return true;
5475
5476 /* DC CVADP. Values are from aarch64_sys_regs_dc. */
5477 if (reg_value == CPENS (3, C7, C13, 1)
5478 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
5479 return true;
5480
5481 /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension. */
5482 if ((reg_value == CPENS (0, C7, C6, 3)
5483 || reg_value == CPENS (0, C7, C6, 4)
5484 || reg_value == CPENS (0, C7, C10, 4)
5485 || reg_value == CPENS (0, C7, C14, 4)
5486 || reg_value == CPENS (3, C7, C10, 3)
5487 || reg_value == CPENS (3, C7, C12, 3)
5488 || reg_value == CPENS (3, C7, C13, 3)
5489 || reg_value == CPENS (3, C7, C14, 3)
5490 || reg_value == CPENS (3, C7, C4, 3)
5491 || reg_value == CPENS (0, C7, C6, 5)
5492 || reg_value == CPENS (0, C7, C6, 6)
5493 || reg_value == CPENS (0, C7, C10, 6)
5494 || reg_value == CPENS (0, C7, C14, 6)
5495 || reg_value == CPENS (3, C7, C10, 5)
5496 || reg_value == CPENS (3, C7, C12, 5)
5497 || reg_value == CPENS (3, C7, C13, 5)
5498 || reg_value == CPENS (3, C7, C14, 5)
5499 || reg_value == CPENS (3, C7, C4, 4))
5500 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
5501 return true;
5502
5503 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
5504 if ((reg_value == CPENS (0, C7, C9, 0)
5505 || reg_value == CPENS (0, C7, C9, 1))
5506 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
5507 return true;
5508
5509 /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
5510 if (reg_value == CPENS (3, C7, C3, 0)
5511 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
5512 return true;
5513
5514 return false;
5515 }
5516
5517 #undef C0
5518 #undef C1
5519 #undef C2
5520 #undef C3
5521 #undef C4
5522 #undef C5
5523 #undef C6
5524 #undef C7
5525 #undef C8
5526 #undef C9
5527 #undef C10
5528 #undef C11
5529 #undef C12
5530 #undef C13
5531 #undef C14
5532 #undef C15
5533
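/* BIT extracts bit BT of INSN; BITS extracts the inclusive bit-field
   [HI:LO] of INSN.  */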
5534 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
5535 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
5536
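/* Verify an LDPSW instruction.  With writeback (bit 23) the base register
   must differ from both transfer registers unless it is register 31 (SP);
   for a load (bit 22) the two transfer registers must be distinct.
   Otherwise the instruction is UNDEFINED.  */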
5537 static enum err_type
5538 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5539 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5540 bool encoding ATTRIBUTE_UNUSED,
5541 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5542 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5543 {
5544 int t = BITS (insn, 4, 0);
5545 int n = BITS (insn, 9, 5);
5546 int t2 = BITS (insn, 14, 10);
5547
5548 if (BIT (insn, 23))
5549 {
5550 /* Write back enabled. */
5551 if ((t == n || t2 == n) && n != 31)
5552 return ERR_UND;
5553 }
5554
5555 if (BIT (insn, 22))
5556 {
5557 /* Load */
5558 if (t == t2)
5559 return ERR_UND;
5560 }
5561
5562 return ERR_OK;
5563 }
5564
5565 /* Verifier for 3-operand vector-by-element instructions for which the
5566 condition `if sz:L == 11 then UNDEFINED` holds. */
5567
5568 static enum err_type
5569 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
5570 bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
5571 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5572 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5573 {
5574 const aarch64_insn undef_pattern = 0x3;
5575 aarch64_insn value;
5576
5577 assert (inst->opcode);
5578 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
5579 value = encoding ? inst->value : insn;
5580 assert (value);
5581
5582 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
5583 return ERR_UND;
5584
5585 return ERR_OK;
5586 }
5587
5588 /* Check an instruction that takes three register operands and that
5589 requires the register numbers to be distinct from one another. */
5590
5591 static enum err_type
5592 verify_three_different_regs (const struct aarch64_inst *inst,
5593 const aarch64_insn insn ATTRIBUTE_UNUSED,
5594 bfd_vma pc ATTRIBUTE_UNUSED,
5595 bool encoding ATTRIBUTE_UNUSED,
5596 aarch64_operand_error *mismatch_detail
5597 ATTRIBUTE_UNUSED,
5598 aarch64_instr_sequence *insn_sequence
5599 ATTRIBUTE_UNUSED)
5600 {
5601 int rd, rs, rn;
5602
5603 rd = inst->operands[0].reg.regno;
5604 rs = inst->operands[1].reg.regno;
5605 rn = inst->operands[2].reg.regno;
5606 if (rd == rs || rd == rn || rs == rn)
5607 {
5608 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5609 mismatch_detail->error
5610 = _("the three register operands must be distinct from one another");
5611 mismatch_detail->index = -1;
5612 return ERR_UND;
5613 }
5614
5615 return ERR_OK;
5616 }
5617
5618 /* Add INST to the end of INSN_SEQUENCE. */
5619
5620 static void
5621 add_insn_to_sequence (const struct aarch64_inst *inst,
5622 aarch64_instr_sequence *insn_sequence)
5623 {
5624 insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
5625 }
5626
5627 /* Initialize the instruction sequence INSN_SEQUENCE with the instruction INST.
5628 If INST is NULL the given INSN_SEQUENCE is simply cleared and left
5629 uninitialized. */
5630
5631 void
5632 init_insn_sequence (const struct aarch64_inst *inst,
5633 aarch64_instr_sequence *insn_sequence)
5634 {
5635 int num_req_entries = 0;
5636
5637 if (insn_sequence->instr)
5638 {
5639 XDELETE (insn_sequence->instr);
5640 insn_sequence->instr = NULL;
5641 }
5642
5643 /* Handle all the cases here. May need to think of something smarter than
5644 a giant if/else chain if this grows. At that time, a lookup table may be
5645 best. */
5646 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
5647 num_req_entries = 1;
5648 if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
5649 num_req_entries = 2;
5650
5651 insn_sequence->num_added_insns = 0;
5652 insn_sequence->num_allocated_insns = num_req_entries;
5653
5654 if (num_req_entries != 0)
5655 {
5656 insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
5657 add_insn_to_sequence (inst, insn_sequence);
5658 }
5659 }
5660
5661 /* Subroutine of verify_constraints. Check whether the instruction
5662 is part of a MOPS P/M/E sequence and, if so, whether sequencing
5663 expectations are met. Return true if the check passes, otherwise
5664 describe the problem in MISMATCH_DETAIL.
5665
5666 IS_NEW_SECTION is true if INST is assumed to start a new section.
5667 The other arguments are as for verify_constraints. */
5668
5669 static bool
5670 verify_mops_pme_sequence (const struct aarch64_inst *inst,
5671 bool is_new_section,
5672 aarch64_operand_error *mismatch_detail,
5673 aarch64_instr_sequence *insn_sequence)
5674 {
5675 const struct aarch64_opcode *opcode;
5676 const struct aarch64_inst *prev_insn;
5677 int i;
5678
5679 opcode = inst->opcode;
5680 if (insn_sequence->instr)
5681 prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
5682 else
5683 prev_insn = NULL;
5684
5685 if (prev_insn
5686 && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
5687 && prev_insn->opcode != opcode - 1)
5688 {
5689 mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
5690 mismatch_detail->error = NULL;
5691 mismatch_detail->index = -1;
5692 mismatch_detail->data[0].s = prev_insn->opcode[1].name;
5693 mismatch_detail->data[1].s = prev_insn->opcode->name;
5694 mismatch_detail->non_fatal = true;
5695 return false;
5696 }
5697
5698 if (opcode->constraints & C_SCAN_MOPS_PME)
5699 {
5700 if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
5701 {
5702 mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
5703 mismatch_detail->error = NULL;
5704 mismatch_detail->index = -1;
5705 mismatch_detail->data[0].s = opcode->name;
5706 mismatch_detail->data[1].s = opcode[-1].name;
5707 mismatch_detail->non_fatal = true;
5708 return false;
5709 }
5710
5711 for (i = 0; i < 3; ++i)
5712 /* There's no specific requirement for the data register to be
5713 the same between consecutive SET* instructions. */
5714 if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
5715 || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
5716 || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
5717 && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
5718 {
5719 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5720 if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
5721 mismatch_detail->error = _("destination register differs from "
5722 "preceding instruction");
5723 else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
5724 mismatch_detail->error = _("source register differs from "
5725 "preceding instruction");
5726 else
5727 mismatch_detail->error = _("size register differs from "
5728 "preceding instruction");
5729 mismatch_detail->index = i;
5730 mismatch_detail->non_fatal = true;
5731 return false;
5732 }
5733 }
5734
5735 return true;
5736 }
5737
5738 /* This function verifies that the instruction INST adheres to its specified
5739 constraints.  If it does, ERR_OK is returned; if not, ERR_VFI is
5740 returned and MISMATCH_DETAIL contains the reason why verification failed.
5741
5742 The function is called both during assembly and disassembly.  If assembling
5743 then ENCODING will be TRUE, else FALSE.  If disassembling, PC will be set
5744 and will contain the PC of the current instruction w.r.t. the section.
5745
5746 If disassembling and PC is 0 then you are at the start of a section.  The
5747 constraints are verified against the given state INSN_SEQUENCE, which is
5748 updated as it transitions through the verification. */
5749
5750 enum err_type
5751 verify_constraints (const struct aarch64_inst *inst,
5752 const aarch64_insn insn ATTRIBUTE_UNUSED,
5753 bfd_vma pc,
5754 bool encoding,
5755 aarch64_operand_error *mismatch_detail,
5756 aarch64_instr_sequence *insn_sequence)
5757 {
5758 assert (inst);
5759 assert (inst->opcode);
5760
5761 const struct aarch64_opcode *opcode = inst->opcode;
5762 if (!opcode->constraints && !insn_sequence->instr)
5763 return ERR_OK;
5764
5765 assert (insn_sequence);
5766
5767 enum err_type res = ERR_OK;
5768
5769 /* This instruction puts a constraint on the insn_sequence. */
5770 if (opcode->flags & F_SCAN)
5771 {
5772 if (insn_sequence->instr)
5773 {
5774 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5775 mismatch_detail->error = _("instruction opens new dependency "
5776 "sequence without ending previous one");
5777 mismatch_detail->index = -1;
5778 mismatch_detail->non_fatal = true;
5779 res = ERR_VFI;
5780 }
5781
5782 init_insn_sequence (inst, insn_sequence);
5783 return res;
5784 }
5785
5786 bool is_new_section = (!encoding && pc == 0);
5787 if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
5788 insn_sequence))
5789 {
5790 res = ERR_VFI;
5791 if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
5792 init_insn_sequence (NULL, insn_sequence);
5793 }
5794
5795 /* Verify constraints on an existing sequence. */
5796 if (insn_sequence->instr)
5797 {
5798 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
5799 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
5800 closed a previous one that we should have. */
5801 if (is_new_section && res == ERR_OK)
5802 {
5803 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5804 mismatch_detail->error = _("previous `movprfx' sequence not closed");
5805 mismatch_detail->index = -1;
5806 mismatch_detail->non_fatal = true;
5807 res = ERR_VFI;
5808 /* Reset the sequence. */
5809 init_insn_sequence (NULL, insn_sequence);
5810 return res;
5811 }
5812
5813 /* Validate C_SCAN_MOVPRFX constraints.  TODO: move this to a lookup table. */
5814 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
5815 {
5816 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5817 instruction for better error messages. */
5818 if (!opcode->avariant
5819 || !(*opcode->avariant &
5820 (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
5821 {
5822 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5823 mismatch_detail->error = _("SVE instruction expected after "
5824 "`movprfx'");
5825 mismatch_detail->index = -1;
5826 mismatch_detail->non_fatal = true;
5827 res = ERR_VFI;
5828 goto done;
5829 }
5830
5831 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5832 instruction that is allowed to be used with a MOVPRFX. */
5833 if (!(opcode->constraints & C_SCAN_MOVPRFX))
5834 {
5835 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5836 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
5837 "expected");
5838 mismatch_detail->index = -1;
5839 mismatch_detail->non_fatal = true;
5840 res = ERR_VFI;
5841 goto done;
5842 }
5843
5844 /* Next check for usage of the predicate register. */
5845 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
5846 aarch64_opnd_info blk_pred, inst_pred;
5847 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
5848 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
5849 bool predicated = false;
5850 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
5851
5852 /* Determine if the movprfx instruction used is predicated or not. */
5853 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
5854 {
5855 predicated = true;
5856 blk_pred = insn_sequence->instr->operands[1];
5857 }
5858
5859 unsigned char max_elem_size = 0;
5860 unsigned char current_elem_size;
5861 int num_op_used = 0, last_op_usage = 0;
5862 int i, inst_pred_idx = -1;
5863 int num_ops = aarch64_num_of_operands (opcode);
5864 for (i = 0; i < num_ops; i++)
5865 {
5866 aarch64_opnd_info inst_op = inst->operands[i];
5867 switch (inst_op.type)
5868 {
5869 case AARCH64_OPND_SVE_Zd:
5870 case AARCH64_OPND_SVE_Zm_5:
5871 case AARCH64_OPND_SVE_Zm_16:
5872 case AARCH64_OPND_SVE_Zn:
5873 case AARCH64_OPND_SVE_Zt:
5874 case AARCH64_OPND_SVE_Vm:
5875 case AARCH64_OPND_SVE_Vn:
5876 case AARCH64_OPND_Va:
5877 case AARCH64_OPND_Vn:
5878 case AARCH64_OPND_Vm:
5879 case AARCH64_OPND_Sn:
5880 case AARCH64_OPND_Sm:
5881 if (inst_op.reg.regno == blk_dest.reg.regno)
5882 {
5883 num_op_used++;
5884 last_op_usage = i;
5885 }
5886 current_elem_size
5887 = aarch64_get_qualifier_esize (inst_op.qualifier);
5888 if (current_elem_size > max_elem_size)
5889 max_elem_size = current_elem_size;
5890 break;
5891 case AARCH64_OPND_SVE_Pd:
5892 case AARCH64_OPND_SVE_Pg3:
5893 case AARCH64_OPND_SVE_Pg4_5:
5894 case AARCH64_OPND_SVE_Pg4_10:
5895 case AARCH64_OPND_SVE_Pg4_16:
5896 case AARCH64_OPND_SVE_Pm:
5897 case AARCH64_OPND_SVE_Pn:
5898 case AARCH64_OPND_SVE_Pt:
5899 case AARCH64_OPND_SME_Pm:
5900 inst_pred = inst_op;
5901 inst_pred_idx = i;
5902 break;
5903 default:
5904 break;
5905 }
5906 }
5907
5908 assert (max_elem_size != 0);
5909 aarch64_opnd_info inst_dest = inst->operands[0];
5910 /* Determine the size that should be used to compare against the
5911 movprfx size. */
5912 current_elem_size
5913 = opcode->constraints & C_MAX_ELEM
5914 ? max_elem_size
5915 : aarch64_get_qualifier_esize (inst_dest.qualifier);
5916
5917 /* If movprfx is predicated do some extra checks. */
5918 if (predicated)
5919 {
5920 /* The instruction must be predicated. */
5921 if (inst_pred_idx < 0)
5922 {
5923 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5924 mismatch_detail->error = _("predicated instruction expected "
5925 "after `movprfx'");
5926 mismatch_detail->index = -1;
5927 mismatch_detail->non_fatal = true;
5928 res = ERR_VFI;
5929 goto done;
5930 }
5931
5932 /* The instruction must have a merging predicate. */
5933 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
5934 {
5935 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5936 mismatch_detail->error = _("merging predicate expected due "
5937 "to preceding `movprfx'");
5938 mismatch_detail->index = inst_pred_idx;
5939 mismatch_detail->non_fatal = true;
5940 res = ERR_VFI;
5941 goto done;
5942 }
5943
5944 /* The same predicate register must be used in the instruction. */
5945 if (blk_pred.reg.regno != inst_pred.reg.regno)
5946 {
5947 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5948 mismatch_detail->error = _("predicate register differs "
5949 "from that in preceding "
5950 "`movprfx'");
5951 mismatch_detail->index = inst_pred_idx;
5952 mismatch_detail->non_fatal = true;
5953 res = ERR_VFI;
5954 goto done;
5955 }
5956 }
5957
5958 /* Destructive operations by definition must allow one usage of the
5959 same register. */
5960 int allowed_usage
5961 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
5962
5963 /* Operand is not used at all. */
5964 if (num_op_used == 0)
5965 {
5966 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5967 mismatch_detail->error = _("output register of preceding "
5968 "`movprfx' not used in current "
5969 "instruction");
5970 mismatch_detail->index = 0;
5971 mismatch_detail->non_fatal = true;
5972 res = ERR_VFI;
5973 goto done;
5974 }
5975
5976 /* We now know it's used, now determine exactly where it's used. */
5977 if (blk_dest.reg.regno != inst_dest.reg.regno)
5978 {
5979 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5980 mismatch_detail->error = _("output register of preceding "
5981 "`movprfx' expected as output");
5982 mismatch_detail->index = 0;
5983 mismatch_detail->non_fatal = true;
5984 res = ERR_VFI;
5985 goto done;
5986 }
5987
5988 /* Operand used more than allowed for the specific opcode type. */
5989 if (num_op_used > allowed_usage)
5990 {
5991 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5992 mismatch_detail->error = _("output register of preceding "
5993 "`movprfx' used as input");
5994 mismatch_detail->index = last_op_usage;
5995 mismatch_detail->non_fatal = true;
5996 res = ERR_VFI;
5997 goto done;
5998 }
5999
6000 /* Now the only thing left is the qualifier checks.  The register
6001 must have the same maximum element size. */
6002 if (inst_dest.qualifier
6003 && blk_dest.qualifier
6004 && current_elem_size
6005 != aarch64_get_qualifier_esize (blk_dest.qualifier))
6006 {
6007 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6008 mismatch_detail->error = _("register size not compatible with "
6009 "previous `movprfx'");
6010 mismatch_detail->index = 0;
6011 mismatch_detail->non_fatal = true;
6012 res = ERR_VFI;
6013 goto done;
6014 }
6015 }
6016
6017 done:
6018 if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
6019 /* We've checked the last instruction in the sequence and so
6020 don't need the sequence any more. */
6021 init_insn_sequence (NULL, insn_sequence);
6022 else
6023 add_insn_to_sequence (inst, insn_sequence);
6024 }
6025
6026 return res;
6027 }
6028
6029
6030 /* Return true if VALUE cannot be moved into an SVE register using DUP
6031 (with any element size, not just ESIZE) and if using DUPM would
6032 therefore be OK. ESIZE is the number of bytes in the immediate. */
6033
6034 bool
6035 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
6036 {
6037 int64_t svalue = uvalue;
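/* UPPER is a mask of the bits above the low ESIZE bytes.  The shift is
   split in two so that ESIZE == 8 does not shift by 64, which would be
   undefined behaviour.  */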
6038 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
6039
6040 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
6041 return false;
6042 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
6043 {
6044 svalue = (int32_t) uvalue;
6045 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
6046 {
6047 svalue = (int16_t) uvalue;
6048 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
6049 return false;
6050 }
6051 }
6052 if ((svalue & 0xff) == 0)
6053 svalue /= 256;
6054 return svalue < -128 || svalue >= 128;
6055 }
6056
6057 /* Include the opcode description table as well as the operand description
6058 table. */
6059 #define VERIFIER(x) verify_##x
6060 #include "aarch64-tbl.h"