aarch64: Add the SME2 ADD and SUB instructions
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2023 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
34 #ifdef DEBUG_AARCH64
35 int debug_dump = false;
36 #endif /* DEBUG_AARCH64 */
37
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
41 /* 0-7. */
42 "pow2",
43 "vl1",
44 "vl2",
45 "vl3",
46 "vl4",
47 "vl5",
48 "vl6",
49 "vl7",
50 /* 8-15. */
51 "vl8",
52 "vl16",
53 "vl32",
54 "vl64",
55 "vl128",
56 "vl256",
57 0,
58 0,
59 /* 16-23. */
60 0,
61 0,
62 0,
63 0,
64 0,
65 0,
66 0,
67 0,
68 /* 24-31. */
69 0,
70 0,
71 0,
72 0,
73 0,
74 "mul4",
75 "mul3",
76 "all"
77 };
78
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
82 /* 0-7. */
83 "pldl1keep",
84 "pldl1strm",
85 "pldl2keep",
86 "pldl2strm",
87 "pldl3keep",
88 "pldl3strm",
89 0,
90 0,
91 /* 8-15. */
92 "pstl1keep",
93 "pstl1strm",
94 "pstl2keep",
95 "pstl2strm",
96 "pstl3keep",
97 "pstl3strm",
98 0,
99 0
100 };
101
102 /* Vector length multiples for a predicate-as-counter operand. Used in things
103 like AARCH64_OPND_SME_VLxN_10. */
104 const char *const aarch64_sme_vlxn_array[2] = {
105 "vlx2",
106 "vlx4"
107 };
108
109 /* Helper functions to determine which operand is to be used to encode/decode
110    the size:Q fields for AdvSIMD instructions. */
111
112 static inline bool
113 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
114 {
115 return (qualifier >= AARCH64_OPND_QLF_V_8B
116 && qualifier <= AARCH64_OPND_QLF_V_1Q);
117 }
118
119 static inline bool
120 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
121 {
122 return (qualifier >= AARCH64_OPND_QLF_S_B
123 && qualifier <= AARCH64_OPND_QLF_S_Q);
124 }
125
126 enum data_pattern
127 {
128 DP_UNKNOWN,
129 DP_VECTOR_3SAME,
130 DP_VECTOR_LONG,
131 DP_VECTOR_WIDE,
132 DP_VECTOR_ACROSS_LANES,
133 };
134
135 static const char significant_operand_index [] =
136 {
137 0, /* DP_UNKNOWN, by default using operand 0. */
138 0, /* DP_VECTOR_3SAME */
139 1, /* DP_VECTOR_LONG */
140 2, /* DP_VECTOR_WIDE */
141 1, /* DP_VECTOR_ACROSS_LANES */
142 };
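/* As an illustration of the table above (the vector arrangements are
   illustrative only): with DP_VECTOR_LONG, e.g. <Vd>.8H, <Vn>.8B, <Vm>.8B,
   the size:Q fields are taken from operand 1, while with DP_VECTOR_WIDE,
   e.g. <Vd>.8H, <Vn>.8H, <Vm>.8B, they are taken from operand 2.  */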
143
144 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
145 the data pattern.
146    N.B. QUALIFIERS is a sequence of qualifiers, each of which corresponds
147    to one operand in a sequence of operands. */
148
149 static enum data_pattern
150 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
151 {
152 if (vector_qualifier_p (qualifiers[0]))
153 {
154 /* e.g. v.4s, v.4s, v.4s
155 or v.4h, v.4h, v.h[3]. */
156 if (qualifiers[0] == qualifiers[1]
157 && vector_qualifier_p (qualifiers[2])
158 && (aarch64_get_qualifier_esize (qualifiers[0])
159 == aarch64_get_qualifier_esize (qualifiers[1]))
160 && (aarch64_get_qualifier_esize (qualifiers[0])
161 == aarch64_get_qualifier_esize (qualifiers[2])))
162 return DP_VECTOR_3SAME;
163 /* e.g. v.8h, v.8b, v.8b.
164 or v.4s, v.4h, v.h[2].
165 or v.8h, v.16b. */
166 if (vector_qualifier_p (qualifiers[1])
167 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
168 && (aarch64_get_qualifier_esize (qualifiers[0])
169 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
170 return DP_VECTOR_LONG;
171 /* e.g. v.8h, v.8h, v.8b. */
172 if (qualifiers[0] == qualifiers[1]
173 && vector_qualifier_p (qualifiers[2])
174 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
175 && (aarch64_get_qualifier_esize (qualifiers[0])
176 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
177 && (aarch64_get_qualifier_esize (qualifiers[0])
178 == aarch64_get_qualifier_esize (qualifiers[1])))
179 return DP_VECTOR_WIDE;
180 }
181 else if (fp_qualifier_p (qualifiers[0]))
182 {
183 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
184 if (vector_qualifier_p (qualifiers[1])
185 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
186 return DP_VECTOR_ACROSS_LANES;
187 }
188
189 return DP_UNKNOWN;
190 }
191
192 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
193 the AdvSIMD instructions. */
194 /* N.B. it is possible to optimize so that get_data_pattern is not called
195    each time an operand needs to be selected.  We could either cache the
196    calculated result or generate the data statically; however, it is not
197    obvious that such an optimization would bring a significant benefit. */
199
200 int
201 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
202 {
203 return
204 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
205 }
206 \f
207 /* Instruction bit-fields.
208 + Keep synced with 'enum aarch64_field_kind'. */
209 const aarch64_field fields[] =
210 {
211 { 0, 0 }, /* NIL. */
212 { 8, 4 }, /* CRm: in the system instructions. */
213 { 10, 2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>. */
214 { 12, 4 }, /* CRn: in the system instructions. */
215 { 10, 8 }, /* CSSC_imm8. */
216 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
217 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
218 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
219 { 22, 1 }, /* N: in logical (immediate) instructions. */
220 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
221 { 10, 5 }, /* Ra: in fp instructions. */
222 { 0, 5 }, /* Rd: in many integer instructions. */
223 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
224 { 5, 5 }, /* Rn: in many integer instructions. */
225 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
226 { 0, 5 }, /* Rt: in load/store instructions. */
227 { 10, 5 }, /* Rt2: in load/store pair instructions. */
228 { 12, 1 }, /* S: in load/store reg offset instructions. */
229 { 12, 2 }, /* SM3_imm2: Indexed element SM3 2 bits index immediate. */
230 { 1, 3 }, /* SME_Pdx2: predicate register, multiple of 2, [3:1]. */
231 { 13, 3 }, /* SME_Pm: second source scalable predicate register P0-P7. */
232 { 0, 3 }, /* SME_PNd3: PN0-PN7, bits [2:0]. */
233 { 5, 3 }, /* SME_PNn3: PN0-PN7, bits [7:5]. */
234 { 16, 1 }, /* SME_Q: Q class bit, bit 16. */
235 { 16, 2 }, /* SME_Rm: index base register W12-W15 [17:16]. */
236 { 13, 2 }, /* SME_Rv: vector select register W12-W15, bits [14:13]. */
237 { 15, 1 }, /* SME_V: (horizontal / vertical tiles), bit 15. */
238 { 10, 1 }, /* SME_VL_10: VLx2 or VLx4, bit [10]. */
239 { 13, 1 }, /* SME_VL_13: VLx2 or VLx4, bit [13]. */
240 { 0, 2 }, /* SME_ZAda_2b: tile ZA0-ZA3. */
241 { 0, 3 }, /* SME_ZAda_3b: tile ZA0-ZA7. */
242 { 1, 4 }, /* SME_Zdn2: Z0-Z31, multiple of 2, bits [4:1]. */
243 { 2, 3 }, /* SME_Zdn4: Z0-Z31, multiple of 4, bits [4:2]. */
244 { 16, 4 }, /* SME_Zm: Z0-Z15, bits [19:16]. */
245 { 17, 4 }, /* SME_Zm2: Z0-Z31, multiple of 2, bits [20:17]. */
246 { 18, 3 }, /* SME_Zm4: Z0-Z31, multiple of 4, bits [20:18]. */
247 { 6, 4 }, /* SME_Zn2: Z0-Z31, multiple of 2, bits [9:6]. */
248 { 7, 3 }, /* SME_Zn4: Z0-Z31, multiple of 4, bits [9:7]. */
249 { 4, 1 }, /* SME_ZtT: upper bit of Zt, bit [4]. */
250 { 0, 3 }, /* SME_Zt3: lower 3 bits of Zt, bits [2:0]. */
251 { 0, 2 }, /* SME_Zt2: lower 2 bits of Zt, bits [1:0]. */
252 { 23, 1 }, /* SME_i1: immediate field, bit 23. */
253 { 12, 2 }, /* SME_size_12: bits [13:12]. */
254 { 22, 2 }, /* SME_size_22: size<1>, size<0> class field, [23:22]. */
255 { 22, 1 }, /* SME_tszh: immediate and qualifier field, bit 22. */
256 { 18, 3 }, /* SME_tszl: immediate and qualifier field, bits [20:18]. */
257 { 0, 8 }, /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0]. */
258 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
259 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
260 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
261 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
262 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
263 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
264 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
265 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
266 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
267 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
268 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
269 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
270 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
271 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
272 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
273 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
274 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
275 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
276 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
277 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
278 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
279 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
280 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
281 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
282 { 5, 1 }, /* SVE_i1: single-bit immediate. */
283     { 20, 1 }, /* SVE_i2h: high bit of 2-bit immediate. */
284 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
285     { 19, 2 }, /* SVE_i3h2: two high bits of 3-bit immediate, bits [20,19]. */
286 { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
287 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
288 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
289 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
290 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
291 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
292 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
293 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
294 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
295 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
296 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
297 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
298 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
299 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
300 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
301 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
302 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
303 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
304 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
305 { 30, 1 }, /* SVE_sz2: 1-bit element size select. */
306 { 16, 4 }, /* SVE_tsz: triangular size select. */
307 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
308 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
309 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
310 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
311 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
312 { 22, 1 }, /* S_imm10: in LDRAA and LDRAB instructions. */
313 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
314 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
315 { 19, 5 }, /* b40: in the test bit and branch instructions. */
316 { 31, 1 }, /* b5: in the test bit and branch instructions. */
317 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
318 { 12, 4 }, /* cond: condition flags as a source operand. */
319 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
320 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
321 { 21, 2 }, /* hw: in move wide constant instructions. */
322 { 8, 1 }, /* imm1_8: general immediate in bits [8]. */
323 { 16, 1 }, /* imm1_16: general immediate in bits [16]. */
324 { 8, 2 }, /* imm2_8: general immediate in bits [9:8]. */
325 { 15, 2 }, /* imm2_15: 2-bit immediate, bits [16:15] */
326 { 16, 2 }, /* imm2_16: 2-bit immediate, bits [17:16] */
327 { 0, 3 }, /* imm3_0: general immediate in bits [2:0]. */
328 { 5, 3 }, /* imm3_5: general immediate in bits [7:5]. */
329 { 10, 3 }, /* imm3_10: in add/sub extended reg instructions. */
330 { 12, 3 }, /* imm3_12: general immediate in bits [14:12]. */
331 { 14, 3 }, /* imm3_14: general immediate in bits [16:14]. */
332 { 15, 3 }, /* imm3_15: general immediate in bits [17:15]. */
333 { 0, 4 }, /* imm4_0: in rmif instructions. */
334 { 5, 4 }, /* imm4_5: in SME instructions. */
335     { 10, 4 }, /* imm4_10: in addg/subg instructions. */
336 { 11, 4 }, /* imm4_11: in advsimd ext and advsimd ins instructions. */
337 { 14, 4 }, /* imm4_14: general immediate in bits [17:14]. */
338 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
339 { 10, 6 }, /* imm6_10: in add/sub reg shifted instructions. */
340 { 15, 6 }, /* imm6_15: in rmif instructions. */
341 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
342 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
343 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
344 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
345 { 5, 14 }, /* imm14: in test bit and branch instructions. */
346 { 0, 16 }, /* imm16_0: in udf instruction. */
347 { 5, 16 }, /* imm16_5: in exception instructions. */
348 { 5, 19 }, /* imm19: e.g. in CBZ. */
349 { 0, 26 }, /* imm26: in unconditional branch instructions. */
350 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
351 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
352 { 5, 19 }, /* immhi: e.g. in ADRP. */
353 { 29, 2 }, /* immlo: e.g. in ADRP. */
354 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
355 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
356 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
357 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
358 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
359 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
360 { 30, 1 }, /* lse_sz: in LSE extension atomic instructions. */
361 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
362 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
363 { 19, 2 }, /* op0: in the system instructions. */
364 { 16, 3 }, /* op1: in the system instructions. */
365 { 5, 3 }, /* op2: in the system instructions. */
366 { 22, 2 }, /* opc: in load/store reg offset instructions. */
367 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
368 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
369 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
370 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
371 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
372 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
373 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
374 { 31, 1 }, /* sf: in integer data processing instructions. */
375 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
376 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
377 { 22, 1 }, /* sz: 1-bit element size select. */
378 { 22, 2 }, /* type: floating point type field in fp data inst. */
379 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
380 };
381
382 enum aarch64_operand_class
383 aarch64_get_operand_class (enum aarch64_opnd type)
384 {
385 return aarch64_operands[type].op_class;
386 }
387
388 const char *
389 aarch64_get_operand_name (enum aarch64_opnd type)
390 {
391 return aarch64_operands[type].name;
392 }
393
394 /* Get the operand description string.
395    This is usually used for diagnostic purposes. */
396 const char *
397 aarch64_get_operand_desc (enum aarch64_opnd type)
398 {
399 return aarch64_operands[type].desc;
400 }
401
402 /* Table of all conditional affixes. */
403 const aarch64_cond aarch64_conds[16] =
404 {
405 {{"eq", "none"}, 0x0},
406 {{"ne", "any"}, 0x1},
407 {{"cs", "hs", "nlast"}, 0x2},
408 {{"cc", "lo", "ul", "last"}, 0x3},
409 {{"mi", "first"}, 0x4},
410 {{"pl", "nfrst"}, 0x5},
411 {{"vs"}, 0x6},
412 {{"vc"}, 0x7},
413 {{"hi", "pmore"}, 0x8},
414 {{"ls", "plast"}, 0x9},
415 {{"ge", "tcont"}, 0xa},
416 {{"lt", "tstop"}, 0xb},
417 {{"gt"}, 0xc},
418 {{"le"}, 0xd},
419 {{"al"}, 0xe},
420 {{"nv"}, 0xf},
421 };
422
423 const aarch64_cond *
424 get_cond_from_value (aarch64_insn value)
425 {
426 assert (value < 16);
427 return &aarch64_conds[(unsigned int) value];
428 }
429
430 const aarch64_cond *
431 get_inverted_cond (const aarch64_cond *cond)
432 {
433 return &aarch64_conds[cond->value ^ 0x1];
434 }
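/* For example, inverting "eq" (0x0) yields "ne" (0x1), and inverting "hi"
   (0x8) yields "ls" (0x9), per the aarch64_conds table above.  */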
435
436 /* Table describing the operand extension/shifting operators; indexed by
437 enum aarch64_modifier_kind.
438
439 The value column provides the most common values for encoding modifiers,
440 which enables table-driven encoding/decoding for the modifiers. */
441 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
442 {
443 {"none", 0x0},
444 {"msl", 0x0},
445 {"ror", 0x3},
446 {"asr", 0x2},
447 {"lsr", 0x1},
448 {"lsl", 0x0},
449 {"uxtb", 0x0},
450 {"uxth", 0x1},
451 {"uxtw", 0x2},
452 {"uxtx", 0x3},
453 {"sxtb", 0x4},
454 {"sxth", 0x5},
455 {"sxtw", 0x6},
456 {"sxtx", 0x7},
457 {"mul", 0x0},
458 {"mul vl", 0x0},
459 {NULL, 0},
460 };
461
462 enum aarch64_modifier_kind
463 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
464 {
465 return desc - aarch64_operand_modifiers;
466 }
467
468 aarch64_insn
469 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
470 {
471 return aarch64_operand_modifiers[kind].value;
472 }
473
474 enum aarch64_modifier_kind
475 aarch64_get_operand_modifier_from_value (aarch64_insn value,
476 bool extend_p)
477 {
478 if (extend_p)
479 return AARCH64_MOD_UXTB + value;
480 else
481 return AARCH64_MOD_LSL - value;
482 }
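/* As an illustration (relying on aarch64_operand_modifiers above being
   indexed by enum aarch64_modifier_kind): with EXTEND_P set, a VALUE of 2
   maps to AARCH64_MOD_UXTW; with EXTEND_P clear, a VALUE of 1 maps to
   AARCH64_MOD_LSR.  */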
483
484 bool
485 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
486 {
487 return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
488 }
489
490 static inline bool
491 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
492 {
493 return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
494 }
495
496 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
497 {
498 { "#0x00", 0x0 },
499 { "oshld", 0x1 },
500 { "oshst", 0x2 },
501 { "osh", 0x3 },
502 { "#0x04", 0x4 },
503 { "nshld", 0x5 },
504 { "nshst", 0x6 },
505 { "nsh", 0x7 },
506 { "#0x08", 0x8 },
507 { "ishld", 0x9 },
508 { "ishst", 0xa },
509 { "ish", 0xb },
510 { "#0x0c", 0xc },
511 { "ld", 0xd },
512 { "st", 0xe },
513 { "sy", 0xf },
514 };
515
516 const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
517 { /* CRm<3:2> #imm */
518 { "oshnxs", 16 }, /* 00 16 */
519 { "nshnxs", 20 }, /* 01 20 */
520 { "ishnxs", 24 }, /* 10 24 */
521 { "synxs", 28 }, /* 11 28 */
522 };
523
524 /* Table describing the operands supported by the aliases of the HINT
525 instruction.
526
527 The name column is the operand that is accepted for the alias. The value
528 column is the hint number of the alias. The list of operands is terminated
529 by NULL in the name column. */
530
531 const struct aarch64_name_value_pair aarch64_hint_options[] =
532 {
533 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
534 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
535 { "csync", HINT_OPD_CSYNC }, /* PSB CSYNC. */
536 { "c", HINT_OPD_C }, /* BTI C. */
537 { "j", HINT_OPD_J }, /* BTI J. */
538 { "jc", HINT_OPD_JC }, /* BTI JC. */
539 { NULL, HINT_OPD_NULL },
540 };
541
542 /* op -> op: load = 0 instruction = 1 store = 2
543 l -> level: 1-3
544 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
545 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
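/* For example, B (0, 1, 0) is 0x00 ("pldl1keep") and B (2, 3, 1) is 0x15
   ("pstl3strm").  */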
546 const struct aarch64_name_value_pair aarch64_prfops[32] =
547 {
548 { "pldl1keep", B(0, 1, 0) },
549 { "pldl1strm", B(0, 1, 1) },
550 { "pldl2keep", B(0, 2, 0) },
551 { "pldl2strm", B(0, 2, 1) },
552 { "pldl3keep", B(0, 3, 0) },
553 { "pldl3strm", B(0, 3, 1) },
554 { NULL, 0x06 },
555 { NULL, 0x07 },
556 { "plil1keep", B(1, 1, 0) },
557 { "plil1strm", B(1, 1, 1) },
558 { "plil2keep", B(1, 2, 0) },
559 { "plil2strm", B(1, 2, 1) },
560 { "plil3keep", B(1, 3, 0) },
561 { "plil3strm", B(1, 3, 1) },
562 { NULL, 0x0e },
563 { NULL, 0x0f },
564 { "pstl1keep", B(2, 1, 0) },
565 { "pstl1strm", B(2, 1, 1) },
566 { "pstl2keep", B(2, 2, 0) },
567 { "pstl2strm", B(2, 2, 1) },
568 { "pstl3keep", B(2, 3, 0) },
569 { "pstl3strm", B(2, 3, 1) },
570 { NULL, 0x16 },
571 { NULL, 0x17 },
572 { NULL, 0x18 },
573 { NULL, 0x19 },
574 { NULL, 0x1a },
575 { NULL, 0x1b },
576 { NULL, 0x1c },
577 { NULL, 0x1d },
578 { NULL, 0x1e },
579 { NULL, 0x1f },
580 };
581 #undef B
582 \f
583 /* Utilities on value constraint. */
584
585 static inline int
586 value_in_range_p (int64_t value, int low, int high)
587 {
588 return (value >= low && value <= high) ? 1 : 0;
589 }
590
591 /* Return true if VALUE is a multiple of ALIGN. */
592 static inline int
593 value_aligned_p (int64_t value, int align)
594 {
595 return (value % align) == 0;
596 }
597
598 /* Return non-zero if a signed VALUE fits in a field of WIDTH bits. */
599 static inline int
600 value_fit_signed_field_p (int64_t value, unsigned width)
601 {
602 assert (width < 32);
603 if (width < sizeof (value) * 8)
604 {
605 int64_t lim = (uint64_t) 1 << (width - 1);
606 if (value >= -lim && value < lim)
607 return 1;
608 }
609 return 0;
610 }
611
612 /* Return non-zero if an unsigned VALUE fits in a field of WIDTH bits. */
613 static inline int
614 value_fit_unsigned_field_p (int64_t value, unsigned width)
615 {
616 assert (width < 32);
617 if (width < sizeof (value) * 8)
618 {
619 int64_t lim = (uint64_t) 1 << width;
620 if (value >= 0 && value < lim)
621 return 1;
622 }
623 return 0;
624 }
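/* For example, a 9-bit field holds signed values in [-256, 255] and
   unsigned values in [0, 511].  */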
625
626 /* Return 1 if OPERAND is SP or WSP. */
627 int
628 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
629 {
630 return ((aarch64_get_operand_class (operand->type)
631 == AARCH64_OPND_CLASS_INT_REG)
632 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
633 && operand->reg.regno == 31);
634 }
635
636 /* Return 1 if OPERAND is XZR or WZR. */
637 int
638 aarch64_zero_register_p (const aarch64_opnd_info *operand)
639 {
640 return ((aarch64_get_operand_class (operand->type)
641 == AARCH64_OPND_CLASS_INT_REG)
642 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
643 && operand->reg.regno == 31);
644 }
645
646 /* Return non-zero if the operand *OPERAND, which has the operand code
647    OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
648    qualified by the qualifier TARGET. */
649
650 static inline int
651 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
652 aarch64_opnd_qualifier_t target)
653 {
654 switch (operand->qualifier)
655 {
656 case AARCH64_OPND_QLF_W:
657 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
658 return 1;
659 break;
660 case AARCH64_OPND_QLF_X:
661 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
662 return 1;
663 break;
664 case AARCH64_OPND_QLF_WSP:
665 if (target == AARCH64_OPND_QLF_W
666 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
667 return 1;
668 break;
669 case AARCH64_OPND_QLF_SP:
670 if (target == AARCH64_OPND_QLF_X
671 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
672 return 1;
673 break;
674 default:
675 break;
676 }
677
678 return 0;
679 }
680
681 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
682 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
683
684    Return NIL if more than one expected qualifier is found. */
685
686 aarch64_opnd_qualifier_t
687 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
688 int idx,
689 const aarch64_opnd_qualifier_t known_qlf,
690 int known_idx)
691 {
692 int i, saved_i;
693
694 /* Special case.
695
696 When the known qualifier is NIL, we have to assume that there is only
697 one qualifier sequence in the *QSEQ_LIST and return the corresponding
698 qualifier directly. One scenario is that for instruction
699 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
700 which has only one possible valid qualifier sequence
701 NIL, S_D
702 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
703 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
704
705 Because the qualifier NIL has dual roles in the qualifier sequence:
706    it can mean no qualifier for the operand, or that the qualifier sequence is
707 not in use (when all qualifiers in the sequence are NILs), we have to
708 handle this special case here. */
709 if (known_qlf == AARCH64_OPND_NIL)
710 {
711 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
712 return qseq_list[0][idx];
713 }
714
715 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
716 {
717 if (qseq_list[i][known_idx] == known_qlf)
718 {
719 if (saved_i != -1)
720 	    /* More than one sequence is found to have KNOWN_QLF at
721 	       KNOWN_IDX. */
722 return AARCH64_OPND_NIL;
723 saved_i = i;
724 }
725 }
726
727 return qseq_list[saved_i][idx];
728 }
729
730 enum operand_qualifier_kind
731 {
732 OQK_NIL,
733 OQK_OPD_VARIANT,
734 OQK_VALUE_IN_RANGE,
735 OQK_MISC,
736 };
737
738 /* Operand qualifier description. */
739 struct operand_qualifier_data
740 {
741 /* The usage of the three data fields depends on the qualifier kind. */
742 int data0;
743 int data1;
744 int data2;
745 /* Description. */
746 const char *desc;
747 /* Kind. */
748 enum operand_qualifier_kind kind;
749 };
750
751 /* Indexed by the operand qualifier enumerators. */
752 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
753 {
754 {0, 0, 0, "NIL", OQK_NIL},
755
756 /* Operand variant qualifiers.
757 First 3 fields:
758 element size, number of elements and common value for encoding. */
759
760 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
761 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
762 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
763 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
764
765 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
766 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
767 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
768 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
769 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
770 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
771 {4, 1, 0x0, "2h", OQK_OPD_VARIANT},
772
773 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
774 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
775 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
776 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
777 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
778 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
779 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
780 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
781 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
782 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
783 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
784
785 {0, 0, 0, "z", OQK_OPD_VARIANT},
786 {0, 0, 0, "m", OQK_OPD_VARIANT},
787
788 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
789 {16, 0, 0, "tag", OQK_OPD_VARIANT},
790
791 /* Qualifiers constraining the value range.
792 First 3 fields:
793 Lower bound, higher bound, unused. */
794
795 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
796 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
797 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
798 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
799 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
800 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
801 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
802
803 /* Qualifiers for miscellaneous purpose.
804 First 3 fields:
805 unused, unused and unused. */
806
807 {0, 0, 0, "lsl", 0},
808 {0, 0, 0, "msl", 0},
809
810 {0, 0, 0, "retrieving", 0},
811 };
812
813 static inline bool
814 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
815 {
816 return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
817 }
818
819 static inline bool
820 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
821 {
822 return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
823 }
824
825 const char*
826 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
827 {
828 return aarch64_opnd_qualifiers[qualifier].desc;
829 }
830
831 /* Given an operand qualifier, return the expected data element size
832 of a qualified operand. */
833 unsigned char
834 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
835 {
836 assert (operand_variant_qualifier_p (qualifier));
837 return aarch64_opnd_qualifiers[qualifier].data0;
838 }
839
840 unsigned char
841 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
842 {
843 assert (operand_variant_qualifier_p (qualifier));
844 return aarch64_opnd_qualifiers[qualifier].data1;
845 }
846
847 aarch64_insn
848 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
849 {
850 assert (operand_variant_qualifier_p (qualifier));
851 return aarch64_opnd_qualifiers[qualifier].data2;
852 }
853
854 static int
855 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
856 {
857 assert (qualifier_value_in_range_constraint_p (qualifier));
858 return aarch64_opnd_qualifiers[qualifier].data0;
859 }
860
861 static int
862 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
863 {
864 assert (qualifier_value_in_range_constraint_p (qualifier));
865 return aarch64_opnd_qualifiers[qualifier].data1;
866 }
867
868 #ifdef DEBUG_AARCH64
869 void
870 aarch64_verbose (const char *str, ...)
871 {
872 va_list ap;
873 va_start (ap, str);
874 printf ("#### ");
875 vprintf (str, ap);
876 printf ("\n");
877 va_end (ap);
878 }
879
880 static inline void
881 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
882 {
883 int i;
884 printf ("#### \t");
885 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
886 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
887 printf ("\n");
888 }
889
890 static void
891 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
892 const aarch64_opnd_qualifier_t *qualifier)
893 {
894 int i;
895 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
896
897 aarch64_verbose ("dump_match_qualifiers:");
898 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
899 curr[i] = opnd[i].qualifier;
900 dump_qualifier_sequence (curr);
901 aarch64_verbose ("against");
902 dump_qualifier_sequence (qualifier);
903 }
904 #endif /* DEBUG_AARCH64 */
905
906 /* Check whether the instruction described by OPCODE is destructive, based
907    on how its register operands are used.  It does not recognize unary
908    destructive instructions. */
909 bool
910 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
911 {
912 int i = 0;
913 const enum aarch64_opnd *opnds = opcode->operands;
914
915 if (opnds[0] == AARCH64_OPND_NIL)
916 return false;
917
918 while (opnds[++i] != AARCH64_OPND_NIL)
919 if (opnds[i] == opnds[0])
920 return true;
921
922 return false;
923 }
924
925 /* TODO: improve this; we could store the number of operands in an extra
926    field rather than calculating it every time. */
927
928 int
929 aarch64_num_of_operands (const aarch64_opcode *opcode)
930 {
931 int i = 0;
932 const enum aarch64_opnd *opnds = opcode->operands;
933 while (opnds[i++] != AARCH64_OPND_NIL)
934 ;
935 --i;
936 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
937 return i;
938 }
939
940 /* Find the best-matching qualifier sequence in *QUALIFIERS_LIST for INST.
941    If one is found, fill the sequence in *RET and return 1; otherwise return 0.
942
943 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
944 This is always 0 if the function succeeds.
945
946    N.B. on entry, it is very likely that only some operands in *INST
947    have had their qualifiers established.
948
949    If STOP_AT is not -1, the function will only try to match the
950    qualifier sequence for operands up to and including the operand at
951    index STOP_AT; on success, *RET will only be filled with the first
952    (STOP_AT+1) qualifiers.
953
954 A couple examples of the matching algorithm:
955
956 X,W,NIL should match
957 X,W,NIL
958
959 NIL,NIL should match
960 X ,NIL
961
962 Apart from serving the main encoding routine, this can also be called
963 during or after the operand decoding. */
964
965 int
966 aarch64_find_best_match (const aarch64_inst *inst,
967 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
968 int stop_at, aarch64_opnd_qualifier_t *ret,
969 int *invalid_count)
970 {
971 int i, num_opnds, invalid, min_invalid;
972 const aarch64_opnd_qualifier_t *qualifiers;
973
974 num_opnds = aarch64_num_of_operands (inst->opcode);
975 if (num_opnds == 0)
976 {
977 DEBUG_TRACE ("SUCCEED: no operand");
978 *invalid_count = 0;
979 return 1;
980 }
981
982 if (stop_at < 0 || stop_at >= num_opnds)
983 stop_at = num_opnds - 1;
984
985 /* For each pattern. */
986 min_invalid = num_opnds;
987 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
988 {
989 int j;
990 qualifiers = *qualifiers_list;
991
992 /* Start as positive. */
993 invalid = 0;
994
995 DEBUG_TRACE ("%d", i);
996 #ifdef DEBUG_AARCH64
997 if (debug_dump)
998 dump_match_qualifiers (inst->operands, qualifiers);
999 #endif
1000
1001 /* The first entry should be taken literally, even if it's an empty
1002 qualifier sequence. (This matters for strict testing.) In other
1003 positions an empty sequence acts as a terminator. */
1004 if (i > 0 && empty_qualifier_sequence_p (qualifiers))
1005 break;
1006
1007 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
1008 {
1009 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL
1010 && !(inst->opcode->flags & F_STRICT))
1011 {
1012 	      /* Either the operand does not have a qualifier, or the qualifier
1013 	         for the operand needs to be deduced from the qualifier
1014 	         sequence.
1015 	         In the latter case, any constraint checking related to
1016 	         the obtained qualifier should be done later in
1017 	         operand_general_constraint_met_p. */
1018 continue;
1019 }
1020 else if (*qualifiers != inst->operands[j].qualifier)
1021 {
1022 	      /* Unless the target qualifier can also qualify the operand
1023 	         (which already has a non-nil qualifier), non-equal
1024 	         qualifiers are generally not matched. */
1025 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
1026 continue;
1027 else
1028 invalid += 1;
1029 }
1030 else
1031 continue; /* Equal qualifiers are certainly matched. */
1032 }
1033
1034 if (min_invalid > invalid)
1035 min_invalid = invalid;
1036
1037 /* Qualifiers established. */
1038 if (min_invalid == 0)
1039 break;
1040 }
1041
1042 *invalid_count = min_invalid;
1043 if (min_invalid == 0)
1044 {
1045 /* Fill the result in *RET. */
1046 int j;
1047 qualifiers = *qualifiers_list;
1048
1049 DEBUG_TRACE ("complete qualifiers using list %d", i);
1050 #ifdef DEBUG_AARCH64
1051 if (debug_dump)
1052 dump_qualifier_sequence (qualifiers);
1053 #endif
1054
1055 for (j = 0; j <= stop_at; ++j, ++qualifiers)
1056 ret[j] = *qualifiers;
1057 for (; j < AARCH64_MAX_OPND_NUM; ++j)
1058 ret[j] = AARCH64_OPND_QLF_NIL;
1059
1060 DEBUG_TRACE ("SUCCESS");
1061 return 1;
1062 }
1063
1064 DEBUG_TRACE ("FAIL");
1065 return 0;
1066 }
1067
1068 /* Operand qualifier matching and resolving.
1069
1070 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1071 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1072
1073 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
1074 This is always 0 if the function succeeds.
1075
1076    If UPDATE_P is set, update the qualifier(s) in *INST after the matching
1077    succeeds. */
1078
1079 static int
1080 match_operands_qualifier (aarch64_inst *inst, bool update_p,
1081 int *invalid_count)
1082 {
1083 int i;
1084 aarch64_opnd_qualifier_seq_t qualifiers;
1085
1086 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1087 qualifiers, invalid_count))
1088 {
1089 DEBUG_TRACE ("matching FAIL");
1090 return 0;
1091 }
1092
1093 /* Update the qualifiers. */
1094 if (update_p)
1095 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1096 {
1097 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1098 break;
1099 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1100 "update %s with %s for operand %d",
1101 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1102 aarch64_get_qualifier_name (qualifiers[i]), i);
1103 inst->operands[i].qualifier = qualifiers[i];
1104 }
1105
1106 DEBUG_TRACE ("matching SUCCESS");
1107 return 1;
1108 }
1109
1110 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1111 register by MOVZ.
1112
1113    IS32 indicates whether VALUE is a 32-bit immediate or not.
1114    If SHIFT_AMOUNT is not NULL and TRUE is returned, the logical left shift
1115    amount is returned in *SHIFT_AMOUNT. */
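/* As an illustration: 0x00000000ffff0000 is accepted with a shift amount of
   16 and (for !IS32) 0xffff000000000000 with a shift amount of 48, while
   0x0000000012345678 is rejected because its set bits span more than one
   16-bit chunk.  */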
1116
1117 bool
1118 aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1119 {
1120 int amount;
1121
1122 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1123
1124 if (is32)
1125 {
1126       /* Allow all zeros or all ones in the top 32 bits, so that
1127 32-bit constant expressions like ~0x80000000 are
1128 permitted. */
1129 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1130 /* Immediate out of range. */
1131 return false;
1132 value &= 0xffffffff;
1133 }
1134
1135   /* Check for a single 16-bit chunk at one of the MOVZ shift positions. */
1136 amount = -1;
1137 if ((value & ((uint64_t) 0xffff << 0)) == value)
1138 amount = 0;
1139 else if ((value & ((uint64_t) 0xffff << 16)) == value)
1140 amount = 16;
1141 else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1142 amount = 32;
1143 else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1144 amount = 48;
1145
1146 if (amount == -1)
1147 {
1148 DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1149 return false;
1150 }
1151
1152 if (shift_amount != NULL)
1153 *shift_amount = amount;
1154
1155 DEBUG_TRACE ("exit true with amount %d", amount);
1156
1157 return true;
1158 }
1159
1160 /* Build the accepted values for immediate logical SIMD instructions.
1161
1162 The standard encodings of the immediate value are:
1163 N imms immr SIMD size R S
1164 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1165 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1166 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1167 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1168 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1169 0 11110s 00000r 2 UInt(r) UInt(s)
1170    where the all-ones value of S is reserved.
1171 
1172    Let's call E the SIMD size.
1173 
1174    The immediate value within each element is S+1 '1' bits rotated right
1175    by R; the element is then replicated to fill 64 bits.
1176    The total number of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1177    (remember S != E - 1). */
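/* A worked example: with E = 16, S = 7 and R = 0, the element value is
   0x00ff; replicated across 64 bits this gives the bitmask immediate
   0x00ff00ff00ff00ff.  */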
1178
1179 #define TOTAL_IMM_NB 5334
1180
1181 typedef struct
1182 {
1183 uint64_t imm;
1184 aarch64_insn encoding;
1185 } simd_imm_encoding;
1186
1187 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1188
1189 static int
1190 simd_imm_encoding_cmp(const void *i1, const void *i2)
1191 {
1192 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1193 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1194
1195 if (imm1->imm < imm2->imm)
1196 return -1;
1197 if (imm1->imm > imm2->imm)
1198 return +1;
1199 return 0;
1200 }
1201
1202 /* immediate bitfield standard encoding
1203 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1204 1 ssssss rrrrrr 64 rrrrrr ssssss
1205 0 0sssss 0rrrrr 32 rrrrr sssss
1206 0 10ssss 00rrrr 16 rrrr ssss
1207 0 110sss 000rrr 8 rrr sss
1208 0 1110ss 0000rr 4 rr ss
1209 0 11110s 00000r 2 r s */
1210 static inline int
1211 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1212 {
1213 return (is64 << 12) | (r << 6) | s;
1214 }
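/* For example, the 64-bit case with R = 3 and S = 5 encodes as
   (1 << 12) | (3 << 6) | 5 == 0x10c5.  */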
1215
1216 static void
1217 build_immediate_table (void)
1218 {
1219 uint32_t log_e, e, s, r, s_mask;
1220 uint64_t mask, imm;
1221 int nb_imms;
1222 int is64;
1223
1224 nb_imms = 0;
1225 for (log_e = 1; log_e <= 6; log_e++)
1226 {
1227 /* Get element size. */
1228 e = 1u << log_e;
1229 if (log_e == 6)
1230 {
1231 is64 = 1;
1232 mask = 0xffffffffffffffffull;
1233 s_mask = 0;
1234 }
1235 else
1236 {
1237 is64 = 0;
1238 mask = (1ull << e) - 1;
1239 /* log_e s_mask
1240 1 ((1 << 4) - 1) << 2 = 111100
1241 2 ((1 << 3) - 1) << 3 = 111000
1242 3 ((1 << 2) - 1) << 4 = 110000
1243 4 ((1 << 1) - 1) << 5 = 100000
1244 5 ((1 << 0) - 1) << 6 = 000000 */
1245 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1246 }
1247 for (s = 0; s < e - 1; s++)
1248 for (r = 0; r < e; r++)
1249 {
1250 /* s+1 consecutive bits to 1 (s < 63) */
1251 imm = (1ull << (s + 1)) - 1;
1252 /* rotate right by r */
1253 if (r != 0)
1254 imm = (imm >> r) | ((imm << (e - r)) & mask);
1255 /* replicate the constant depending on SIMD size */
1256 switch (log_e)
1257 {
1258 case 1: imm = (imm << 2) | imm;
1259 /* Fall through. */
1260 case 2: imm = (imm << 4) | imm;
1261 /* Fall through. */
1262 case 3: imm = (imm << 8) | imm;
1263 /* Fall through. */
1264 case 4: imm = (imm << 16) | imm;
1265 /* Fall through. */
1266 case 5: imm = (imm << 32) | imm;
1267 /* Fall through. */
1268 case 6: break;
1269 default: abort ();
1270 }
1271 simd_immediates[nb_imms].imm = imm;
1272 simd_immediates[nb_imms].encoding =
1273 encode_immediate_bitfield(is64, s | s_mask, r);
1274 nb_imms++;
1275 }
1276 }
1277 assert (nb_imms == TOTAL_IMM_NB);
1278 qsort(simd_immediates, nb_imms,
1279 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1280 }
1281
1282 /* Return TRUE if VALUE is a valid logical immediate, i.e. a bitmask that can
1283    be accepted by logical (immediate) instructions,
1284    e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1285 
1286    ESIZE is the number of bytes in the decoded immediate value.
1287    If ENCODING is not NULL and TRUE is returned, the standard encoding for
1288    VALUE is returned in *ENCODING. */
1289
1290 bool
1291 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1292 {
1293 simd_imm_encoding imm_enc;
1294 const simd_imm_encoding *imm_encoding;
1295 static bool initialized = false;
1296 uint64_t upper;
1297 int i;
1298
1299 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1300 value, esize);
1301
1302 if (!initialized)
1303 {
1304 build_immediate_table ();
1305 initialized = true;
1306 }
1307
1308 /* Allow all zeros or all ones in top bits, so that
1309 constant expressions like ~1 are permitted. */
1310 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1311 if ((value & ~upper) != value && (value | upper) != value)
1312 return false;
1313
1314 /* Replicate to a full 64-bit value. */
1315 value &= ~upper;
1316 for (i = esize * 8; i < 64; i *= 2)
1317 value |= (value << i);
1318
1319 imm_enc.imm = value;
1320 imm_encoding = (const simd_imm_encoding *)
1321 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1322 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1323 if (imm_encoding == NULL)
1324 {
1325 DEBUG_TRACE ("exit with false");
1326 return false;
1327 }
1328 if (encoding != NULL)
1329 *encoding = imm_encoding->encoding;
1330 DEBUG_TRACE ("exit with true");
1331 return true;
1332 }
1333
1334 /* If 64-bit immediate IMM is in the format of
1335 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1336 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1337 of value "abcdefgh". Otherwise return -1. */
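/* For example, 0x00ff00ff00ff00ff collapses to 0x55 (binary 01010101),
   while 0x00ff00ff00ff00fe returns -1 because its lowest byte is neither
   0x00 nor 0xff.  */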
1338 int
1339 aarch64_shrink_expanded_imm8 (uint64_t imm)
1340 {
1341 int i, ret;
1342 uint32_t byte;
1343
1344 ret = 0;
1345 for (i = 0; i < 8; i++)
1346 {
1347 byte = (imm >> (8 * i)) & 0xff;
1348 if (byte == 0xff)
1349 ret |= 1 << i;
1350 else if (byte != 0x00)
1351 return -1;
1352 }
1353 return ret;
1354 }
1355
1356 /* Utility inline functions for operand_general_constraint_met_p. */
1357
1358 static inline void
1359 set_error (aarch64_operand_error *mismatch_detail,
1360 enum aarch64_operand_error_kind kind, int idx,
1361 const char* error)
1362 {
1363 if (mismatch_detail == NULL)
1364 return;
1365 mismatch_detail->kind = kind;
1366 mismatch_detail->index = idx;
1367 mismatch_detail->error = error;
1368 }
1369
1370 static inline void
1371 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1372 const char* error)
1373 {
1374 if (mismatch_detail == NULL)
1375 return;
1376 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1377 }
1378
1379 static inline void
1380 set_invalid_regno_error (aarch64_operand_error *mismatch_detail, int idx,
1381 const char *prefix, int lower_bound, int upper_bound)
1382 {
1383 if (mismatch_detail == NULL)
1384 return;
1385 set_error (mismatch_detail, AARCH64_OPDE_INVALID_REGNO, idx, NULL);
1386 mismatch_detail->data[0].s = prefix;
1387 mismatch_detail->data[1].i = lower_bound;
1388 mismatch_detail->data[2].i = upper_bound;
1389 }
1390
1391 static inline void
1392 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1393 int idx, int lower_bound, int upper_bound,
1394 const char* error)
1395 {
1396 if (mismatch_detail == NULL)
1397 return;
1398 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1399 mismatch_detail->data[0].i = lower_bound;
1400 mismatch_detail->data[1].i = upper_bound;
1401 }
1402
1403 static inline void
1404 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1405 int idx, int lower_bound, int upper_bound)
1406 {
1407 if (mismatch_detail == NULL)
1408 return;
1409 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1410 _("immediate value"));
1411 }
1412
1413 static inline void
1414 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1415 int idx, int lower_bound, int upper_bound)
1416 {
1417 if (mismatch_detail == NULL)
1418 return;
1419 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1420 _("immediate offset"));
1421 }
1422
1423 static inline void
1424 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1425 int idx, int lower_bound, int upper_bound)
1426 {
1427 if (mismatch_detail == NULL)
1428 return;
1429 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1430 _("register number"));
1431 }
1432
1433 static inline void
1434 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1435 int idx, int lower_bound, int upper_bound)
1436 {
1437 if (mismatch_detail == NULL)
1438 return;
1439 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1440 _("register element index"));
1441 }
1442
1443 static inline void
1444 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1445 int idx, int lower_bound, int upper_bound)
1446 {
1447 if (mismatch_detail == NULL)
1448 return;
1449 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1450 _("shift amount"));
1451 }
1452
1453 /* Report that the MUL modifier in operand IDX should be in the range
1454 [LOWER_BOUND, UPPER_BOUND]. */
1455 static inline void
1456 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1457 int idx, int lower_bound, int upper_bound)
1458 {
1459 if (mismatch_detail == NULL)
1460 return;
1461 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1462 _("multiplier"));
1463 }
1464
1465 static inline void
1466 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1467 int alignment)
1468 {
1469 if (mismatch_detail == NULL)
1470 return;
1471 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1472 mismatch_detail->data[0].i = alignment;
1473 }
1474
1475 static inline void
1476 set_reg_list_length_error (aarch64_operand_error *mismatch_detail, int idx,
1477 int expected_num)
1478 {
1479 if (mismatch_detail == NULL)
1480 return;
1481 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_LENGTH, idx, NULL);
1482 mismatch_detail->data[0].i = 1 << expected_num;
1483 }
1484
1485 static inline void
1486 set_reg_list_stride_error (aarch64_operand_error *mismatch_detail, int idx,
1487 int expected_num)
1488 {
1489 if (mismatch_detail == NULL)
1490 return;
1491 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST_STRIDE, idx, NULL);
1492 mismatch_detail->data[0].i = 1 << expected_num;
1493 }
1494
1495 static inline void
1496 set_invalid_vg_size (aarch64_operand_error *mismatch_detail,
1497 int idx, int expected)
1498 {
1499 if (mismatch_detail == NULL)
1500 return;
1501 set_error (mismatch_detail, AARCH64_OPDE_INVALID_VG_SIZE, idx, NULL);
1502 mismatch_detail->data[0].i = expected;
1503 }
1504
1505 static inline void
1506 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1507 const char* error)
1508 {
1509 if (mismatch_detail == NULL)
1510 return;
1511 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1512 }
1513
1514 /* Check that indexed register operand OPND has a register in the range
1515 [MIN_REGNO, MAX_REGNO] and an index in the range [MIN_INDEX, MAX_INDEX].
1516 PREFIX is the register prefix, such as "z" for SVE vector registers. */
1517
1518 static bool
1519 check_reglane (const aarch64_opnd_info *opnd,
1520 aarch64_operand_error *mismatch_detail, int idx,
1521 const char *prefix, int min_regno, int max_regno,
1522 int min_index, int max_index)
1523 {
1524 if (!value_in_range_p (opnd->reglane.regno, min_regno, max_regno))
1525 {
1526 set_invalid_regno_error (mismatch_detail, idx, prefix, min_regno,
1527 max_regno);
1528 return false;
1529 }
1530 if (!value_in_range_p (opnd->reglane.index, min_index, max_index))
1531 {
1532 set_elem_idx_out_of_range_error (mismatch_detail, idx, min_index,
1533 max_index);
1534 return false;
1535 }
1536 return true;
1537 }
1538
1539 /* Check that register list operand OPND has NUM_REGS registers and a
1540 register stride of STRIDE. */
1541
1542 static bool
1543 check_reglist (const aarch64_opnd_info *opnd,
1544 aarch64_operand_error *mismatch_detail, int idx,
1545 int num_regs, int stride)
1546 {
1547 if (opnd->reglist.num_regs != num_regs)
1548 {
1549 set_reg_list_length_error (mismatch_detail, idx, num_regs);
1550 return false;
1551 }
1552 if (opnd->reglist.stride != stride)
1553 {
1554 set_reg_list_stride_error (mismatch_detail, idx, stride);
1555 return false;
1556 }
1557 return true;
1558 }
1559
1560 /* Check that indexed ZA operand OPND has:
1561
1562 - a selection register in the range [MIN_WREG, MIN_WREG + 3]
1563
1564 - RANGE_SIZE consecutive immediate offsets.
1565
1566 - an initial immediate offset that is a multiple of RANGE_SIZE
1567 in the range [0, MAX_VALUE * RANGE_SIZE]
1568
1569 - a vector group size of GROUP_SIZE. */
1570
1571 static bool
1572 check_za_access (const aarch64_opnd_info *opnd,
1573 aarch64_operand_error *mismatch_detail, int idx,
1574 int min_wreg, int max_value, unsigned int range_size,
1575 int group_size)
1576 {
1577 if (!value_in_range_p (opnd->indexed_za.index.regno, min_wreg, min_wreg + 3))
1578 {
1579 if (min_wreg == 12)
1580 set_other_error (mismatch_detail, idx,
1581 _("expected a selection register in the"
1582 " range w12-w15"));
1583 else if (min_wreg == 8)
1584 set_other_error (mismatch_detail, idx,
1585 _("expected a selection register in the"
1586 " range w8-w11"));
1587 else
1588 abort ();
1589 return false;
1590 }
1591
1592 int max_index = max_value * range_size;
1593 if (!value_in_range_p (opnd->indexed_za.index.imm, 0, max_index))
1594 {
1595 set_offset_out_of_range_error (mismatch_detail, idx, 0, max_index);
1596 return false;
1597 }
1598
1599 if ((opnd->indexed_za.index.imm % range_size) != 0)
1600 {
1601 assert (range_size == 2 || range_size == 4);
1602 set_other_error (mismatch_detail, idx,
1603 range_size == 2
1604 ? _("starting offset is not a multiple of 2")
1605 : _("starting offset is not a multiple of 4"));
1606 return false;
1607 }
1608
1609 if (opnd->indexed_za.index.countm1 != range_size - 1)
1610 {
1611 if (range_size == 1)
1612 set_other_error (mismatch_detail, idx,
1613 _("expected a single offset rather than"
1614 " a range"));
1615 else if (range_size == 2)
1616 set_other_error (mismatch_detail, idx,
1617 _("expected a range of two offsets"));
1618 else if (range_size == 4)
1619 set_other_error (mismatch_detail, idx,
1620 _("expected a range of four offsets"));
1621 else
1622 abort ();
1623 return false;
1624 }
1625
1626 /* The vector group specifier is optional in assembly code. */
1627 if (opnd->indexed_za.group_size != 0
1628 && opnd->indexed_za.group_size != group_size)
1629 {
1630 set_invalid_vg_size (mismatch_detail, idx, group_size);
1631 return false;
1632 }
1633
1634 return true;
1635 }
1636
1637 /* General constraint checking based on operand code.
1638
1639 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1640 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1641
1642 This function has to be called after the qualifiers for all operands
1643 have been resolved.
1644
1645    A mismatch error message is returned in *MISMATCH_DETAIL upon request,
1646    i.e. when MISMATCH_DETAIL is non-NULL.  This avoids generating error
1647    messages during disassembly, where they are not wanted.  We avoid
1648    dynamically constructing error-message strings here (i.e. in libopcodes),
1649    as that is costly and complicated; instead, we use a combination of an
1650    error code, a static string and some integer data to represent an
1651    error. */
1652
1653 static int
1654 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1655 enum aarch64_opnd type,
1656 const aarch64_opcode *opcode,
1657 aarch64_operand_error *mismatch_detail)
1658 {
1659 unsigned num, modifiers, shift;
1660 unsigned char size;
1661 int64_t imm, min_value, max_value;
1662 uint64_t uvalue, mask;
1663 const aarch64_opnd_info *opnd = opnds + idx;
1664 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1665 int i;
1666
1667 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1668
1669 switch (aarch64_operands[type].op_class)
1670 {
1671 case AARCH64_OPND_CLASS_INT_REG:
1672 /* Check pair reg constraints for cas* instructions. */
1673 if (type == AARCH64_OPND_PAIRREG)
1674 {
1675 assert (idx == 1 || idx == 3);
1676 if (opnds[idx - 1].reg.regno % 2 != 0)
1677 {
1678 set_syntax_error (mismatch_detail, idx - 1,
1679 _("reg pair must start from even reg"));
1680 return 0;
1681 }
1682 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1683 {
1684 set_syntax_error (mismatch_detail, idx,
1685 _("reg pair must be contiguous"));
1686 return 0;
1687 }
1688 break;
1689 }
1690
1691 /* <Xt> may be optional in some IC and TLBI instructions. */
1692 if (type == AARCH64_OPND_Rt_SYS)
1693 {
1694 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1695 == AARCH64_OPND_CLASS_SYSTEM));
1696 if (opnds[1].present
1697 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1698 {
1699 set_other_error (mismatch_detail, idx, _("extraneous register"));
1700 return 0;
1701 }
1702 if (!opnds[1].present
1703 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1704 {
1705 set_other_error (mismatch_detail, idx, _("missing register"));
1706 return 0;
1707 }
1708 }
1709 switch (qualifier)
1710 {
1711 case AARCH64_OPND_QLF_WSP:
1712 case AARCH64_OPND_QLF_SP:
1713 if (!aarch64_stack_pointer_p (opnd))
1714 {
1715 set_other_error (mismatch_detail, idx,
1716 _("stack pointer register expected"));
1717 return 0;
1718 }
1719 break;
1720 default:
1721 break;
1722 }
1723 break;
1724
1725 case AARCH64_OPND_CLASS_SVE_REG:
1726 switch (type)
1727 {
1728 case AARCH64_OPND_SVE_Zm3_INDEX:
1729 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1730 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1731 case AARCH64_OPND_SVE_Zm4_11_INDEX:
1732 case AARCH64_OPND_SVE_Zm4_INDEX:
1733 size = get_operand_fields_width (get_operand_from_code (type));
1734 shift = get_operand_specific_data (&aarch64_operands[type]);
1735 if (!check_reglane (opnd, mismatch_detail, idx,
1736 "z", 0, (1 << shift) - 1,
1737 0, (1u << (size - shift)) - 1))
1738 return 0;
1739 break;
1740
1741 case AARCH64_OPND_SVE_Zn_INDEX:
1742 size = aarch64_get_qualifier_esize (opnd->qualifier);
1743 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1744 0, 64 / size - 1))
1745 return 0;
1746 break;
1747
1748 case AARCH64_OPND_SME_PNn3_INDEX1:
1749 case AARCH64_OPND_SME_PNn3_INDEX2:
1750 size = get_operand_field_width (get_operand_from_code (type), 1);
1751 if (!check_reglane (opnd, mismatch_detail, idx, "pn", 8, 15,
1752 0, (1 << size) - 1))
1753 return 0;
1754 break;
1755
1756 case AARCH64_OPND_SME_Zn_INDEX1_16:
1757 case AARCH64_OPND_SME_Zn_INDEX2_15:
1758 case AARCH64_OPND_SME_Zn_INDEX2_16:
1759 case AARCH64_OPND_SME_Zn_INDEX3_14:
1760 case AARCH64_OPND_SME_Zn_INDEX3_15:
1761 case AARCH64_OPND_SME_Zn_INDEX4_14:
1762 size = get_operand_fields_width (get_operand_from_code (type)) - 5;
1763 if (!check_reglane (opnd, mismatch_detail, idx, "z", 0, 31,
1764 0, (1 << size) - 1))
1765 return 0;
1766 break;
1767
1768 case AARCH64_OPND_SME_Zm:
1769 if (opnd->reg.regno > 15)
1770 {
1771 set_invalid_regno_error (mismatch_detail, idx, "z", 0, 15);
1772 return 0;
1773 }
1774 break;
1775
1776 case AARCH64_OPND_SME_PnT_Wm_imm:
1777 size = aarch64_get_qualifier_esize (opnd->qualifier);
1778 max_value = 16 / size - 1;
1779 if (!check_za_access (opnd, mismatch_detail, idx,
1780 12, max_value, 1, 0))
1781 return 0;
1782 break;
1783
1784 default:
1785 break;
1786 }
1787 break;
1788
1789 case AARCH64_OPND_CLASS_SVE_REGLIST:
1790 switch (type)
1791 {
1792 case AARCH64_OPND_SME_Pdx2:
1793 case AARCH64_OPND_SME_Zdnx2:
1794 case AARCH64_OPND_SME_Zdnx4:
1795 case AARCH64_OPND_SME_Zmx2:
1796 case AARCH64_OPND_SME_Zmx4:
1797 case AARCH64_OPND_SME_Znx2:
1798 case AARCH64_OPND_SME_Znx4:
1799 num = get_operand_specific_data (&aarch64_operands[type]);
1800 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
1801 return 0;
1802 if ((opnd->reglist.first_regno % num) != 0)
1803 {
1804 set_other_error (mismatch_detail, idx,
1805 _("start register out of range"));
1806 return 0;
1807 }
1808 break;
1809
1810 case AARCH64_OPND_SME_Ztx2_STRIDED:
1811 case AARCH64_OPND_SME_Ztx4_STRIDED:
1812 /* 2-register lists have a stride of 8 and 4-register lists
1813 have a stride of 4. */
1814 num = get_operand_specific_data (&aarch64_operands[type]);
1815 if (!check_reglist (opnd, mismatch_detail, idx, num, 16 / num))
1816 return 0;
1817 num = 16 | (opnd->reglist.stride - 1);
1818 if ((opnd->reglist.first_regno & ~num) != 0)
1819 {
1820 set_other_error (mismatch_detail, idx,
1821 _("start register out of range"));
1822 return 0;
1823 }
1824 break;
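/* Illustrative note, derived from the bit mask above: two-register
   strided lists must start at z0-z7 or z16-z23 (so "{ z3.b, z11.b }"
   is accepted while "{ z8.b, z16.b }" is not), and four-register
   lists must start at z0-z3 or z16-z19. */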
1825
1826 case AARCH64_OPND_SME_PdxN:
1827 case AARCH64_OPND_SVE_ZnxN:
1828 case AARCH64_OPND_SVE_ZtxN:
1829 num = get_opcode_dependent_value (opcode);
1830 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
1831 return 0;
1832 break;
1833
1834 default:
1835 abort ();
1836 }
1837 break;
1838
1839 case AARCH64_OPND_CLASS_ZA_ACCESS:
1840 switch (type)
1841 {
1842 case AARCH64_OPND_SME_ZA_HV_idx_src:
1843 case AARCH64_OPND_SME_ZA_HV_idx_dest:
1844 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
1845 size = aarch64_get_qualifier_esize (opnd->qualifier);
1846 max_value = 16 / size - 1;
1847 if (!check_za_access (opnd, mismatch_detail, idx, 12, max_value, 1,
1848 get_opcode_dependent_value (opcode)))
1849 return 0;
1850 break;
1851
1852 case AARCH64_OPND_SME_ZA_array_off4:
1853 if (!check_za_access (opnd, mismatch_detail, idx, 12, 15, 1,
1854 get_opcode_dependent_value (opcode)))
1855 return 0;
1856 break;
1857
1858 case AARCH64_OPND_SME_ZA_array_off3_0:
1859 case AARCH64_OPND_SME_ZA_array_off3_5:
1860 if (!check_za_access (opnd, mismatch_detail, idx, 8, 7, 1,
1861 get_opcode_dependent_value (opcode)))
1862 return 0;
1863 break;
1864
1865 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
1866 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
1867 size = aarch64_get_qualifier_esize (opnd->qualifier);
1868 num = get_opcode_dependent_value (opcode);
1869 max_value = 16 / num / size;
1870 if (max_value > 0)
1871 max_value -= 1;
1872 if (!check_za_access (opnd, mismatch_detail, idx,
1873 12, max_value, num, 0))
1874 return 0;
1875 break;
1876
1877 default:
1878 abort ();
1879 }
1880 break;
1881
1882 case AARCH64_OPND_CLASS_PRED_REG:
1883 switch (type)
1884 {
1885 case AARCH64_OPND_SME_PNd3:
1886 case AARCH64_OPND_SME_PNg3:
1887 if (opnd->reg.regno < 8)
1888 {
1889 set_invalid_regno_error (mismatch_detail, idx, "pn", 8, 15);
1890 return 0;
1891 }
1892 break;
1893
1894 default:
1895 if (opnd->reg.regno >= 8
1896 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1897 {
1898 set_invalid_regno_error (mismatch_detail, idx, "p", 0, 7);
1899 return 0;
1900 }
1901 break;
1902 }
1903 break;
1904
1905 case AARCH64_OPND_CLASS_COND:
1906 if (type == AARCH64_OPND_COND1
1907 && (opnds[idx].cond->value & 0xe) == 0xe)
1908 {
1909 /* Don't allow AL or NV. */
1910 set_syntax_error (mismatch_detail, idx, NULL);
1911 }
1912 break;
1913
1914 case AARCH64_OPND_CLASS_ADDRESS:
1915 /* Check writeback. */
1916 switch (opcode->iclass)
1917 {
1918 case ldst_pos:
1919 case ldst_unscaled:
1920 case ldstnapair_offs:
1921 case ldstpair_off:
1922 case ldst_unpriv:
1923 if (opnd->addr.writeback == 1)
1924 {
1925 set_syntax_error (mismatch_detail, idx,
1926 _("unexpected address writeback"));
1927 return 0;
1928 }
1929 break;
1930 case ldst_imm10:
1931 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1932 {
1933 set_syntax_error (mismatch_detail, idx,
1934 _("unexpected address writeback"));
1935 return 0;
1936 }
1937 break;
1938 case ldst_imm9:
1939 case ldstpair_indexed:
1940 case asisdlsep:
1941 case asisdlsop:
1942 if (opnd->addr.writeback == 0)
1943 {
1944 set_syntax_error (mismatch_detail, idx,
1945 _("address writeback expected"));
1946 return 0;
1947 }
1948 break;
1949 default:
1950 assert (opnd->addr.writeback == 0);
1951 break;
1952 }
1953 switch (type)
1954 {
1955 case AARCH64_OPND_ADDR_SIMM7:
1956 /* Scaled signed 7-bit immediate offset. */
1957 /* Get the size of the data element that is accessed, which may be
1958 different from the source register size,
1959 e.g. in strb/ldrb. */
1960 size = aarch64_get_qualifier_esize (opnd->qualifier);
1961 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1962 {
1963 set_offset_out_of_range_error (mismatch_detail, idx,
1964 -64 * size, 63 * size);
1965 return 0;
1966 }
1967 if (!value_aligned_p (opnd->addr.offset.imm, size))
1968 {
1969 set_unaligned_error (mismatch_detail, idx, size);
1970 return 0;
1971 }
1972 break;
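/* Worked example (illustrative): for the 64-bit LDP/STP form
   (esize == 8) the accepted byte offsets are multiples of 8 in
   [-512, 504], so "ldp x0, x1, [sp, #-512]" passes while an offset
   of #-516 is rejected as unaligned. */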
1973 case AARCH64_OPND_ADDR_OFFSET:
1974 case AARCH64_OPND_ADDR_SIMM9:
1975 /* Unscaled signed 9-bit immediate offset. */
1976 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1977 {
1978 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1979 return 0;
1980 }
1981 break;
1982
1983 case AARCH64_OPND_ADDR_SIMM9_2:
1984 /* Unscaled signed 9-bit immediate offset, which has to be negative
1985 or unaligned. */
1986 size = aarch64_get_qualifier_esize (qualifier);
1987 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1988 && !value_aligned_p (opnd->addr.offset.imm, size))
1989 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1990 return 1;
1991 set_other_error (mismatch_detail, idx,
1992 _("negative or unaligned offset expected"));
1993 return 0;
1994
1995 case AARCH64_OPND_ADDR_SIMM10:
1996 /* Scaled signed 10-bit immediate offset. */
1997 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1998 {
1999 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
2000 return 0;
2001 }
2002 if (!value_aligned_p (opnd->addr.offset.imm, 8))
2003 {
2004 set_unaligned_error (mismatch_detail, idx, 8);
2005 return 0;
2006 }
2007 break;
2008
2009 case AARCH64_OPND_ADDR_SIMM11:
2010 /* Signed 11-bit immediate offset (multiple of 16). */
2011 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
2012 {
2013 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
2014 return 0;
2015 }
2016
2017 if (!value_aligned_p (opnd->addr.offset.imm, 16))
2018 {
2019 set_unaligned_error (mismatch_detail, idx, 16);
2020 return 0;
2021 }
2022 break;
2023
2024 case AARCH64_OPND_ADDR_SIMM13:
2025 /* Signed 13-bit immediate offset (multiple of 16). */
2026 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
2027 {
2028 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
2029 return 0;
2030 }
2031
2032 if (!value_aligned_p (opnd->addr.offset.imm, 16))
2033 {
2034 set_unaligned_error (mismatch_detail, idx, 16);
2035 return 0;
2036 }
2037 break;
2038
2039 case AARCH64_OPND_SIMD_ADDR_POST:
2040 /* AdvSIMD load/store multiple structures, post-index. */
2041 assert (idx == 1);
2042 if (opnd->addr.offset.is_reg)
2043 {
2044 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
2045 return 1;
2046 else
2047 {
2048 set_other_error (mismatch_detail, idx,
2049 _("invalid register offset"));
2050 return 0;
2051 }
2052 }
2053 else
2054 {
2055 const aarch64_opnd_info *prev = &opnds[idx-1];
2056 unsigned num_bytes; /* total number of bytes transferred. */
2057 /* The opcode dependent area stores the number of elements in
2058 each structure to be loaded/stored. */
2059 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
2060 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
2061 /* Special handling of loading a single structure to all lanes. */
2062 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
2063 * aarch64_get_qualifier_esize (prev->qualifier);
2064 else
2065 num_bytes = prev->reglist.num_regs
2066 * aarch64_get_qualifier_esize (prev->qualifier)
2067 * aarch64_get_qualifier_nelem (prev->qualifier);
2068 if ((int) num_bytes != opnd->addr.offset.imm)
2069 {
2070 set_other_error (mismatch_detail, idx,
2071 _("invalid post-increment amount"));
2072 return 0;
2073 }
2074 }
2075 break;
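/* Worked example (illustrative): the register post-index form must use
   x0-x30, and the immediate form must match the transfer size exactly,
   e.g. "ld1 {v0.16b, v1.16b}, [x0], #32" (2 registers x 16 bytes) or
   "ld1r {v0.8b}, [x0], #1" (a single 1-byte element). */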
2076
2077 case AARCH64_OPND_ADDR_REGOFF:
2078 /* Get the size of the data element that is accessed, which may be
2079 different from the source register size,
2080 e.g. in strb/ldrb. */
2081 size = aarch64_get_qualifier_esize (opnd->qualifier);
2082 /* It is either no shift or shift by the binary logarithm of SIZE. */
2083 if (opnd->shifter.amount != 0
2084 && opnd->shifter.amount != (int)get_logsz (size))
2085 {
2086 set_other_error (mismatch_detail, idx,
2087 _("invalid shift amount"));
2088 return 0;
2089 }
2090 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
2091 operators. */
2092 switch (opnd->shifter.kind)
2093 {
2094 case AARCH64_MOD_UXTW:
2095 case AARCH64_MOD_LSL:
2096 case AARCH64_MOD_SXTW:
2097 case AARCH64_MOD_SXTX: break;
2098 default:
2099 set_other_error (mismatch_detail, idx,
2100 _("invalid extend/shift operator"));
2101 return 0;
2102 }
2103 break;
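/* Worked example (illustrative): for a 32-bit load the shift amount
   must be 0 or log2(4) == 2, so "ldr w0, [x1, x2, lsl #2]" is accepted
   while "ldr w0, [x1, x2, lsl #3]" is rejected. */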
2104
2105 case AARCH64_OPND_ADDR_UIMM12:
2106 imm = opnd->addr.offset.imm;
2107 /* Get the size of the data element that is accessed, which may be
2108 different from the source register size,
2109 e.g. in strb/ldrb. */
2110 size = aarch64_get_qualifier_esize (qualifier);
2111 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
2112 {
2113 set_offset_out_of_range_error (mismatch_detail, idx,
2114 0, 4095 * size);
2115 return 0;
2116 }
2117 if (!value_aligned_p (opnd->addr.offset.imm, size))
2118 {
2119 set_unaligned_error (mismatch_detail, idx, size);
2120 return 0;
2121 }
2122 break;
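/* Worked example (illustrative): for "ldr x0, [x1, #imm]" the offset
   must be a multiple of 8 in [0, 32760], whereas ldrb (size == 1)
   accepts any offset in [0, 4095]. */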
2123
2124 case AARCH64_OPND_ADDR_PCREL14:
2125 case AARCH64_OPND_ADDR_PCREL19:
2126 case AARCH64_OPND_ADDR_PCREL21:
2127 case AARCH64_OPND_ADDR_PCREL26:
2128 imm = opnd->imm.value;
2129 if (operand_need_shift_by_two (get_operand_from_code (type)))
2130 {
2131 /* The offset value in a PC-relative branch instruction is always
2132 4-byte aligned and is encoded without the lowest 2 bits. */
2133 if (!value_aligned_p (imm, 4))
2134 {
2135 set_unaligned_error (mismatch_detail, idx, 4);
2136 return 0;
2137 }
2138 /* Right shift by 2 so that we can carry out the following check
2139 canonically. */
2140 imm >>= 2;
2141 }
2142 size = get_operand_fields_width (get_operand_from_code (type));
2143 if (!value_fit_signed_field_p (imm, size))
2144 {
2145 set_other_error (mismatch_detail, idx,
2146 _("immediate out of range"));
2147 return 0;
2148 }
2149 break;
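/* Worked example (illustrative): after the divide-by-4 above, B/BL
   (26-bit field) allow a branch range of +/-128 MiB, while TBZ/TBNZ
   (14-bit field) allow only +/-32 KiB. */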
2150
2151 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
2152 if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
2153 {
2154 set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
2155 return 0;
2156 }
2157 break;
2158
2159 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
2160 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
2161 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
2162 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
2163 min_value = -8;
2164 max_value = 7;
2165 sve_imm_offset_vl:
2166 assert (!opnd->addr.offset.is_reg);
2167 assert (opnd->addr.preind);
2168 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
2169 min_value *= num;
2170 max_value *= num;
2171 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
2172 || (opnd->shifter.operator_present
2173 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
2174 {
2175 set_other_error (mismatch_detail, idx,
2176 _("invalid addressing mode"));
2177 return 0;
2178 }
2179 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2180 {
2181 set_offset_out_of_range_error (mismatch_detail, idx,
2182 min_value, max_value);
2183 return 0;
2184 }
2185 if (!value_aligned_p (opnd->addr.offset.imm, num))
2186 {
2187 set_unaligned_error (mismatch_detail, idx, num);
2188 return 0;
2189 }
2190 break;
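/* Worked example (illustrative): for the S4x4xVL variant (num == 4)
   the immediate must be a multiple of 4 in [-32, 28], so an address
   such as "[x0, #-32, mul vl]" is accepted, "#-36, mul vl" is out of
   range, and "#8" without "mul vl" is an invalid addressing mode. */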
2191
2192 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
2193 min_value = -32;
2194 max_value = 31;
2195 goto sve_imm_offset_vl;
2196
2197 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
2198 min_value = -256;
2199 max_value = 255;
2200 goto sve_imm_offset_vl;
2201
2202 case AARCH64_OPND_SVE_ADDR_RI_U6:
2203 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
2204 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
2205 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
2206 min_value = 0;
2207 max_value = 63;
2208 sve_imm_offset:
2209 assert (!opnd->addr.offset.is_reg);
2210 assert (opnd->addr.preind);
2211 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
2212 min_value *= num;
2213 max_value *= num;
2214 if (opnd->shifter.operator_present
2215 || opnd->shifter.amount_present)
2216 {
2217 set_other_error (mismatch_detail, idx,
2218 _("invalid addressing mode"));
2219 return 0;
2220 }
2221 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
2222 {
2223 set_offset_out_of_range_error (mismatch_detail, idx,
2224 min_value, max_value);
2225 return 0;
2226 }
2227 if (!value_aligned_p (opnd->addr.offset.imm, num))
2228 {
2229 set_unaligned_error (mismatch_detail, idx, num);
2230 return 0;
2231 }
2232 break;
2233
2234 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
2235 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
2236 min_value = -8;
2237 max_value = 7;
2238 goto sve_imm_offset;
2239
2240 case AARCH64_OPND_SVE_ADDR_ZX:
2241 /* Everything is already ensured by parse_operands or
2242 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
2243 argument type). */
2244 assert (opnd->addr.offset.is_reg);
2245 assert (opnd->addr.preind);
2246 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
2247 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2248 assert (opnd->shifter.operator_present == 0);
2249 break;
2250
2251 case AARCH64_OPND_SVE_ADDR_R:
2252 case AARCH64_OPND_SVE_ADDR_RR:
2253 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
2254 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
2255 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
2256 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
2257 case AARCH64_OPND_SVE_ADDR_RX:
2258 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
2259 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
2260 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
2261 case AARCH64_OPND_SVE_ADDR_RZ:
2262 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
2263 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
2264 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
2265 modifiers = 1 << AARCH64_MOD_LSL;
2266 sve_rr_operand:
2267 assert (opnd->addr.offset.is_reg);
2268 assert (opnd->addr.preind);
2269 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
2270 && opnd->addr.offset.regno == 31)
2271 {
2272 set_other_error (mismatch_detail, idx,
2273 _("index register xzr is not allowed"));
2274 return 0;
2275 }
2276 if (((1 << opnd->shifter.kind) & modifiers) == 0
2277 || (opnd->shifter.amount
2278 != get_operand_specific_data (&aarch64_operands[type])))
2279 {
2280 set_other_error (mismatch_detail, idx,
2281 _("invalid addressing mode"));
2282 return 0;
2283 }
2284 break;
2285
2286 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
2287 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
2288 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
2289 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
2290 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
2291 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
2292 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
2293 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
2294 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
2295 goto sve_rr_operand;
2296
2297 case AARCH64_OPND_SVE_ADDR_ZI_U5:
2298 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
2299 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
2300 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
2301 min_value = 0;
2302 max_value = 31;
2303 goto sve_imm_offset;
2304
2305 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
2306 modifiers = 1 << AARCH64_MOD_LSL;
2307 sve_zz_operand:
2308 assert (opnd->addr.offset.is_reg);
2309 assert (opnd->addr.preind);
2310 if (((1 << opnd->shifter.kind) & modifiers) == 0
2311 || opnd->shifter.amount < 0
2312 || opnd->shifter.amount > 3)
2313 {
2314 set_other_error (mismatch_detail, idx,
2315 _("invalid addressing mode"));
2316 return 0;
2317 }
2318 break;
2319
2320 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
2321 modifiers = (1 << AARCH64_MOD_SXTW);
2322 goto sve_zz_operand;
2323
2324 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
2325 modifiers = 1 << AARCH64_MOD_UXTW;
2326 goto sve_zz_operand;
2327
2328 default:
2329 break;
2330 }
2331 break;
2332
2333 case AARCH64_OPND_CLASS_SIMD_REGLIST:
2334 if (type == AARCH64_OPND_LEt)
2335 {
2336 /* Get the upper bound for the element index. */
2337 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2338 if (!value_in_range_p (opnd->reglist.index, 0, num))
2339 {
2340 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2341 return 0;
2342 }
2343 }
2344 /* The opcode dependent area stores the number of elements in
2345 each structure to be loaded/stored. */
2346 num = get_opcode_dependent_value (opcode);
2347 switch (type)
2348 {
2349 case AARCH64_OPND_LVt:
2350 assert (num >= 1 && num <= 4);
2351 /* Except for LD1/ST1, the number of registers should equal the
2352 number of structure elements. */
2353 if (num != 1 && !check_reglist (opnd, mismatch_detail, idx, num, 1))
2354 return 0;
2355 break;
2356 case AARCH64_OPND_LVt_AL:
2357 case AARCH64_OPND_LEt:
2358 assert (num >= 1 && num <= 4);
2359 /* The number of registers should be equal to that of the structure
2360 elements. */
2361 if (!check_reglist (opnd, mismatch_detail, idx, num, 1))
2362 return 0;
2363 break;
2364 default:
2365 break;
2366 }
2367 if (opnd->reglist.stride != 1)
2368 {
2369 set_reg_list_stride_error (mismatch_detail, idx, 1);
2370 return 0;
2371 }
2372 break;
2373
2374 case AARCH64_OPND_CLASS_IMMEDIATE:
2375 /* Constraint check on immediate operand. */
2376 imm = opnd->imm.value;
2377 /* E.g. imm_0_31 constrains value to be 0..31. */
2378 if (qualifier_value_in_range_constraint_p (qualifier)
2379 && !value_in_range_p (imm, get_lower_bound (qualifier),
2380 get_upper_bound (qualifier)))
2381 {
2382 set_imm_out_of_range_error (mismatch_detail, idx,
2383 get_lower_bound (qualifier),
2384 get_upper_bound (qualifier));
2385 return 0;
2386 }
2387
2388 switch (type)
2389 {
2390 case AARCH64_OPND_AIMM:
2391 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2392 {
2393 set_other_error (mismatch_detail, idx,
2394 _("invalid shift operator"));
2395 return 0;
2396 }
2397 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2398 {
2399 set_other_error (mismatch_detail, idx,
2400 _("shift amount must be 0 or 12"));
2401 return 0;
2402 }
2403 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2404 {
2405 set_other_error (mismatch_detail, idx,
2406 _("immediate out of range"));
2407 return 0;
2408 }
2409 break;
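/* Worked example (illustrative): "add x0, x1, #4095" and
   "add x0, x1, #4095, lsl #12" are both accepted; "#4096" (without a
   shift) and "lsl #8" are rejected. */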
2410
2411 case AARCH64_OPND_HALF:
2412 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2413 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2414 {
2415 set_other_error (mismatch_detail, idx,
2416 _("invalid shift operator"));
2417 return 0;
2418 }
2419 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2420 if (!value_aligned_p (opnd->shifter.amount, 16))
2421 {
2422 set_other_error (mismatch_detail, idx,
2423 _("shift amount must be a multiple of 16"));
2424 return 0;
2425 }
2426 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2427 {
2428 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2429 0, size * 8 - 16);
2430 return 0;
2431 }
2432 if (opnd->imm.value < 0)
2433 {
2434 set_other_error (mismatch_detail, idx,
2435 _("negative immediate value not allowed"));
2436 return 0;
2437 }
2438 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2439 {
2440 set_other_error (mismatch_detail, idx,
2441 _("immediate out of range"));
2442 return 0;
2443 }
2444 break;
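/* Worked example (illustrative): "movk x0, #0xbeef, lsl #48" is
   accepted, but with a W destination the shift is limited to 0 or 16,
   so "movk w0, #0xbeef, lsl #32" is rejected. */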
2445
2446 case AARCH64_OPND_IMM_MOV:
2447 {
2448 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2449 imm = opnd->imm.value;
2450 assert (idx == 1);
2451 switch (opcode->op)
2452 {
2453 case OP_MOV_IMM_WIDEN:
2454 imm = ~imm;
2455 /* Fall through. */
2456 case OP_MOV_IMM_WIDE:
2457 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2458 {
2459 set_other_error (mismatch_detail, idx,
2460 _("immediate out of range"));
2461 return 0;
2462 }
2463 break;
2464 case OP_MOV_IMM_LOG:
2465 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2466 {
2467 set_other_error (mismatch_detail, idx,
2468 _("immediate out of range"));
2469 return 0;
2470 }
2471 break;
2472 default:
2473 assert (0);
2474 return 0;
2475 }
2476 }
2477 break;
2478
2479 case AARCH64_OPND_NZCV:
2480 case AARCH64_OPND_CCMP_IMM:
2481 case AARCH64_OPND_EXCEPTION:
2482 case AARCH64_OPND_UNDEFINED:
2483 case AARCH64_OPND_TME_UIMM16:
2484 case AARCH64_OPND_UIMM4:
2485 case AARCH64_OPND_UIMM4_ADDG:
2486 case AARCH64_OPND_UIMM7:
2487 case AARCH64_OPND_UIMM3_OP1:
2488 case AARCH64_OPND_UIMM3_OP2:
2489 case AARCH64_OPND_SVE_UIMM3:
2490 case AARCH64_OPND_SVE_UIMM7:
2491 case AARCH64_OPND_SVE_UIMM8:
2492 case AARCH64_OPND_SVE_UIMM8_53:
2493 case AARCH64_OPND_CSSC_UIMM8:
2494 size = get_operand_fields_width (get_operand_from_code (type));
2495 assert (size < 32);
2496 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2497 {
2498 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2499 (1u << size) - 1);
2500 return 0;
2501 }
2502 break;
2503
2504 case AARCH64_OPND_UIMM10:
2505 /* Scaled unsigned 10-bit immediate offset. */
2506 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2507 {
2508 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2509 return 0;
2510 }
2511
2512 if (!value_aligned_p (opnd->imm.value, 16))
2513 {
2514 set_unaligned_error (mismatch_detail, idx, 16);
2515 return 0;
2516 }
2517 break;
2518
2519 case AARCH64_OPND_SIMM5:
2520 case AARCH64_OPND_SVE_SIMM5:
2521 case AARCH64_OPND_SVE_SIMM5B:
2522 case AARCH64_OPND_SVE_SIMM6:
2523 case AARCH64_OPND_SVE_SIMM8:
2524 case AARCH64_OPND_CSSC_SIMM8:
2525 size = get_operand_fields_width (get_operand_from_code (type));
2526 assert (size < 32);
2527 if (!value_fit_signed_field_p (opnd->imm.value, size))
2528 {
2529 set_imm_out_of_range_error (mismatch_detail, idx,
2530 -(1 << (size - 1)),
2531 (1 << (size - 1)) - 1);
2532 return 0;
2533 }
2534 break;
2535
2536 case AARCH64_OPND_WIDTH:
2537 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2538 && opnds[0].type == AARCH64_OPND_Rd);
2539 size = get_upper_bound (qualifier);
2540 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2541 /* lsb+width <= reg.size */
2542 {
2543 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2544 size - opnds[idx-1].imm.value);
2545 return 0;
2546 }
2547 break;
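/* Worked example (illustrative): "ubfx w0, w1, #24, #8" is accepted
   (24 + 8 == 32), whereas "ubfx w0, w1, #24, #16" is rejected because
   lsb + width exceeds the 32-bit register size. */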
2548
2549 case AARCH64_OPND_LIMM:
2550 case AARCH64_OPND_SVE_LIMM:
2551 {
2552 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2553 uint64_t uimm = opnd->imm.value;
2554 if (opcode->op == OP_BIC)
2555 uimm = ~uimm;
2556 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2557 {
2558 set_other_error (mismatch_detail, idx,
2559 _("immediate out of range"));
2560 return 0;
2561 }
2562 }
2563 break;
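/* Worked example (illustrative): "and x0, x1, #0x00ff00ff00ff00ff"
   uses an encodable bitmask immediate, whereas "#0x1234" is not
   encodable; for BIC the immediate is inverted before the same test. */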
2564
2565 case AARCH64_OPND_IMM0:
2566 case AARCH64_OPND_FPIMM0:
2567 if (opnd->imm.value != 0)
2568 {
2569 set_other_error (mismatch_detail, idx,
2570 _("immediate zero expected"));
2571 return 0;
2572 }
2573 break;
2574
2575 case AARCH64_OPND_IMM_ROT1:
2576 case AARCH64_OPND_IMM_ROT2:
2577 case AARCH64_OPND_SVE_IMM_ROT2:
2578 if (opnd->imm.value != 0
2579 && opnd->imm.value != 90
2580 && opnd->imm.value != 180
2581 && opnd->imm.value != 270)
2582 {
2583 set_other_error (mismatch_detail, idx,
2584 _("rotate expected to be 0, 90, 180 or 270"));
2585 return 0;
2586 }
2587 break;
2588
2589 case AARCH64_OPND_IMM_ROT3:
2590 case AARCH64_OPND_SVE_IMM_ROT1:
2591 case AARCH64_OPND_SVE_IMM_ROT3:
2592 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2593 {
2594 set_other_error (mismatch_detail, idx,
2595 _("rotate expected to be 90 or 270"));
2596 return 0;
2597 }
2598 break;
2599
2600 case AARCH64_OPND_SHLL_IMM:
2601 assert (idx == 2);
2602 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2603 if (opnd->imm.value != size)
2604 {
2605 set_other_error (mismatch_detail, idx,
2606 _("invalid shift amount"));
2607 return 0;
2608 }
2609 break;
2610
2611 case AARCH64_OPND_IMM_VLSL:
2612 size = aarch64_get_qualifier_esize (qualifier);
2613 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2614 {
2615 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2616 size * 8 - 1);
2617 return 0;
2618 }
2619 break;
2620
2621 case AARCH64_OPND_IMM_VLSR:
2622 size = aarch64_get_qualifier_esize (qualifier);
2623 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2624 {
2625 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2626 return 0;
2627 }
2628 break;
2629
2630 case AARCH64_OPND_SIMD_IMM:
2631 case AARCH64_OPND_SIMD_IMM_SFT:
2632 /* Qualifier check. */
2633 switch (qualifier)
2634 {
2635 case AARCH64_OPND_QLF_LSL:
2636 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2637 {
2638 set_other_error (mismatch_detail, idx,
2639 _("invalid shift operator"));
2640 return 0;
2641 }
2642 break;
2643 case AARCH64_OPND_QLF_MSL:
2644 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2645 {
2646 set_other_error (mismatch_detail, idx,
2647 _("invalid shift operator"));
2648 return 0;
2649 }
2650 break;
2651 case AARCH64_OPND_QLF_NIL:
2652 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2653 {
2654 set_other_error (mismatch_detail, idx,
2655 _("shift is not permitted"));
2656 return 0;
2657 }
2658 break;
2659 default:
2660 assert (0);
2661 return 0;
2662 }
2663 /* Is the immediate valid? */
2664 assert (idx == 1);
2665 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2666 {
2667 /* uimm8 or simm8 */
2668 if (!value_in_range_p (opnd->imm.value, -128, 255))
2669 {
2670 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2671 return 0;
2672 }
2673 }
2674 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2675 {
2676 /* uimm64 is not
2677 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2678 ffffffffgggggggghhhhhhhh'. */
2679 set_other_error (mismatch_detail, idx,
2680 _("invalid value for immediate"));
2681 return 0;
2682 }
2683 /* Is the shift amount valid? */
2684 switch (opnd->shifter.kind)
2685 {
2686 case AARCH64_MOD_LSL:
2687 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2688 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2689 {
2690 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2691 (size - 1) * 8);
2692 return 0;
2693 }
2694 if (!value_aligned_p (opnd->shifter.amount, 8))
2695 {
2696 set_unaligned_error (mismatch_detail, idx, 8);
2697 return 0;
2698 }
2699 break;
2700 case AARCH64_MOD_MSL:
2701 /* Only 8 and 16 are valid shift amounts. */
2702 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2703 {
2704 set_other_error (mismatch_detail, idx,
2705 _("shift amount must be 8 or 16"));
2706 return 0;
2707 }
2708 break;
2709 default:
2710 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2711 {
2712 set_other_error (mismatch_detail, idx,
2713 _("invalid shift operator"));
2714 return 0;
2715 }
2716 break;
2717 }
2718 break;
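/* Worked example (illustrative): "movi v0.4s, #0xff, lsl #24" is
   accepted (LSL must be a multiple of 8, up to 24 for .4s), MSL allows
   only 8 or 16, and the 64-bit form requires a per-byte mask such as
   "movi v0.2d, #0xff00ff00ff00ff00". */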
2719
2720 case AARCH64_OPND_FPIMM:
2721 case AARCH64_OPND_SIMD_FPIMM:
2722 case AARCH64_OPND_SVE_FPIMM8:
2723 if (opnd->imm.is_fp == 0)
2724 {
2725 set_other_error (mismatch_detail, idx,
2726 _("floating-point immediate expected"));
2727 return 0;
2728 }
2729 /* The value is expected to be an 8-bit floating-point constant with
2730 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2731 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2732 instruction). */
2733 if (!value_in_range_p (opnd->imm.value, 0, 255))
2734 {
2735 set_other_error (mismatch_detail, idx,
2736 _("immediate out of range"));
2737 return 0;
2738 }
2739 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2740 {
2741 set_other_error (mismatch_detail, idx,
2742 _("invalid shift operator"));
2743 return 0;
2744 }
2745 break;
2746
2747 case AARCH64_OPND_SVE_AIMM:
2748 min_value = 0;
2749 sve_aimm:
2750 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2751 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2752 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2753 uvalue = opnd->imm.value;
2754 shift = opnd->shifter.amount;
2755 if (size == 1)
2756 {
2757 if (shift != 0)
2758 {
2759 set_other_error (mismatch_detail, idx,
2760 _("no shift amount allowed for"
2761 " 8-bit constants"));
2762 return 0;
2763 }
2764 }
2765 else
2766 {
2767 if (shift != 0 && shift != 8)
2768 {
2769 set_other_error (mismatch_detail, idx,
2770 _("shift amount must be 0 or 8"));
2771 return 0;
2772 }
2773 if (shift == 0 && (uvalue & 0xff) == 0)
2774 {
2775 shift = 8;
2776 uvalue = (int64_t) uvalue / 256;
2777 }
2778 }
2779 mask >>= shift;
2780 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2781 {
2782 set_other_error (mismatch_detail, idx,
2783 _("immediate too big for element size"));
2784 return 0;
2785 }
2786 uvalue = (uvalue - min_value) & mask;
2787 if (uvalue > 0xff)
2788 {
2789 set_other_error (mismatch_detail, idx,
2790 _("invalid arithmetic immediate"));
2791 return 0;
2792 }
2793 break;
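/* Worked example (illustrative): "add z0.h, z0.h, #256" is accepted
   and treated as #1, LSL #8 by the conversion above, while "#257" does
   not fit an 8-bit immediate and is rejected; byte elements allow no
   shift at all. */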
2794
2795 case AARCH64_OPND_SVE_ASIMM:
2796 min_value = -128;
2797 goto sve_aimm;
2798
2799 case AARCH64_OPND_SVE_I1_HALF_ONE:
2800 assert (opnd->imm.is_fp);
2801 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2802 {
2803 set_other_error (mismatch_detail, idx,
2804 _("floating-point value must be 0.5 or 1.0"));
2805 return 0;
2806 }
2807 break;
2808
2809 case AARCH64_OPND_SVE_I1_HALF_TWO:
2810 assert (opnd->imm.is_fp);
2811 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2812 {
2813 set_other_error (mismatch_detail, idx,
2814 _("floating-point value must be 0.5 or 2.0"));
2815 return 0;
2816 }
2817 break;
2818
2819 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2820 assert (opnd->imm.is_fp);
2821 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2822 {
2823 set_other_error (mismatch_detail, idx,
2824 _("floating-point value must be 0.0 or 1.0"));
2825 return 0;
2826 }
2827 break;
2828
2829 case AARCH64_OPND_SVE_INV_LIMM:
2830 {
2831 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2832 uint64_t uimm = ~opnd->imm.value;
2833 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2834 {
2835 set_other_error (mismatch_detail, idx,
2836 _("immediate out of range"));
2837 return 0;
2838 }
2839 }
2840 break;
2841
2842 case AARCH64_OPND_SVE_LIMM_MOV:
2843 {
2844 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2845 uint64_t uimm = opnd->imm.value;
2846 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2847 {
2848 set_other_error (mismatch_detail, idx,
2849 _("immediate out of range"));
2850 return 0;
2851 }
2852 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2853 {
2854 set_other_error (mismatch_detail, idx,
2855 _("invalid replicated MOV immediate"));
2856 return 0;
2857 }
2858 }
2859 break;
2860
2861 case AARCH64_OPND_SVE_PATTERN_SCALED:
2862 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2863 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2864 {
2865 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2866 return 0;
2867 }
2868 break;
2869
2870 case AARCH64_OPND_SVE_SHLIMM_PRED:
2871 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2872 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2873 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2874 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2875 {
2876 set_imm_out_of_range_error (mismatch_detail, idx,
2877 0, 8 * size - 1);
2878 return 0;
2879 }
2880 break;
2881
2882 case AARCH64_OPND_SVE_SHRIMM_PRED:
2883 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2884 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2885 num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2886 size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2887 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2888 {
2889 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2890 return 0;
2891 }
2892 break;
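/* Worked example (illustrative): for .s elements, left shifts accept
   0-31 and right shifts 1-32, so "lsl z0.s, z1.s, #31" and
   "lsr z0.s, z1.s, #32" pass while "lsl z0.s, z1.s, #32" is rejected. */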
2893
2894 case AARCH64_OPND_SME_ZT0_INDEX:
2895 if (!value_in_range_p (opnd->imm.value, 0, 56))
2896 {
2897 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, 56);
2898 return 0;
2899 }
2900 if (opnd->imm.value % 8 != 0)
2901 {
2902 set_other_error (mismatch_detail, idx,
2903 _("byte index must be a multiple of 8"));
2904 return 0;
2905 }
2906 break;
2907
2908 default:
2909 break;
2910 }
2911 break;
2912
2913 case AARCH64_OPND_CLASS_SYSTEM:
2914 switch (type)
2915 {
2916 case AARCH64_OPND_PSTATEFIELD:
2917 for (i = 0; aarch64_pstatefields[i].name; ++i)
2918 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2919 break;
2920 assert (aarch64_pstatefields[i].name);
2921 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2922 max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
2923 if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
2924 {
2925 set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
2926 return 0;
2927 }
2928 break;
2929 case AARCH64_OPND_PRFOP:
2930 if (opcode->iclass == ldst_regoff && opnd->prfop->value >= 24)
2931 {
2932 set_other_error (mismatch_detail, idx,
2933 _("the register-index form of PRFM does"
2934 " not accept opcodes in the range 24-31"));
2935 return 0;
2936 }
2937 break;
2938 default:
2939 break;
2940 }
2941 break;
2942
2943 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2944 /* Get the upper bound for the element index. */
2945 if (opcode->op == OP_FCMLA_ELEM)
2946 /* FCMLA index range depends on the vector size of other operands
2947 and is halved because complex numbers take two elements. */
2948 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2949 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2950 else
2951 num = 16;
2952 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2953 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2954
2955 /* Index out-of-range. */
2956 if (!value_in_range_p (opnd->reglane.index, 0, num))
2957 {
2958 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2959 return 0;
2960 }
2961 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2962 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2963 number is encoded in "size:M:Rm":
2964 size <Vm>
2965 00 RESERVED
2966 01 0:Rm
2967 10 M:Rm
2968 11 RESERVED */
2969 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2970 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2971 {
2972 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2973 return 0;
2974 }
2975 break;
2976
2977 case AARCH64_OPND_CLASS_MODIFIED_REG:
2978 assert (idx == 1 || idx == 2);
2979 switch (type)
2980 {
2981 case AARCH64_OPND_Rm_EXT:
2982 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2983 && opnd->shifter.kind != AARCH64_MOD_LSL)
2984 {
2985 set_other_error (mismatch_detail, idx,
2986 _("extend operator expected"));
2987 return 0;
2988 }
2989 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2990 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2991 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2992 case. */
2993 if (!aarch64_stack_pointer_p (opnds + 0)
2994 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2995 {
2996 if (!opnd->shifter.operator_present)
2997 {
2998 set_other_error (mismatch_detail, idx,
2999 _("missing extend operator"));
3000 return 0;
3001 }
3002 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
3003 {
3004 set_other_error (mismatch_detail, idx,
3005 _("'LSL' operator not allowed"));
3006 return 0;
3007 }
3008 }
3009 assert (opnd->shifter.operator_present /* Default to LSL. */
3010 || opnd->shifter.kind == AARCH64_MOD_LSL);
3011 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
3012 {
3013 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
3014 return 0;
3015 }
3016 /* In the 64-bit form, the final register operand is written as Wm
3017 for all but the (possibly omitted) UXTX/LSL and SXTX
3018 operators.
3019 N.B. GAS allows X register to be used with any operator as a
3020 programming convenience. */
3021 if (qualifier == AARCH64_OPND_QLF_X
3022 && opnd->shifter.kind != AARCH64_MOD_LSL
3023 && opnd->shifter.kind != AARCH64_MOD_UXTX
3024 && opnd->shifter.kind != AARCH64_MOD_SXTX)
3025 {
3026 set_other_error (mismatch_detail, idx, _("W register expected"));
3027 return 0;
3028 }
3029 break;
3030
3031 case AARCH64_OPND_Rm_SFT:
3032 /* ROR is not available to the shifted register operand in
3033 arithmetic instructions. */
3034 if (!aarch64_shift_operator_p (opnd->shifter.kind))
3035 {
3036 set_other_error (mismatch_detail, idx,
3037 _("shift operator expected"));
3038 return 0;
3039 }
3040 if (opnd->shifter.kind == AARCH64_MOD_ROR
3041 && opcode->iclass != log_shift)
3042 {
3043 set_other_error (mismatch_detail, idx,
3044 _("'ROR' operator not allowed"));
3045 return 0;
3046 }
3047 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
3048 if (!value_in_range_p (opnd->shifter.amount, 0, num))
3049 {
3050 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
3051 return 0;
3052 }
3053 break;
3054
3055 default:
3056 break;
3057 }
3058 break;
3059
3060 default:
3061 break;
3062 }
3063
3064 return 1;
3065 }
3066
3067 /* Main entrypoint for the operand constraint checking.
3068
3069 Return 1 if operands of *INST meet the constraint applied by the operand
3070 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
3071 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
3072 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
3073 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
3074 error kind when it is notified that an instruction does not pass the check).
3075
3076 Undetermined operand qualifiers may become established during the process. */
3077
3078 int
3079 aarch64_match_operands_constraint (aarch64_inst *inst,
3080 aarch64_operand_error *mismatch_detail)
3081 {
3082 int i;
3083
3084 DEBUG_TRACE ("enter");
3085
3086 i = inst->opcode->tied_operand;
3087
3088 if (i > 0)
3089 {
3090 /* Check for tied_operands with specific opcode iclass. */
3091 switch (inst->opcode->iclass)
3092 {
3093 /* For SME LDR and STR instructions #imm must have the same
3094 numerical value for both operands. */
3096 case sme_ldr:
3097 case sme_str:
3098 assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array_off4);
3099 assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
3100 if (inst->operands[0].indexed_za.index.imm
3101 != inst->operands[1].addr.offset.imm)
3102 {
3103 if (mismatch_detail)
3104 {
3105 mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
3106 mismatch_detail->index = i;
3107 }
3108 return 0;
3109 }
3110 break;
3111
3112 default:
3113 {
3114 /* Check for cases where a source register needs to be the
3115 same as the destination register. Do this before
3116 matching qualifiers since if an instruction has both
3117 invalid tying and invalid qualifiers, the error about
3118 qualifiers would suggest several alternative instructions
3119 that also have invalid tying. */
3120 enum aarch64_operand_class op_class1
3121 = aarch64_get_operand_class (inst->operands[0].type);
3122 enum aarch64_operand_class op_class2
3123 = aarch64_get_operand_class (inst->operands[i].type);
3124 assert (op_class1 == op_class2);
3125 if (op_class1 == AARCH64_OPND_CLASS_SVE_REGLIST
3126 ? ((inst->operands[0].reglist.first_regno
3127 != inst->operands[i].reglist.first_regno)
3128 || (inst->operands[0].reglist.num_regs
3129 != inst->operands[i].reglist.num_regs)
3130 || (inst->operands[0].reglist.stride
3131 != inst->operands[i].reglist.stride))
3132 : (inst->operands[0].reg.regno
3133 != inst->operands[i].reg.regno))
3134 {
3135 if (mismatch_detail)
3136 {
3137 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
3138 mismatch_detail->index = i;
3139 mismatch_detail->error = NULL;
3140 }
3141 return 0;
3142 }
3143 break;
3144 }
3145 }
3146 }
3147
3148 /* Match operands' qualifiers.
3149 *INST has already had qualifiers established for some, if not all, of
3150 its operands; we need to find out whether these established
3151 qualifiers match one of the qualifier sequences in
3152 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
3153 the corresponding qualifier in that sequence.
3154 Only basic operand constraint checking is done here; the more thorough
3155 constraint checking is carried out by operand_general_constraint_met_p,
3156 which has to be called after this in order to get all of the operands'
3157 qualifiers established. */
3158 int invalid_count;
3159 if (match_operands_qualifier (inst, true /* update_p */,
3160 &invalid_count) == 0)
3161 {
3162 DEBUG_TRACE ("FAIL on operand qualifier matching");
3163 if (mismatch_detail)
3164 {
3165 /* Return an error type to indicate that it is a qualifier
3166 matching failure; we don't care about which operand, as there
3167 is enough information in the opcode table to reproduce it. */
3168 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
3169 mismatch_detail->index = -1;
3170 mismatch_detail->error = NULL;
3171 mismatch_detail->data[0].i = invalid_count;
3172 }
3173 return 0;
3174 }
3175
3176 /* Match operands' constraint. */
3177 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3178 {
3179 enum aarch64_opnd type = inst->opcode->operands[i];
3180 if (type == AARCH64_OPND_NIL)
3181 break;
3182 if (inst->operands[i].skip)
3183 {
3184 DEBUG_TRACE ("skip the incomplete operand %d", i);
3185 continue;
3186 }
3187 if (operand_general_constraint_met_p (inst->operands, i, type,
3188 inst->opcode, mismatch_detail) == 0)
3189 {
3190 DEBUG_TRACE ("FAIL on operand %d", i);
3191 return 0;
3192 }
3193 }
3194
3195 DEBUG_TRACE ("PASS");
3196
3197 return 1;
3198 }
3199
3200 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
3201 Also updates the TYPE of each INST->OPERANDS with the corresponding
3202 value of OPCODE->OPERANDS.
3203
3204 Note that some operand qualifiers may need to be manually cleared by
3205 the caller before it further calls the aarch64_opcode_encode; by
3206 doing this, it helps the qualifier matching facilities work
3207 properly. */
3208
3209 const aarch64_opcode*
3210 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
3211 {
3212 int i;
3213 const aarch64_opcode *old = inst->opcode;
3214
3215 inst->opcode = opcode;
3216
3217 /* Update the operand types. */
3218 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3219 {
3220 inst->operands[i].type = opcode->operands[i];
3221 if (opcode->operands[i] == AARCH64_OPND_NIL)
3222 break;
3223 }
3224
3225 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
3226
3227 return old;
3228 }
3229
3230 int
3231 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
3232 {
3233 int i;
3234 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3235 if (operands[i] == operand)
3236 return i;
3237 else if (operands[i] == AARCH64_OPND_NIL)
3238 break;
3239 return -1;
3240 }
3241 \f
3242 /* R0...R30, followed by FOR31. */
3243 #define BANK(R, FOR31) \
3244 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
3245 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
3246 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
3247 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
3248 /* [0][0] 32-bit integer regs with sp Wn
3249 [0][1] 64-bit integer regs with sp Xn sf=1
3250 [1][0] 32-bit integer regs with #0 Wn
3251 [1][1] 64-bit integer regs with #0 Xn sf=1 */
3252 static const char *int_reg[2][2][32] = {
3253 #define R32(X) "w" #X
3254 #define R64(X) "x" #X
3255 { BANK (R32, "wsp"), BANK (R64, "sp") },
3256 { BANK (R32, "wzr"), BANK (R64, "xzr") }
3257 #undef R64
3258 #undef R32
3259 };
3260
3261 /* Names of the SVE vector registers, first with .S suffixes,
3262 then with .D suffixes. */
3263
3264 static const char *sve_reg[2][32] = {
3265 #define ZS(X) "z" #X ".s"
3266 #define ZD(X) "z" #X ".d"
3267 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
3268 #undef ZD
3269 #undef ZS
3270 };
3271 #undef BANK
3272
3273 /* Return the integer register name.
3274 If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg. */
3275
3276 static inline const char *
3277 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
3278 {
3279 const int has_zr = sp_reg_p ? 0 : 1;
3280 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
3281 return int_reg[has_zr][is_64][regno];
3282 }
3283
3284 /* Like get_int_reg_name, but IS_64 is always 1. */
3285
3286 static inline const char *
3287 get_64bit_int_reg_name (int regno, int sp_reg_p)
3288 {
3289 const int has_zr = sp_reg_p ? 0 : 1;
3290 return int_reg[has_zr][1][regno];
3291 }
3292
3293 /* Get the name of the integer offset register in OPND, using the shift type
3294 to decide whether it's a word or doubleword. */
3295
3296 static inline const char *
3297 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
3298 {
3299 switch (opnd->shifter.kind)
3300 {
3301 case AARCH64_MOD_UXTW:
3302 case AARCH64_MOD_SXTW:
3303 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
3304
3305 case AARCH64_MOD_LSL:
3306 case AARCH64_MOD_SXTX:
3307 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
3308
3309 default:
3310 abort ();
3311 }
3312 }
3313
3314 /* Get the name of the SVE vector offset register in OPND, using the operand
3315 qualifier to decide whether the suffix should be .S or .D. */
3316
3317 static inline const char *
3318 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
3319 {
3320 assert (qualifier == AARCH64_OPND_QLF_S_S
3321 || qualifier == AARCH64_OPND_QLF_S_D);
3322 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
3323 }
3324
3325 /* Types for expanding an encoded 8-bit value to a floating-point value. */
3326
3327 typedef union
3328 {
3329 uint64_t i;
3330 double d;
3331 } double_conv_t;
3332
3333 typedef union
3334 {
3335 uint32_t i;
3336 float f;
3337 } single_conv_t;
3338
3339 typedef union
3340 {
3341 uint32_t i;
3342 float f;
3343 } half_conv_t;
3344
3345 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
3346 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
3347 (depending on the type of the instruction). IMM8 will be expanded to a
3348 single-precision floating-point value (SIZE == 4) or a double-precision
3349 floating-point value (SIZE == 8). A half-precision floating-point value
3350 (SIZE == 2) is expanded to a single-precision floating-point value. The
3351 expanded value is returned. */
3352
3353 static uint64_t
3354 expand_fp_imm (int size, uint32_t imm8)
3355 {
3356 uint64_t imm = 0;
3357 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
3358
3359 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
3360 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
3361 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
3362 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
3363 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
3364 if (size == 8)
3365 {
3366 imm = (imm8_7 << (63-32)) /* imm8<7> */
3367 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
3368 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
3369 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
3370 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
3371 imm <<= 32;
3372 }
3373 else if (size == 4 || size == 2)
3374 {
3375 imm = (imm8_7 << 31) /* imm8<7> */
3376 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
3377 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
3378 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
3379 }
3380 else
3381 {
3382 /* An unsupported size. */
3383 assert (0);
3384 }
3385
3386 return imm;
3387 }
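/* Worked example (illustrative): an IMM8 of 0x70 expands to 0x3f800000
   (1.0f) when SIZE is 4 or 2, and to 0x3ff0000000000000 (1.0) when
   SIZE is 8. */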
3388
3389 /* Return a string based on FMT with the register style applied. */
3390
3391 static const char *
3392 style_reg (struct aarch64_styler *styler, const char *fmt, ...)
3393 {
3394 const char *txt;
3395 va_list ap;
3396
3397 va_start (ap, fmt);
3398 txt = styler->apply_style (styler, dis_style_register, fmt, ap);
3399 va_end (ap);
3400
3401 return txt;
3402 }
3403
3404 /* Return a string based on FMT with the immediate style applied. */
3405
3406 static const char *
3407 style_imm (struct aarch64_styler *styler, const char *fmt, ...)
3408 {
3409 const char *txt;
3410 va_list ap;
3411
3412 va_start (ap, fmt);
3413 txt = styler->apply_style (styler, dis_style_immediate, fmt, ap);
3414 va_end (ap);
3415
3416 return txt;
3417 }
3418
3419 /* Return a string based on FMT with the sub-mnemonic style applied. */
3420
3421 static const char *
3422 style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...)
3423 {
3424 const char *txt;
3425 va_list ap;
3426
3427 va_start (ap, fmt);
3428 txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap);
3429 va_end (ap);
3430
3431 return txt;
3432 }
3433
3434 /* Return a string based on FMT with the address style applied. */
3435
3436 static const char *
3437 style_addr (struct aarch64_styler *styler, const char *fmt, ...)
3438 {
3439 const char *txt;
3440 va_list ap;
3441
3442 va_start (ap, fmt);
3443 txt = styler->apply_style (styler, dis_style_address, fmt, ap);
3444 va_end (ap);
3445
3446 return txt;
3447 }
3448
3449 /* Produce the string representation of the register list operand *OPND
3450 in the buffer pointed to by BUF of size SIZE. PREFIX is the part of
3451 the register name that comes before the register number, such as "v". */
3452 static void
3453 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
3454 const char *prefix, struct aarch64_styler *styler)
3455 {
3456 const int mask = (prefix[0] == 'p' ? 15 : 31);
3457 const int num_regs = opnd->reglist.num_regs;
3458 const int stride = opnd->reglist.stride;
3459 const int first_reg = opnd->reglist.first_regno;
3460 const int last_reg = (first_reg + (num_regs - 1) * stride) & mask;
3461 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
3462 char tb[16]; /* Temporary buffer. */
3463
3464 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
3465 assert (num_regs >= 1 && num_regs <= 4);
3466
3467 /* Prepare the index if any. */
3468 if (opnd->reglist.has_index)
3469 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3470 snprintf (tb, sizeof (tb), "[%s]",
3471 style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100)));
3472 else
3473 tb[0] = '\0';
3474
3475 /* The hyphenated form is preferred for disassembly if there are
3476 more than one register in the list, and the register numbers
3477 are monotonically increasing in increments of one. */
3478 if (stride == 1 && num_regs > 1)
3479 snprintf (buf, size, "{%s-%s}%s",
3480 style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name),
3481 style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb);
3482 else
3483 {
3484 const int reg0 = first_reg;
3485 const int reg1 = (first_reg + stride) & mask;
3486 const int reg2 = (first_reg + stride * 2) & mask;
3487 const int reg3 = (first_reg + stride * 3) & mask;
3488
3489 switch (num_regs)
3490 {
3491 case 1:
3492 snprintf (buf, size, "{%s}%s",
3493 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3494 tb);
3495 break;
3496 case 2:
3497 snprintf (buf, size, "{%s, %s}%s",
3498 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3499 style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
3500 tb);
3501 break;
3502 case 3:
3503 snprintf (buf, size, "{%s, %s, %s}%s",
3504 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3505 style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
3506 style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
3507 tb);
3508 break;
3509 case 4:
3510 snprintf (buf, size, "{%s, %s, %s, %s}%s",
3511 style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3512 style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
3513 style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
3514 style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name),
3515 tb);
3516 break;
3517 }
3518 }
3519 }
3520 }
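/* Worked example (illustrative): a four-register unit-stride list is
   printed in the hyphenated form "{v0.16b-v3.16b}", while a strided
   list such as "{z0.b, z8.b}" is printed register by register. */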
3521 /* Print the register+immediate address in OPND to BUF, which has SIZE
3522 characters. BASE is the name of the base register. */
3523
3524 static void
3525 print_immediate_offset_address (char *buf, size_t size,
3526 const aarch64_opnd_info *opnd,
3527 const char *base,
3528 struct aarch64_styler *styler)
3529 {
3530 if (opnd->addr.writeback)
3531 {
3532 if (opnd->addr.preind)
3533 {
3534 if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3535 snprintf (buf, size, "[%s]!", style_reg (styler, base));
3536 else
3537 snprintf (buf, size, "[%s, %s]!",
3538 style_reg (styler, base),
3539 style_imm (styler, "#%d", opnd->addr.offset.imm));
3540 }
3541 else
3542 snprintf (buf, size, "[%s], %s",
3543 style_reg (styler, base),
3544 style_imm (styler, "#%d", opnd->addr.offset.imm));
3545 }
3546 else
3547 {
3548 if (opnd->shifter.operator_present)
3549 {
3550 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3551 snprintf (buf, size, "[%s, %s, %s]",
3552 style_reg (styler, base),
3553 style_imm (styler, "#%d", opnd->addr.offset.imm),
3554 style_sub_mnem (styler, "mul vl"));
3555 }
3556 else if (opnd->addr.offset.imm)
3557 snprintf (buf, size, "[%s, %s]",
3558 style_reg (styler, base),
3559 style_imm (styler, "#%d", opnd->addr.offset.imm));
3560 else
3561 snprintf (buf, size, "[%s]", style_reg (styler, base));
3562 }
3563 }
3564
3565 /* Produce the string representation of the register offset address operand
3566 *OPND in the buffer pointed to by BUF of size SIZE. BASE and OFFSET are
3567 the names of the base and offset registers. */
3568 static void
3569 print_register_offset_address (char *buf, size_t size,
3570 const aarch64_opnd_info *opnd,
3571 const char *base, const char *offset,
3572 struct aarch64_styler *styler)
3573 {
3574 char tb[32]; /* Temporary buffer. */
3575 bool print_extend_p = true;
3576 bool print_amount_p = true;
3577 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3578
3579 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3580 || !opnd->shifter.amount_present))
3581 {
3582 /* Don't print the shift/extend amount when the amount is zero and
3583 it is not the special case of an 8-bit load/store instruction. */
3584 print_amount_p = false;
3585 /* Likewise, no need to print the shift operator LSL in such a
3586 situation. */
3587 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3588 print_extend_p = false;
3589 }
3590
3591 /* Prepare for the extend/shift. */
3592 if (print_extend_p)
3593 {
3594 if (print_amount_p)
3595 snprintf (tb, sizeof (tb), ", %s %s",
3596 style_sub_mnem (styler, shift_name),
3597 style_imm (styler, "#%" PRIi64,
3598 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3599 (opnd->shifter.amount % 100)));
3600 else
3601 snprintf (tb, sizeof (tb), ", %s",
3602 style_sub_mnem (styler, shift_name));
3603 }
3604 else
3605 tb[0] = '\0';
3606
3607 snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base),
3608 style_reg (styler, offset), tb);
3609 }
3610
3611 /* Print ZA tiles from imm8 in ZERO instruction.
3612
3613 The preferred disassembly of this instruction uses the shortest list of tile
3614 names that represent the encoded immediate mask.
3615
3616 For example:
3617 * An all-ones immediate is disassembled as {ZA}.
3618 * An all-zeros immediate is disassembled as an empty list { }.
3619 */
3620 static void
3621 print_sme_za_list (char *buf, size_t size, int mask,
3622 struct aarch64_styler *styler)
3623 {
3624 const char* zan[] = { "za", "za0.h", "za1.h", "za0.s",
3625 "za1.s", "za2.s", "za3.s", "za0.d",
3626 "za1.d", "za2.d", "za3.d", "za4.d",
3627 "za5.d", "za6.d", "za7.d", " " };
3628 const int zan_v[] = { 0xff, 0x55, 0xaa, 0x11,
3629 0x22, 0x44, 0x88, 0x01,
3630 0x02, 0x04, 0x08, 0x10,
3631 0x20, 0x40, 0x80, 0x00 };
3632 int i, k;
3633 const int ZAN_SIZE = sizeof(zan) / sizeof(zan[0]);
3634
3635 k = snprintf (buf, size, "{");
3636 for (i = 0; i < ZAN_SIZE; i++)
3637 {
3638 if ((mask & zan_v[i]) == zan_v[i])
3639 {
3640 mask &= ~zan_v[i];
3641 if (k > 1)
3642 k += snprintf (buf + k, size - k, ", ");
3643
3644 k += snprintf (buf + k, size - k, "%s", style_reg (styler, zan[i]));
3645 }
3646 if (mask == 0)
3647 break;
3648 }
3649 snprintf (buf + k, size - k, "}");
3650 }
3651
3652 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3653 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3654 PC, PCREL_P and ADDRESS are used to pass in and return information about
3655 the PC-relative address calculation, where the PC value is passed in
3656 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
3657 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3658 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3659
3660 The function serves both the disassembler and the assembler diagnostics
3661 issuer, which is why it lives in this file. */
3662
3663 void
3664 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3665 const aarch64_opcode *opcode,
3666 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3667 bfd_vma *address, char** notes,
3668 char *comment, size_t comment_size,
3669 aarch64_feature_set features,
3670 struct aarch64_styler *styler)
3671 {
3672 unsigned int i, num_conds;
3673 const char *name = NULL;
3674 const aarch64_opnd_info *opnd = opnds + idx;
3675 enum aarch64_modifier_kind kind;
3676 uint64_t addr, enum_value;
3677
3678 if (comment != NULL)
3679 {
3680 assert (comment_size > 0);
3681 comment[0] = '\0';
3682 }
3683 else
3684 assert (comment_size == 0);
3685
3686 buf[0] = '\0';
3687 if (pcrel_p)
3688 *pcrel_p = 0;
3689
3690 switch (opnd->type)
3691 {
3692 case AARCH64_OPND_Rd:
3693 case AARCH64_OPND_Rn:
3694 case AARCH64_OPND_Rm:
3695 case AARCH64_OPND_Rt:
3696 case AARCH64_OPND_Rt2:
3697 case AARCH64_OPND_Rs:
3698 case AARCH64_OPND_Ra:
3699 case AARCH64_OPND_Rt_LS64:
3700 case AARCH64_OPND_Rt_SYS:
3701 case AARCH64_OPND_PAIRREG:
3702 case AARCH64_OPND_SVE_Rm:
3703 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3704 the <ic_op>; therefore we use opnd->present to override the
3705 generic optional-ness information. */
3706 if (opnd->type == AARCH64_OPND_Rt_SYS)
3707 {
3708 if (!opnd->present)
3709 break;
3710 }
3711 /* Omit the operand, e.g. RET. */
3712 else if (optional_operand_p (opcode, idx)
3713 && (opnd->reg.regno
3714 == get_optional_operand_default_value (opcode)))
3715 break;
3716 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3717 || opnd->qualifier == AARCH64_OPND_QLF_X);
3718 snprintf (buf, size, "%s",
3719 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3720 opnd->qualifier, 0)));
3721 break;
3722
3723 case AARCH64_OPND_Rd_SP:
3724 case AARCH64_OPND_Rn_SP:
3725 case AARCH64_OPND_Rt_SP:
3726 case AARCH64_OPND_SVE_Rn_SP:
3727 case AARCH64_OPND_Rm_SP:
3728 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3729 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3730 || opnd->qualifier == AARCH64_OPND_QLF_X
3731 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3732 snprintf (buf, size, "%s",
3733 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3734 opnd->qualifier, 1)));
3735 break;
3736
3737 case AARCH64_OPND_Rm_EXT:
3738 kind = opnd->shifter.kind;
3739 assert (idx == 1 || idx == 2);
3740 if ((aarch64_stack_pointer_p (opnds)
3741 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3742 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3743 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3744 && kind == AARCH64_MOD_UXTW)
3745 || (opnd->qualifier == AARCH64_OPND_QLF_X
3746 && kind == AARCH64_MOD_UXTX)))
3747 {
3748 /* 'LSL' is the preferred form in this case. */
3749 kind = AARCH64_MOD_LSL;
3750 if (opnd->shifter.amount == 0)
3751 {
3752 /* Shifter omitted. */
3753 snprintf (buf, size, "%s",
3754 style_reg (styler,
3755 get_int_reg_name (opnd->reg.regno,
3756 opnd->qualifier, 0)));
3757 break;
3758 }
3759 }
3760 if (opnd->shifter.amount)
3761 snprintf (buf, size, "%s, %s %s",
3762 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3763 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name),
3764 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3765 else
3766 snprintf (buf, size, "%s, %s",
3767 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3768 style_sub_mnem (styler, aarch64_operand_modifiers[kind].name));
3769 break;
3770
3771 case AARCH64_OPND_Rm_SFT:
3772 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3773 || opnd->qualifier == AARCH64_OPND_QLF_X);
3774 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3775 snprintf (buf, size, "%s",
3776 style_reg (styler, get_int_reg_name (opnd->reg.regno,
3777 opnd->qualifier, 0)));
3778 else
3779 snprintf (buf, size, "%s, %s %s",
3780 style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3781 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
3782 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3783 break;
3784
3785 case AARCH64_OPND_Fd:
3786 case AARCH64_OPND_Fn:
3787 case AARCH64_OPND_Fm:
3788 case AARCH64_OPND_Fa:
3789 case AARCH64_OPND_Ft:
3790 case AARCH64_OPND_Ft2:
3791 case AARCH64_OPND_Sd:
3792 case AARCH64_OPND_Sn:
3793 case AARCH64_OPND_Sm:
3794 case AARCH64_OPND_SVE_VZn:
3795 case AARCH64_OPND_SVE_Vd:
3796 case AARCH64_OPND_SVE_Vm:
3797 case AARCH64_OPND_SVE_Vn:
3798 snprintf (buf, size, "%s",
3799 style_reg (styler, "%s%d",
3800 aarch64_get_qualifier_name (opnd->qualifier),
3801 opnd->reg.regno));
3802 break;
3803
3804 case AARCH64_OPND_Va:
3805 case AARCH64_OPND_Vd:
3806 case AARCH64_OPND_Vn:
3807 case AARCH64_OPND_Vm:
3808 snprintf (buf, size, "%s",
3809 style_reg (styler, "v%d.%s", opnd->reg.regno,
3810 aarch64_get_qualifier_name (opnd->qualifier)));
3811 break;
3812
3813 case AARCH64_OPND_Ed:
3814 case AARCH64_OPND_En:
3815 case AARCH64_OPND_Em:
3816 case AARCH64_OPND_Em16:
3817 case AARCH64_OPND_SM3_IMM2:
3818 snprintf (buf, size, "%s[%s]",
3819 style_reg (styler, "v%d.%s", opnd->reglane.regno,
3820 aarch64_get_qualifier_name (opnd->qualifier)),
3821 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3822 break;
3823
3824 case AARCH64_OPND_VdD1:
3825 case AARCH64_OPND_VnD1:
3826 snprintf (buf, size, "%s[%s]",
3827 style_reg (styler, "v%d.d", opnd->reg.regno),
3828 style_imm (styler, "1"));
3829 break;
3830
3831 case AARCH64_OPND_LVn:
3832 case AARCH64_OPND_LVt:
3833 case AARCH64_OPND_LVt_AL:
3834 case AARCH64_OPND_LEt:
3835 print_register_list (buf, size, opnd, "v", styler);
3836 break;
3837
3838 case AARCH64_OPND_SVE_Pd:
3839 case AARCH64_OPND_SVE_Pg3:
3840 case AARCH64_OPND_SVE_Pg4_5:
3841 case AARCH64_OPND_SVE_Pg4_10:
3842 case AARCH64_OPND_SVE_Pg4_16:
3843 case AARCH64_OPND_SVE_Pm:
3844 case AARCH64_OPND_SVE_Pn:
3845 case AARCH64_OPND_SVE_Pt:
3846 case AARCH64_OPND_SME_Pm:
3847 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3848 snprintf (buf, size, "%s",
3849 style_reg (styler, "p%d", opnd->reg.regno));
3850 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3851 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3852 snprintf (buf, size, "%s",
3853 style_reg (styler, "p%d/%s", opnd->reg.regno,
3854 aarch64_get_qualifier_name (opnd->qualifier)));
3855 else
3856 snprintf (buf, size, "%s",
3857 style_reg (styler, "p%d.%s", opnd->reg.regno,
3858 aarch64_get_qualifier_name (opnd->qualifier)));
3859 break;
3860
3861 case AARCH64_OPND_SVE_PNd:
3862 case AARCH64_OPND_SVE_PNg4_10:
3863 case AARCH64_OPND_SVE_PNn:
3864 case AARCH64_OPND_SVE_PNt:
3865 case AARCH64_OPND_SME_PNd3:
3866 case AARCH64_OPND_SME_PNg3:
3867 case AARCH64_OPND_SME_PNn:
3868 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3869 snprintf (buf, size, "%s",
3870 style_reg (styler, "pn%d", opnd->reg.regno));
3871 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3872 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3873 snprintf (buf, size, "%s",
3874 style_reg (styler, "pn%d/%s", opnd->reg.regno,
3875 aarch64_get_qualifier_name (opnd->qualifier)));
3876 else
3877 snprintf (buf, size, "%s",
3878 style_reg (styler, "pn%d.%s", opnd->reg.regno,
3879 aarch64_get_qualifier_name (opnd->qualifier)));
3880 break;
3881
3882 case AARCH64_OPND_SME_Pdx2:
3883 case AARCH64_OPND_SME_PdxN:
3884 print_register_list (buf, size, opnd, "p", styler);
3885 break;
3886
3887 case AARCH64_OPND_SME_PNn3_INDEX1:
3888 case AARCH64_OPND_SME_PNn3_INDEX2:
3889 snprintf (buf, size, "%s[%s]",
3890 style_reg (styler, "pn%d", opnd->reglane.regno),
3891 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3892 break;
3893
3894 case AARCH64_OPND_SVE_Za_5:
3895 case AARCH64_OPND_SVE_Za_16:
3896 case AARCH64_OPND_SVE_Zd:
3897 case AARCH64_OPND_SVE_Zm_5:
3898 case AARCH64_OPND_SVE_Zm_16:
3899 case AARCH64_OPND_SVE_Zn:
3900 case AARCH64_OPND_SVE_Zt:
3901 case AARCH64_OPND_SME_Zm:
3902 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3903 snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
3904 else
3905 snprintf (buf, size, "%s",
3906 style_reg (styler, "z%d.%s", opnd->reg.regno,
3907 aarch64_get_qualifier_name (opnd->qualifier)));
3908 break;
3909
3910 case AARCH64_OPND_SVE_ZnxN:
3911 case AARCH64_OPND_SVE_ZtxN:
3912 case AARCH64_OPND_SME_Zdnx2:
3913 case AARCH64_OPND_SME_Zdnx4:
3914 case AARCH64_OPND_SME_Zmx2:
3915 case AARCH64_OPND_SME_Zmx4:
3916 case AARCH64_OPND_SME_Znx2:
3917 case AARCH64_OPND_SME_Znx4:
3918 case AARCH64_OPND_SME_Ztx2_STRIDED:
3919 case AARCH64_OPND_SME_Ztx4_STRIDED:
3920 print_register_list (buf, size, opnd, "z", styler);
3921 break;
3922
3923 case AARCH64_OPND_SVE_Zm3_INDEX:
3924 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3925 case AARCH64_OPND_SVE_Zm3_11_INDEX:
3926 case AARCH64_OPND_SVE_Zm4_11_INDEX:
3927 case AARCH64_OPND_SVE_Zm4_INDEX:
3928 case AARCH64_OPND_SVE_Zn_INDEX:
3929 case AARCH64_OPND_SME_Zn_INDEX1_16:
3930 case AARCH64_OPND_SME_Zn_INDEX2_15:
3931 case AARCH64_OPND_SME_Zn_INDEX2_16:
3932 case AARCH64_OPND_SME_Zn_INDEX3_14:
3933 case AARCH64_OPND_SME_Zn_INDEX3_15:
3934 case AARCH64_OPND_SME_Zn_INDEX4_14:
3935 snprintf (buf, size, "%s[%s]",
3936 (opnd->qualifier == AARCH64_OPND_QLF_NIL
3937 ? style_reg (styler, "z%d", opnd->reglane.regno)
3938 : style_reg (styler, "z%d.%s", opnd->reglane.regno,
3939 aarch64_get_qualifier_name (opnd->qualifier))),
3940 style_imm (styler, "%" PRIi64, opnd->reglane.index));
3941 break;
3942
3943 case AARCH64_OPND_SME_ZAda_2b:
3944 case AARCH64_OPND_SME_ZAda_3b:
3945 snprintf (buf, size, "%s",
3946 style_reg (styler, "za%d.%s", opnd->reg.regno,
3947 aarch64_get_qualifier_name (opnd->qualifier)));
3948 break;
3949
3950 case AARCH64_OPND_SME_ZA_HV_idx_src:
3951 case AARCH64_OPND_SME_ZA_HV_idx_srcxN:
3952 case AARCH64_OPND_SME_ZA_HV_idx_dest:
3953 case AARCH64_OPND_SME_ZA_HV_idx_destxN:
3954 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
3955 snprintf (buf, size, "%s%s[%s, %s%s%s%s%s]%s",
3956 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
3957 style_reg (styler, "za%d%c.%s",
3958 opnd->indexed_za.regno,
3959 opnd->indexed_za.v == 1 ? 'v' : 'h',
3960 aarch64_get_qualifier_name (opnd->qualifier)),
3961 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
3962 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
3963 opnd->indexed_za.index.countm1 ? ":" : "",
3964 (opnd->indexed_za.index.countm1
3965 ? style_imm (styler, "%d",
3966 opnd->indexed_za.index.imm
3967 + opnd->indexed_za.index.countm1)
3968 : ""),
3969 opnd->indexed_za.group_size ? ", " : "",
3970 opnd->indexed_za.group_size == 2
3971 ? style_sub_mnem (styler, "vgx2")
3972 : opnd->indexed_za.group_size == 4
3973 ? style_sub_mnem (styler, "vgx4") : "",
3974 opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
3975 break;
3976
3977 case AARCH64_OPND_SME_list_of_64bit_tiles:
3978 print_sme_za_list (buf, size, opnd->reg.regno, styler);
3979 break;
3980
3981 case AARCH64_OPND_SME_ZA_array_off3_0:
3982 case AARCH64_OPND_SME_ZA_array_off3_5:
3983 case AARCH64_OPND_SME_ZA_array_off4:
3984 snprintf (buf, size, "%s[%s, %s%s%s%s%s]",
3985 style_reg (styler, "za%s%s",
3986 opnd->qualifier == AARCH64_OPND_QLF_NIL ? "" : ".",
3987 (opnd->qualifier == AARCH64_OPND_QLF_NIL
3988 ? ""
3989 : aarch64_get_qualifier_name (opnd->qualifier))),
3990 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
3991 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm),
3992 opnd->indexed_za.index.countm1 ? ":" : "",
3993 (opnd->indexed_za.index.countm1
3994 ? style_imm (styler, "%d",
3995 opnd->indexed_za.index.imm
3996 + opnd->indexed_za.index.countm1)
3997 : ""),
3998 opnd->indexed_za.group_size ? ", " : "",
3999 opnd->indexed_za.group_size == 2
4000 ? style_sub_mnem (styler, "vgx2")
4001 : opnd->indexed_za.group_size == 4
4002 ? style_sub_mnem (styler, "vgx4") : "");
4003 break;
4004
4005 case AARCH64_OPND_SME_SM_ZA:
4006 snprintf (buf, size, "%s",
4007 style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za"));
4008 break;
4009
4010 case AARCH64_OPND_SME_PnT_Wm_imm:
4011 snprintf (buf, size, "%s[%s, %s]",
4012 style_reg (styler, "p%d.%s", opnd->indexed_za.regno,
4013 aarch64_get_qualifier_name (opnd->qualifier)),
4014 style_reg (styler, "w%d", opnd->indexed_za.index.regno),
4015 style_imm (styler, "%" PRIi64, opnd->indexed_za.index.imm));
4016 break;
4017
4018 case AARCH64_OPND_SME_VLxN_10:
4019 case AARCH64_OPND_SME_VLxN_13:
4020 enum_value = opnd->imm.value;
4021 assert (enum_value < ARRAY_SIZE (aarch64_sme_vlxn_array));
4022 snprintf (buf, size, "%s",
4023 style_sub_mnem (styler, aarch64_sme_vlxn_array[enum_value]));
4024 break;
4025
4026 case AARCH64_OPND_CRn:
4027 case AARCH64_OPND_CRm:
4028 snprintf (buf, size, "%s",
4029 style_reg (styler, "C%" PRIi64, opnd->imm.value));
4030 break;
4031
4032 case AARCH64_OPND_IDX:
4033 case AARCH64_OPND_MASK:
4034 case AARCH64_OPND_IMM:
4035 case AARCH64_OPND_IMM_2:
4036 case AARCH64_OPND_WIDTH:
4037 case AARCH64_OPND_UIMM3_OP1:
4038 case AARCH64_OPND_UIMM3_OP2:
4039 case AARCH64_OPND_BIT_NUM:
4040 case AARCH64_OPND_IMM_VLSL:
4041 case AARCH64_OPND_IMM_VLSR:
4042 case AARCH64_OPND_SHLL_IMM:
4043 case AARCH64_OPND_IMM0:
4044 case AARCH64_OPND_IMMR:
4045 case AARCH64_OPND_IMMS:
4046 case AARCH64_OPND_UNDEFINED:
4047 case AARCH64_OPND_FBITS:
4048 case AARCH64_OPND_TME_UIMM16:
4049 case AARCH64_OPND_SIMM5:
4050 case AARCH64_OPND_SVE_SHLIMM_PRED:
4051 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
4052 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
4053 case AARCH64_OPND_SVE_SHRIMM_PRED:
4054 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
4055 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
4056 case AARCH64_OPND_SVE_SIMM5:
4057 case AARCH64_OPND_SVE_SIMM5B:
4058 case AARCH64_OPND_SVE_SIMM6:
4059 case AARCH64_OPND_SVE_SIMM8:
4060 case AARCH64_OPND_SVE_UIMM3:
4061 case AARCH64_OPND_SVE_UIMM7:
4062 case AARCH64_OPND_SVE_UIMM8:
4063 case AARCH64_OPND_SVE_UIMM8_53:
4064 case AARCH64_OPND_IMM_ROT1:
4065 case AARCH64_OPND_IMM_ROT2:
4066 case AARCH64_OPND_IMM_ROT3:
4067 case AARCH64_OPND_SVE_IMM_ROT1:
4068 case AARCH64_OPND_SVE_IMM_ROT2:
4069 case AARCH64_OPND_SVE_IMM_ROT3:
4070 case AARCH64_OPND_CSSC_SIMM8:
4071 case AARCH64_OPND_CSSC_UIMM8:
4072 snprintf (buf, size, "%s",
4073 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4074 break;
4075
4076 case AARCH64_OPND_SVE_I1_HALF_ONE:
4077 case AARCH64_OPND_SVE_I1_HALF_TWO:
4078 case AARCH64_OPND_SVE_I1_ZERO_ONE:
4079 {
4080 single_conv_t c;
4081 c.i = opnd->imm.value;
4082 snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f));
4083 break;
4084 }
4085
4086 case AARCH64_OPND_SVE_PATTERN:
4087 if (optional_operand_p (opcode, idx)
4088 && opnd->imm.value == get_optional_operand_default_value (opcode))
4089 break;
4090 enum_value = opnd->imm.value;
4091 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
4092 if (aarch64_sve_pattern_array[enum_value])
4093 snprintf (buf, size, "%s",
4094 style_reg (styler, aarch64_sve_pattern_array[enum_value]));
4095 else
4096 snprintf (buf, size, "%s",
4097 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4098 break;
4099
4100 case AARCH64_OPND_SVE_PATTERN_SCALED:
4101 if (optional_operand_p (opcode, idx)
4102 && !opnd->shifter.operator_present
4103 && opnd->imm.value == get_optional_operand_default_value (opcode))
4104 break;
4105 enum_value = opnd->imm.value;
4106 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
4107 if (aarch64_sve_pattern_array[opnd->imm.value])
4108 snprintf (buf, size, "%s",
4109 style_reg (styler,
4110 aarch64_sve_pattern_array[opnd->imm.value]));
4111 else
4112 snprintf (buf, size, "%s",
4113 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4114 if (opnd->shifter.operator_present)
4115 {
4116 size_t len = strlen (buf);
4117 const char *shift_name
4118 = aarch64_operand_modifiers[opnd->shifter.kind].name;
4119 snprintf (buf + len, size - len, ", %s %s",
4120 style_sub_mnem (styler, shift_name),
4121 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4122 }
4123 break;
4124
4125 case AARCH64_OPND_SVE_PRFOP:
4126 enum_value = opnd->imm.value;
4127 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
4128 if (aarch64_sve_prfop_array[enum_value])
4129 snprintf (buf, size, "%s",
4130 style_reg (styler, aarch64_sve_prfop_array[enum_value]));
4131 else
4132 snprintf (buf, size, "%s",
4133 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4134 break;
4135
4136 case AARCH64_OPND_IMM_MOV:
4137 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
4138 {
4139 case 4: /* e.g. MOV Wd, #<imm32>. */
4140 {
4141 int imm32 = opnd->imm.value;
4142 snprintf (buf, size, "%s",
4143 style_imm (styler, "#0x%-20x", imm32));
4144 snprintf (comment, comment_size, "#%d", imm32);
4145 }
4146 break;
4147 case 8: /* e.g. MOV Xd, #<imm64>. */
4148 snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64,
4149 opnd->imm.value));
4150 snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
4151 break;
4152 default:
4153 snprintf (buf, size, "<invalid>");
4154 break;
4155 }
4156 break;
4157
4158 case AARCH64_OPND_FPIMM0:
4159 snprintf (buf, size, "%s", style_imm (styler, "#0.0"));
4160 break;
4161
4162 case AARCH64_OPND_LIMM:
4163 case AARCH64_OPND_AIMM:
4164 case AARCH64_OPND_HALF:
4165 case AARCH64_OPND_SVE_INV_LIMM:
4166 case AARCH64_OPND_SVE_LIMM:
4167 case AARCH64_OPND_SVE_LIMM_MOV:
4168 if (opnd->shifter.amount)
4169 snprintf (buf, size, "%s, %s %s",
4170 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
4171 style_sub_mnem (styler, "lsl"),
4172 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4173 else
4174 snprintf (buf, size, "%s",
4175 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
4176 break;
4177
4178 case AARCH64_OPND_SIMD_IMM:
4179 case AARCH64_OPND_SIMD_IMM_SFT:
4180 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
4181 || opnd->shifter.kind == AARCH64_MOD_NONE)
4182 snprintf (buf, size, "%s",
4183 style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
4184 else
4185 snprintf (buf, size, "%s, %s %s",
4186 style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
4187 style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
4188 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4189 break;
4190
4191 case AARCH64_OPND_SVE_AIMM:
4192 case AARCH64_OPND_SVE_ASIMM:
4193 if (opnd->shifter.amount)
4194 snprintf (buf, size, "%s, %s %s",
4195 style_imm (styler, "#%" PRIi64, opnd->imm.value),
4196 style_sub_mnem (styler, "lsl"),
4197 style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
4198 else
4199 snprintf (buf, size, "%s",
4200 style_imm (styler, "#%" PRIi64, opnd->imm.value));
4201 break;
4202
4203 case AARCH64_OPND_FPIMM:
4204 case AARCH64_OPND_SIMD_FPIMM:
4205 case AARCH64_OPND_SVE_FPIMM8:
4206 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
4207 {
4208 case 2: /* e.g. FMOV <Hd>, #<imm>. */
4209 {
4210 half_conv_t c;
4211 c.i = expand_fp_imm (2, opnd->imm.value);
4212 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
4213 }
4214 break;
4215 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
4216 {
4217 single_conv_t c;
4218 c.i = expand_fp_imm (4, opnd->imm.value);
4219 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
4220 }
4221 break;
4222 case 8: /* e.g. FMOV <Dd>, #<imm>. */
4223 {
4224 double_conv_t c;
4225 c.i = expand_fp_imm (8, opnd->imm.value);
4226 snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d));
4227 }
4228 break;
4229 default:
4230 snprintf (buf, size, "<invalid>");
4231 break;
4232 }
4233 break;
4234
4235 case AARCH64_OPND_CCMP_IMM:
4236 case AARCH64_OPND_NZCV:
4237 case AARCH64_OPND_EXCEPTION:
4238 case AARCH64_OPND_UIMM4:
4239 case AARCH64_OPND_UIMM4_ADDG:
4240 case AARCH64_OPND_UIMM7:
4241 case AARCH64_OPND_UIMM10:
4242 if (optional_operand_p (opcode, idx)
4243 && (opnd->imm.value ==
4244 (int64_t) get_optional_operand_default_value (opcode)))
4245 /* Omit the operand, e.g. DCPS1. */
4246 break;
4247 snprintf (buf, size, "%s",
4248 style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value));
4249 break;
4250
4251 case AARCH64_OPND_COND:
4252 case AARCH64_OPND_COND1:
4253 snprintf (buf, size, "%s",
4254 style_sub_mnem (styler, opnd->cond->names[0]));
4255 num_conds = ARRAY_SIZE (opnd->cond->names);
4256 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
4257 {
4258 size_t len = comment != NULL ? strlen (comment) : 0;
4259 if (i == 1)
4260 snprintf (comment + len, comment_size - len, "%s = %s",
4261 opnd->cond->names[0], opnd->cond->names[i]);
4262 else
4263 snprintf (comment + len, comment_size - len, ", %s",
4264 opnd->cond->names[i]);
4265 }
4266 break;
4267
4268 case AARCH64_OPND_ADDR_ADRP:
4269 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
4270 + opnd->imm.value;
4271 if (pcrel_p)
4272 *pcrel_p = 1;
4273 if (address)
4274 *address = addr;
4275 /* This is not necessary during disassembly, as print_address_func
4276 in the disassemble_info will take care of the printing. But some
4277 other callers may still be interested in getting the string in *BUF,
4278 so we do the snprintf here regardless. */
4279 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64 , addr));
4280 break;
4281
4282 case AARCH64_OPND_ADDR_PCREL14:
4283 case AARCH64_OPND_ADDR_PCREL19:
4284 case AARCH64_OPND_ADDR_PCREL21:
4285 case AARCH64_OPND_ADDR_PCREL26:
4286 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
4287 if (pcrel_p)
4288 *pcrel_p = 1;
4289 if (address)
4290 *address = addr;
4291 /* This is not necessary during disassembly, as print_address_func
4292 in the disassemble_info will take care of the printing. But some
4293 other callers may still be interested in getting the string in *BUF,
4294 so we do the snprintf here regardless. */
4295 snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
4296 break;
4297
4298 case AARCH64_OPND_ADDR_SIMPLE:
4299 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
4300 case AARCH64_OPND_SIMD_ADDR_POST:
4301 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4302 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
4303 {
4304 if (opnd->addr.offset.is_reg)
4305 snprintf (buf, size, "[%s], %s",
4306 style_reg (styler, name),
4307 style_reg (styler, "x%d", opnd->addr.offset.regno));
4308 else
4309 snprintf (buf, size, "[%s], %s",
4310 style_reg (styler, name),
4311 style_imm (styler, "#%d", opnd->addr.offset.imm));
4312 }
4313 else
4314 snprintf (buf, size, "[%s]", style_reg (styler, name));
4315 break;
4316
4317 case AARCH64_OPND_ADDR_REGOFF:
4318 case AARCH64_OPND_SVE_ADDR_R:
4319 case AARCH64_OPND_SVE_ADDR_RR:
4320 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
4321 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
4322 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
4323 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
4324 case AARCH64_OPND_SVE_ADDR_RX:
4325 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
4326 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
4327 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
4328 print_register_offset_address
4329 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4330 get_offset_int_reg_name (opnd), styler);
4331 break;
4332
4333 case AARCH64_OPND_SVE_ADDR_ZX:
4334 print_register_offset_address
4335 (buf, size, opnd,
4336 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4337 get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler);
4338 break;
4339
4340 case AARCH64_OPND_SVE_ADDR_RZ:
4341 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
4342 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
4343 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
4344 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
4345 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
4346 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
4347 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
4348 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
4349 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
4350 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
4351 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
4352 print_register_offset_address
4353 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4354 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4355 styler);
4356 break;
4357
4358 case AARCH64_OPND_ADDR_SIMM7:
4359 case AARCH64_OPND_ADDR_SIMM9:
4360 case AARCH64_OPND_ADDR_SIMM9_2:
4361 case AARCH64_OPND_ADDR_SIMM10:
4362 case AARCH64_OPND_ADDR_SIMM11:
4363 case AARCH64_OPND_ADDR_SIMM13:
4364 case AARCH64_OPND_ADDR_OFFSET:
4365 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
4366 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
4367 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
4368 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
4369 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
4370 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
4371 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
4372 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
4373 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
4374 case AARCH64_OPND_SVE_ADDR_RI_U6:
4375 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
4376 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
4377 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
4378 print_immediate_offset_address
4379 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
4380 styler);
4381 break;
4382
4383 case AARCH64_OPND_SVE_ADDR_ZI_U5:
4384 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
4385 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
4386 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
4387 print_immediate_offset_address
4388 (buf, size, opnd,
4389 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4390 styler);
4391 break;
4392
4393 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
4394 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
4395 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
4396 print_register_offset_address
4397 (buf, size, opnd,
4398 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
4399 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
4400 styler);
4401 break;
4402
4403 case AARCH64_OPND_ADDR_UIMM12:
4404 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
4405 if (opnd->addr.offset.imm)
4406 snprintf (buf, size, "[%s, %s]",
4407 style_reg (styler, name),
4408 style_imm (styler, "#%d", opnd->addr.offset.imm));
4409 else
4410 snprintf (buf, size, "[%s]", style_reg (styler, name));
4411 break;
4412
4413 case AARCH64_OPND_SYSREG:
4414 for (i = 0; aarch64_sys_regs[i].name; ++i)
4415 {
4416 const aarch64_sys_reg *sr = aarch64_sys_regs + i;
4417
4418 bool exact_match
4419 = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
4420 || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
4421 && AARCH64_CPU_HAS_FEATURE (features, sr->features);
4422
4423 /* Try to find an exact match, but if that fails, return the first
4424 partial match that was found. */
4425 if (aarch64_sys_regs[i].value == opnd->sysreg.value
4426 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
4427 && (name == NULL || exact_match))
4428 {
4429 name = aarch64_sys_regs[i].name;
4430 if (exact_match)
4431 {
4432 if (notes)
4433 *notes = NULL;
4434 break;
4435 }
4436
4437 /* If we didn't match exactly, the presence of a flag
4438 indicates what we didn't want for this instruction; e.g. if
4439 F_REG_READ is there, we were looking for a write
4440 register. See aarch64_ext_sysreg. */
4441 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
4442 *notes = _("reading from a write-only register");
4443 else if (aarch64_sys_regs[i].flags & F_REG_READ)
4444 *notes = _("writing to a read-only register");
4445 }
4446 }
4447
4448 if (name)
4449 snprintf (buf, size, "%s", style_reg (styler, name));
4450 else
4451 {
4452 /* Implementation defined system register. */
4453 unsigned int value = opnd->sysreg.value;
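/* Unpack the CPENC layout: op0 from bits [15:14], op1 from [13:11],
   CRn from [10:7], CRm from [6:3] and op2 from [2:0].  */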
4454 snprintf (buf, size, "%s",
4455 style_reg (styler, "s%u_%u_c%u_c%u_%u",
4456 (value >> 14) & 0x3, (value >> 11) & 0x7,
4457 (value >> 7) & 0xf, (value >> 3) & 0xf,
4458 value & 0x7));
4459 }
4460 break;
4461
4462 case AARCH64_OPND_PSTATEFIELD:
4463 for (i = 0; aarch64_pstatefields[i].name; ++i)
4464 if (aarch64_pstatefields[i].value == opnd->pstatefield)
4465 {
4466 /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
4467 SVCRZA and SVCRSMZA. */
4468 uint32_t flags = aarch64_pstatefields[i].flags;
4469 if (flags & F_REG_IN_CRM
4470 && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
4471 != PSTATE_DECODE_CRM (flags)))
4472 continue;
4473 break;
4474 }
4475 assert (aarch64_pstatefields[i].name);
4476 snprintf (buf, size, "%s",
4477 style_reg (styler, aarch64_pstatefields[i].name));
4478 break;
4479
4480 case AARCH64_OPND_SYSREG_AT:
4481 case AARCH64_OPND_SYSREG_DC:
4482 case AARCH64_OPND_SYSREG_IC:
4483 case AARCH64_OPND_SYSREG_TLBI:
4484 case AARCH64_OPND_SYSREG_SR:
4485 snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name));
4486 break;
4487
4488 case AARCH64_OPND_BARRIER:
4489 case AARCH64_OPND_BARRIER_DSB_NXS:
4490 {
4491 if (opnd->barrier->name[0] == '#')
4492 snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name));
4493 else
4494 snprintf (buf, size, "%s",
4495 style_sub_mnem (styler, opnd->barrier->name));
4496 }
4497 break;
4498
4499 case AARCH64_OPND_BARRIER_ISB:
4500 /* Operand can be omitted, e.g. in ISB. */
4501 if (! optional_operand_p (opcode, idx)
4502 || (opnd->barrier->value
4503 != get_optional_operand_default_value (opcode)))
4504 snprintf (buf, size, "%s",
4505 style_imm (styler, "#0x%x", opnd->barrier->value));
4506 break;
4507
4508 case AARCH64_OPND_PRFOP:
4509 if (opnd->prfop->name != NULL)
4510 snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name));
4511 else
4512 snprintf (buf, size, "%s", style_imm (styler, "#0x%02x",
4513 opnd->prfop->value));
4514 break;
4515
4516 case AARCH64_OPND_BARRIER_PSB:
4517 snprintf (buf, size, "%s", style_sub_mnem (styler, "csync"));
4518 break;
4519
4520 case AARCH64_OPND_SME_ZT0:
4521 snprintf (buf, size, "%s", style_reg (styler, "zt0"));
4522 break;
4523
4524 case AARCH64_OPND_SME_ZT0_INDEX:
4525 snprintf (buf, size, "%s[%s]", style_reg (styler, "zt0"),
4526 style_imm (styler, "%d", (int) opnd->imm.value));
4527 break;
4528
4529 case AARCH64_OPND_SME_ZT0_LIST:
4530 snprintf (buf, size, "{%s}", style_reg (styler, "zt0"));
4531 break;
4532
4533 case AARCH64_OPND_BTI_TARGET:
4534 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
4535 snprintf (buf, size, "%s",
4536 style_sub_mnem (styler, opnd->hint_option->name));
4537 break;
4538
4539 case AARCH64_OPND_MOPS_ADDR_Rd:
4540 case AARCH64_OPND_MOPS_ADDR_Rs:
4541 snprintf (buf, size, "[%s]!",
4542 style_reg (styler,
4543 get_int_reg_name (opnd->reg.regno,
4544 AARCH64_OPND_QLF_X, 0)));
4545 break;
4546
4547 case AARCH64_OPND_MOPS_WB_Rn:
4548 snprintf (buf, size, "%s!",
4549 style_reg (styler, get_int_reg_name (opnd->reg.regno,
4550 AARCH64_OPND_QLF_X, 0)));
4551 break;
4552
4553 default:
4554 snprintf (buf, size, "<invalid>");
4555 break;
4556 }
4557 }
4558 \f
4559 #define CPENC(op0,op1,crn,crm,op2) \
4560 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
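/* The result packs op0 into bits [15:14], op1 into [13:11], CRn into [10:7],
   CRm into [6:3] and op2 into [2:0]; for example, CPENC (3,3,C4,C2,7) (the
   "tco" register below) evaluates to 0xda17.  */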
4561 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
4562 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
4563 /* for 3.9.10 System Instructions */
4564 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
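/* That is, CPEN_ fixes op0 to 3 and CRn to C4, while CPENS fixes op0 to 1.  */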
4565
4566 #define C0 0
4567 #define C1 1
4568 #define C2 2
4569 #define C3 3
4570 #define C4 4
4571 #define C5 5
4572 #define C6 6
4573 #define C7 7
4574 #define C8 8
4575 #define C9 9
4576 #define C10 10
4577 #define C11 11
4578 #define C12 12
4579 #define C13 13
4580 #define C14 14
4581 #define C15 15
4582
4583 #define SYSREG(name, encoding, flags, features) \
4584 { name, encoding, flags, features }
4585
4586 #define SR_CORE(n,e,f) SYSREG (n,e,f,0)
4587
4588 #define SR_FEAT(n,e,f,feat) \
4589 SYSREG ((n), (e), (f) | F_ARCHEXT, AARCH64_FEATURE_##feat)
4590
4591 #define SR_FEAT2(n,e,f,fe1,fe2) \
4592 SYSREG ((n), (e), (f) | F_ARCHEXT, \
4593 AARCH64_FEATURE_##fe1 | AARCH64_FEATURE_##fe2)
4594
4595 #define SR_V8_1_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_1)
4596 #define SR_V8_4_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_4)
4597
4598 #define SR_V8_A(n,e,f) SR_FEAT (n,e,f,V8_A)
4599 #define SR_V8_R(n,e,f) SR_FEAT (n,e,f,V8_R)
4600 #define SR_V8_1(n,e,f) SR_FEAT (n,e,f,V8_1)
4601 #define SR_V8_2(n,e,f) SR_FEAT (n,e,f,V8_2)
4602 #define SR_V8_3(n,e,f) SR_FEAT (n,e,f,V8_3)
4603 #define SR_V8_4(n,e,f) SR_FEAT (n,e,f,V8_4)
4604 #define SR_V8_6(n,e,f) SR_FEAT (n,e,f,V8_6)
4605 #define SR_V8_7(n,e,f) SR_FEAT (n,e,f,V8_7)
4606 #define SR_V8_8(n,e,f) SR_FEAT (n,e,f,V8_8)
4607 /* Has no separate libopcodes feature flag, but separated out for clarity. */
4608 #define SR_GIC(n,e,f) SR_CORE (n,e,f)
4609 /* Has no separate libopcodes feature flag, but separated out for clarity. */
4610 #define SR_AMU(n,e,f) SR_FEAT (n,e,f,V8_4)
4611 #define SR_LOR(n,e,f) SR_FEAT (n,e,f,LOR)
4612 #define SR_PAN(n,e,f) SR_FEAT (n,e,f,PAN)
4613 #define SR_RAS(n,e,f) SR_FEAT (n,e,f,RAS)
4614 #define SR_RNG(n,e,f) SR_FEAT (n,e,f,RNG)
4615 #define SR_SME(n,e,f) SR_FEAT (n,e,f,SME)
4616 #define SR_SSBS(n,e,f) SR_FEAT (n,e,f,SSBS)
4617 #define SR_SVE(n,e,f) SR_FEAT (n,e,f,SVE)
4618 #define SR_ID_PFR2(n,e,f) SR_FEAT (n,e,f,ID_PFR2)
4619 #define SR_PROFILE(n,e,f) SR_FEAT (n,e,f,PROFILE)
4620 #define SR_MEMTAG(n,e,f) SR_FEAT (n,e,f,MEMTAG)
4621 #define SR_SCXTNUM(n,e,f) SR_FEAT (n,e,f,SCXTNUM)
4622
4623 #define SR_EXPAND_ELx(f,x) \
4624 f (x, 1), \
4625 f (x, 2), \
4626 f (x, 3), \
4627 f (x, 4), \
4628 f (x, 5), \
4629 f (x, 6), \
4630 f (x, 7), \
4631 f (x, 8), \
4632 f (x, 9), \
4633 f (x, 10), \
4634 f (x, 11), \
4635 f (x, 12), \
4636 f (x, 13), \
4637 f (x, 14), \
4638 f (x, 15),
4639
4640 #define SR_EXPAND_EL12(f) \
4641 SR_EXPAND_ELx (f,1) \
4642 SR_EXPAND_ELx (f,2)
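/* For example, SR_EXPAND_EL12 (PRBARn_ELx) below expands to the thirty entries
   prbar1_el1 ... prbar15_el1 followed by prbar1_el2 ... prbar15_el2.  */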
4643
4644 /* TODO: there is one more issue that needs to be resolved:
4645 1. handle CPU-implementation-defined system registers.
4646
4647 Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
4648 respectively. If neither of these is set then the register is read-write. */
4649 const aarch64_sys_reg aarch64_sys_regs [] =
4650 {
4651 SR_CORE ("spsr_el1", CPEN_ (0,C0,0), 0), /* = spsr_svc. */
4652 SR_V8_1 ("spsr_el12", CPEN_ (5,C0,0), 0),
4653 SR_CORE ("elr_el1", CPEN_ (0,C0,1), 0),
4654 SR_V8_1 ("elr_el12", CPEN_ (5,C0,1), 0),
4655 SR_CORE ("sp_el0", CPEN_ (0,C1,0), 0),
4656 SR_CORE ("spsel", CPEN_ (0,C2,0), 0),
4657 SR_CORE ("daif", CPEN_ (3,C2,1), 0),
4658 SR_CORE ("currentel", CPEN_ (0,C2,2), F_REG_READ),
4659 SR_PAN ("pan", CPEN_ (0,C2,3), 0),
4660 SR_V8_2 ("uao", CPEN_ (0,C2,4), 0),
4661 SR_CORE ("nzcv", CPEN_ (3,C2,0), 0),
4662 SR_SSBS ("ssbs", CPEN_ (3,C2,6), 0),
4663 SR_CORE ("fpcr", CPEN_ (3,C4,0), 0),
4664 SR_CORE ("fpsr", CPEN_ (3,C4,1), 0),
4665 SR_CORE ("dspsr_el0", CPEN_ (3,C5,0), 0),
4666 SR_CORE ("dlr_el0", CPEN_ (3,C5,1), 0),
4667 SR_CORE ("spsr_el2", CPEN_ (4,C0,0), 0), /* = spsr_hyp. */
4668 SR_CORE ("elr_el2", CPEN_ (4,C0,1), 0),
4669 SR_CORE ("sp_el1", CPEN_ (4,C1,0), 0),
4670 SR_CORE ("spsr_irq", CPEN_ (4,C3,0), 0),
4671 SR_CORE ("spsr_abt", CPEN_ (4,C3,1), 0),
4672 SR_CORE ("spsr_und", CPEN_ (4,C3,2), 0),
4673 SR_CORE ("spsr_fiq", CPEN_ (4,C3,3), 0),
4674 SR_CORE ("spsr_el3", CPEN_ (6,C0,0), 0),
4675 SR_CORE ("elr_el3", CPEN_ (6,C0,1), 0),
4676 SR_CORE ("sp_el2", CPEN_ (6,C1,0), 0),
4677 SR_CORE ("spsr_svc", CPEN_ (0,C0,0), F_DEPRECATED), /* = spsr_el1. */
4678 SR_CORE ("spsr_hyp", CPEN_ (4,C0,0), F_DEPRECATED), /* = spsr_el2. */
4679 SR_CORE ("midr_el1", CPENC (3,0,C0,C0,0), F_REG_READ),
4680 SR_CORE ("ctr_el0", CPENC (3,3,C0,C0,1), F_REG_READ),
4681 SR_CORE ("mpidr_el1", CPENC (3,0,C0,C0,5), F_REG_READ),
4682 SR_CORE ("revidr_el1", CPENC (3,0,C0,C0,6), F_REG_READ),
4683 SR_CORE ("aidr_el1", CPENC (3,1,C0,C0,7), F_REG_READ),
4684 SR_CORE ("dczid_el0", CPENC (3,3,C0,C0,7), F_REG_READ),
4685 SR_CORE ("id_dfr0_el1", CPENC (3,0,C0,C1,2), F_REG_READ),
4686 SR_CORE ("id_dfr1_el1", CPENC (3,0,C0,C3,5), F_REG_READ),
4687 SR_CORE ("id_pfr0_el1", CPENC (3,0,C0,C1,0), F_REG_READ),
4688 SR_CORE ("id_pfr1_el1", CPENC (3,0,C0,C1,1), F_REG_READ),
4689 SR_ID_PFR2 ("id_pfr2_el1", CPENC (3,0,C0,C3,4), F_REG_READ),
4690 SR_CORE ("id_afr0_el1", CPENC (3,0,C0,C1,3), F_REG_READ),
4691 SR_CORE ("id_mmfr0_el1", CPENC (3,0,C0,C1,4), F_REG_READ),
4692 SR_CORE ("id_mmfr1_el1", CPENC (3,0,C0,C1,5), F_REG_READ),
4693 SR_CORE ("id_mmfr2_el1", CPENC (3,0,C0,C1,6), F_REG_READ),
4694 SR_CORE ("id_mmfr3_el1", CPENC (3,0,C0,C1,7), F_REG_READ),
4695 SR_CORE ("id_mmfr4_el1", CPENC (3,0,C0,C2,6), F_REG_READ),
4696 SR_CORE ("id_mmfr5_el1", CPENC (3,0,C0,C3,6), F_REG_READ),
4697 SR_CORE ("id_isar0_el1", CPENC (3,0,C0,C2,0), F_REG_READ),
4698 SR_CORE ("id_isar1_el1", CPENC (3,0,C0,C2,1), F_REG_READ),
4699 SR_CORE ("id_isar2_el1", CPENC (3,0,C0,C2,2), F_REG_READ),
4700 SR_CORE ("id_isar3_el1", CPENC (3,0,C0,C2,3), F_REG_READ),
4701 SR_CORE ("id_isar4_el1", CPENC (3,0,C0,C2,4), F_REG_READ),
4702 SR_CORE ("id_isar5_el1", CPENC (3,0,C0,C2,5), F_REG_READ),
4703 SR_CORE ("id_isar6_el1", CPENC (3,0,C0,C2,7), F_REG_READ),
4704 SR_CORE ("mvfr0_el1", CPENC (3,0,C0,C3,0), F_REG_READ),
4705 SR_CORE ("mvfr1_el1", CPENC (3,0,C0,C3,1), F_REG_READ),
4706 SR_CORE ("mvfr2_el1", CPENC (3,0,C0,C3,2), F_REG_READ),
4707 SR_CORE ("ccsidr_el1", CPENC (3,1,C0,C0,0), F_REG_READ),
4708 SR_V8_3 ("ccsidr2_el1", CPENC (3,1,C0,C0,2), F_REG_READ),
4709 SR_CORE ("id_aa64pfr0_el1", CPENC (3,0,C0,C4,0), F_REG_READ),
4710 SR_CORE ("id_aa64pfr1_el1", CPENC (3,0,C0,C4,1), F_REG_READ),
4711 SR_CORE ("id_aa64dfr0_el1", CPENC (3,0,C0,C5,0), F_REG_READ),
4712 SR_CORE ("id_aa64dfr1_el1", CPENC (3,0,C0,C5,1), F_REG_READ),
4713 SR_CORE ("id_aa64isar0_el1", CPENC (3,0,C0,C6,0), F_REG_READ),
4714 SR_CORE ("id_aa64isar1_el1", CPENC (3,0,C0,C6,1), F_REG_READ),
4715 SR_CORE ("id_aa64isar2_el1", CPENC (3,0,C0,C6,2), F_REG_READ),
4716 SR_CORE ("id_aa64mmfr0_el1", CPENC (3,0,C0,C7,0), F_REG_READ),
4717 SR_CORE ("id_aa64mmfr1_el1", CPENC (3,0,C0,C7,1), F_REG_READ),
4718 SR_CORE ("id_aa64mmfr2_el1", CPENC (3,0,C0,C7,2), F_REG_READ),
4719 SR_CORE ("id_aa64afr0_el1", CPENC (3,0,C0,C5,4), F_REG_READ),
4720 SR_CORE ("id_aa64afr1_el1", CPENC (3,0,C0,C5,5), F_REG_READ),
4721 SR_SVE ("id_aa64zfr0_el1", CPENC (3,0,C0,C4,4), F_REG_READ),
4722 SR_CORE ("clidr_el1", CPENC (3,1,C0,C0,1), F_REG_READ),
4723 SR_CORE ("csselr_el1", CPENC (3,2,C0,C0,0), 0),
4724 SR_CORE ("vpidr_el2", CPENC (3,4,C0,C0,0), 0),
4725 SR_CORE ("vmpidr_el2", CPENC (3,4,C0,C0,5), 0),
4726 SR_CORE ("sctlr_el1", CPENC (3,0,C1,C0,0), 0),
4727 SR_CORE ("sctlr_el2", CPENC (3,4,C1,C0,0), 0),
4728 SR_CORE ("sctlr_el3", CPENC (3,6,C1,C0,0), 0),
4729 SR_V8_1 ("sctlr_el12", CPENC (3,5,C1,C0,0), 0),
4730 SR_CORE ("actlr_el1", CPENC (3,0,C1,C0,1), 0),
4731 SR_CORE ("actlr_el2", CPENC (3,4,C1,C0,1), 0),
4732 SR_CORE ("actlr_el3", CPENC (3,6,C1,C0,1), 0),
4733 SR_CORE ("cpacr_el1", CPENC (3,0,C1,C0,2), 0),
4734 SR_V8_1 ("cpacr_el12", CPENC (3,5,C1,C0,2), 0),
4735 SR_CORE ("cptr_el2", CPENC (3,4,C1,C1,2), 0),
4736 SR_CORE ("cptr_el3", CPENC (3,6,C1,C1,2), 0),
4737 SR_CORE ("scr_el3", CPENC (3,6,C1,C1,0), 0),
4738 SR_CORE ("hcr_el2", CPENC (3,4,C1,C1,0), 0),
4739 SR_CORE ("mdcr_el2", CPENC (3,4,C1,C1,1), 0),
4740 SR_CORE ("mdcr_el3", CPENC (3,6,C1,C3,1), 0),
4741 SR_CORE ("hstr_el2", CPENC (3,4,C1,C1,3), 0),
4742 SR_CORE ("hacr_el2", CPENC (3,4,C1,C1,7), 0),
4743 SR_SVE ("zcr_el1", CPENC (3,0,C1,C2,0), 0),
4744 SR_SVE ("zcr_el12", CPENC (3,5,C1,C2,0), 0),
4745 SR_SVE ("zcr_el2", CPENC (3,4,C1,C2,0), 0),
4746 SR_SVE ("zcr_el3", CPENC (3,6,C1,C2,0), 0),
4747 SR_CORE ("ttbr0_el1", CPENC (3,0,C2,C0,0), 0),
4748 SR_CORE ("ttbr1_el1", CPENC (3,0,C2,C0,1), 0),
4749 SR_V8_A ("ttbr0_el2", CPENC (3,4,C2,C0,0), 0),
4750 SR_V8_1_A ("ttbr1_el2", CPENC (3,4,C2,C0,1), 0),
4751 SR_CORE ("ttbr0_el3", CPENC (3,6,C2,C0,0), 0),
4752 SR_V8_1 ("ttbr0_el12", CPENC (3,5,C2,C0,0), 0),
4753 SR_V8_1 ("ttbr1_el12", CPENC (3,5,C2,C0,1), 0),
4754 SR_V8_A ("vttbr_el2", CPENC (3,4,C2,C1,0), 0),
4755 SR_CORE ("tcr_el1", CPENC (3,0,C2,C0,2), 0),
4756 SR_CORE ("tcr_el2", CPENC (3,4,C2,C0,2), 0),
4757 SR_CORE ("tcr_el3", CPENC (3,6,C2,C0,2), 0),
4758 SR_V8_1 ("tcr_el12", CPENC (3,5,C2,C0,2), 0),
4759 SR_CORE ("vtcr_el2", CPENC (3,4,C2,C1,2), 0),
4760 SR_V8_3 ("apiakeylo_el1", CPENC (3,0,C2,C1,0), 0),
4761 SR_V8_3 ("apiakeyhi_el1", CPENC (3,0,C2,C1,1), 0),
4762 SR_V8_3 ("apibkeylo_el1", CPENC (3,0,C2,C1,2), 0),
4763 SR_V8_3 ("apibkeyhi_el1", CPENC (3,0,C2,C1,3), 0),
4764 SR_V8_3 ("apdakeylo_el1", CPENC (3,0,C2,C2,0), 0),
4765 SR_V8_3 ("apdakeyhi_el1", CPENC (3,0,C2,C2,1), 0),
4766 SR_V8_3 ("apdbkeylo_el1", CPENC (3,0,C2,C2,2), 0),
4767 SR_V8_3 ("apdbkeyhi_el1", CPENC (3,0,C2,C2,3), 0),
4768 SR_V8_3 ("apgakeylo_el1", CPENC (3,0,C2,C3,0), 0),
4769 SR_V8_3 ("apgakeyhi_el1", CPENC (3,0,C2,C3,1), 0),
4770 SR_CORE ("afsr0_el1", CPENC (3,0,C5,C1,0), 0),
4771 SR_CORE ("afsr1_el1", CPENC (3,0,C5,C1,1), 0),
4772 SR_CORE ("afsr0_el2", CPENC (3,4,C5,C1,0), 0),
4773 SR_CORE ("afsr1_el2", CPENC (3,4,C5,C1,1), 0),
4774 SR_CORE ("afsr0_el3", CPENC (3,6,C5,C1,0), 0),
4775 SR_V8_1 ("afsr0_el12", CPENC (3,5,C5,C1,0), 0),
4776 SR_CORE ("afsr1_el3", CPENC (3,6,C5,C1,1), 0),
4777 SR_V8_1 ("afsr1_el12", CPENC (3,5,C5,C1,1), 0),
4778 SR_CORE ("esr_el1", CPENC (3,0,C5,C2,0), 0),
4779 SR_CORE ("esr_el2", CPENC (3,4,C5,C2,0), 0),
4780 SR_CORE ("esr_el3", CPENC (3,6,C5,C2,0), 0),
4781 SR_V8_1 ("esr_el12", CPENC (3,5,C5,C2,0), 0),
4782 SR_RAS ("vsesr_el2", CPENC (3,4,C5,C2,3), 0),
4783 SR_CORE ("fpexc32_el2", CPENC (3,4,C5,C3,0), 0),
4784 SR_RAS ("erridr_el1", CPENC (3,0,C5,C3,0), F_REG_READ),
4785 SR_RAS ("errselr_el1", CPENC (3,0,C5,C3,1), 0),
4786 SR_RAS ("erxfr_el1", CPENC (3,0,C5,C4,0), F_REG_READ),
4787 SR_RAS ("erxctlr_el1", CPENC (3,0,C5,C4,1), 0),
4788 SR_RAS ("erxstatus_el1", CPENC (3,0,C5,C4,2), 0),
4789 SR_RAS ("erxaddr_el1", CPENC (3,0,C5,C4,3), 0),
4790 SR_RAS ("erxmisc0_el1", CPENC (3,0,C5,C5,0), 0),
4791 SR_RAS ("erxmisc1_el1", CPENC (3,0,C5,C5,1), 0),
4792 SR_RAS ("erxmisc2_el1", CPENC (3,0,C5,C5,2), 0),
4793 SR_RAS ("erxmisc3_el1", CPENC (3,0,C5,C5,3), 0),
4794 SR_RAS ("erxpfgcdn_el1", CPENC (3,0,C5,C4,6), 0),
4795 SR_RAS ("erxpfgctl_el1", CPENC (3,0,C5,C4,5), 0),
4796 SR_RAS ("erxpfgf_el1", CPENC (3,0,C5,C4,4), F_REG_READ),
4797 SR_CORE ("far_el1", CPENC (3,0,C6,C0,0), 0),
4798 SR_CORE ("far_el2", CPENC (3,4,C6,C0,0), 0),
4799 SR_CORE ("far_el3", CPENC (3,6,C6,C0,0), 0),
4800 SR_V8_1 ("far_el12", CPENC (3,5,C6,C0,0), 0),
4801 SR_CORE ("hpfar_el2", CPENC (3,4,C6,C0,4), 0),
4802 SR_CORE ("par_el1", CPENC (3,0,C7,C4,0), 0),
4803 SR_CORE ("mair_el1", CPENC (3,0,C10,C2,0), 0),
4804 SR_CORE ("mair_el2", CPENC (3,4,C10,C2,0), 0),
4805 SR_CORE ("mair_el3", CPENC (3,6,C10,C2,0), 0),
4806 SR_V8_1 ("mair_el12", CPENC (3,5,C10,C2,0), 0),
4807 SR_CORE ("amair_el1", CPENC (3,0,C10,C3,0), 0),
4808 SR_CORE ("amair_el2", CPENC (3,4,C10,C3,0), 0),
4809 SR_CORE ("amair_el3", CPENC (3,6,C10,C3,0), 0),
4810 SR_V8_1 ("amair_el12", CPENC (3,5,C10,C3,0), 0),
4811 SR_CORE ("vbar_el1", CPENC (3,0,C12,C0,0), 0),
4812 SR_CORE ("vbar_el2", CPENC (3,4,C12,C0,0), 0),
4813 SR_CORE ("vbar_el3", CPENC (3,6,C12,C0,0), 0),
4814 SR_V8_1 ("vbar_el12", CPENC (3,5,C12,C0,0), 0),
4815 SR_CORE ("rvbar_el1", CPENC (3,0,C12,C0,1), F_REG_READ),
4816 SR_CORE ("rvbar_el2", CPENC (3,4,C12,C0,1), F_REG_READ),
4817 SR_CORE ("rvbar_el3", CPENC (3,6,C12,C0,1), F_REG_READ),
4818 SR_CORE ("rmr_el1", CPENC (3,0,C12,C0,2), 0),
4819 SR_CORE ("rmr_el2", CPENC (3,4,C12,C0,2), 0),
4820 SR_CORE ("rmr_el3", CPENC (3,6,C12,C0,2), 0),
4821 SR_CORE ("isr_el1", CPENC (3,0,C12,C1,0), F_REG_READ),
4822 SR_RAS ("disr_el1", CPENC (3,0,C12,C1,1), 0),
4823 SR_RAS ("vdisr_el2", CPENC (3,4,C12,C1,1), 0),
4824 SR_CORE ("contextidr_el1", CPENC (3,0,C13,C0,1), 0),
4825 SR_V8_1 ("contextidr_el2", CPENC (3,4,C13,C0,1), 0),
4826 SR_V8_1 ("contextidr_el12", CPENC (3,5,C13,C0,1), 0),
4827 SR_RNG ("rndr", CPENC (3,3,C2,C4,0), F_REG_READ),
4828 SR_RNG ("rndrrs", CPENC (3,3,C2,C4,1), F_REG_READ),
4829 SR_MEMTAG ("tco", CPENC (3,3,C4,C2,7), 0),
4830 SR_MEMTAG ("tfsre0_el1", CPENC (3,0,C5,C6,1), 0),
4831 SR_MEMTAG ("tfsr_el1", CPENC (3,0,C5,C6,0), 0),
4832 SR_MEMTAG ("tfsr_el2", CPENC (3,4,C5,C6,0), 0),
4833 SR_MEMTAG ("tfsr_el3", CPENC (3,6,C5,C6,0), 0),
4834 SR_MEMTAG ("tfsr_el12", CPENC (3,5,C5,C6,0), 0),
4835 SR_MEMTAG ("rgsr_el1", CPENC (3,0,C1,C0,5), 0),
4836 SR_MEMTAG ("gcr_el1", CPENC (3,0,C1,C0,6), 0),
4837 SR_MEMTAG ("gmid_el1", CPENC (3,1,C0,C0,4), F_REG_READ),
4838 SR_CORE ("tpidr_el0", CPENC (3,3,C13,C0,2), 0),
4839 SR_CORE ("tpidrro_el0", CPENC (3,3,C13,C0,3), 0),
4840 SR_CORE ("tpidr_el1", CPENC (3,0,C13,C0,4), 0),
4841 SR_CORE ("tpidr_el2", CPENC (3,4,C13,C0,2), 0),
4842 SR_CORE ("tpidr_el3", CPENC (3,6,C13,C0,2), 0),
4843 SR_SCXTNUM ("scxtnum_el0", CPENC (3,3,C13,C0,7), 0),
4844 SR_SCXTNUM ("scxtnum_el1", CPENC (3,0,C13,C0,7), 0),
4845 SR_SCXTNUM ("scxtnum_el2", CPENC (3,4,C13,C0,7), 0),
4846 SR_SCXTNUM ("scxtnum_el12", CPENC (3,5,C13,C0,7), 0),
4847 SR_SCXTNUM ("scxtnum_el3", CPENC (3,6,C13,C0,7), 0),
4848 SR_CORE ("teecr32_el1", CPENC (2,2,C0, C0,0), 0), /* See section 3.9.7.1. */
4849 SR_CORE ("cntfrq_el0", CPENC (3,3,C14,C0,0), 0),
4850 SR_CORE ("cntpct_el0", CPENC (3,3,C14,C0,1), F_REG_READ),
4851 SR_CORE ("cntvct_el0", CPENC (3,3,C14,C0,2), F_REG_READ),
4852 SR_CORE ("cntvoff_el2", CPENC (3,4,C14,C0,3), 0),
4853 SR_CORE ("cntkctl_el1", CPENC (3,0,C14,C1,0), 0),
4854 SR_V8_1 ("cntkctl_el12", CPENC (3,5,C14,C1,0), 0),
4855 SR_CORE ("cnthctl_el2", CPENC (3,4,C14,C1,0), 0),
4856 SR_CORE ("cntp_tval_el0", CPENC (3,3,C14,C2,0), 0),
4857 SR_V8_1 ("cntp_tval_el02", CPENC (3,5,C14,C2,0), 0),
4858 SR_CORE ("cntp_ctl_el0", CPENC (3,3,C14,C2,1), 0),
4859 SR_V8_1 ("cntp_ctl_el02", CPENC (3,5,C14,C2,1), 0),
4860 SR_CORE ("cntp_cval_el0", CPENC (3,3,C14,C2,2), 0),
4861 SR_V8_1 ("cntp_cval_el02", CPENC (3,5,C14,C2,2), 0),
4862 SR_CORE ("cntv_tval_el0", CPENC (3,3,C14,C3,0), 0),
4863 SR_V8_1 ("cntv_tval_el02", CPENC (3,5,C14,C3,0), 0),
4864 SR_CORE ("cntv_ctl_el0", CPENC (3,3,C14,C3,1), 0),
4865 SR_V8_1 ("cntv_ctl_el02", CPENC (3,5,C14,C3,1), 0),
4866 SR_CORE ("cntv_cval_el0", CPENC (3,3,C14,C3,2), 0),
4867 SR_V8_1 ("cntv_cval_el02", CPENC (3,5,C14,C3,2), 0),
4868 SR_CORE ("cnthp_tval_el2", CPENC (3,4,C14,C2,0), 0),
4869 SR_CORE ("cnthp_ctl_el2", CPENC (3,4,C14,C2,1), 0),
4870 SR_CORE ("cnthp_cval_el2", CPENC (3,4,C14,C2,2), 0),
4871 SR_CORE ("cntps_tval_el1", CPENC (3,7,C14,C2,0), 0),
4872 SR_CORE ("cntps_ctl_el1", CPENC (3,7,C14,C2,1), 0),
4873 SR_CORE ("cntps_cval_el1", CPENC (3,7,C14,C2,2), 0),
4874 SR_V8_1 ("cnthv_tval_el2", CPENC (3,4,C14,C3,0), 0),
4875 SR_V8_1 ("cnthv_ctl_el2", CPENC (3,4,C14,C3,1), 0),
4876 SR_V8_1 ("cnthv_cval_el2", CPENC (3,4,C14,C3,2), 0),
4877 SR_CORE ("dacr32_el2", CPENC (3,4,C3,C0,0), 0),
4878 SR_CORE ("ifsr32_el2", CPENC (3,4,C5,C0,1), 0),
4879 SR_CORE ("teehbr32_el1", CPENC (2,2,C1,C0,0), 0),
4880 SR_CORE ("sder32_el3", CPENC (3,6,C1,C1,1), 0),
4881 SR_CORE ("mdscr_el1", CPENC (2,0,C0,C2,2), 0),
4882 SR_CORE ("mdccsr_el0", CPENC (2,3,C0,C1,0), F_REG_READ),
4883 SR_CORE ("mdccint_el1", CPENC (2,0,C0,C2,0), 0),
4884 SR_CORE ("dbgdtr_el0", CPENC (2,3,C0,C4,0), 0),
4885 SR_CORE ("dbgdtrrx_el0", CPENC (2,3,C0,C5,0), F_REG_READ),
4886 SR_CORE ("dbgdtrtx_el0", CPENC (2,3,C0,C5,0), F_REG_WRITE),
4887 SR_CORE ("osdtrrx_el1", CPENC (2,0,C0,C0,2), 0),
4888 SR_CORE ("osdtrtx_el1", CPENC (2,0,C0,C3,2), 0),
4889 SR_CORE ("oseccr_el1", CPENC (2,0,C0,C6,2), 0),
4890 SR_CORE ("dbgvcr32_el2", CPENC (2,4,C0,C7,0), 0),
4891 SR_CORE ("dbgbvr0_el1", CPENC (2,0,C0,C0,4), 0),
4892 SR_CORE ("dbgbvr1_el1", CPENC (2,0,C0,C1,4), 0),
4893 SR_CORE ("dbgbvr2_el1", CPENC (2,0,C0,C2,4), 0),
4894 SR_CORE ("dbgbvr3_el1", CPENC (2,0,C0,C3,4), 0),
4895 SR_CORE ("dbgbvr4_el1", CPENC (2,0,C0,C4,4), 0),
4896 SR_CORE ("dbgbvr5_el1", CPENC (2,0,C0,C5,4), 0),
4897 SR_CORE ("dbgbvr6_el1", CPENC (2,0,C0,C6,4), 0),
4898 SR_CORE ("dbgbvr7_el1", CPENC (2,0,C0,C7,4), 0),
4899 SR_CORE ("dbgbvr8_el1", CPENC (2,0,C0,C8,4), 0),
4900 SR_CORE ("dbgbvr9_el1", CPENC (2,0,C0,C9,4), 0),
4901 SR_CORE ("dbgbvr10_el1", CPENC (2,0,C0,C10,4), 0),
4902 SR_CORE ("dbgbvr11_el1", CPENC (2,0,C0,C11,4), 0),
4903 SR_CORE ("dbgbvr12_el1", CPENC (2,0,C0,C12,4), 0),
4904 SR_CORE ("dbgbvr13_el1", CPENC (2,0,C0,C13,4), 0),
4905 SR_CORE ("dbgbvr14_el1", CPENC (2,0,C0,C14,4), 0),
4906 SR_CORE ("dbgbvr15_el1", CPENC (2,0,C0,C15,4), 0),
4907 SR_CORE ("dbgbcr0_el1", CPENC (2,0,C0,C0,5), 0),
4908 SR_CORE ("dbgbcr1_el1", CPENC (2,0,C0,C1,5), 0),
4909 SR_CORE ("dbgbcr2_el1", CPENC (2,0,C0,C2,5), 0),
4910 SR_CORE ("dbgbcr3_el1", CPENC (2,0,C0,C3,5), 0),
4911 SR_CORE ("dbgbcr4_el1", CPENC (2,0,C0,C4,5), 0),
4912 SR_CORE ("dbgbcr5_el1", CPENC (2,0,C0,C5,5), 0),
4913 SR_CORE ("dbgbcr6_el1", CPENC (2,0,C0,C6,5), 0),
4914 SR_CORE ("dbgbcr7_el1", CPENC (2,0,C0,C7,5), 0),
4915 SR_CORE ("dbgbcr8_el1", CPENC (2,0,C0,C8,5), 0),
4916 SR_CORE ("dbgbcr9_el1", CPENC (2,0,C0,C9,5), 0),
4917 SR_CORE ("dbgbcr10_el1", CPENC (2,0,C0,C10,5), 0),
4918 SR_CORE ("dbgbcr11_el1", CPENC (2,0,C0,C11,5), 0),
4919 SR_CORE ("dbgbcr12_el1", CPENC (2,0,C0,C12,5), 0),
4920 SR_CORE ("dbgbcr13_el1", CPENC (2,0,C0,C13,5), 0),
4921 SR_CORE ("dbgbcr14_el1", CPENC (2,0,C0,C14,5), 0),
4922 SR_CORE ("dbgbcr15_el1", CPENC (2,0,C0,C15,5), 0),
4923 SR_CORE ("dbgwvr0_el1", CPENC (2,0,C0,C0,6), 0),
4924 SR_CORE ("dbgwvr1_el1", CPENC (2,0,C0,C1,6), 0),
4925 SR_CORE ("dbgwvr2_el1", CPENC (2,0,C0,C2,6), 0),
4926 SR_CORE ("dbgwvr3_el1", CPENC (2,0,C0,C3,6), 0),
4927 SR_CORE ("dbgwvr4_el1", CPENC (2,0,C0,C4,6), 0),
4928 SR_CORE ("dbgwvr5_el1", CPENC (2,0,C0,C5,6), 0),
4929 SR_CORE ("dbgwvr6_el1", CPENC (2,0,C0,C6,6), 0),
4930 SR_CORE ("dbgwvr7_el1", CPENC (2,0,C0,C7,6), 0),
4931 SR_CORE ("dbgwvr8_el1", CPENC (2,0,C0,C8,6), 0),
4932 SR_CORE ("dbgwvr9_el1", CPENC (2,0,C0,C9,6), 0),
4933 SR_CORE ("dbgwvr10_el1", CPENC (2,0,C0,C10,6), 0),
4934 SR_CORE ("dbgwvr11_el1", CPENC (2,0,C0,C11,6), 0),
4935 SR_CORE ("dbgwvr12_el1", CPENC (2,0,C0,C12,6), 0),
4936 SR_CORE ("dbgwvr13_el1", CPENC (2,0,C0,C13,6), 0),
4937 SR_CORE ("dbgwvr14_el1", CPENC (2,0,C0,C14,6), 0),
4938 SR_CORE ("dbgwvr15_el1", CPENC (2,0,C0,C15,6), 0),
4939 SR_CORE ("dbgwcr0_el1", CPENC (2,0,C0,C0,7), 0),
4940 SR_CORE ("dbgwcr1_el1", CPENC (2,0,C0,C1,7), 0),
4941 SR_CORE ("dbgwcr2_el1", CPENC (2,0,C0,C2,7), 0),
4942 SR_CORE ("dbgwcr3_el1", CPENC (2,0,C0,C3,7), 0),
4943 SR_CORE ("dbgwcr4_el1", CPENC (2,0,C0,C4,7), 0),
4944 SR_CORE ("dbgwcr5_el1", CPENC (2,0,C0,C5,7), 0),
4945 SR_CORE ("dbgwcr6_el1", CPENC (2,0,C0,C6,7), 0),
4946 SR_CORE ("dbgwcr7_el1", CPENC (2,0,C0,C7,7), 0),
4947 SR_CORE ("dbgwcr8_el1", CPENC (2,0,C0,C8,7), 0),
4948 SR_CORE ("dbgwcr9_el1", CPENC (2,0,C0,C9,7), 0),
4949 SR_CORE ("dbgwcr10_el1", CPENC (2,0,C0,C10,7), 0),
4950 SR_CORE ("dbgwcr11_el1", CPENC (2,0,C0,C11,7), 0),
4951 SR_CORE ("dbgwcr12_el1", CPENC (2,0,C0,C12,7), 0),
4952 SR_CORE ("dbgwcr13_el1", CPENC (2,0,C0,C13,7), 0),
4953 SR_CORE ("dbgwcr14_el1", CPENC (2,0,C0,C14,7), 0),
4954 SR_CORE ("dbgwcr15_el1", CPENC (2,0,C0,C15,7), 0),
4955 SR_CORE ("mdrar_el1", CPENC (2,0,C1,C0,0), F_REG_READ),
4956 SR_CORE ("oslar_el1", CPENC (2,0,C1,C0,4), F_REG_WRITE),
4957 SR_CORE ("oslsr_el1", CPENC (2,0,C1,C1,4), F_REG_READ),
4958 SR_CORE ("osdlr_el1", CPENC (2,0,C1,C3,4), 0),
4959 SR_CORE ("dbgprcr_el1", CPENC (2,0,C1,C4,4), 0),
4960 SR_CORE ("dbgclaimset_el1", CPENC (2,0,C7,C8,6), 0),
4961 SR_CORE ("dbgclaimclr_el1", CPENC (2,0,C7,C9,6), 0),
4962 SR_CORE ("dbgauthstatus_el1", CPENC (2,0,C7,C14,6), F_REG_READ),
4963 SR_PROFILE ("pmblimitr_el1", CPENC (3,0,C9,C10,0), 0),
4964 SR_PROFILE ("pmbptr_el1", CPENC (3,0,C9,C10,1), 0),
4965 SR_PROFILE ("pmbsr_el1", CPENC (3,0,C9,C10,3), 0),
4966 SR_PROFILE ("pmbidr_el1", CPENC (3,0,C9,C10,7), F_REG_READ),
4967 SR_PROFILE ("pmscr_el1", CPENC (3,0,C9,C9,0), 0),
4968 SR_PROFILE ("pmsicr_el1", CPENC (3,0,C9,C9,2), 0),
4969 SR_PROFILE ("pmsirr_el1", CPENC (3,0,C9,C9,3), 0),
4970 SR_PROFILE ("pmsfcr_el1", CPENC (3,0,C9,C9,4), 0),
4971 SR_PROFILE ("pmsevfr_el1", CPENC (3,0,C9,C9,5), 0),
4972 SR_PROFILE ("pmslatfr_el1", CPENC (3,0,C9,C9,6), 0),
4973 SR_PROFILE ("pmsidr_el1", CPENC (3,0,C9,C9,7), F_REG_READ),
4974 SR_PROFILE ("pmscr_el2", CPENC (3,4,C9,C9,0), 0),
4975 SR_PROFILE ("pmscr_el12", CPENC (3,5,C9,C9,0), 0),
4976 SR_CORE ("pmcr_el0", CPENC (3,3,C9,C12,0), 0),
4977 SR_CORE ("pmcntenset_el0", CPENC (3,3,C9,C12,1), 0),
4978 SR_CORE ("pmcntenclr_el0", CPENC (3,3,C9,C12,2), 0),
4979 SR_CORE ("pmovsclr_el0", CPENC (3,3,C9,C12,3), 0),
4980 SR_CORE ("pmswinc_el0", CPENC (3,3,C9,C12,4), F_REG_WRITE),
4981 SR_CORE ("pmselr_el0", CPENC (3,3,C9,C12,5), 0),
4982 SR_CORE ("pmceid0_el0", CPENC (3,3,C9,C12,6), F_REG_READ),
4983 SR_CORE ("pmceid1_el0", CPENC (3,3,C9,C12,7), F_REG_READ),
4984 SR_CORE ("pmccntr_el0", CPENC (3,3,C9,C13,0), 0),
4985 SR_CORE ("pmxevtyper_el0", CPENC (3,3,C9,C13,1), 0),
4986 SR_CORE ("pmxevcntr_el0", CPENC (3,3,C9,C13,2), 0),
4987 SR_CORE ("pmuserenr_el0", CPENC (3,3,C9,C14,0), 0),
4988 SR_CORE ("pmintenset_el1", CPENC (3,0,C9,C14,1), 0),
4989 SR_CORE ("pmintenclr_el1", CPENC (3,0,C9,C14,2), 0),
4990 SR_CORE ("pmovsset_el0", CPENC (3,3,C9,C14,3), 0),
4991 SR_CORE ("pmevcntr0_el0", CPENC (3,3,C14,C8,0), 0),
4992 SR_CORE ("pmevcntr1_el0", CPENC (3,3,C14,C8,1), 0),
4993 SR_CORE ("pmevcntr2_el0", CPENC (3,3,C14,C8,2), 0),
4994 SR_CORE ("pmevcntr3_el0", CPENC (3,3,C14,C8,3), 0),
4995 SR_CORE ("pmevcntr4_el0", CPENC (3,3,C14,C8,4), 0),
4996 SR_CORE ("pmevcntr5_el0", CPENC (3,3,C14,C8,5), 0),
4997 SR_CORE ("pmevcntr6_el0", CPENC (3,3,C14,C8,6), 0),
4998 SR_CORE ("pmevcntr7_el0", CPENC (3,3,C14,C8,7), 0),
4999 SR_CORE ("pmevcntr8_el0", CPENC (3,3,C14,C9,0), 0),
5000 SR_CORE ("pmevcntr9_el0", CPENC (3,3,C14,C9,1), 0),
5001 SR_CORE ("pmevcntr10_el0", CPENC (3,3,C14,C9,2), 0),
5002 SR_CORE ("pmevcntr11_el0", CPENC (3,3,C14,C9,3), 0),
5003 SR_CORE ("pmevcntr12_el0", CPENC (3,3,C14,C9,4), 0),
5004 SR_CORE ("pmevcntr13_el0", CPENC (3,3,C14,C9,5), 0),
5005 SR_CORE ("pmevcntr14_el0", CPENC (3,3,C14,C9,6), 0),
5006 SR_CORE ("pmevcntr15_el0", CPENC (3,3,C14,C9,7), 0),
5007 SR_CORE ("pmevcntr16_el0", CPENC (3,3,C14,C10,0), 0),
5008 SR_CORE ("pmevcntr17_el0", CPENC (3,3,C14,C10,1), 0),
5009 SR_CORE ("pmevcntr18_el0", CPENC (3,3,C14,C10,2), 0),
5010 SR_CORE ("pmevcntr19_el0", CPENC (3,3,C14,C10,3), 0),
5011 SR_CORE ("pmevcntr20_el0", CPENC (3,3,C14,C10,4), 0),
5012 SR_CORE ("pmevcntr21_el0", CPENC (3,3,C14,C10,5), 0),
5013 SR_CORE ("pmevcntr22_el0", CPENC (3,3,C14,C10,6), 0),
5014 SR_CORE ("pmevcntr23_el0", CPENC (3,3,C14,C10,7), 0),
5015 SR_CORE ("pmevcntr24_el0", CPENC (3,3,C14,C11,0), 0),
5016 SR_CORE ("pmevcntr25_el0", CPENC (3,3,C14,C11,1), 0),
5017 SR_CORE ("pmevcntr26_el0", CPENC (3,3,C14,C11,2), 0),
5018 SR_CORE ("pmevcntr27_el0", CPENC (3,3,C14,C11,3), 0),
5019 SR_CORE ("pmevcntr28_el0", CPENC (3,3,C14,C11,4), 0),
5020 SR_CORE ("pmevcntr29_el0", CPENC (3,3,C14,C11,5), 0),
5021 SR_CORE ("pmevcntr30_el0", CPENC (3,3,C14,C11,6), 0),
5022 SR_CORE ("pmevtyper0_el0", CPENC (3,3,C14,C12,0), 0),
5023 SR_CORE ("pmevtyper1_el0", CPENC (3,3,C14,C12,1), 0),
5024 SR_CORE ("pmevtyper2_el0", CPENC (3,3,C14,C12,2), 0),
5025 SR_CORE ("pmevtyper3_el0", CPENC (3,3,C14,C12,3), 0),
5026 SR_CORE ("pmevtyper4_el0", CPENC (3,3,C14,C12,4), 0),
5027 SR_CORE ("pmevtyper5_el0", CPENC (3,3,C14,C12,5), 0),
5028 SR_CORE ("pmevtyper6_el0", CPENC (3,3,C14,C12,6), 0),
5029 SR_CORE ("pmevtyper7_el0", CPENC (3,3,C14,C12,7), 0),
5030 SR_CORE ("pmevtyper8_el0", CPENC (3,3,C14,C13,0), 0),
5031 SR_CORE ("pmevtyper9_el0", CPENC (3,3,C14,C13,1), 0),
5032 SR_CORE ("pmevtyper10_el0", CPENC (3,3,C14,C13,2), 0),
5033 SR_CORE ("pmevtyper11_el0", CPENC (3,3,C14,C13,3), 0),
5034 SR_CORE ("pmevtyper12_el0", CPENC (3,3,C14,C13,4), 0),
5035 SR_CORE ("pmevtyper13_el0", CPENC (3,3,C14,C13,5), 0),
5036 SR_CORE ("pmevtyper14_el0", CPENC (3,3,C14,C13,6), 0),
5037 SR_CORE ("pmevtyper15_el0", CPENC (3,3,C14,C13,7), 0),
5038 SR_CORE ("pmevtyper16_el0", CPENC (3,3,C14,C14,0), 0),
5039 SR_CORE ("pmevtyper17_el0", CPENC (3,3,C14,C14,1), 0),
5040 SR_CORE ("pmevtyper18_el0", CPENC (3,3,C14,C14,2), 0),
5041 SR_CORE ("pmevtyper19_el0", CPENC (3,3,C14,C14,3), 0),
5042 SR_CORE ("pmevtyper20_el0", CPENC (3,3,C14,C14,4), 0),
5043 SR_CORE ("pmevtyper21_el0", CPENC (3,3,C14,C14,5), 0),
5044 SR_CORE ("pmevtyper22_el0", CPENC (3,3,C14,C14,6), 0),
5045 SR_CORE ("pmevtyper23_el0", CPENC (3,3,C14,C14,7), 0),
5046 SR_CORE ("pmevtyper24_el0", CPENC (3,3,C14,C15,0), 0),
5047 SR_CORE ("pmevtyper25_el0", CPENC (3,3,C14,C15,1), 0),
5048 SR_CORE ("pmevtyper26_el0", CPENC (3,3,C14,C15,2), 0),
5049 SR_CORE ("pmevtyper27_el0", CPENC (3,3,C14,C15,3), 0),
5050 SR_CORE ("pmevtyper28_el0", CPENC (3,3,C14,C15,4), 0),
5051 SR_CORE ("pmevtyper29_el0", CPENC (3,3,C14,C15,5), 0),
5052 SR_CORE ("pmevtyper30_el0", CPENC (3,3,C14,C15,6), 0),
5053 SR_CORE ("pmccfiltr_el0", CPENC (3,3,C14,C15,7), 0),
5054
5055 SR_V8_4 ("dit", CPEN_ (3,C2,5), 0),
5056 SR_V8_4 ("trfcr_el1", CPENC (3,0,C1,C2,1), 0),
5057 SR_V8_4 ("pmmir_el1", CPENC (3,0,C9,C14,6), F_REG_READ),
5058 SR_V8_4 ("trfcr_el2", CPENC (3,4,C1,C2,1), 0),
5059 SR_V8_4 ("vstcr_el2", CPENC (3,4,C2,C6,2), 0),
5060 SR_V8_4_A ("vsttbr_el2", CPENC (3,4,C2,C6,0), 0),
5061 SR_V8_4 ("cnthvs_tval_el2", CPENC (3,4,C14,C4,0), 0),
5062 SR_V8_4 ("cnthvs_cval_el2", CPENC (3,4,C14,C4,2), 0),
5063 SR_V8_4 ("cnthvs_ctl_el2", CPENC (3,4,C14,C4,1), 0),
5064 SR_V8_4 ("cnthps_tval_el2", CPENC (3,4,C14,C5,0), 0),
5065 SR_V8_4 ("cnthps_cval_el2", CPENC (3,4,C14,C5,2), 0),
5066 SR_V8_4 ("cnthps_ctl_el2", CPENC (3,4,C14,C5,1), 0),
5067 SR_V8_4 ("sder32_el2", CPENC (3,4,C1,C3,1), 0),
5068 SR_V8_4 ("vncr_el2", CPENC (3,4,C2,C2,0), 0),
5069 SR_V8_4 ("trfcr_el12", CPENC (3,5,C1,C2,1), 0),
5070
5071 SR_CORE ("mpam0_el1", CPENC (3,0,C10,C5,1), 0),
5072 SR_CORE ("mpam1_el1", CPENC (3,0,C10,C5,0), 0),
5073 SR_CORE ("mpam1_el12", CPENC (3,5,C10,C5,0), 0),
5074 SR_CORE ("mpam2_el2", CPENC (3,4,C10,C5,0), 0),
5075 SR_CORE ("mpam3_el3", CPENC (3,6,C10,C5,0), 0),
5076 SR_CORE ("mpamhcr_el2", CPENC (3,4,C10,C4,0), 0),
5077 SR_CORE ("mpamidr_el1", CPENC (3,0,C10,C4,4), F_REG_READ),
5078 SR_CORE ("mpamvpm0_el2", CPENC (3,4,C10,C6,0), 0),
5079 SR_CORE ("mpamvpm1_el2", CPENC (3,4,C10,C6,1), 0),
5080 SR_CORE ("mpamvpm2_el2", CPENC (3,4,C10,C6,2), 0),
5081 SR_CORE ("mpamvpm3_el2", CPENC (3,4,C10,C6,3), 0),
5082 SR_CORE ("mpamvpm4_el2", CPENC (3,4,C10,C6,4), 0),
5083 SR_CORE ("mpamvpm5_el2", CPENC (3,4,C10,C6,5), 0),
5084 SR_CORE ("mpamvpm6_el2", CPENC (3,4,C10,C6,6), 0),
5085 SR_CORE ("mpamvpm7_el2", CPENC (3,4,C10,C6,7), 0),
5086 SR_CORE ("mpamvpmv_el2", CPENC (3,4,C10,C4,1), 0),
5087
5088 SR_V8_R ("mpuir_el1", CPENC (3,0,C0,C0,4), F_REG_READ),
5089 SR_V8_R ("mpuir_el2", CPENC (3,4,C0,C0,4), F_REG_READ),
5090 SR_V8_R ("prbar_el1", CPENC (3,0,C6,C8,0), 0),
5091 SR_V8_R ("prbar_el2", CPENC (3,4,C6,C8,0), 0),
5092
5093 #define ENC_BARLAR(x,n,lar) \
5094 CPENC (3, (x-1) << 2, C6, 8 | (n >> 1), ((n & 1) << 2) | lar)
5095
5096 #define PRBARn_ELx(x,n) SR_V8_R ("prbar" #n "_el" #x, ENC_BARLAR (x,n,0), 0)
5097 #define PRLARn_ELx(x,n) SR_V8_R ("prlar" #n "_el" #x, ENC_BARLAR (x,n,1), 0)
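/* A worked example of the macros above (illustrative only): PRBARn_ELx (2, 1)
   produces the entry "prbar1_el2" encoded as ENC_BARLAR (2, 1, 0), which
   evaluates to CPENC (3, 4, C6, 8, 4); PRLARn_ELx differs only in setting the
   low "lar" bit of op2.  SR_EXPAND_EL12 below is assumed to instantiate these
   for each numbered region at EL1 and EL2.  */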
5098
5099 SR_EXPAND_EL12 (PRBARn_ELx)
5100 SR_V8_R ("prenr_el1", CPENC (3,0,C6,C1,1), 0),
5101 SR_V8_R ("prenr_el2", CPENC (3,4,C6,C1,1), 0),
5102 SR_V8_R ("prlar_el1", CPENC (3,0,C6,C8,1), 0),
5103 SR_V8_R ("prlar_el2", CPENC (3,4,C6,C8,1), 0),
5104 SR_EXPAND_EL12 (PRLARn_ELx)
5105 SR_V8_R ("prselr_el1", CPENC (3,0,C6,C2,1), 0),
5106 SR_V8_R ("prselr_el2", CPENC (3,4,C6,C2,1), 0),
5107 SR_V8_R ("vsctlr_el2", CPENC (3,4,C2,C0,0), 0),
5108
5109 SR_CORE("trbbaser_el1", CPENC (3,0,C9,C11,2), 0),
5110 SR_CORE("trbidr_el1", CPENC (3,0,C9,C11,7), F_REG_READ),
5111 SR_CORE("trblimitr_el1", CPENC (3,0,C9,C11,0), 0),
5112 SR_CORE("trbmar_el1", CPENC (3,0,C9,C11,4), 0),
5113 SR_CORE("trbptr_el1", CPENC (3,0,C9,C11,1), 0),
5114 SR_CORE("trbsr_el1", CPENC (3,0,C9,C11,3), 0),
5115 SR_CORE("trbtrg_el1", CPENC (3,0,C9,C11,6), 0),
5116
5117 SR_CORE ("trcauthstatus", CPENC (2,1,C7,C14,6), F_REG_READ),
5118 SR_CORE ("trccidr0", CPENC (2,1,C7,C12,7), F_REG_READ),
5119 SR_CORE ("trccidr1", CPENC (2,1,C7,C13,7), F_REG_READ),
5120 SR_CORE ("trccidr2", CPENC (2,1,C7,C14,7), F_REG_READ),
5121 SR_CORE ("trccidr3", CPENC (2,1,C7,C15,7), F_REG_READ),
5122 SR_CORE ("trcdevaff0", CPENC (2,1,C7,C10,6), F_REG_READ),
5123 SR_CORE ("trcdevaff1", CPENC (2,1,C7,C11,6), F_REG_READ),
5124 SR_CORE ("trcdevarch", CPENC (2,1,C7,C15,6), F_REG_READ),
5125 SR_CORE ("trcdevid", CPENC (2,1,C7,C2,7), F_REG_READ),
5126 SR_CORE ("trcdevtype", CPENC (2,1,C7,C3,7), F_REG_READ),
5127 SR_CORE ("trcidr0", CPENC (2,1,C0,C8,7), F_REG_READ),
5128 SR_CORE ("trcidr1", CPENC (2,1,C0,C9,7), F_REG_READ),
5129 SR_CORE ("trcidr2", CPENC (2,1,C0,C10,7), F_REG_READ),
5130 SR_CORE ("trcidr3", CPENC (2,1,C0,C11,7), F_REG_READ),
5131 SR_CORE ("trcidr4", CPENC (2,1,C0,C12,7), F_REG_READ),
5132 SR_CORE ("trcidr5", CPENC (2,1,C0,C13,7), F_REG_READ),
5133 SR_CORE ("trcidr6", CPENC (2,1,C0,C14,7), F_REG_READ),
5134 SR_CORE ("trcidr7", CPENC (2,1,C0,C15,7), F_REG_READ),
5135 SR_CORE ("trcidr8", CPENC (2,1,C0,C0,6), F_REG_READ),
5136 SR_CORE ("trcidr9", CPENC (2,1,C0,C1,6), F_REG_READ),
5137 SR_CORE ("trcidr10", CPENC (2,1,C0,C2,6), F_REG_READ),
5138 SR_CORE ("trcidr11", CPENC (2,1,C0,C3,6), F_REG_READ),
5139 SR_CORE ("trcidr12", CPENC (2,1,C0,C4,6), F_REG_READ),
5140 SR_CORE ("trcidr13", CPENC (2,1,C0,C5,6), F_REG_READ),
5141 SR_CORE ("trclsr", CPENC (2,1,C7,C13,6), F_REG_READ),
5142 SR_CORE ("trcoslsr", CPENC (2,1,C1,C1,4), F_REG_READ),
5143 SR_CORE ("trcpdsr", CPENC (2,1,C1,C5,4), F_REG_READ),
5144 SR_CORE ("trcpidr0", CPENC (2,1,C7,C8,7), F_REG_READ),
5145 SR_CORE ("trcpidr1", CPENC (2,1,C7,C9,7), F_REG_READ),
5146 SR_CORE ("trcpidr2", CPENC (2,1,C7,C10,7), F_REG_READ),
5147 SR_CORE ("trcpidr3", CPENC (2,1,C7,C11,7), F_REG_READ),
5148 SR_CORE ("trcpidr4", CPENC (2,1,C7,C4,7), F_REG_READ),
5149 SR_CORE ("trcpidr5", CPENC (2,1,C7,C5,7), F_REG_READ),
5150 SR_CORE ("trcpidr6", CPENC (2,1,C7,C6,7), F_REG_READ),
5151 SR_CORE ("trcpidr7", CPENC (2,1,C7,C7,7), F_REG_READ),
5152 SR_CORE ("trcstatr", CPENC (2,1,C0,C3,0), F_REG_READ),
5153 SR_CORE ("trcacatr0", CPENC (2,1,C2,C0,2), 0),
5154 SR_CORE ("trcacatr1", CPENC (2,1,C2,C2,2), 0),
5155 SR_CORE ("trcacatr2", CPENC (2,1,C2,C4,2), 0),
5156 SR_CORE ("trcacatr3", CPENC (2,1,C2,C6,2), 0),
5157 SR_CORE ("trcacatr4", CPENC (2,1,C2,C8,2), 0),
5158 SR_CORE ("trcacatr5", CPENC (2,1,C2,C10,2), 0),
5159 SR_CORE ("trcacatr6", CPENC (2,1,C2,C12,2), 0),
5160 SR_CORE ("trcacatr7", CPENC (2,1,C2,C14,2), 0),
5161 SR_CORE ("trcacatr8", CPENC (2,1,C2,C0,3), 0),
5162 SR_CORE ("trcacatr9", CPENC (2,1,C2,C2,3), 0),
5163 SR_CORE ("trcacatr10", CPENC (2,1,C2,C4,3), 0),
5164 SR_CORE ("trcacatr11", CPENC (2,1,C2,C6,3), 0),
5165 SR_CORE ("trcacatr12", CPENC (2,1,C2,C8,3), 0),
5166 SR_CORE ("trcacatr13", CPENC (2,1,C2,C10,3), 0),
5167 SR_CORE ("trcacatr14", CPENC (2,1,C2,C12,3), 0),
5168 SR_CORE ("trcacatr15", CPENC (2,1,C2,C14,3), 0),
5169 SR_CORE ("trcacvr0", CPENC (2,1,C2,C0,0), 0),
5170 SR_CORE ("trcacvr1", CPENC (2,1,C2,C2,0), 0),
5171 SR_CORE ("trcacvr2", CPENC (2,1,C2,C4,0), 0),
5172 SR_CORE ("trcacvr3", CPENC (2,1,C2,C6,0), 0),
5173 SR_CORE ("trcacvr4", CPENC (2,1,C2,C8,0), 0),
5174 SR_CORE ("trcacvr5", CPENC (2,1,C2,C10,0), 0),
5175 SR_CORE ("trcacvr6", CPENC (2,1,C2,C12,0), 0),
5176 SR_CORE ("trcacvr7", CPENC (2,1,C2,C14,0), 0),
5177 SR_CORE ("trcacvr8", CPENC (2,1,C2,C0,1), 0),
5178 SR_CORE ("trcacvr9", CPENC (2,1,C2,C2,1), 0),
5179 SR_CORE ("trcacvr10", CPENC (2,1,C2,C4,1), 0),
5180 SR_CORE ("trcacvr11", CPENC (2,1,C2,C6,1), 0),
5181 SR_CORE ("trcacvr12", CPENC (2,1,C2,C8,1), 0),
5182 SR_CORE ("trcacvr13", CPENC (2,1,C2,C10,1), 0),
5183 SR_CORE ("trcacvr14", CPENC (2,1,C2,C12,1), 0),
5184 SR_CORE ("trcacvr15", CPENC (2,1,C2,C14,1), 0),
5185 SR_CORE ("trcauxctlr", CPENC (2,1,C0,C6,0), 0),
5186 SR_CORE ("trcbbctlr", CPENC (2,1,C0,C15,0), 0),
5187 SR_CORE ("trcccctlr", CPENC (2,1,C0,C14,0), 0),
5188 SR_CORE ("trccidcctlr0", CPENC (2,1,C3,C0,2), 0),
5189 SR_CORE ("trccidcctlr1", CPENC (2,1,C3,C1,2), 0),
5190 SR_CORE ("trccidcvr0", CPENC (2,1,C3,C0,0), 0),
5191 SR_CORE ("trccidcvr1", CPENC (2,1,C3,C2,0), 0),
5192 SR_CORE ("trccidcvr2", CPENC (2,1,C3,C4,0), 0),
5193 SR_CORE ("trccidcvr3", CPENC (2,1,C3,C6,0), 0),
5194 SR_CORE ("trccidcvr4", CPENC (2,1,C3,C8,0), 0),
5195 SR_CORE ("trccidcvr5", CPENC (2,1,C3,C10,0), 0),
5196 SR_CORE ("trccidcvr6", CPENC (2,1,C3,C12,0), 0),
5197 SR_CORE ("trccidcvr7", CPENC (2,1,C3,C14,0), 0),
5198 SR_CORE ("trcclaimclr", CPENC (2,1,C7,C9,6), 0),
5199 SR_CORE ("trcclaimset", CPENC (2,1,C7,C8,6), 0),
5200 SR_CORE ("trccntctlr0", CPENC (2,1,C0,C4,5), 0),
5201 SR_CORE ("trccntctlr1", CPENC (2,1,C0,C5,5), 0),
5202 SR_CORE ("trccntctlr2", CPENC (2,1,C0,C6,5), 0),
5203 SR_CORE ("trccntctlr3", CPENC (2,1,C0,C7,5), 0),
5204 SR_CORE ("trccntrldvr0", CPENC (2,1,C0,C0,5), 0),
5205 SR_CORE ("trccntrldvr1", CPENC (2,1,C0,C1,5), 0),
5206 SR_CORE ("trccntrldvr2", CPENC (2,1,C0,C2,5), 0),
5207 SR_CORE ("trccntrldvr3", CPENC (2,1,C0,C3,5), 0),
5208 SR_CORE ("trccntvr0", CPENC (2,1,C0,C8,5), 0),
5209 SR_CORE ("trccntvr1", CPENC (2,1,C0,C9,5), 0),
5210 SR_CORE ("trccntvr2", CPENC (2,1,C0,C10,5), 0),
5211 SR_CORE ("trccntvr3", CPENC (2,1,C0,C11,5), 0),
5212 SR_CORE ("trcconfigr", CPENC (2,1,C0,C4,0), 0),
5213 SR_CORE ("trcdvcmr0", CPENC (2,1,C2,C0,6), 0),
5214 SR_CORE ("trcdvcmr1", CPENC (2,1,C2,C4,6), 0),
5215 SR_CORE ("trcdvcmr2", CPENC (2,1,C2,C8,6), 0),
5216 SR_CORE ("trcdvcmr3", CPENC (2,1,C2,C12,6), 0),
5217 SR_CORE ("trcdvcmr4", CPENC (2,1,C2,C0,7), 0),
5218 SR_CORE ("trcdvcmr5", CPENC (2,1,C2,C4,7), 0),
5219 SR_CORE ("trcdvcmr6", CPENC (2,1,C2,C8,7), 0),
5220 SR_CORE ("trcdvcmr7", CPENC (2,1,C2,C12,7), 0),
5221 SR_CORE ("trcdvcvr0", CPENC (2,1,C2,C0,4), 0),
5222 SR_CORE ("trcdvcvr1", CPENC (2,1,C2,C4,4), 0),
5223 SR_CORE ("trcdvcvr2", CPENC (2,1,C2,C8,4), 0),
5224 SR_CORE ("trcdvcvr3", CPENC (2,1,C2,C12,4), 0),
5225 SR_CORE ("trcdvcvr4", CPENC (2,1,C2,C0,5), 0),
5226 SR_CORE ("trcdvcvr5", CPENC (2,1,C2,C4,5), 0),
5227 SR_CORE ("trcdvcvr6", CPENC (2,1,C2,C8,5), 0),
5228 SR_CORE ("trcdvcvr7", CPENC (2,1,C2,C12,5), 0),
5229 SR_CORE ("trceventctl0r", CPENC (2,1,C0,C8,0), 0),
5230 SR_CORE ("trceventctl1r", CPENC (2,1,C0,C9,0), 0),
5231 SR_CORE ("trcextinselr0", CPENC (2,1,C0,C8,4), 0),
5232 SR_CORE ("trcextinselr", CPENC (2,1,C0,C8,4), 0),
5233 SR_CORE ("trcextinselr1", CPENC (2,1,C0,C9,4), 0),
5234 SR_CORE ("trcextinselr2", CPENC (2,1,C0,C10,4), 0),
5235 SR_CORE ("trcextinselr3", CPENC (2,1,C0,C11,4), 0),
5236 SR_CORE ("trcimspec0", CPENC (2,1,C0,C0,7), 0),
5237 SR_CORE ("trcimspec1", CPENC (2,1,C0,C1,7), 0),
5238 SR_CORE ("trcimspec2", CPENC (2,1,C0,C2,7), 0),
5239 SR_CORE ("trcimspec3", CPENC (2,1,C0,C3,7), 0),
5240 SR_CORE ("trcimspec4", CPENC (2,1,C0,C4,7), 0),
5241 SR_CORE ("trcimspec5", CPENC (2,1,C0,C5,7), 0),
5242 SR_CORE ("trcimspec6", CPENC (2,1,C0,C6,7), 0),
5243 SR_CORE ("trcimspec7", CPENC (2,1,C0,C7,7), 0),
5244 SR_CORE ("trcitctrl", CPENC (2,1,C7,C0,4), 0),
5245 SR_CORE ("trcpdcr", CPENC (2,1,C1,C4,4), 0),
5246 SR_CORE ("trcprgctlr", CPENC (2,1,C0,C1,0), 0),
5247 SR_CORE ("trcprocselr", CPENC (2,1,C0,C2,0), 0),
5248 SR_CORE ("trcqctlr", CPENC (2,1,C0,C1,1), 0),
5249 SR_CORE ("trcrsr", CPENC (2,1,C0,C10,0), 0),
5250 SR_CORE ("trcrsctlr2", CPENC (2,1,C1,C2,0), 0),
5251 SR_CORE ("trcrsctlr3", CPENC (2,1,C1,C3,0), 0),
5252 SR_CORE ("trcrsctlr4", CPENC (2,1,C1,C4,0), 0),
5253 SR_CORE ("trcrsctlr5", CPENC (2,1,C1,C5,0), 0),
5254 SR_CORE ("trcrsctlr6", CPENC (2,1,C1,C6,0), 0),
5255 SR_CORE ("trcrsctlr7", CPENC (2,1,C1,C7,0), 0),
5256 SR_CORE ("trcrsctlr8", CPENC (2,1,C1,C8,0), 0),
5257 SR_CORE ("trcrsctlr9", CPENC (2,1,C1,C9,0), 0),
5258 SR_CORE ("trcrsctlr10", CPENC (2,1,C1,C10,0), 0),
5259 SR_CORE ("trcrsctlr11", CPENC (2,1,C1,C11,0), 0),
5260 SR_CORE ("trcrsctlr12", CPENC (2,1,C1,C12,0), 0),
5261 SR_CORE ("trcrsctlr13", CPENC (2,1,C1,C13,0), 0),
5262 SR_CORE ("trcrsctlr14", CPENC (2,1,C1,C14,0), 0),
5263 SR_CORE ("trcrsctlr15", CPENC (2,1,C1,C15,0), 0),
5264 SR_CORE ("trcrsctlr16", CPENC (2,1,C1,C0,1), 0),
5265 SR_CORE ("trcrsctlr17", CPENC (2,1,C1,C1,1), 0),
5266 SR_CORE ("trcrsctlr18", CPENC (2,1,C1,C2,1), 0),
5267 SR_CORE ("trcrsctlr19", CPENC (2,1,C1,C3,1), 0),
5268 SR_CORE ("trcrsctlr20", CPENC (2,1,C1,C4,1), 0),
5269 SR_CORE ("trcrsctlr21", CPENC (2,1,C1,C5,1), 0),
5270 SR_CORE ("trcrsctlr22", CPENC (2,1,C1,C6,1), 0),
5271 SR_CORE ("trcrsctlr23", CPENC (2,1,C1,C7,1), 0),
5272 SR_CORE ("trcrsctlr24", CPENC (2,1,C1,C8,1), 0),
5273 SR_CORE ("trcrsctlr25", CPENC (2,1,C1,C9,1), 0),
5274 SR_CORE ("trcrsctlr26", CPENC (2,1,C1,C10,1), 0),
5275 SR_CORE ("trcrsctlr27", CPENC (2,1,C1,C11,1), 0),
5276 SR_CORE ("trcrsctlr28", CPENC (2,1,C1,C12,1), 0),
5277 SR_CORE ("trcrsctlr29", CPENC (2,1,C1,C13,1), 0),
5278 SR_CORE ("trcrsctlr30", CPENC (2,1,C1,C14,1), 0),
5279 SR_CORE ("trcrsctlr31", CPENC (2,1,C1,C15,1), 0),
5280 SR_CORE ("trcseqevr0", CPENC (2,1,C0,C0,4), 0),
5281 SR_CORE ("trcseqevr1", CPENC (2,1,C0,C1,4), 0),
5282 SR_CORE ("trcseqevr2", CPENC (2,1,C0,C2,4), 0),
5283 SR_CORE ("trcseqrstevr", CPENC (2,1,C0,C6,4), 0),
5284 SR_CORE ("trcseqstr", CPENC (2,1,C0,C7,4), 0),
5285 SR_CORE ("trcssccr0", CPENC (2,1,C1,C0,2), 0),
5286 SR_CORE ("trcssccr1", CPENC (2,1,C1,C1,2), 0),
5287 SR_CORE ("trcssccr2", CPENC (2,1,C1,C2,2), 0),
5288 SR_CORE ("trcssccr3", CPENC (2,1,C1,C3,2), 0),
5289 SR_CORE ("trcssccr4", CPENC (2,1,C1,C4,2), 0),
5290 SR_CORE ("trcssccr5", CPENC (2,1,C1,C5,2), 0),
5291 SR_CORE ("trcssccr6", CPENC (2,1,C1,C6,2), 0),
5292 SR_CORE ("trcssccr7", CPENC (2,1,C1,C7,2), 0),
5293 SR_CORE ("trcsscsr0", CPENC (2,1,C1,C8,2), 0),
5294 SR_CORE ("trcsscsr1", CPENC (2,1,C1,C9,2), 0),
5295 SR_CORE ("trcsscsr2", CPENC (2,1,C1,C10,2), 0),
5296 SR_CORE ("trcsscsr3", CPENC (2,1,C1,C11,2), 0),
5297 SR_CORE ("trcsscsr4", CPENC (2,1,C1,C12,2), 0),
5298 SR_CORE ("trcsscsr5", CPENC (2,1,C1,C13,2), 0),
5299 SR_CORE ("trcsscsr6", CPENC (2,1,C1,C14,2), 0),
5300 SR_CORE ("trcsscsr7", CPENC (2,1,C1,C15,2), 0),
5301 SR_CORE ("trcsspcicr0", CPENC (2,1,C1,C0,3), 0),
5302 SR_CORE ("trcsspcicr1", CPENC (2,1,C1,C1,3), 0),
5303 SR_CORE ("trcsspcicr2", CPENC (2,1,C1,C2,3), 0),
5304 SR_CORE ("trcsspcicr3", CPENC (2,1,C1,C3,3), 0),
5305 SR_CORE ("trcsspcicr4", CPENC (2,1,C1,C4,3), 0),
5306 SR_CORE ("trcsspcicr5", CPENC (2,1,C1,C5,3), 0),
5307 SR_CORE ("trcsspcicr6", CPENC (2,1,C1,C6,3), 0),
5308 SR_CORE ("trcsspcicr7", CPENC (2,1,C1,C7,3), 0),
5309 SR_CORE ("trcstallctlr", CPENC (2,1,C0,C11,0), 0),
5310 SR_CORE ("trcsyncpr", CPENC (2,1,C0,C13,0), 0),
5311 SR_CORE ("trctraceidr", CPENC (2,1,C0,C0,1), 0),
5312 SR_CORE ("trctsctlr", CPENC (2,1,C0,C12,0), 0),
5313 SR_CORE ("trcvdarcctlr", CPENC (2,1,C0,C10,2), 0),
5314 SR_CORE ("trcvdctlr", CPENC (2,1,C0,C8,2), 0),
5315 SR_CORE ("trcvdsacctlr", CPENC (2,1,C0,C9,2), 0),
5316 SR_CORE ("trcvictlr", CPENC (2,1,C0,C0,2), 0),
5317 SR_CORE ("trcviiectlr", CPENC (2,1,C0,C1,2), 0),
5318 SR_CORE ("trcvipcssctlr", CPENC (2,1,C0,C3,2), 0),
5319 SR_CORE ("trcvissctlr", CPENC (2,1,C0,C2,2), 0),
5320 SR_CORE ("trcvmidcctlr0", CPENC (2,1,C3,C2,2), 0),
5321 SR_CORE ("trcvmidcctlr1", CPENC (2,1,C3,C3,2), 0),
5322 SR_CORE ("trcvmidcvr0", CPENC (2,1,C3,C0,1), 0),
5323 SR_CORE ("trcvmidcvr1", CPENC (2,1,C3,C2,1), 0),
5324 SR_CORE ("trcvmidcvr2", CPENC (2,1,C3,C4,1), 0),
5325 SR_CORE ("trcvmidcvr3", CPENC (2,1,C3,C6,1), 0),
5326 SR_CORE ("trcvmidcvr4", CPENC (2,1,C3,C8,1), 0),
5327 SR_CORE ("trcvmidcvr5", CPENC (2,1,C3,C10,1), 0),
5328 SR_CORE ("trcvmidcvr6", CPENC (2,1,C3,C12,1), 0),
5329 SR_CORE ("trcvmidcvr7", CPENC (2,1,C3,C14,1), 0),
5330 SR_CORE ("trclar", CPENC (2,1,C7,C12,6), F_REG_WRITE),
5331 SR_CORE ("trcoslar", CPENC (2,1,C1,C0,4), F_REG_WRITE),
5332
5333 SR_CORE ("csrcr_el0", CPENC (2,3,C8,C0,0), 0),
5334 SR_CORE ("csrptr_el0", CPENC (2,3,C8,C0,1), 0),
5335 SR_CORE ("csridr_el0", CPENC (2,3,C8,C0,2), F_REG_READ),
5336 SR_CORE ("csrptridx_el0", CPENC (2,3,C8,C0,3), F_REG_READ),
5337 SR_CORE ("csrcr_el1", CPENC (2,0,C8,C0,0), 0),
5338 SR_CORE ("csrcr_el12", CPENC (2,5,C8,C0,0), 0),
5339 SR_CORE ("csrptr_el1", CPENC (2,0,C8,C0,1), 0),
5340 SR_CORE ("csrptr_el12", CPENC (2,5,C8,C0,1), 0),
5341 SR_CORE ("csrptridx_el1", CPENC (2,0,C8,C0,3), F_REG_READ),
5342 SR_CORE ("csrcr_el2", CPENC (2,4,C8,C0,0), 0),
5343 SR_CORE ("csrptr_el2", CPENC (2,4,C8,C0,1), 0),
5344 SR_CORE ("csrptridx_el2", CPENC (2,4,C8,C0,3), F_REG_READ),
5345
5346 SR_LOR ("lorid_el1", CPENC (3,0,C10,C4,7), F_REG_READ),
5347 SR_LOR ("lorc_el1", CPENC (3,0,C10,C4,3), 0),
5348 SR_LOR ("lorea_el1", CPENC (3,0,C10,C4,1), 0),
5349 SR_LOR ("lorn_el1", CPENC (3,0,C10,C4,2), 0),
5350 SR_LOR ("lorsa_el1", CPENC (3,0,C10,C4,0), 0),
5351
5352 SR_CORE ("icc_ctlr_el3", CPENC (3,6,C12,C12,4), 0),
5353 SR_CORE ("icc_sre_el1", CPENC (3,0,C12,C12,5), 0),
5354 SR_CORE ("icc_sre_el2", CPENC (3,4,C12,C9,5), 0),
5355 SR_CORE ("icc_sre_el3", CPENC (3,6,C12,C12,5), 0),
5356 SR_CORE ("ich_vtr_el2", CPENC (3,4,C12,C11,1), F_REG_READ),
5357
5358 SR_CORE ("brbcr_el1", CPENC (2,1,C9,C0,0), 0),
5359 SR_CORE ("brbcr_el12", CPENC (2,5,C9,C0,0), 0),
5360 SR_CORE ("brbfcr_el1", CPENC (2,1,C9,C0,1), 0),
5361 SR_CORE ("brbts_el1", CPENC (2,1,C9,C0,2), 0),
5362 SR_CORE ("brbinfinj_el1", CPENC (2,1,C9,C1,0), 0),
5363 SR_CORE ("brbsrcinj_el1", CPENC (2,1,C9,C1,1), 0),
5364 SR_CORE ("brbtgtinj_el1", CPENC (2,1,C9,C1,2), 0),
5365 SR_CORE ("brbidr0_el1", CPENC (2,1,C9,C2,0), F_REG_READ),
5366 SR_CORE ("brbcr_el2", CPENC (2,4,C9,C0,0), 0),
5367 SR_CORE ("brbsrc0_el1", CPENC (2,1,C8,C0,1), F_REG_READ),
5368 SR_CORE ("brbsrc1_el1", CPENC (2,1,C8,C1,1), F_REG_READ),
5369 SR_CORE ("brbsrc2_el1", CPENC (2,1,C8,C2,1), F_REG_READ),
5370 SR_CORE ("brbsrc3_el1", CPENC (2,1,C8,C3,1), F_REG_READ),
5371 SR_CORE ("brbsrc4_el1", CPENC (2,1,C8,C4,1), F_REG_READ),
5372 SR_CORE ("brbsrc5_el1", CPENC (2,1,C8,C5,1), F_REG_READ),
5373 SR_CORE ("brbsrc6_el1", CPENC (2,1,C8,C6,1), F_REG_READ),
5374 SR_CORE ("brbsrc7_el1", CPENC (2,1,C8,C7,1), F_REG_READ),
5375 SR_CORE ("brbsrc8_el1", CPENC (2,1,C8,C8,1), F_REG_READ),
5376 SR_CORE ("brbsrc9_el1", CPENC (2,1,C8,C9,1), F_REG_READ),
5377 SR_CORE ("brbsrc10_el1", CPENC (2,1,C8,C10,1), F_REG_READ),
5378 SR_CORE ("brbsrc11_el1", CPENC (2,1,C8,C11,1), F_REG_READ),
5379 SR_CORE ("brbsrc12_el1", CPENC (2,1,C8,C12,1), F_REG_READ),
5380 SR_CORE ("brbsrc13_el1", CPENC (2,1,C8,C13,1), F_REG_READ),
5381 SR_CORE ("brbsrc14_el1", CPENC (2,1,C8,C14,1), F_REG_READ),
5382 SR_CORE ("brbsrc15_el1", CPENC (2,1,C8,C15,1), F_REG_READ),
5383 SR_CORE ("brbsrc16_el1", CPENC (2,1,C8,C0,5), F_REG_READ),
5384 SR_CORE ("brbsrc17_el1", CPENC (2,1,C8,C1,5), F_REG_READ),
5385 SR_CORE ("brbsrc18_el1", CPENC (2,1,C8,C2,5), F_REG_READ),
5386 SR_CORE ("brbsrc19_el1", CPENC (2,1,C8,C3,5), F_REG_READ),
5387 SR_CORE ("brbsrc20_el1", CPENC (2,1,C8,C4,5), F_REG_READ),
5388 SR_CORE ("brbsrc21_el1", CPENC (2,1,C8,C5,5), F_REG_READ),
5389 SR_CORE ("brbsrc22_el1", CPENC (2,1,C8,C6,5), F_REG_READ),
5390 SR_CORE ("brbsrc23_el1", CPENC (2,1,C8,C7,5), F_REG_READ),
5391 SR_CORE ("brbsrc24_el1", CPENC (2,1,C8,C8,5), F_REG_READ),
5392 SR_CORE ("brbsrc25_el1", CPENC (2,1,C8,C9,5), F_REG_READ),
5393 SR_CORE ("brbsrc26_el1", CPENC (2,1,C8,C10,5), F_REG_READ),
5394 SR_CORE ("brbsrc27_el1", CPENC (2,1,C8,C11,5), F_REG_READ),
5395 SR_CORE ("brbsrc28_el1", CPENC (2,1,C8,C12,5), F_REG_READ),
5396 SR_CORE ("brbsrc29_el1", CPENC (2,1,C8,C13,5), F_REG_READ),
5397 SR_CORE ("brbsrc30_el1", CPENC (2,1,C8,C14,5), F_REG_READ),
5398 SR_CORE ("brbsrc31_el1", CPENC (2,1,C8,C15,5), F_REG_READ),
5399 SR_CORE ("brbtgt0_el1", CPENC (2,1,C8,C0,2), F_REG_READ),
5400 SR_CORE ("brbtgt1_el1", CPENC (2,1,C8,C1,2), F_REG_READ),
5401 SR_CORE ("brbtgt2_el1", CPENC (2,1,C8,C2,2), F_REG_READ),
5402 SR_CORE ("brbtgt3_el1", CPENC (2,1,C8,C3,2), F_REG_READ),
5403 SR_CORE ("brbtgt4_el1", CPENC (2,1,C8,C4,2), F_REG_READ),
5404 SR_CORE ("brbtgt5_el1", CPENC (2,1,C8,C5,2), F_REG_READ),
5405 SR_CORE ("brbtgt6_el1", CPENC (2,1,C8,C6,2), F_REG_READ),
5406 SR_CORE ("brbtgt7_el1", CPENC (2,1,C8,C7,2), F_REG_READ),
5407 SR_CORE ("brbtgt8_el1", CPENC (2,1,C8,C8,2), F_REG_READ),
5408 SR_CORE ("brbtgt9_el1", CPENC (2,1,C8,C9,2), F_REG_READ),
5409 SR_CORE ("brbtgt10_el1", CPENC (2,1,C8,C10,2), F_REG_READ),
5410 SR_CORE ("brbtgt11_el1", CPENC (2,1,C8,C11,2), F_REG_READ),
5411 SR_CORE ("brbtgt12_el1", CPENC (2,1,C8,C12,2), F_REG_READ),
5412 SR_CORE ("brbtgt13_el1", CPENC (2,1,C8,C13,2), F_REG_READ),
5413 SR_CORE ("brbtgt14_el1", CPENC (2,1,C8,C14,2), F_REG_READ),
5414 SR_CORE ("brbtgt15_el1", CPENC (2,1,C8,C15,2), F_REG_READ),
5415 SR_CORE ("brbtgt16_el1", CPENC (2,1,C8,C0,6), F_REG_READ),
5416 SR_CORE ("brbtgt17_el1", CPENC (2,1,C8,C1,6), F_REG_READ),
5417 SR_CORE ("brbtgt18_el1", CPENC (2,1,C8,C2,6), F_REG_READ),
5418 SR_CORE ("brbtgt19_el1", CPENC (2,1,C8,C3,6), F_REG_READ),
5419 SR_CORE ("brbtgt20_el1", CPENC (2,1,C8,C4,6), F_REG_READ),
5420 SR_CORE ("brbtgt21_el1", CPENC (2,1,C8,C5,6), F_REG_READ),
5421 SR_CORE ("brbtgt22_el1", CPENC (2,1,C8,C6,6), F_REG_READ),
5422 SR_CORE ("brbtgt23_el1", CPENC (2,1,C8,C7,6), F_REG_READ),
5423 SR_CORE ("brbtgt24_el1", CPENC (2,1,C8,C8,6), F_REG_READ),
5424 SR_CORE ("brbtgt25_el1", CPENC (2,1,C8,C9,6), F_REG_READ),
5425 SR_CORE ("brbtgt26_el1", CPENC (2,1,C8,C10,6), F_REG_READ),
5426 SR_CORE ("brbtgt27_el1", CPENC (2,1,C8,C11,6), F_REG_READ),
5427 SR_CORE ("brbtgt28_el1", CPENC (2,1,C8,C12,6), F_REG_READ),
5428 SR_CORE ("brbtgt29_el1", CPENC (2,1,C8,C13,6), F_REG_READ),
5429 SR_CORE ("brbtgt30_el1", CPENC (2,1,C8,C14,6), F_REG_READ),
5430 SR_CORE ("brbtgt31_el1", CPENC (2,1,C8,C15,6), F_REG_READ),
5431 SR_CORE ("brbinf0_el1", CPENC (2,1,C8,C0,0), F_REG_READ),
5432 SR_CORE ("brbinf1_el1", CPENC (2,1,C8,C1,0), F_REG_READ),
5433 SR_CORE ("brbinf2_el1", CPENC (2,1,C8,C2,0), F_REG_READ),
5434 SR_CORE ("brbinf3_el1", CPENC (2,1,C8,C3,0), F_REG_READ),
5435 SR_CORE ("brbinf4_el1", CPENC (2,1,C8,C4,0), F_REG_READ),
5436 SR_CORE ("brbinf5_el1", CPENC (2,1,C8,C5,0), F_REG_READ),
5437 SR_CORE ("brbinf6_el1", CPENC (2,1,C8,C6,0), F_REG_READ),
5438 SR_CORE ("brbinf7_el1", CPENC (2,1,C8,C7,0), F_REG_READ),
5439 SR_CORE ("brbinf8_el1", CPENC (2,1,C8,C8,0), F_REG_READ),
5440 SR_CORE ("brbinf9_el1", CPENC (2,1,C8,C9,0), F_REG_READ),
5441 SR_CORE ("brbinf10_el1", CPENC (2,1,C8,C10,0), F_REG_READ),
5442 SR_CORE ("brbinf11_el1", CPENC (2,1,C8,C11,0), F_REG_READ),
5443 SR_CORE ("brbinf12_el1", CPENC (2,1,C8,C12,0), F_REG_READ),
5444 SR_CORE ("brbinf13_el1", CPENC (2,1,C8,C13,0), F_REG_READ),
5445 SR_CORE ("brbinf14_el1", CPENC (2,1,C8,C14,0), F_REG_READ),
5446 SR_CORE ("brbinf15_el1", CPENC (2,1,C8,C15,0), F_REG_READ),
5447 SR_CORE ("brbinf16_el1", CPENC (2,1,C8,C0,4), F_REG_READ),
5448 SR_CORE ("brbinf17_el1", CPENC (2,1,C8,C1,4), F_REG_READ),
5449 SR_CORE ("brbinf18_el1", CPENC (2,1,C8,C2,4), F_REG_READ),
5450 SR_CORE ("brbinf19_el1", CPENC (2,1,C8,C3,4), F_REG_READ),
5451 SR_CORE ("brbinf20_el1", CPENC (2,1,C8,C4,4), F_REG_READ),
5452 SR_CORE ("brbinf21_el1", CPENC (2,1,C8,C5,4), F_REG_READ),
5453 SR_CORE ("brbinf22_el1", CPENC (2,1,C8,C6,4), F_REG_READ),
5454 SR_CORE ("brbinf23_el1", CPENC (2,1,C8,C7,4), F_REG_READ),
5455 SR_CORE ("brbinf24_el1", CPENC (2,1,C8,C8,4), F_REG_READ),
5456 SR_CORE ("brbinf25_el1", CPENC (2,1,C8,C9,4), F_REG_READ),
5457 SR_CORE ("brbinf26_el1", CPENC (2,1,C8,C10,4), F_REG_READ),
5458 SR_CORE ("brbinf27_el1", CPENC (2,1,C8,C11,4), F_REG_READ),
5459 SR_CORE ("brbinf28_el1", CPENC (2,1,C8,C12,4), F_REG_READ),
5460 SR_CORE ("brbinf29_el1", CPENC (2,1,C8,C13,4), F_REG_READ),
5461 SR_CORE ("brbinf30_el1", CPENC (2,1,C8,C14,4), F_REG_READ),
5462 SR_CORE ("brbinf31_el1", CPENC (2,1,C8,C15,4), F_REG_READ),
5463
5464 SR_CORE ("accdata_el1", CPENC (3,0,C13,C0,5), 0),
5465
5466 SR_CORE ("mfar_el3", CPENC (3,6,C6,C0,5), 0),
5467 SR_CORE ("gpccr_el3", CPENC (3,6,C2,C1,6), 0),
5468 SR_CORE ("gptbr_el3", CPENC (3,6,C2,C1,4), 0),
5469
5470 SR_CORE ("mecidr_el2", CPENC (3,4,C10,C8,7), F_REG_READ),
5471 SR_CORE ("mecid_p0_el2", CPENC (3,4,C10,C8,0), 0),
5472 SR_CORE ("mecid_a0_el2", CPENC (3,4,C10,C8,1), 0),
5473 SR_CORE ("mecid_p1_el2", CPENC (3,4,C10,C8,2), 0),
5474 SR_CORE ("mecid_a1_el2", CPENC (3,4,C10,C8,3), 0),
5475 SR_CORE ("vmecid_p_el2", CPENC (3,4,C10,C9,0), 0),
5476 SR_CORE ("vmecid_a_el2", CPENC (3,4,C10,C9,1), 0),
5477 SR_CORE ("mecid_rl_a_el3",CPENC (3,6,C10,C10,1), 0),
5478
5479 SR_SME ("svcr", CPENC (3,3,C4,C2,2), 0),
5480 SR_SME ("id_aa64smfr0_el1", CPENC (3,0,C0,C4,5), F_REG_READ),
5481 SR_SME ("smcr_el1", CPENC (3,0,C1,C2,6), 0),
5482 SR_SME ("smcr_el12", CPENC (3,5,C1,C2,6), 0),
5483 SR_SME ("smcr_el2", CPENC (3,4,C1,C2,6), 0),
5484 SR_SME ("smcr_el3", CPENC (3,6,C1,C2,6), 0),
5485 SR_SME ("smpri_el1", CPENC (3,0,C1,C2,4), 0),
5486 SR_SME ("smprimap_el2", CPENC (3,4,C1,C2,5), 0),
5487 SR_SME ("smidr_el1", CPENC (3,1,C0,C0,6), F_REG_READ),
5488 SR_SME ("tpidr2_el0", CPENC (3,3,C13,C0,5), 0),
5489 SR_SME ("mpamsm_el1", CPENC (3,0,C10,C5,3), 0),
5490
5491 SR_AMU ("amcr_el0", CPENC (3,3,C13,C2,0), 0),
5492 SR_AMU ("amcfgr_el0", CPENC (3,3,C13,C2,1), F_REG_READ),
5493 SR_AMU ("amcgcr_el0", CPENC (3,3,C13,C2,2), F_REG_READ),
5494 SR_AMU ("amuserenr_el0", CPENC (3,3,C13,C2,3), 0),
5495 SR_AMU ("amcntenclr0_el0", CPENC (3,3,C13,C2,4), 0),
5496 SR_AMU ("amcntenset0_el0", CPENC (3,3,C13,C2,5), 0),
5497 SR_AMU ("amcntenclr1_el0", CPENC (3,3,C13,C3,0), 0),
5498 SR_AMU ("amcntenset1_el0", CPENC (3,3,C13,C3,1), 0),
5499 SR_AMU ("amevcntr00_el0", CPENC (3,3,C13,C4,0), 0),
5500 SR_AMU ("amevcntr01_el0", CPENC (3,3,C13,C4,1), 0),
5501 SR_AMU ("amevcntr02_el0", CPENC (3,3,C13,C4,2), 0),
5502 SR_AMU ("amevcntr03_el0", CPENC (3,3,C13,C4,3), 0),
5503 SR_AMU ("amevtyper00_el0", CPENC (3,3,C13,C6,0), F_REG_READ),
5504 SR_AMU ("amevtyper01_el0", CPENC (3,3,C13,C6,1), F_REG_READ),
5505 SR_AMU ("amevtyper02_el0", CPENC (3,3,C13,C6,2), F_REG_READ),
5506 SR_AMU ("amevtyper03_el0", CPENC (3,3,C13,C6,3), F_REG_READ),
5507 SR_AMU ("amevcntr10_el0", CPENC (3,3,C13,C12,0), 0),
5508 SR_AMU ("amevcntr11_el0", CPENC (3,3,C13,C12,1), 0),
5509 SR_AMU ("amevcntr12_el0", CPENC (3,3,C13,C12,2), 0),
5510 SR_AMU ("amevcntr13_el0", CPENC (3,3,C13,C12,3), 0),
5511 SR_AMU ("amevcntr14_el0", CPENC (3,3,C13,C12,4), 0),
5512 SR_AMU ("amevcntr15_el0", CPENC (3,3,C13,C12,5), 0),
5513 SR_AMU ("amevcntr16_el0", CPENC (3,3,C13,C12,6), 0),
5514 SR_AMU ("amevcntr17_el0", CPENC (3,3,C13,C12,7), 0),
5515 SR_AMU ("amevcntr18_el0", CPENC (3,3,C13,C13,0), 0),
5516 SR_AMU ("amevcntr19_el0", CPENC (3,3,C13,C13,1), 0),
5517 SR_AMU ("amevcntr110_el0", CPENC (3,3,C13,C13,2), 0),
5518 SR_AMU ("amevcntr111_el0", CPENC (3,3,C13,C13,3), 0),
5519 SR_AMU ("amevcntr112_el0", CPENC (3,3,C13,C13,4), 0),
5520 SR_AMU ("amevcntr113_el0", CPENC (3,3,C13,C13,5), 0),
5521 SR_AMU ("amevcntr114_el0", CPENC (3,3,C13,C13,6), 0),
5522 SR_AMU ("amevcntr115_el0", CPENC (3,3,C13,C13,7), 0),
5523 SR_AMU ("amevtyper10_el0", CPENC (3,3,C13,C14,0), 0),
5524 SR_AMU ("amevtyper11_el0", CPENC (3,3,C13,C14,1), 0),
5525 SR_AMU ("amevtyper12_el0", CPENC (3,3,C13,C14,2), 0),
5526 SR_AMU ("amevtyper13_el0", CPENC (3,3,C13,C14,3), 0),
5527 SR_AMU ("amevtyper14_el0", CPENC (3,3,C13,C14,4), 0),
5528 SR_AMU ("amevtyper15_el0", CPENC (3,3,C13,C14,5), 0),
5529 SR_AMU ("amevtyper16_el0", CPENC (3,3,C13,C14,6), 0),
5530 SR_AMU ("amevtyper17_el0", CPENC (3,3,C13,C14,7), 0),
5531 SR_AMU ("amevtyper18_el0", CPENC (3,3,C13,C15,0), 0),
5532 SR_AMU ("amevtyper19_el0", CPENC (3,3,C13,C15,1), 0),
5533 SR_AMU ("amevtyper110_el0", CPENC (3,3,C13,C15,2), 0),
5534 SR_AMU ("amevtyper111_el0", CPENC (3,3,C13,C15,3), 0),
5535 SR_AMU ("amevtyper112_el0", CPENC (3,3,C13,C15,4), 0),
5536 SR_AMU ("amevtyper113_el0", CPENC (3,3,C13,C15,5), 0),
5537 SR_AMU ("amevtyper114_el0", CPENC (3,3,C13,C15,6), 0),
5538 SR_AMU ("amevtyper115_el0", CPENC (3,3,C13,C15,7), 0),
5539
5540 SR_GIC ("icc_pmr_el1", CPENC (3,0,C4,C6,0), 0),
5541 SR_GIC ("icc_iar0_el1", CPENC (3,0,C12,C8,0), F_REG_READ),
5542 SR_GIC ("icc_eoir0_el1", CPENC (3,0,C12,C8,1), F_REG_WRITE),
5543 SR_GIC ("icc_hppir0_el1", CPENC (3,0,C12,C8,2), F_REG_READ),
5544 SR_GIC ("icc_bpr0_el1", CPENC (3,0,C12,C8,3), 0),
5545 SR_GIC ("icc_ap0r0_el1", CPENC (3,0,C12,C8,4), 0),
5546 SR_GIC ("icc_ap0r1_el1", CPENC (3,0,C12,C8,5), 0),
5547 SR_GIC ("icc_ap0r2_el1", CPENC (3,0,C12,C8,6), 0),
5548 SR_GIC ("icc_ap0r3_el1", CPENC (3,0,C12,C8,7), 0),
5549 SR_GIC ("icc_ap1r0_el1", CPENC (3,0,C12,C9,0), 0),
5550 SR_GIC ("icc_ap1r1_el1", CPENC (3,0,C12,C9,1), 0),
5551 SR_GIC ("icc_ap1r2_el1", CPENC (3,0,C12,C9,2), 0),
5552 SR_GIC ("icc_ap1r3_el1", CPENC (3,0,C12,C9,3), 0),
5553 SR_GIC ("icc_dir_el1", CPENC (3,0,C12,C11,1), F_REG_WRITE),
5554 SR_GIC ("icc_rpr_el1", CPENC (3,0,C12,C11,3), F_REG_READ),
5555 SR_GIC ("icc_sgi1r_el1", CPENC (3,0,C12,C11,5), F_REG_WRITE),
5556 SR_GIC ("icc_asgi1r_el1", CPENC (3,0,C12,C11,6), F_REG_WRITE),
5557 SR_GIC ("icc_sgi0r_el1", CPENC (3,0,C12,C11,7), F_REG_WRITE),
5558 SR_GIC ("icc_iar1_el1", CPENC (3,0,C12,C12,0), F_REG_READ),
5559 SR_GIC ("icc_eoir1_el1", CPENC (3,0,C12,C12,1), F_REG_WRITE),
5560 SR_GIC ("icc_hppir1_el1", CPENC (3,0,C12,C12,2), F_REG_READ),
5561 SR_GIC ("icc_bpr1_el1", CPENC (3,0,C12,C12,3), 0),
5562 SR_GIC ("icc_ctlr_el1", CPENC (3,0,C12,C12,4), 0),
5563 SR_GIC ("icc_igrpen0_el1", CPENC (3,0,C12,C12,6), 0),
5564 SR_GIC ("icc_igrpen1_el1", CPENC (3,0,C12,C12,7), 0),
5565 SR_GIC ("ich_ap0r0_el2", CPENC (3,4,C12,C8,0), 0),
5566 SR_GIC ("ich_ap0r1_el2", CPENC (3,4,C12,C8,1), 0),
5567 SR_GIC ("ich_ap0r2_el2", CPENC (3,4,C12,C8,2), 0),
5568 SR_GIC ("ich_ap0r3_el2", CPENC (3,4,C12,C8,3), 0),
5569 SR_GIC ("ich_ap1r0_el2", CPENC (3,4,C12,C9,0), 0),
5570 SR_GIC ("ich_ap1r1_el2", CPENC (3,4,C12,C9,1), 0),
5571 SR_GIC ("ich_ap1r2_el2", CPENC (3,4,C12,C9,2), 0),
5572 SR_GIC ("ich_ap1r3_el2", CPENC (3,4,C12,C9,3), 0),
5573 SR_GIC ("ich_hcr_el2", CPENC (3,4,C12,C11,0), 0),
5574 SR_GIC ("ich_misr_el2", CPENC (3,4,C12,C11,2), F_REG_READ),
5575 SR_GIC ("ich_eisr_el2", CPENC (3,4,C12,C11,3), F_REG_READ),
5576 SR_GIC ("ich_elrsr_el2", CPENC (3,4,C12,C11,5), F_REG_READ),
5577 SR_GIC ("ich_vmcr_el2", CPENC (3,4,C12,C11,7), 0),
5578 SR_GIC ("ich_lr0_el2", CPENC (3,4,C12,C12,0), 0),
5579 SR_GIC ("ich_lr1_el2", CPENC (3,4,C12,C12,1), 0),
5580 SR_GIC ("ich_lr2_el2", CPENC (3,4,C12,C12,2), 0),
5581 SR_GIC ("ich_lr3_el2", CPENC (3,4,C12,C12,3), 0),
5582 SR_GIC ("ich_lr4_el2", CPENC (3,4,C12,C12,4), 0),
5583 SR_GIC ("ich_lr5_el2", CPENC (3,4,C12,C12,5), 0),
5584 SR_GIC ("ich_lr6_el2", CPENC (3,4,C12,C12,6), 0),
5585 SR_GIC ("ich_lr7_el2", CPENC (3,4,C12,C12,7), 0),
5586 SR_GIC ("ich_lr8_el2", CPENC (3,4,C12,C13,0), 0),
5587 SR_GIC ("ich_lr9_el2", CPENC (3,4,C12,C13,1), 0),
5588 SR_GIC ("ich_lr10_el2", CPENC (3,4,C12,C13,2), 0),
5589 SR_GIC ("ich_lr11_el2", CPENC (3,4,C12,C13,3), 0),
5590 SR_GIC ("ich_lr12_el2", CPENC (3,4,C12,C13,4), 0),
5591 SR_GIC ("ich_lr13_el2", CPENC (3,4,C12,C13,5), 0),
5592 SR_GIC ("ich_lr14_el2", CPENC (3,4,C12,C13,6), 0),
5593 SR_GIC ("ich_lr15_el2", CPENC (3,4,C12,C13,7), 0),
5594 SR_GIC ("icc_igrpen1_el3", CPENC (3,6,C12,C12,7), 0),
5595
5596 SR_V8_6 ("amcg1idr_el0", CPENC (3,3,C13,C2,6), F_REG_READ),
5597 SR_V8_6 ("cntpctss_el0", CPENC (3,3,C14,C0,5), F_REG_READ),
5598 SR_V8_6 ("cntvctss_el0", CPENC (3,3,C14,C0,6), F_REG_READ),
5599 SR_V8_6 ("hfgrtr_el2", CPENC (3,4,C1,C1,4), 0),
5600 SR_V8_6 ("hfgwtr_el2", CPENC (3,4,C1,C1,5), 0),
5601 SR_V8_6 ("hfgitr_el2", CPENC (3,4,C1,C1,6), 0),
5602 SR_V8_6 ("hdfgrtr_el2", CPENC (3,4,C3,C1,4), 0),
5603 SR_V8_6 ("hdfgwtr_el2", CPENC (3,4,C3,C1,5), 0),
5604 SR_V8_6 ("hafgrtr_el2", CPENC (3,4,C3,C1,6), 0),
5605 SR_V8_6 ("amevcntvoff00_el2", CPENC (3,4,C13,C8,0), 0),
5606 SR_V8_6 ("amevcntvoff01_el2", CPENC (3,4,C13,C8,1), 0),
5607 SR_V8_6 ("amevcntvoff02_el2", CPENC (3,4,C13,C8,2), 0),
5608 SR_V8_6 ("amevcntvoff03_el2", CPENC (3,4,C13,C8,3), 0),
5609 SR_V8_6 ("amevcntvoff04_el2", CPENC (3,4,C13,C8,4), 0),
5610 SR_V8_6 ("amevcntvoff05_el2", CPENC (3,4,C13,C8,5), 0),
5611 SR_V8_6 ("amevcntvoff06_el2", CPENC (3,4,C13,C8,6), 0),
5612 SR_V8_6 ("amevcntvoff07_el2", CPENC (3,4,C13,C8,7), 0),
5613 SR_V8_6 ("amevcntvoff08_el2", CPENC (3,4,C13,C9,0), 0),
5614 SR_V8_6 ("amevcntvoff09_el2", CPENC (3,4,C13,C9,1), 0),
5615 SR_V8_6 ("amevcntvoff010_el2", CPENC (3,4,C13,C9,2), 0),
5616 SR_V8_6 ("amevcntvoff011_el2", CPENC (3,4,C13,C9,3), 0),
5617 SR_V8_6 ("amevcntvoff012_el2", CPENC (3,4,C13,C9,4), 0),
5618 SR_V8_6 ("amevcntvoff013_el2", CPENC (3,4,C13,C9,5), 0),
5619 SR_V8_6 ("amevcntvoff014_el2", CPENC (3,4,C13,C9,6), 0),
5620 SR_V8_6 ("amevcntvoff015_el2", CPENC (3,4,C13,C9,7), 0),
5621 SR_V8_6 ("amevcntvoff10_el2", CPENC (3,4,C13,C10,0), 0),
5622 SR_V8_6 ("amevcntvoff11_el2", CPENC (3,4,C13,C10,1), 0),
5623 SR_V8_6 ("amevcntvoff12_el2", CPENC (3,4,C13,C10,2), 0),
5624 SR_V8_6 ("amevcntvoff13_el2", CPENC (3,4,C13,C10,3), 0),
5625 SR_V8_6 ("amevcntvoff14_el2", CPENC (3,4,C13,C10,4), 0),
5626 SR_V8_6 ("amevcntvoff15_el2", CPENC (3,4,C13,C10,5), 0),
5627 SR_V8_6 ("amevcntvoff16_el2", CPENC (3,4,C13,C10,6), 0),
5628 SR_V8_6 ("amevcntvoff17_el2", CPENC (3,4,C13,C10,7), 0),
5629 SR_V8_6 ("amevcntvoff18_el2", CPENC (3,4,C13,C11,0), 0),
5630 SR_V8_6 ("amevcntvoff19_el2", CPENC (3,4,C13,C11,1), 0),
5631 SR_V8_6 ("amevcntvoff110_el2", CPENC (3,4,C13,C11,2), 0),
5632 SR_V8_6 ("amevcntvoff111_el2", CPENC (3,4,C13,C11,3), 0),
5633 SR_V8_6 ("amevcntvoff112_el2", CPENC (3,4,C13,C11,4), 0),
5634 SR_V8_6 ("amevcntvoff113_el2", CPENC (3,4,C13,C11,5), 0),
5635 SR_V8_6 ("amevcntvoff114_el2", CPENC (3,4,C13,C11,6), 0),
5636 SR_V8_6 ("amevcntvoff115_el2", CPENC (3,4,C13,C11,7), 0),
5637 SR_V8_6 ("cntpoff_el2", CPENC (3,4,C14,C0,6), 0),
5638
5639 SR_V8_7 ("pmsnevfr_el1", CPENC (3,0,C9,C9,1), 0),
5640 SR_V8_7 ("hcrx_el2", CPENC (3,4,C1,C2,2), 0),
5641
5642 SR_V8_8 ("allint", CPENC (3,0,C4,C3,0), 0),
5643 SR_V8_8 ("icc_nmiar1_el1", CPENC (3,0,C12,C9,5), F_REG_READ),
5644
5645 { 0, CPENC (0,0,0,0,0), 0, 0 }
5646 };
5647
5648 bool
5649 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
5650 {
5651 return (reg_flags & F_DEPRECATED) != 0;
5652 }
5653
5654 /* The CPENC below is fairly misleading: the fields
5655 here are not in CPENC form.  They are packed as op1:op2, with op1 in the
5656 upper three bits and op2 in the lower three.  The fields are encoded by
5657 ins_pstatefield, which just shifts the value by the width of the fields
5658 in a loop, so if you CPENC them only the first value will be set and the
5659 rest are masked out to 0.  As an example, op1 = 3, op2 = 2: CPENC would give
5660 0b110000000001000000 (0x30040) while what you want is 0b011010 (0x1a). */
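/* Illustrative cross-check of the op1:op2 packing, using the Arm ARM field
   values for two of the entries below: "daifset" is op1 = 0b011, op2 = 0b110,
   giving (op1 << 3) | op2 == 0b011110 == 0x1e, and "dit" is op1 = 0b011,
   op2 = 0b010, giving 0x1a.  */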
5661 const aarch64_sys_reg aarch64_pstatefields [] =
5662 {
5663 SR_CORE ("spsel", 0x05, F_REG_MAX_VALUE (1)),
5664 SR_CORE ("daifset", 0x1e, F_REG_MAX_VALUE (15)),
5665 SR_CORE ("daifclr", 0x1f, F_REG_MAX_VALUE (15)),
5666 SR_PAN ("pan", 0x04, F_REG_MAX_VALUE (1)),
5667 SR_V8_2 ("uao", 0x03, F_REG_MAX_VALUE (1)),
5668 SR_SSBS ("ssbs", 0x19, F_REG_MAX_VALUE (1)),
5669 SR_V8_4 ("dit", 0x1a, F_REG_MAX_VALUE (1)),
5670 SR_MEMTAG ("tco", 0x1c, F_REG_MAX_VALUE (1)),
5671 SR_SME ("svcrsm", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x2,0x1)
5672 | F_REG_MAX_VALUE (1)),
5673 SR_SME ("svcrza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x4,0x1)
5674 | F_REG_MAX_VALUE (1)),
5675 SR_SME ("svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x6,0x1)
5676 | F_REG_MAX_VALUE (1)),
5677 SR_V8_8 ("allint", 0x08, F_REG_MAX_VALUE (1)),
5678 { 0, CPENC (0,0,0,0,0), 0, 0 },
5679 };
5680
5681 bool
5682 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
5683 const aarch64_sys_reg *reg)
5684 {
5685 if (!(reg->flags & F_ARCHEXT))
5686 return true;
5687
5688 return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
5689 }
5690
5691 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
5692 {
5693 { "ialluis", CPENS(0,C7,C1,0), 0 },
5694 { "iallu", CPENS(0,C7,C5,0), 0 },
5695 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
5696 { 0, CPENS(0,0,0,0), 0 }
5697 };
5698
5699 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
5700 {
5701 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
5702 { "gva", CPENS (3, C7, C4, 3), F_HASXT | F_ARCHEXT },
5703 { "gzva", CPENS (3, C7, C4, 4), F_HASXT | F_ARCHEXT },
5704 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
5705 { "igvac", CPENS (0, C7, C6, 3), F_HASXT | F_ARCHEXT },
5706 { "igsw", CPENS (0, C7, C6, 4), F_HASXT | F_ARCHEXT },
5707 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
5708 { "igdvac", CPENS (0, C7, C6, 5), F_HASXT | F_ARCHEXT },
5709 { "igdsw", CPENS (0, C7, C6, 6), F_HASXT | F_ARCHEXT },
5710 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
5711 { "cgvac", CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
5712 { "cgdvac", CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
5713 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
5714 { "cgsw", CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
5715 { "cgdsw", CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
5716 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
5717 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
5718 { "cgvap", CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
5719 { "cgdvap", CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
5720 { "cvadp", CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
5721 { "cgvadp", CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
5722 { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
5723 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
5724 { "cigvac", CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
5725 { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
5726 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
5727 { "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
5728 { "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
5729 { "cipapa", CPENS (6, C7, C14, 1), F_HASXT },
5730 { "cigdpapa", CPENS (6, C7, C14, 5), F_HASXT },
5731 { 0, CPENS(0,0,0,0), 0 }
5732 };
5733
5734 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
5735 {
5736 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
5737 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
5738 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
5739 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
5740 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
5741 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
5742 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
5743 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
5744 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
5745 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
5746 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
5747 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
5748 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
5749 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
5750 { 0, CPENS(0,0,0,0), 0 }
5751 };
5752
5753 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
5754 {
5755 { "vmalle1", CPENS(0,C8,C7,0), 0 },
5756 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
5757 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
5758 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
5759 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
5760 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
5761 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
5762 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
5763 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
5764 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
5765 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
5766 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
5767 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
5768 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
5769 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
5770 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
5771 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
5772 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
5773 { "alle2", CPENS(4,C8,C7,0), 0 },
5774 { "alle2is", CPENS(4,C8,C3,0), 0 },
5775 { "alle1", CPENS(4,C8,C7,4), 0 },
5776 { "alle1is", CPENS(4,C8,C3,4), 0 },
5777 { "alle3", CPENS(6,C8,C7,0), 0 },
5778 { "alle3is", CPENS(6,C8,C3,0), 0 },
5779 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
5780 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
5781 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
5782 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
5783 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
5784 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
5785 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
5786 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
5787
5788 { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
5789 { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
5790 { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
5791 { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
5792 { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
5793 { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
5794 { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
5795 { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
5796 { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
5797 { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
5798 { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
5799 { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
5800 { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
5801 { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
5802 { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
5803 { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },
5804
5805 { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
5806 { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
5807 { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
5808 { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
5809 { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
5810 { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
5811 { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
5812 { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
5813 { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
5814 { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
5815 { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
5816 { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
5817 { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
5818 { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
5819 { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
5820 { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
5821 { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
5822 { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
5823 { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
5824 { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
5825 { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
5826 { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
5827 { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
5828 { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
5829 { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
5830 { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
5831 { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
5832 { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
5833 { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
5834 { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },
5835
5836 { "rpaos", CPENS (6, C8, C4, 3), F_HASXT },
5837 { "rpalos", CPENS (6, C8, C4, 7), F_HASXT },
5838 { "paallos", CPENS (6, C8, C1, 4), 0},
5839 { "paall", CPENS (6, C8, C7, 4), 0},
5840
5841 { 0, CPENS(0,0,0,0), 0 }
5842 };
5843
5844 const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
5845 {
5846 /* RCTX is somewhat unique in that it takes different op2 values
5847 depending on the instruction in which it is used (cfp/dvp/cpp).
5848 Thus op2 is masked out here and instead encoded directly in the
5849 aarch64_opcode_table entries for the respective instructions. */
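/* For reference only (op2 values per the Arm ARM, not encoded here): CFP RCTX
   uses op2 4, DVP RCTX uses op2 5 and CPP RCTX uses op2 7, combined with the
   op1/CRn/CRm encoding of the entry below.  */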
5850 { "rctx", CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */
5851
5852 { 0, CPENS(0,0,0,0), 0 }
5853 };
5854
5855 bool
5856 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
5857 {
5858 return (sys_ins_reg->flags & F_HASXT) != 0;
5859 }
5860
5861 extern bool
5862 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
5863 const char *reg_name,
5864 aarch64_insn reg_value,
5865 uint32_t reg_flags,
5866 aarch64_feature_set reg_features)
5867 {
5868 /* Armv8-R has no EL3. */
5869 if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_R))
5870 {
5871 const char *suffix = strrchr (reg_name, '_');
5872 if (suffix && !strcmp (suffix, "_el3"))
5873 return false;
5874 }
5875
5876 if (!(reg_flags & F_ARCHEXT))
5877 return true;
5878
5879 if (reg_features
5880 && AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
5881 return true;
5882
5883 /* ARMv8.4 TLB instructions. */
5884 if ((reg_value == CPENS (0, C8, C1, 0)
5885 || reg_value == CPENS (0, C8, C1, 1)
5886 || reg_value == CPENS (0, C8, C1, 2)
5887 || reg_value == CPENS (0, C8, C1, 3)
5888 || reg_value == CPENS (0, C8, C1, 5)
5889 || reg_value == CPENS (0, C8, C1, 7)
5890 || reg_value == CPENS (4, C8, C4, 0)
5891 || reg_value == CPENS (4, C8, C4, 4)
5892 || reg_value == CPENS (4, C8, C1, 1)
5893 || reg_value == CPENS (4, C8, C1, 5)
5894 || reg_value == CPENS (4, C8, C1, 6)
5895 || reg_value == CPENS (6, C8, C1, 1)
5896 || reg_value == CPENS (6, C8, C1, 5)
5897 || reg_value == CPENS (4, C8, C1, 0)
5898 || reg_value == CPENS (4, C8, C1, 4)
5899 || reg_value == CPENS (6, C8, C1, 0)
5900 || reg_value == CPENS (0, C8, C6, 1)
5901 || reg_value == CPENS (0, C8, C6, 3)
5902 || reg_value == CPENS (0, C8, C6, 5)
5903 || reg_value == CPENS (0, C8, C6, 7)
5904 || reg_value == CPENS (0, C8, C2, 1)
5905 || reg_value == CPENS (0, C8, C2, 3)
5906 || reg_value == CPENS (0, C8, C2, 5)
5907 || reg_value == CPENS (0, C8, C2, 7)
5908 || reg_value == CPENS (0, C8, C5, 1)
5909 || reg_value == CPENS (0, C8, C5, 3)
5910 || reg_value == CPENS (0, C8, C5, 5)
5911 || reg_value == CPENS (0, C8, C5, 7)
5912 || reg_value == CPENS (4, C8, C0, 2)
5913 || reg_value == CPENS (4, C8, C0, 6)
5914 || reg_value == CPENS (4, C8, C4, 2)
5915 || reg_value == CPENS (4, C8, C4, 6)
5916 || reg_value == CPENS (4, C8, C4, 3)
5917 || reg_value == CPENS (4, C8, C4, 7)
5918 || reg_value == CPENS (4, C8, C6, 1)
5919 || reg_value == CPENS (4, C8, C6, 5)
5920 || reg_value == CPENS (4, C8, C2, 1)
5921 || reg_value == CPENS (4, C8, C2, 5)
5922 || reg_value == CPENS (4, C8, C5, 1)
5923 || reg_value == CPENS (4, C8, C5, 5)
5924 || reg_value == CPENS (6, C8, C6, 1)
5925 || reg_value == CPENS (6, C8, C6, 5)
5926 || reg_value == CPENS (6, C8, C2, 1)
5927 || reg_value == CPENS (6, C8, C2, 5)
5928 || reg_value == CPENS (6, C8, C5, 1)
5929 || reg_value == CPENS (6, C8, C5, 5))
5930 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
5931 return true;
5932
5933 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
5934 if (reg_value == CPENS (3, C7, C12, 1)
5935 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
5936 return true;
5937
5938 /* DC CVADP. Values are from aarch64_sys_regs_dc. */
5939 if (reg_value == CPENS (3, C7, C13, 1)
5940 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
5941 return true;
5942
5943 /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension. */
5944 if ((reg_value == CPENS (0, C7, C6, 3)
5945 || reg_value == CPENS (0, C7, C6, 4)
5946 || reg_value == CPENS (0, C7, C10, 4)
5947 || reg_value == CPENS (0, C7, C14, 4)
5948 || reg_value == CPENS (3, C7, C10, 3)
5949 || reg_value == CPENS (3, C7, C12, 3)
5950 || reg_value == CPENS (3, C7, C13, 3)
5951 || reg_value == CPENS (3, C7, C14, 3)
5952 || reg_value == CPENS (3, C7, C4, 3)
5953 || reg_value == CPENS (0, C7, C6, 5)
5954 || reg_value == CPENS (0, C7, C6, 6)
5955 || reg_value == CPENS (0, C7, C10, 6)
5956 || reg_value == CPENS (0, C7, C14, 6)
5957 || reg_value == CPENS (3, C7, C10, 5)
5958 || reg_value == CPENS (3, C7, C12, 5)
5959 || reg_value == CPENS (3, C7, C13, 5)
5960 || reg_value == CPENS (3, C7, C14, 5)
5961 || reg_value == CPENS (3, C7, C4, 4))
5962 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
5963 return true;
5964
5965 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
5966 if ((reg_value == CPENS (0, C7, C9, 0)
5967 || reg_value == CPENS (0, C7, C9, 1))
5968 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
5969 return true;
5970
5971 /* CFP/DVP/CPP RCTX: Values are from aarch64_sys_regs_sr. */
5972 if (reg_value == CPENS (3, C7, C3, 0)
5973 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
5974 return true;
5975
5976 return false;
5977 }
5978
5979 #undef C0
5980 #undef C1
5981 #undef C2
5982 #undef C3
5983 #undef C4
5984 #undef C5
5985 #undef C6
5986 #undef C7
5987 #undef C8
5988 #undef C9
5989 #undef C10
5990 #undef C11
5991 #undef C12
5992 #undef C13
5993 #undef C14
5994 #undef C15
5995
5996 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
5997 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
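/* Example: in the LDPSW encoding checked by verify_ldpsw below,
   BITS (insn, 4, 0) extracts Rt, BITS (insn, 9, 5) extracts Rn and
   BITS (insn, 14, 10) extracts Rt2.  */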
5998
5999 static enum err_type
6000 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
6001 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
6002 bool encoding ATTRIBUTE_UNUSED,
6003 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
6004 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
6005 {
6006 int t = BITS (insn, 4, 0);
6007 int n = BITS (insn, 9, 5);
6008 int t2 = BITS (insn, 14, 10);
6009
6010 if (BIT (insn, 23))
6011 {
6012 /* Write back enabled. */
6013 if ((t == n || t2 == n) && n != 31)
6014 return ERR_UND;
6015 }
6016
6017 if (BIT (insn, 22))
6018 {
6019 /* Load */
6020 if (t == t2)
6021 return ERR_UND;
6022 }
6023
6024 return ERR_OK;
6025 }
6026
6027 /* Verifier for vector-by-element instructions with three operands, where
6028 the condition `if sz:L == 11 then UNDEFINED` holds. */
6029
6030 static enum err_type
6031 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
6032 bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
6033 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
6034 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
6035 {
6036 const aarch64_insn undef_pattern = 0x3;
6037 aarch64_insn value;
6038
6039 assert (inst->opcode);
6040 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
6041 value = encoding ? inst->value : insn;
6042 assert (value);
6043
6044 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
6045 return ERR_UND;
6046
6047 return ERR_OK;
6048 }
6049
6050 /* Check an instruction that takes three register operands and that
6051 requires the register numbers to be distinct from one another. */
6052
6053 static enum err_type
6054 verify_three_different_regs (const struct aarch64_inst *inst,
6055 const aarch64_insn insn ATTRIBUTE_UNUSED,
6056 bfd_vma pc ATTRIBUTE_UNUSED,
6057 bool encoding ATTRIBUTE_UNUSED,
6058 aarch64_operand_error *mismatch_detail
6059 ATTRIBUTE_UNUSED,
6060 aarch64_instr_sequence *insn_sequence
6061 ATTRIBUTE_UNUSED)
6062 {
6063 int rd, rs, rn;
6064
6065 rd = inst->operands[0].reg.regno;
6066 rs = inst->operands[1].reg.regno;
6067 rn = inst->operands[2].reg.regno;
6068 if (rd == rs || rd == rn || rs == rn)
6069 {
6070 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6071 mismatch_detail->error
6072 = _("the three register operands must be distinct from one another");
6073 mismatch_detail->index = -1;
6074 return ERR_UND;
6075 }
6076
6077 return ERR_OK;
6078 }
6079
6080 /* Add INST to the end of INSN_SEQUENCE. */
6081
6082 static void
6083 add_insn_to_sequence (const struct aarch64_inst *inst,
6084 aarch64_instr_sequence *insn_sequence)
6085 {
6086 insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
6087 }
6088
6089 /* Initialize the instruction sequence INSN_SEQUENCE with the instruction
6090 INST. If INST is NULL the given INSN_SEQUENCE is simply cleared and left
6091 empty. */
6092
6093 void
6094 init_insn_sequence (const struct aarch64_inst *inst,
6095 aarch64_instr_sequence *insn_sequence)
6096 {
6097 int num_req_entries = 0;
6098
6099 if (insn_sequence->instr)
6100 {
6101 XDELETE (insn_sequence->instr);
6102 insn_sequence->instr = NULL;
6103 }
6104
6105 /* Handle all the cases here. May need to think of something smarter than
6106 a giant if/else chain if this grows. At that time, a lookup table may be
6107 best. */
6108 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
6109 num_req_entries = 1;
6110 if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
6111 num_req_entries = 2;
6112
6113 insn_sequence->num_added_insns = 0;
6114 insn_sequence->num_allocated_insns = num_req_entries;
6115
6116 if (num_req_entries != 0)
6117 {
6118 insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
6119 add_insn_to_sequence (inst, insn_sequence);
6120 }
6121 }
6122
6123 /* Subroutine of verify_constraints. Check whether the instruction
6124 is part of a MOPS P/M/E sequence and, if so, whether sequencing
6125 expectations are met. Return true if the check passes, otherwise
6126 describe the problem in MISMATCH_DETAIL.
6127
6128 IS_NEW_SECTION is true if INST is assumed to start a new section.
6129 The other arguments are as for verify_constraints. */
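/* For example (illustrative assembly only), a well-formed MOPS sequence is:

       cpyfp [x0]!, [x1]!, x2!
       cpyfm [x0]!, [x1]!, x2!
       cpyfe [x0]!, [x1]!, x2!

   i.e. the prologue/main/epilogue forms appear consecutively and keep the
   same destination, source and size registers; deviations are reported
   through MISMATCH_DETAIL as non-fatal errors.  */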
6130
6131 static bool
6132 verify_mops_pme_sequence (const struct aarch64_inst *inst,
6133 bool is_new_section,
6134 aarch64_operand_error *mismatch_detail,
6135 aarch64_instr_sequence *insn_sequence)
6136 {
6137 const struct aarch64_opcode *opcode;
6138 const struct aarch64_inst *prev_insn;
6139 int i;
6140
6141 opcode = inst->opcode;
6142 if (insn_sequence->instr)
6143 prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
6144 else
6145 prev_insn = NULL;
6146
6147 if (prev_insn
6148 && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
6149 && prev_insn->opcode != opcode - 1)
6150 {
6151 mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
6152 mismatch_detail->error = NULL;
6153 mismatch_detail->index = -1;
6154 mismatch_detail->data[0].s = prev_insn->opcode[1].name;
6155 mismatch_detail->data[1].s = prev_insn->opcode->name;
6156 mismatch_detail->non_fatal = true;
6157 return false;
6158 }
6159
6160 if (opcode->constraints & C_SCAN_MOPS_PME)
6161 {
6162 if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
6163 {
6164 mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
6165 mismatch_detail->error = NULL;
6166 mismatch_detail->index = -1;
6167 mismatch_detail->data[0].s = opcode->name;
6168 mismatch_detail->data[1].s = opcode[-1].name;
6169 mismatch_detail->non_fatal = true;
6170 return false;
6171 }
6172
6173 for (i = 0; i < 3; ++i)
6174 /* There's no specific requirement for the data register to be
6175 the same between consecutive SET* instructions. */
6176 if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
6177 || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
6178 || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
6179 && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
6180 {
6181 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6182 if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
6183 mismatch_detail->error = _("destination register differs from "
6184 "preceding instruction");
6185 else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
6186 mismatch_detail->error = _("source register differs from "
6187 "preceding instruction");
6188 else
6189 mismatch_detail->error = _("size register differs from "
6190 "preceding instruction");
6191 mismatch_detail->index = i;
6192 mismatch_detail->non_fatal = true;
6193 return false;
6194 }
6195 }
6196
6197 return true;
6198 }
6199
6200 /* This function verifies that the instruction INST adheres to its specified
6201 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
6202 returned and MISMATCH_DETAIL contains the reason why verification failed.
6203
6204 The function is called both during assembly and disassembly. If assembling
6205 then ENCODING will be TRUE, else FALSE. If disassembling, PC will be set
6206 and will contain the PC of the current instruction w.r.t. the section.
6207
6208 If disassembling and PC=0 then we are at the start of a section. The
6209 constraints are verified against the given state INSN_SEQUENCE, which is
6210 updated as it transitions through the verification. */
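/* For example (illustrative SVE assembly only), the sequence

       movprfx z0, z1
       add z0.s, z0.s, #1

   satisfies the MOVPRFX checks below, whereas following the movprfx with an
   instruction that cannot take a MOVPRFX prefix yields a non-fatal ERR_VFI
   with a suitable MISMATCH_DETAIL message.  */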
6211
6212 enum err_type
6213 verify_constraints (const struct aarch64_inst *inst,
6214 const aarch64_insn insn ATTRIBUTE_UNUSED,
6215 bfd_vma pc,
6216 bool encoding,
6217 aarch64_operand_error *mismatch_detail,
6218 aarch64_instr_sequence *insn_sequence)
6219 {
6220 assert (inst);
6221 assert (inst->opcode);
6222
6223 const struct aarch64_opcode *opcode = inst->opcode;
6224 if (!opcode->constraints && !insn_sequence->instr)
6225 return ERR_OK;
6226
6227 assert (insn_sequence);
6228
6229 enum err_type res = ERR_OK;
6230
6231 /* This instruction puts a constraint on the insn_sequence. */
6232 if (opcode->flags & F_SCAN)
6233 {
6234 if (insn_sequence->instr)
6235 {
6236 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6237 mismatch_detail->error = _("instruction opens new dependency "
6238 "sequence without ending previous one");
6239 mismatch_detail->index = -1;
6240 mismatch_detail->non_fatal = true;
6241 res = ERR_VFI;
6242 }
6243
6244 init_insn_sequence (inst, insn_sequence);
6245 return res;
6246 }
6247
6248 bool is_new_section = (!encoding && pc == 0);
6249 if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
6250 insn_sequence))
6251 {
6252 res = ERR_VFI;
6253 if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
6254 init_insn_sequence (NULL, insn_sequence);
6255 }
6256
6257 /* Verify constraints on an existing sequence. */
6258 if (insn_sequence->instr)
6259 {
6260 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
6261 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
6262 closed a previous one that we should have. */
6263 if (is_new_section && res == ERR_OK)
6264 {
6265 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6266 mismatch_detail->error = _("previous `movprfx' sequence not closed");
6267 mismatch_detail->index = -1;
6268 mismatch_detail->non_fatal = true;
6269 res = ERR_VFI;
6270 /* Reset the sequence. */
6271 init_insn_sequence (NULL, insn_sequence);
6272 return res;
6273 }
6274
6275 /* Validate C_SCAN_MOVPRFX constraints. TODO: move this to a lookup table. */
6276 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
6277 {
6278 /* Check whether the MOVPRFX is followed by an SVE instruction at all;
6279 checking this separately lets us give a more specific error message. */
6280 if (!opcode->avariant
6281 || !(*opcode->avariant &
6282 (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
6283 {
6284 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6285 mismatch_detail->error = _("SVE instruction expected after "
6286 "`movprfx'");
6287 mismatch_detail->index = -1;
6288 mismatch_detail->non_fatal = true;
6289 res = ERR_VFI;
6290 goto done;
6291 }
6292
6293 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
6294 instruction that is allowed to be used with a MOVPRFX. */
6295 if (!(opcode->constraints & C_SCAN_MOVPRFX))
6296 {
6297 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6298 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
6299 "expected");
6300 mismatch_detail->index = -1;
6301 mismatch_detail->non_fatal = true;
6302 res = ERR_VFI;
6303 goto done;
6304 }
6305
6306 /* Scan the operands: count uses of the movprfx destination register, track the widest element size, and record the governing predicate, if any. */
6307 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
6308 aarch64_opnd_info blk_pred, inst_pred;
6309 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
6310 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
6311 bool predicated = false;
6312 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
6313
6314 /* Determine whether the movprfx instruction is predicated. */
6315 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
6316 {
6317 predicated = true;
6318 blk_pred = insn_sequence->instr->operands[1];
6319 }
6320
6321 unsigned char max_elem_size = 0;
6322 unsigned char current_elem_size;
6323 int num_op_used = 0, last_op_usage = 0;
6324 int i, inst_pred_idx = -1;
6325 int num_ops = aarch64_num_of_operands (opcode);
6326 for (i = 0; i < num_ops; i++)
6327 {
6328 aarch64_opnd_info inst_op = inst->operands[i];
6329 switch (inst_op.type)
6330 {
6331 case AARCH64_OPND_SVE_Zd:
6332 case AARCH64_OPND_SVE_Zm_5:
6333 case AARCH64_OPND_SVE_Zm_16:
6334 case AARCH64_OPND_SVE_Zn:
6335 case AARCH64_OPND_SVE_Zt:
6336 case AARCH64_OPND_SVE_Vm:
6337 case AARCH64_OPND_SVE_Vn:
6338 case AARCH64_OPND_Va:
6339 case AARCH64_OPND_Vn:
6340 case AARCH64_OPND_Vm:
6341 case AARCH64_OPND_Sn:
6342 case AARCH64_OPND_Sm:
6343 if (inst_op.reg.regno == blk_dest.reg.regno)
6344 {
6345 num_op_used++;
6346 last_op_usage = i;
6347 }
6348 current_elem_size
6349 = aarch64_get_qualifier_esize (inst_op.qualifier);
6350 if (current_elem_size > max_elem_size)
6351 max_elem_size = current_elem_size;
6352 break;
6353 case AARCH64_OPND_SVE_Pd:
6354 case AARCH64_OPND_SVE_Pg3:
6355 case AARCH64_OPND_SVE_Pg4_5:
6356 case AARCH64_OPND_SVE_Pg4_10:
6357 case AARCH64_OPND_SVE_Pg4_16:
6358 case AARCH64_OPND_SVE_Pm:
6359 case AARCH64_OPND_SVE_Pn:
6360 case AARCH64_OPND_SVE_Pt:
6361 case AARCH64_OPND_SME_Pm:
6362 inst_pred = inst_op;
6363 inst_pred_idx = i;
6364 break;
6365 default:
6366 break;
6367 }
6368 }
6369
6370 assert (max_elem_size != 0);
6371 aarch64_opnd_info inst_dest = inst->operands[0];
6372 /* Determine the size that should be used to compare against the
6373 movprfx size. */
6374 current_elem_size
6375 = opcode->constraints & C_MAX_ELEM
6376 ? max_elem_size
6377 : aarch64_get_qualifier_esize (inst_dest.qualifier);
6378
6379 /* If movprfx is predicated do some extra checks. */
6380 if (predicated)
6381 {
6382 /* The instruction must be predicated. */
6383 if (inst_pred_idx < 0)
6384 {
6385 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6386 mismatch_detail->error = _("predicated instruction expected "
6387 "after `movprfx'");
6388 mismatch_detail->index = -1;
6389 mismatch_detail->non_fatal = true;
6390 res = ERR_VFI;
6391 goto done;
6392 }
6393
6394 /* The instruction must have a merging predicate. */
6395 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
6396 {
6397 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6398 mismatch_detail->error = _("merging predicate expected due "
6399 "to preceding `movprfx'");
6400 mismatch_detail->index = inst_pred_idx;
6401 mismatch_detail->non_fatal = true;
6402 res = ERR_VFI;
6403 goto done;
6404 }
6405
6406 /* The same predicate register must be used in the instruction. */
6407 if (blk_pred.reg.regno != inst_pred.reg.regno)
6408 {
6409 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6410 mismatch_detail->error = _("predicate register differs "
6411 "from that in preceding "
6412 "`movprfx'");
6413 mismatch_detail->index = inst_pred_idx;
6414 mismatch_detail->non_fatal = true;
6415 res = ERR_VFI;
6416 goto done;
6417 }
6418 }
6419
6420 /* Destructive operations read their destination register as a source, so
6421 they are allowed one extra use of the same register. */
6422 int allowed_usage
6423 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
6424
6425 /* The movprfx destination register is not used at all. */
6426 if (num_op_used == 0)
6427 {
6428 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6429 mismatch_detail->error = _("output register of preceding "
6430 "`movprfx' not used in current "
6431 "instruction");
6432 mismatch_detail->index = 0;
6433 mismatch_detail->non_fatal = true;
6434 res = ERR_VFI;
6435 goto done;
6436 }
6437
6438 /* We now know the register is used; determine exactly where. */
6439 if (blk_dest.reg.regno != inst_dest.reg.regno)
6440 {
6441 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6442 mismatch_detail->error = _("output register of preceding "
6443 "`movprfx' expected as output");
6444 mismatch_detail->index = 0;
6445 mismatch_detail->non_fatal = true;
6446 res = ERR_VFI;
6447 goto done;
6448 }
6449
6450 /* Operand used more than allowed for the specific opcode type. */
6451 if (num_op_used > allowed_usage)
6452 {
6453 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6454 mismatch_detail->error = _("output register of preceding "
6455 "`movprfx' used as input");
6456 mismatch_detail->index = last_op_usage;
6457 mismatch_detail->non_fatal = true;
6458 res = ERR_VFI;
6459 goto done;
6460 }
6461
6462 /* Now the only thing left is the qualifier checks. The register
6463 must have the same maximum element size. */
6464 if (inst_dest.qualifier
6465 && blk_dest.qualifier
6466 && current_elem_size
6467 != aarch64_get_qualifier_esize (blk_dest.qualifier))
6468 {
6469 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
6470 mismatch_detail->error = _("register size not compatible with "
6471 "previous `movprfx'");
6472 mismatch_detail->index = 0;
6473 mismatch_detail->non_fatal = true;
6474 res = ERR_VFI;
6475 goto done;
6476 }
6477 }
6478
6479 done:
6480 if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
6481 /* We've checked the last instruction in the sequence and so
6482 don't need the sequence any more. */
6483 init_insn_sequence (NULL, insn_sequence);
6484 else
6485 add_insn_to_sequence (inst, insn_sequence);
6486 }
6487
6488 return res;
6489 }
6490
6491
6492 /* Return true if UVALUE cannot be moved into an SVE register using DUP
6493 (with any element size, not just ESIZE) and if using DUPM would
6494 therefore be OK. ESIZE is the number of bytes in the immediate. */
6495
6496 bool
6497 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
6498 {
6499 int64_t svalue = uvalue;
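/* UPPER masks the bits above the low ESIZE bytes. The shift is split in two so that ESIZE == 8 shifts by 32 twice rather than by 64, which would be undefined behaviour. */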
6500 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
6501
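/* The immediate must be a zero- or sign-extension of its low ESIZE bytes. */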
6502 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
6503 return false;
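/* Narrow SVALUE to the smallest element size at which the value still replicates. */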
6504 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
6505 {
6506 svalue = (int32_t) uvalue;
6507 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
6508 {
6509 svalue = (int16_t) uvalue;
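/* A value that also replicates at byte granularity can always be encoded by DUP, so DUPM is not wanted. */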
6510 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
6511 return false;
6512 }
6513 }
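/* DUP takes an 8-bit signed immediate, optionally shifted left by 8 bits. Strip a zero low byte; DUPM is then only wanted if the remaining value does not fit in 8 signed bits. */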
6514 if ((svalue & 0xff) == 0)
6515 svalue /= 256;
6516 return svalue < -128 || svalue >= 128;
6517 }
6518
6519 /* Return true if a CPU with the AARCH64_FEATURE_* bits in CPU_VARIANT
6520 supports the instruction described by INST. */
6521
6522 bool
6523 aarch64_cpu_supports_inst_p (uint64_t cpu_variant, aarch64_inst *inst)
6524 {
6525 if (!inst->opcode->avariant
6526 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *inst->opcode->avariant))
6527 return false;
6528
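/* The double-precision (.d) forms of the sme_fp_sd and sme_int_sd instruction classes additionally require the SME_F64F64 and SME_I16I64 features respectively. */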
6529 if (inst->opcode->iclass == sme_fp_sd
6530 && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
6531 && !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant,
6532 AARCH64_FEATURE_SME_F64F64))
6533 return false;
6534
6535 if (inst->opcode->iclass == sme_int_sd
6536 && inst->operands[0].qualifier == AARCH64_OPND_QLF_S_D
6537 && !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant,
6538 AARCH64_FEATURE_SME_I16I64))
6539 return false;
6540
6541 return true;
6542 }
6543
6544 /* Include the opcode description table as well as the operand description
6545 table. */
6546 #define VERIFIER(x) verify_##x
6547 #include "aarch64-tbl.h"