aarch64: Use RTL builtins for integer mla intrinsics
1 /* ARM NEON intrinsics include file.
2
3 Copyright (C) 2011-2021 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published
10 by the Free Software Foundation; either version 3, or (at your
11 option) any later version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 License for more details.
17
18 Under Section 7 of GPL version 3, you are granted additional
19 permissions described in the GCC Runtime Library Exception, version
20 3.1, as published by the Free Software Foundation.
21
22 You should have received a copy of the GNU General Public License and
23 a copy of the GCC Runtime Library Exception along with this program;
24 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
25 <http://www.gnu.org/licenses/>. */
26
27 #ifndef _AARCH64_NEON_H_
28 #define _AARCH64_NEON_H_
29
30 #pragma GCC push_options
31 #pragma GCC target ("+nothing+simd")
32
33 #include <stdint.h>
34
35 #define __AARCH64_UINT64_C(__C) ((uint64_t) __C)
36 #define __AARCH64_INT64_C(__C) ((int64_t) __C)
37
38 typedef __Int8x8_t int8x8_t;
39 typedef __Int16x4_t int16x4_t;
40 typedef __Int32x2_t int32x2_t;
41 typedef __Int64x1_t int64x1_t;
42 typedef __Float16x4_t float16x4_t;
43 typedef __Float32x2_t float32x2_t;
44 typedef __Poly8x8_t poly8x8_t;
45 typedef __Poly16x4_t poly16x4_t;
46 typedef __Uint8x8_t uint8x8_t;
47 typedef __Uint16x4_t uint16x4_t;
48 typedef __Uint32x2_t uint32x2_t;
49 typedef __Float64x1_t float64x1_t;
50 typedef __Uint64x1_t uint64x1_t;
51 typedef __Int8x16_t int8x16_t;
52 typedef __Int16x8_t int16x8_t;
53 typedef __Int32x4_t int32x4_t;
54 typedef __Int64x2_t int64x2_t;
55 typedef __Float16x8_t float16x8_t;
56 typedef __Float32x4_t float32x4_t;
57 typedef __Float64x2_t float64x2_t;
58 typedef __Poly8x16_t poly8x16_t;
59 typedef __Poly16x8_t poly16x8_t;
60 typedef __Poly64x2_t poly64x2_t;
61 typedef __Poly64x1_t poly64x1_t;
62 typedef __Uint8x16_t uint8x16_t;
63 typedef __Uint16x8_t uint16x8_t;
64 typedef __Uint32x4_t uint32x4_t;
65 typedef __Uint64x2_t uint64x2_t;
66
67 typedef __Poly8_t poly8_t;
68 typedef __Poly16_t poly16_t;
69 typedef __Poly64_t poly64_t;
70 typedef __Poly128_t poly128_t;
71
72 typedef __fp16 float16_t;
73 typedef float float32_t;
74 typedef double float64_t;
75
76 typedef __Bfloat16x4_t bfloat16x4_t;
77 typedef __Bfloat16x8_t bfloat16x8_t;
78
79 typedef struct bfloat16x4x2_t
80 {
81 bfloat16x4_t val[2];
82 } bfloat16x4x2_t;
83
84 typedef struct bfloat16x8x2_t
85 {
86 bfloat16x8_t val[2];
87 } bfloat16x8x2_t;
88
89 typedef struct bfloat16x4x3_t
90 {
91 bfloat16x4_t val[3];
92 } bfloat16x4x3_t;
93
94 typedef struct bfloat16x8x3_t
95 {
96 bfloat16x8_t val[3];
97 } bfloat16x8x3_t;
98
99 typedef struct bfloat16x4x4_t
100 {
101 bfloat16x4_t val[4];
102 } bfloat16x4x4_t;
103
104 typedef struct bfloat16x8x4_t
105 {
106 bfloat16x8_t val[4];
107 } bfloat16x8x4_t;
108
109 typedef struct int8x8x2_t
110 {
111 int8x8_t val[2];
112 } int8x8x2_t;
113
114 typedef struct int8x16x2_t
115 {
116 int8x16_t val[2];
117 } int8x16x2_t;
118
119 typedef struct int16x4x2_t
120 {
121 int16x4_t val[2];
122 } int16x4x2_t;
123
124 typedef struct int16x8x2_t
125 {
126 int16x8_t val[2];
127 } int16x8x2_t;
128
129 typedef struct int32x2x2_t
130 {
131 int32x2_t val[2];
132 } int32x2x2_t;
133
134 typedef struct int32x4x2_t
135 {
136 int32x4_t val[2];
137 } int32x4x2_t;
138
139 typedef struct int64x1x2_t
140 {
141 int64x1_t val[2];
142 } int64x1x2_t;
143
144 typedef struct int64x2x2_t
145 {
146 int64x2_t val[2];
147 } int64x2x2_t;
148
149 typedef struct uint8x8x2_t
150 {
151 uint8x8_t val[2];
152 } uint8x8x2_t;
153
154 typedef struct uint8x16x2_t
155 {
156 uint8x16_t val[2];
157 } uint8x16x2_t;
158
159 typedef struct uint16x4x2_t
160 {
161 uint16x4_t val[2];
162 } uint16x4x2_t;
163
164 typedef struct uint16x8x2_t
165 {
166 uint16x8_t val[2];
167 } uint16x8x2_t;
168
169 typedef struct uint32x2x2_t
170 {
171 uint32x2_t val[2];
172 } uint32x2x2_t;
173
174 typedef struct uint32x4x2_t
175 {
176 uint32x4_t val[2];
177 } uint32x4x2_t;
178
179 typedef struct uint64x1x2_t
180 {
181 uint64x1_t val[2];
182 } uint64x1x2_t;
183
184 typedef struct uint64x2x2_t
185 {
186 uint64x2_t val[2];
187 } uint64x2x2_t;
188
189 typedef struct float16x4x2_t
190 {
191 float16x4_t val[2];
192 } float16x4x2_t;
193
194 typedef struct float16x8x2_t
195 {
196 float16x8_t val[2];
197 } float16x8x2_t;
198
199 typedef struct float32x2x2_t
200 {
201 float32x2_t val[2];
202 } float32x2x2_t;
203
204 typedef struct float32x4x2_t
205 {
206 float32x4_t val[2];
207 } float32x4x2_t;
208
209 typedef struct float64x2x2_t
210 {
211 float64x2_t val[2];
212 } float64x2x2_t;
213
214 typedef struct float64x1x2_t
215 {
216 float64x1_t val[2];
217 } float64x1x2_t;
218
219 typedef struct poly8x8x2_t
220 {
221 poly8x8_t val[2];
222 } poly8x8x2_t;
223
224 typedef struct poly8x16x2_t
225 {
226 poly8x16_t val[2];
227 } poly8x16x2_t;
228
229 typedef struct poly16x4x2_t
230 {
231 poly16x4_t val[2];
232 } poly16x4x2_t;
233
234 typedef struct poly16x8x2_t
235 {
236 poly16x8_t val[2];
237 } poly16x8x2_t;
238
239 typedef struct poly64x1x2_t
240 {
241 poly64x1_t val[2];
242 } poly64x1x2_t;
243
244 typedef struct poly64x1x3_t
245 {
246 poly64x1_t val[3];
247 } poly64x1x3_t;
248
249 typedef struct poly64x1x4_t
250 {
251 poly64x1_t val[4];
252 } poly64x1x4_t;
253
254 typedef struct poly64x2x2_t
255 {
256 poly64x2_t val[2];
257 } poly64x2x2_t;
258
259 typedef struct poly64x2x3_t
260 {
261 poly64x2_t val[3];
262 } poly64x2x3_t;
263
264 typedef struct poly64x2x4_t
265 {
266 poly64x2_t val[4];
267 } poly64x2x4_t;
268
269 typedef struct int8x8x3_t
270 {
271 int8x8_t val[3];
272 } int8x8x3_t;
273
274 typedef struct int8x16x3_t
275 {
276 int8x16_t val[3];
277 } int8x16x3_t;
278
279 typedef struct int16x4x3_t
280 {
281 int16x4_t val[3];
282 } int16x4x3_t;
283
284 typedef struct int16x8x3_t
285 {
286 int16x8_t val[3];
287 } int16x8x3_t;
288
289 typedef struct int32x2x3_t
290 {
291 int32x2_t val[3];
292 } int32x2x3_t;
293
294 typedef struct int32x4x3_t
295 {
296 int32x4_t val[3];
297 } int32x4x3_t;
298
299 typedef struct int64x1x3_t
300 {
301 int64x1_t val[3];
302 } int64x1x3_t;
303
304 typedef struct int64x2x3_t
305 {
306 int64x2_t val[3];
307 } int64x2x3_t;
308
309 typedef struct uint8x8x3_t
310 {
311 uint8x8_t val[3];
312 } uint8x8x3_t;
313
314 typedef struct uint8x16x3_t
315 {
316 uint8x16_t val[3];
317 } uint8x16x3_t;
318
319 typedef struct uint16x4x3_t
320 {
321 uint16x4_t val[3];
322 } uint16x4x3_t;
323
324 typedef struct uint16x8x3_t
325 {
326 uint16x8_t val[3];
327 } uint16x8x3_t;
328
329 typedef struct uint32x2x3_t
330 {
331 uint32x2_t val[3];
332 } uint32x2x3_t;
333
334 typedef struct uint32x4x3_t
335 {
336 uint32x4_t val[3];
337 } uint32x4x3_t;
338
339 typedef struct uint64x1x3_t
340 {
341 uint64x1_t val[3];
342 } uint64x1x3_t;
343
344 typedef struct uint64x2x3_t
345 {
346 uint64x2_t val[3];
347 } uint64x2x3_t;
348
349 typedef struct float16x4x3_t
350 {
351 float16x4_t val[3];
352 } float16x4x3_t;
353
354 typedef struct float16x8x3_t
355 {
356 float16x8_t val[3];
357 } float16x8x3_t;
358
359 typedef struct float32x2x3_t
360 {
361 float32x2_t val[3];
362 } float32x2x3_t;
363
364 typedef struct float32x4x3_t
365 {
366 float32x4_t val[3];
367 } float32x4x3_t;
368
369 typedef struct float64x2x3_t
370 {
371 float64x2_t val[3];
372 } float64x2x3_t;
373
374 typedef struct float64x1x3_t
375 {
376 float64x1_t val[3];
377 } float64x1x3_t;
378
379 typedef struct poly8x8x3_t
380 {
381 poly8x8_t val[3];
382 } poly8x8x3_t;
383
384 typedef struct poly8x16x3_t
385 {
386 poly8x16_t val[3];
387 } poly8x16x3_t;
388
389 typedef struct poly16x4x3_t
390 {
391 poly16x4_t val[3];
392 } poly16x4x3_t;
393
394 typedef struct poly16x8x3_t
395 {
396 poly16x8_t val[3];
397 } poly16x8x3_t;
398
399 typedef struct int8x8x4_t
400 {
401 int8x8_t val[4];
402 } int8x8x4_t;
403
404 typedef struct int8x16x4_t
405 {
406 int8x16_t val[4];
407 } int8x16x4_t;
408
409 typedef struct int16x4x4_t
410 {
411 int16x4_t val[4];
412 } int16x4x4_t;
413
414 typedef struct int16x8x4_t
415 {
416 int16x8_t val[4];
417 } int16x8x4_t;
418
419 typedef struct int32x2x4_t
420 {
421 int32x2_t val[4];
422 } int32x2x4_t;
423
424 typedef struct int32x4x4_t
425 {
426 int32x4_t val[4];
427 } int32x4x4_t;
428
429 typedef struct int64x1x4_t
430 {
431 int64x1_t val[4];
432 } int64x1x4_t;
433
434 typedef struct int64x2x4_t
435 {
436 int64x2_t val[4];
437 } int64x2x4_t;
438
439 typedef struct uint8x8x4_t
440 {
441 uint8x8_t val[4];
442 } uint8x8x4_t;
443
444 typedef struct uint8x16x4_t
445 {
446 uint8x16_t val[4];
447 } uint8x16x4_t;
448
449 typedef struct uint16x4x4_t
450 {
451 uint16x4_t val[4];
452 } uint16x4x4_t;
453
454 typedef struct uint16x8x4_t
455 {
456 uint16x8_t val[4];
457 } uint16x8x4_t;
458
459 typedef struct uint32x2x4_t
460 {
461 uint32x2_t val[4];
462 } uint32x2x4_t;
463
464 typedef struct uint32x4x4_t
465 {
466 uint32x4_t val[4];
467 } uint32x4x4_t;
468
469 typedef struct uint64x1x4_t
470 {
471 uint64x1_t val[4];
472 } uint64x1x4_t;
473
474 typedef struct uint64x2x4_t
475 {
476 uint64x2_t val[4];
477 } uint64x2x4_t;
478
479 typedef struct float16x4x4_t
480 {
481 float16x4_t val[4];
482 } float16x4x4_t;
483
484 typedef struct float16x8x4_t
485 {
486 float16x8_t val[4];
487 } float16x8x4_t;
488
489 typedef struct float32x2x4_t
490 {
491 float32x2_t val[4];
492 } float32x2x4_t;
493
494 typedef struct float32x4x4_t
495 {
496 float32x4_t val[4];
497 } float32x4x4_t;
498
499 typedef struct float64x2x4_t
500 {
501 float64x2_t val[4];
502 } float64x2x4_t;
503
504 typedef struct float64x1x4_t
505 {
506 float64x1_t val[4];
507 } float64x1x4_t;
508
509 typedef struct poly8x8x4_t
510 {
511 poly8x8_t val[4];
512 } poly8x8x4_t;
513
514 typedef struct poly8x16x4_t
515 {
516 poly8x16_t val[4];
517 } poly8x16x4_t;
518
519 typedef struct poly16x4x4_t
520 {
521 poly16x4_t val[4];
522 } poly16x4x4_t;
523
524 typedef struct poly16x8x4_t
525 {
526 poly16x8_t val[4];
527 } poly16x8x4_t;
528
529 /* __aarch64_vdup_lane internal macros. */
530 #define __aarch64_vdup_lane_any(__size, __q, __a, __b) \
531 vdup##__q##_n_##__size (__aarch64_vget_lane_any (__a, __b))
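
/* For example, __aarch64_vdup_lane_f32 (__a, __b) below expands to
   vdup_n_f32 (__aarch64_vget_lane_any (__a, __b)): the requested lane is
   read out of __a and then broadcast with the matching vdup_n (or
   vdupq_n, when __q is 'q') intrinsic.  */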
532
533 #define __aarch64_vdup_lane_f16(__a, __b) \
534 __aarch64_vdup_lane_any (f16, , __a, __b)
535 #define __aarch64_vdup_lane_f32(__a, __b) \
536 __aarch64_vdup_lane_any (f32, , __a, __b)
537 #define __aarch64_vdup_lane_f64(__a, __b) \
538 __aarch64_vdup_lane_any (f64, , __a, __b)
539 #define __aarch64_vdup_lane_p8(__a, __b) \
540 __aarch64_vdup_lane_any (p8, , __a, __b)
541 #define __aarch64_vdup_lane_p16(__a, __b) \
542 __aarch64_vdup_lane_any (p16, , __a, __b)
543 #define __aarch64_vdup_lane_p64(__a, __b) \
544 __aarch64_vdup_lane_any (p64, , __a, __b)
545 #define __aarch64_vdup_lane_s8(__a, __b) \
546 __aarch64_vdup_lane_any (s8, , __a, __b)
547 #define __aarch64_vdup_lane_s16(__a, __b) \
548 __aarch64_vdup_lane_any (s16, , __a, __b)
549 #define __aarch64_vdup_lane_s32(__a, __b) \
550 __aarch64_vdup_lane_any (s32, , __a, __b)
551 #define __aarch64_vdup_lane_s64(__a, __b) \
552 __aarch64_vdup_lane_any (s64, , __a, __b)
553 #define __aarch64_vdup_lane_u8(__a, __b) \
554 __aarch64_vdup_lane_any (u8, , __a, __b)
555 #define __aarch64_vdup_lane_u16(__a, __b) \
556 __aarch64_vdup_lane_any (u16, , __a, __b)
557 #define __aarch64_vdup_lane_u32(__a, __b) \
558 __aarch64_vdup_lane_any (u32, , __a, __b)
559 #define __aarch64_vdup_lane_u64(__a, __b) \
560 __aarch64_vdup_lane_any (u64, , __a, __b)
561
562 /* __aarch64_vdup_laneq internal macros. */
563 #define __aarch64_vdup_laneq_f16(__a, __b) \
564 __aarch64_vdup_lane_any (f16, , __a, __b)
565 #define __aarch64_vdup_laneq_f32(__a, __b) \
566 __aarch64_vdup_lane_any (f32, , __a, __b)
567 #define __aarch64_vdup_laneq_f64(__a, __b) \
568 __aarch64_vdup_lane_any (f64, , __a, __b)
569 #define __aarch64_vdup_laneq_p8(__a, __b) \
570 __aarch64_vdup_lane_any (p8, , __a, __b)
571 #define __aarch64_vdup_laneq_p16(__a, __b) \
572 __aarch64_vdup_lane_any (p16, , __a, __b)
573 #define __aarch64_vdup_laneq_p64(__a, __b) \
574 __aarch64_vdup_lane_any (p64, , __a, __b)
575 #define __aarch64_vdup_laneq_s8(__a, __b) \
576 __aarch64_vdup_lane_any (s8, , __a, __b)
577 #define __aarch64_vdup_laneq_s16(__a, __b) \
578 __aarch64_vdup_lane_any (s16, , __a, __b)
579 #define __aarch64_vdup_laneq_s32(__a, __b) \
580 __aarch64_vdup_lane_any (s32, , __a, __b)
581 #define __aarch64_vdup_laneq_s64(__a, __b) \
582 __aarch64_vdup_lane_any (s64, , __a, __b)
583 #define __aarch64_vdup_laneq_u8(__a, __b) \
584 __aarch64_vdup_lane_any (u8, , __a, __b)
585 #define __aarch64_vdup_laneq_u16(__a, __b) \
586 __aarch64_vdup_lane_any (u16, , __a, __b)
587 #define __aarch64_vdup_laneq_u32(__a, __b) \
588 __aarch64_vdup_lane_any (u32, , __a, __b)
589 #define __aarch64_vdup_laneq_u64(__a, __b) \
590 __aarch64_vdup_lane_any (u64, , __a, __b)
591
592 /* __aarch64_vdupq_lane internal macros. */
593 #define __aarch64_vdupq_lane_f16(__a, __b) \
594 __aarch64_vdup_lane_any (f16, q, __a, __b)
595 #define __aarch64_vdupq_lane_f32(__a, __b) \
596 __aarch64_vdup_lane_any (f32, q, __a, __b)
597 #define __aarch64_vdupq_lane_f64(__a, __b) \
598 __aarch64_vdup_lane_any (f64, q, __a, __b)
599 #define __aarch64_vdupq_lane_p8(__a, __b) \
600 __aarch64_vdup_lane_any (p8, q, __a, __b)
601 #define __aarch64_vdupq_lane_p16(__a, __b) \
602 __aarch64_vdup_lane_any (p16, q, __a, __b)
603 #define __aarch64_vdupq_lane_p64(__a, __b) \
604 __aarch64_vdup_lane_any (p64, q, __a, __b)
605 #define __aarch64_vdupq_lane_s8(__a, __b) \
606 __aarch64_vdup_lane_any (s8, q, __a, __b)
607 #define __aarch64_vdupq_lane_s16(__a, __b) \
608 __aarch64_vdup_lane_any (s16, q, __a, __b)
609 #define __aarch64_vdupq_lane_s32(__a, __b) \
610 __aarch64_vdup_lane_any (s32, q, __a, __b)
611 #define __aarch64_vdupq_lane_s64(__a, __b) \
612 __aarch64_vdup_lane_any (s64, q, __a, __b)
613 #define __aarch64_vdupq_lane_u8(__a, __b) \
614 __aarch64_vdup_lane_any (u8, q, __a, __b)
615 #define __aarch64_vdupq_lane_u16(__a, __b) \
616 __aarch64_vdup_lane_any (u16, q, __a, __b)
617 #define __aarch64_vdupq_lane_u32(__a, __b) \
618 __aarch64_vdup_lane_any (u32, q, __a, __b)
619 #define __aarch64_vdupq_lane_u64(__a, __b) \
620 __aarch64_vdup_lane_any (u64, q, __a, __b)
621
622 /* __aarch64_vdupq_laneq internal macros. */
623 #define __aarch64_vdupq_laneq_f16(__a, __b) \
624 __aarch64_vdup_lane_any (f16, q, __a, __b)
625 #define __aarch64_vdupq_laneq_f32(__a, __b) \
626 __aarch64_vdup_lane_any (f32, q, __a, __b)
627 #define __aarch64_vdupq_laneq_f64(__a, __b) \
628 __aarch64_vdup_lane_any (f64, q, __a, __b)
629 #define __aarch64_vdupq_laneq_p8(__a, __b) \
630 __aarch64_vdup_lane_any (p8, q, __a, __b)
631 #define __aarch64_vdupq_laneq_p16(__a, __b) \
632 __aarch64_vdup_lane_any (p16, q, __a, __b)
633 #define __aarch64_vdupq_laneq_p64(__a, __b) \
634 __aarch64_vdup_lane_any (p64, q, __a, __b)
635 #define __aarch64_vdupq_laneq_s8(__a, __b) \
636 __aarch64_vdup_lane_any (s8, q, __a, __b)
637 #define __aarch64_vdupq_laneq_s16(__a, __b) \
638 __aarch64_vdup_lane_any (s16, q, __a, __b)
639 #define __aarch64_vdupq_laneq_s32(__a, __b) \
640 __aarch64_vdup_lane_any (s32, q, __a, __b)
641 #define __aarch64_vdupq_laneq_s64(__a, __b) \
642 __aarch64_vdup_lane_any (s64, q, __a, __b)
643 #define __aarch64_vdupq_laneq_u8(__a, __b) \
644 __aarch64_vdup_lane_any (u8, q, __a, __b)
645 #define __aarch64_vdupq_laneq_u16(__a, __b) \
646 __aarch64_vdup_lane_any (u16, q, __a, __b)
647 #define __aarch64_vdupq_laneq_u32(__a, __b) \
648 __aarch64_vdup_lane_any (u32, q, __a, __b)
649 #define __aarch64_vdupq_laneq_u64(__a, __b) \
650 __aarch64_vdup_lane_any (u64, q, __a, __b)
651
652 /* Internal macro for lane indices. */
653
654 #define __AARCH64_NUM_LANES(__v) (sizeof (__v) / sizeof (__v[0]))
655 #define __AARCH64_LANE_CHECK(__vec, __idx) \
656 __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __idx)
657
658 /* For big-endian, GCC's vector indices are the opposite way around
659 to the architectural lane indices used by Neon intrinsics. */
660 #ifdef __AARCH64EB__
661 #define __aarch64_lane(__vec, __idx) (__AARCH64_NUM_LANES (__vec) - 1 - __idx)
662 #else
663 #define __aarch64_lane(__vec, __idx) __idx
664 #endif
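
/* For example, with a uint32x4_t (four lanes) on a big-endian target,
   architectural lane 0 is GCC vector index 3, so __aarch64_lane (__vec, 0)
   evaluates to 3; on little-endian targets the index is passed through
   unchanged.  */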
665
666 /* vget_lane internal macro. */
667 #define __aarch64_vget_lane_any(__vec, __index) \
668 __extension__ \
669 ({ \
670 __AARCH64_LANE_CHECK (__vec, __index); \
671 __vec[__aarch64_lane (__vec, __index)]; \
672 })
673
674 /* vset_lane and vld1_lane internal macro. */
675 #define __aarch64_vset_lane_any(__elem, __vec, __index) \
676 __extension__ \
677 ({ \
678 __AARCH64_LANE_CHECK (__vec, __index); \
679 __vec[__aarch64_lane (__vec, __index)] = __elem; \
680 __vec; \
681 })
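
/* Illustrative use of the two helpers above: for an int32x2_t __v,
     __aarch64_vget_lane_any (__v, 1)
   checks at compile time that 1 is a valid lane index for __v and reads
   that lane, while
     __v = __aarch64_vset_lane_any (__elem, __v, 0);
   writes __elem into lane 0 and yields the updated vector.  The public
   vget_lane, vset_lane and vld1_lane intrinsics are built on these
   macros.  */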
682
683 /* vadd */
684 __extension__ extern __inline int8x8_t
685 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
686 vadd_s8 (int8x8_t __a, int8x8_t __b)
687 {
688 return __a + __b;
689 }
690
691 __extension__ extern __inline int16x4_t
692 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
693 vadd_s16 (int16x4_t __a, int16x4_t __b)
694 {
695 return __a + __b;
696 }
697
698 __extension__ extern __inline int32x2_t
699 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
700 vadd_s32 (int32x2_t __a, int32x2_t __b)
701 {
702 return __a + __b;
703 }
704
705 __extension__ extern __inline float32x2_t
706 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
707 vadd_f32 (float32x2_t __a, float32x2_t __b)
708 {
709 return __a + __b;
710 }
711
712 __extension__ extern __inline float64x1_t
713 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
714 vadd_f64 (float64x1_t __a, float64x1_t __b)
715 {
716 return __a + __b;
717 }
718
719 __extension__ extern __inline uint8x8_t
720 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
721 vadd_u8 (uint8x8_t __a, uint8x8_t __b)
722 {
723 return __a + __b;
724 }
725
726 __extension__ extern __inline uint16x4_t
727 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
728 vadd_u16 (uint16x4_t __a, uint16x4_t __b)
729 {
730 return __a + __b;
731 }
732
733 __extension__ extern __inline uint32x2_t
734 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
735 vadd_u32 (uint32x2_t __a, uint32x2_t __b)
736 {
737 return __a + __b;
738 }
739
740 __extension__ extern __inline int64x1_t
741 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
742 vadd_s64 (int64x1_t __a, int64x1_t __b)
743 {
744 return __a + __b;
745 }
746
747 __extension__ extern __inline uint64x1_t
748 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
749 vadd_u64 (uint64x1_t __a, uint64x1_t __b)
750 {
751 return __a + __b;
752 }
753
754 __extension__ extern __inline int8x16_t
755 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
756 vaddq_s8 (int8x16_t __a, int8x16_t __b)
757 {
758 return __a + __b;
759 }
760
761 __extension__ extern __inline int16x8_t
762 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
763 vaddq_s16 (int16x8_t __a, int16x8_t __b)
764 {
765 return __a + __b;
766 }
767
768 __extension__ extern __inline int32x4_t
769 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
770 vaddq_s32 (int32x4_t __a, int32x4_t __b)
771 {
772 return __a + __b;
773 }
774
775 __extension__ extern __inline int64x2_t
776 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
777 vaddq_s64 (int64x2_t __a, int64x2_t __b)
778 {
779 return __a + __b;
780 }
781
782 __extension__ extern __inline float32x4_t
783 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
784 vaddq_f32 (float32x4_t __a, float32x4_t __b)
785 {
786 return __a + __b;
787 }
788
789 __extension__ extern __inline float64x2_t
790 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
791 vaddq_f64 (float64x2_t __a, float64x2_t __b)
792 {
793 return __a + __b;
794 }
795
796 __extension__ extern __inline uint8x16_t
797 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
798 vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
799 {
800 return __a + __b;
801 }
802
803 __extension__ extern __inline uint16x8_t
804 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
805 vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
806 {
807 return __a + __b;
808 }
809
810 __extension__ extern __inline uint32x4_t
811 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
812 vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
813 {
814 return __a + __b;
815 }
816
817 __extension__ extern __inline uint64x2_t
818 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
819 vaddq_u64 (uint64x2_t __a, uint64x2_t __b)
820 {
821 return __a + __b;
822 }
823
824 __extension__ extern __inline int16x8_t
825 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
826 vaddl_s8 (int8x8_t __a, int8x8_t __b)
827 {
828 return (int16x8_t) __builtin_aarch64_saddlv8qi (__a, __b);
829 }
830
831 __extension__ extern __inline int32x4_t
832 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
833 vaddl_s16 (int16x4_t __a, int16x4_t __b)
834 {
835 return (int32x4_t) __builtin_aarch64_saddlv4hi (__a, __b);
836 }
837
838 __extension__ extern __inline int64x2_t
839 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
840 vaddl_s32 (int32x2_t __a, int32x2_t __b)
841 {
842 return (int64x2_t) __builtin_aarch64_saddlv2si (__a, __b);
843 }
844
845 __extension__ extern __inline uint16x8_t
846 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
847 vaddl_u8 (uint8x8_t __a, uint8x8_t __b)
848 {
849 return (uint16x8_t) __builtin_aarch64_uaddlv8qi ((int8x8_t) __a,
850 (int8x8_t) __b);
851 }
852
853 __extension__ extern __inline uint32x4_t
854 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
855 vaddl_u16 (uint16x4_t __a, uint16x4_t __b)
856 {
857 return (uint32x4_t) __builtin_aarch64_uaddlv4hi ((int16x4_t) __a,
858 (int16x4_t) __b);
859 }
860
861 __extension__ extern __inline uint64x2_t
862 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
863 vaddl_u32 (uint32x2_t __a, uint32x2_t __b)
864 {
865 return (uint64x2_t) __builtin_aarch64_uaddlv2si ((int32x2_t) __a,
866 (int32x2_t) __b);
867 }
868
869 __extension__ extern __inline int16x8_t
870 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
871 vaddl_high_s8 (int8x16_t __a, int8x16_t __b)
872 {
873 return (int16x8_t) __builtin_aarch64_saddl2v16qi (__a, __b);
874 }
875
876 __extension__ extern __inline int32x4_t
877 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
878 vaddl_high_s16 (int16x8_t __a, int16x8_t __b)
879 {
880 return (int32x4_t) __builtin_aarch64_saddl2v8hi (__a, __b);
881 }
882
883 __extension__ extern __inline int64x2_t
884 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
885 vaddl_high_s32 (int32x4_t __a, int32x4_t __b)
886 {
887 return (int64x2_t) __builtin_aarch64_saddl2v4si (__a, __b);
888 }
889
890 __extension__ extern __inline uint16x8_t
891 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
892 vaddl_high_u8 (uint8x16_t __a, uint8x16_t __b)
893 {
894 return (uint16x8_t) __builtin_aarch64_uaddl2v16qi ((int8x16_t) __a,
895 (int8x16_t) __b);
896 }
897
898 __extension__ extern __inline uint32x4_t
899 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
900 vaddl_high_u16 (uint16x8_t __a, uint16x8_t __b)
901 {
902 return (uint32x4_t) __builtin_aarch64_uaddl2v8hi ((int16x8_t) __a,
903 (int16x8_t) __b);
904 }
905
906 __extension__ extern __inline uint64x2_t
907 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
908 vaddl_high_u32 (uint32x4_t __a, uint32x4_t __b)
909 {
910 return (uint64x2_t) __builtin_aarch64_uaddl2v4si ((int32x4_t) __a,
911 (int32x4_t) __b);
912 }
913
914 __extension__ extern __inline int16x8_t
915 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
916 vaddw_s8 (int16x8_t __a, int8x8_t __b)
917 {
918 return (int16x8_t) __builtin_aarch64_saddwv8qi (__a, __b);
919 }
920
921 __extension__ extern __inline int32x4_t
922 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
923 vaddw_s16 (int32x4_t __a, int16x4_t __b)
924 {
925 return (int32x4_t) __builtin_aarch64_saddwv4hi (__a, __b);
926 }
927
928 __extension__ extern __inline int64x2_t
929 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
930 vaddw_s32 (int64x2_t __a, int32x2_t __b)
931 {
932 return (int64x2_t) __builtin_aarch64_saddwv2si (__a, __b);
933 }
934
935 __extension__ extern __inline uint16x8_t
936 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
937 vaddw_u8 (uint16x8_t __a, uint8x8_t __b)
938 {
939 return (uint16x8_t) __builtin_aarch64_uaddwv8qi ((int16x8_t) __a,
940 (int8x8_t) __b);
941 }
942
943 __extension__ extern __inline uint32x4_t
944 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
945 vaddw_u16 (uint32x4_t __a, uint16x4_t __b)
946 {
947 return (uint32x4_t) __builtin_aarch64_uaddwv4hi ((int32x4_t) __a,
948 (int16x4_t) __b);
949 }
950
951 __extension__ extern __inline uint64x2_t
952 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
953 vaddw_u32 (uint64x2_t __a, uint32x2_t __b)
954 {
955 return (uint64x2_t) __builtin_aarch64_uaddwv2si ((int64x2_t) __a,
956 (int32x2_t) __b);
957 }
958
959 __extension__ extern __inline int16x8_t
960 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
961 vaddw_high_s8 (int16x8_t __a, int8x16_t __b)
962 {
963 return (int16x8_t) __builtin_aarch64_saddw2v16qi (__a, __b);
964 }
965
966 __extension__ extern __inline int32x4_t
967 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
968 vaddw_high_s16 (int32x4_t __a, int16x8_t __b)
969 {
970 return (int32x4_t) __builtin_aarch64_saddw2v8hi (__a, __b);
971 }
972
973 __extension__ extern __inline int64x2_t
974 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
975 vaddw_high_s32 (int64x2_t __a, int32x4_t __b)
976 {
977 return (int64x2_t) __builtin_aarch64_saddw2v4si (__a, __b);
978 }
979
980 __extension__ extern __inline uint16x8_t
981 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
982 vaddw_high_u8 (uint16x8_t __a, uint8x16_t __b)
983 {
984 return (uint16x8_t) __builtin_aarch64_uaddw2v16qi ((int16x8_t) __a,
985 (int8x16_t) __b);
986 }
987
988 __extension__ extern __inline uint32x4_t
989 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
990 vaddw_high_u16 (uint32x4_t __a, uint16x8_t __b)
991 {
992 return (uint32x4_t) __builtin_aarch64_uaddw2v8hi ((int32x4_t) __a,
993 (int16x8_t) __b);
994 }
995
996 __extension__ extern __inline uint64x2_t
997 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
998 vaddw_high_u32 (uint64x2_t __a, uint32x4_t __b)
999 {
1000 return (uint64x2_t) __builtin_aarch64_uaddw2v4si ((int64x2_t) __a,
1001 (int32x4_t) __b);
1002 }
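
/* vaddl widens both narrow operands before adding (SADDL/UADDL), vaddw
   adds a narrow operand to an already-wide one (SADDW/UADDW), and the
   _high forms do the same using the upper halves of their 128-bit
   operands (SADDL2/UADDL2, SADDW2/UADDW2).  */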
1003
1004 __extension__ extern __inline int8x8_t
1005 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1006 vhadd_s8 (int8x8_t __a, int8x8_t __b)
1007 {
1008 return (int8x8_t) __builtin_aarch64_shaddv8qi (__a, __b);
1009 }
1010
1011 __extension__ extern __inline int16x4_t
1012 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1013 vhadd_s16 (int16x4_t __a, int16x4_t __b)
1014 {
1015 return (int16x4_t) __builtin_aarch64_shaddv4hi (__a, __b);
1016 }
1017
1018 __extension__ extern __inline int32x2_t
1019 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1020 vhadd_s32 (int32x2_t __a, int32x2_t __b)
1021 {
1022 return (int32x2_t) __builtin_aarch64_shaddv2si (__a, __b);
1023 }
1024
1025 __extension__ extern __inline uint8x8_t
1026 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1027 vhadd_u8 (uint8x8_t __a, uint8x8_t __b)
1028 {
1029 return (uint8x8_t) __builtin_aarch64_uhaddv8qi ((int8x8_t) __a,
1030 (int8x8_t) __b);
1031 }
1032
1033 __extension__ extern __inline uint16x4_t
1034 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1035 vhadd_u16 (uint16x4_t __a, uint16x4_t __b)
1036 {
1037 return (uint16x4_t) __builtin_aarch64_uhaddv4hi ((int16x4_t) __a,
1038 (int16x4_t) __b);
1039 }
1040
1041 __extension__ extern __inline uint32x2_t
1042 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1043 vhadd_u32 (uint32x2_t __a, uint32x2_t __b)
1044 {
1045 return (uint32x2_t) __builtin_aarch64_uhaddv2si ((int32x2_t) __a,
1046 (int32x2_t) __b);
1047 }
1048
1049 __extension__ extern __inline int8x16_t
1050 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1051 vhaddq_s8 (int8x16_t __a, int8x16_t __b)
1052 {
1053 return (int8x16_t) __builtin_aarch64_shaddv16qi (__a, __b);
1054 }
1055
1056 __extension__ extern __inline int16x8_t
1057 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1058 vhaddq_s16 (int16x8_t __a, int16x8_t __b)
1059 {
1060 return (int16x8_t) __builtin_aarch64_shaddv8hi (__a, __b);
1061 }
1062
1063 __extension__ extern __inline int32x4_t
1064 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1065 vhaddq_s32 (int32x4_t __a, int32x4_t __b)
1066 {
1067 return (int32x4_t) __builtin_aarch64_shaddv4si (__a, __b);
1068 }
1069
1070 __extension__ extern __inline uint8x16_t
1071 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1072 vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
1073 {
1074 return (uint8x16_t) __builtin_aarch64_uhaddv16qi ((int8x16_t) __a,
1075 (int8x16_t) __b);
1076 }
1077
1078 __extension__ extern __inline uint16x8_t
1079 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1080 vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
1081 {
1082 return (uint16x8_t) __builtin_aarch64_uhaddv8hi ((int16x8_t) __a,
1083 (int16x8_t) __b);
1084 }
1085
1086 __extension__ extern __inline uint32x4_t
1087 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1088 vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
1089 {
1090 return (uint32x4_t) __builtin_aarch64_uhaddv4si ((int32x4_t) __a,
1091 (int32x4_t) __b);
1092 }
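
/* vhadd/vhaddq compute the element-wise halving add (__a + __b) >> 1 on
   the full-precision intermediate sum, discarding the low bit
   (SHADD/UHADD).  */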
1093
1094 __extension__ extern __inline int8x8_t
1095 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1096 vrhadd_s8 (int8x8_t __a, int8x8_t __b)
1097 {
1098 return (int8x8_t) __builtin_aarch64_srhaddv8qi (__a, __b);
1099 }
1100
1101 __extension__ extern __inline int16x4_t
1102 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1103 vrhadd_s16 (int16x4_t __a, int16x4_t __b)
1104 {
1105 return (int16x4_t) __builtin_aarch64_srhaddv4hi (__a, __b);
1106 }
1107
1108 __extension__ extern __inline int32x2_t
1109 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1110 vrhadd_s32 (int32x2_t __a, int32x2_t __b)
1111 {
1112 return (int32x2_t) __builtin_aarch64_srhaddv2si (__a, __b);
1113 }
1114
1115 __extension__ extern __inline uint8x8_t
1116 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1117 vrhadd_u8 (uint8x8_t __a, uint8x8_t __b)
1118 {
1119 return (uint8x8_t) __builtin_aarch64_urhaddv8qi ((int8x8_t) __a,
1120 (int8x8_t) __b);
1121 }
1122
1123 __extension__ extern __inline uint16x4_t
1124 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1125 vrhadd_u16 (uint16x4_t __a, uint16x4_t __b)
1126 {
1127 return (uint16x4_t) __builtin_aarch64_urhaddv4hi ((int16x4_t) __a,
1128 (int16x4_t) __b);
1129 }
1130
1131 __extension__ extern __inline uint32x2_t
1132 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1133 vrhadd_u32 (uint32x2_t __a, uint32x2_t __b)
1134 {
1135 return (uint32x2_t) __builtin_aarch64_urhaddv2si ((int32x2_t) __a,
1136 (int32x2_t) __b);
1137 }
1138
1139 __extension__ extern __inline int8x16_t
1140 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1141 vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
1142 {
1143 return (int8x16_t) __builtin_aarch64_srhaddv16qi (__a, __b);
1144 }
1145
1146 __extension__ extern __inline int16x8_t
1147 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1148 vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
1149 {
1150 return (int16x8_t) __builtin_aarch64_srhaddv8hi (__a, __b);
1151 }
1152
1153 __extension__ extern __inline int32x4_t
1154 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1155 vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
1156 {
1157 return (int32x4_t) __builtin_aarch64_srhaddv4si (__a, __b);
1158 }
1159
1160 __extension__ extern __inline uint8x16_t
1161 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1162 vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
1163 {
1164 return (uint8x16_t) __builtin_aarch64_urhaddv16qi ((int8x16_t) __a,
1165 (int8x16_t) __b);
1166 }
1167
1168 __extension__ extern __inline uint16x8_t
1169 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1170 vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
1171 {
1172 return (uint16x8_t) __builtin_aarch64_urhaddv8hi ((int16x8_t) __a,
1173 (int16x8_t) __b);
1174 }
1175
1176 __extension__ extern __inline uint32x4_t
1177 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1178 vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
1179 {
1180 return (uint32x4_t) __builtin_aarch64_urhaddv4si ((int32x4_t) __a,
1181 (int32x4_t) __b);
1182 }
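
/* vrhadd/vrhaddq are the rounding counterparts of vhadd/vhaddq: they
   compute (__a + __b + 1) >> 1 on the full-precision sum, so halves are
   rounded up rather than truncated (SRHADD/URHADD).  */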
1183
1184 __extension__ extern __inline int8x8_t
1185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1186 vaddhn_s16 (int16x8_t __a, int16x8_t __b)
1187 {
1188 return (int8x8_t) __builtin_aarch64_addhnv8hi (__a, __b);
1189 }
1190
1191 __extension__ extern __inline int16x4_t
1192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1193 vaddhn_s32 (int32x4_t __a, int32x4_t __b)
1194 {
1195 return (int16x4_t) __builtin_aarch64_addhnv4si (__a, __b);
1196 }
1197
1198 __extension__ extern __inline int32x2_t
1199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1200 vaddhn_s64 (int64x2_t __a, int64x2_t __b)
1201 {
1202 return (int32x2_t) __builtin_aarch64_addhnv2di (__a, __b);
1203 }
1204
1205 __extension__ extern __inline uint8x8_t
1206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1207 vaddhn_u16 (uint16x8_t __a, uint16x8_t __b)
1208 {
1209 return (uint8x8_t) __builtin_aarch64_addhnv8hi ((int16x8_t) __a,
1210 (int16x8_t) __b);
1211 }
1212
1213 __extension__ extern __inline uint16x4_t
1214 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1215 vaddhn_u32 (uint32x4_t __a, uint32x4_t __b)
1216 {
1217 return (uint16x4_t) __builtin_aarch64_addhnv4si ((int32x4_t) __a,
1218 (int32x4_t) __b);
1219 }
1220
1221 __extension__ extern __inline uint32x2_t
1222 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1223 vaddhn_u64 (uint64x2_t __a, uint64x2_t __b)
1224 {
1225 return (uint32x2_t) __builtin_aarch64_addhnv2di ((int64x2_t) __a,
1226 (int64x2_t) __b);
1227 }
1228
1229 __extension__ extern __inline int8x8_t
1230 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1231 vraddhn_s16 (int16x8_t __a, int16x8_t __b)
1232 {
1233 return (int8x8_t) __builtin_aarch64_raddhnv8hi (__a, __b);
1234 }
1235
1236 __extension__ extern __inline int16x4_t
1237 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1238 vraddhn_s32 (int32x4_t __a, int32x4_t __b)
1239 {
1240 return (int16x4_t) __builtin_aarch64_raddhnv4si (__a, __b);
1241 }
1242
1243 __extension__ extern __inline int32x2_t
1244 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1245 vraddhn_s64 (int64x2_t __a, int64x2_t __b)
1246 {
1247 return (int32x2_t) __builtin_aarch64_raddhnv2di (__a, __b);
1248 }
1249
1250 __extension__ extern __inline uint8x8_t
1251 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1252 vraddhn_u16 (uint16x8_t __a, uint16x8_t __b)
1253 {
1254 return (uint8x8_t) __builtin_aarch64_raddhnv8hi ((int16x8_t) __a,
1255 (int16x8_t) __b);
1256 }
1257
1258 __extension__ extern __inline uint16x4_t
1259 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1260 vraddhn_u32 (uint32x4_t __a, uint32x4_t __b)
1261 {
1262 return (uint16x4_t) __builtin_aarch64_raddhnv4si ((int32x4_t) __a,
1263 (int32x4_t) __b);
1264 }
1265
1266 __extension__ extern __inline uint32x2_t
1267 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1268 vraddhn_u64 (uint64x2_t __a, uint64x2_t __b)
1269 {
1270 return (uint32x2_t) __builtin_aarch64_raddhnv2di ((int64x2_t) __a,
1271 (int64x2_t) __b);
1272 }
1273
1274 __extension__ extern __inline int8x16_t
1275 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1276 vaddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
1277 {
1278 return (int8x16_t) __builtin_aarch64_addhn2v8hi (__a, __b, __c);
1279 }
1280
1281 __extension__ extern __inline int16x8_t
1282 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1283 vaddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
1284 {
1285 return (int16x8_t) __builtin_aarch64_addhn2v4si (__a, __b, __c);
1286 }
1287
1288 __extension__ extern __inline int32x4_t
1289 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1290 vaddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
1291 {
1292 return (int32x4_t) __builtin_aarch64_addhn2v2di (__a, __b, __c);
1293 }
1294
1295 __extension__ extern __inline uint8x16_t
1296 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1297 vaddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
1298 {
1299 return (uint8x16_t) __builtin_aarch64_addhn2v8hi ((int8x8_t) __a,
1300 (int16x8_t) __b,
1301 (int16x8_t) __c);
1302 }
1303
1304 __extension__ extern __inline uint16x8_t
1305 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1306 vaddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
1307 {
1308 return (uint16x8_t) __builtin_aarch64_addhn2v4si ((int16x4_t) __a,
1309 (int32x4_t) __b,
1310 (int32x4_t) __c);
1311 }
1312
1313 __extension__ extern __inline uint32x4_t
1314 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1315 vaddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
1316 {
1317 return (uint32x4_t) __builtin_aarch64_addhn2v2di ((int32x2_t) __a,
1318 (int64x2_t) __b,
1319 (int64x2_t) __c);
1320 }
1321
1322 __extension__ extern __inline int8x16_t
1323 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1324 vraddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
1325 {
1326 return (int8x16_t) __builtin_aarch64_raddhn2v8hi (__a, __b, __c);
1327 }
1328
1329 __extension__ extern __inline int16x8_t
1330 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1331 vraddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
1332 {
1333 return (int16x8_t) __builtin_aarch64_raddhn2v4si (__a, __b, __c);
1334 }
1335
1336 __extension__ extern __inline int32x4_t
1337 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1338 vraddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
1339 {
1340 return (int32x4_t) __builtin_aarch64_raddhn2v2di (__a, __b, __c);
1341 }
1342
1343 __extension__ extern __inline uint8x16_t
1344 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1345 vraddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
1346 {
1347 return (uint8x16_t) __builtin_aarch64_raddhn2v8hi ((int8x8_t) __a,
1348 (int16x8_t) __b,
1349 (int16x8_t) __c);
1350 }
1351
1352 __extension__ extern __inline uint16x8_t
1353 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1354 vraddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
1355 {
1356 return (uint16x8_t) __builtin_aarch64_raddhn2v4si ((int16x4_t) __a,
1357 (int32x4_t) __b,
1358 (int32x4_t) __c);
1359 }
1360
1361 __extension__ extern __inline uint32x4_t
1362 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1363 vraddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
1364 {
1365 return (uint32x4_t) __builtin_aarch64_raddhn2v2di ((int32x2_t) __a,
1366 (int64x2_t) __b,
1367 (int64x2_t) __c);
1368 }
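
/* The vaddhn/vraddhn families narrow by keeping only the most
   significant half of each element of the full-width sum of the two wide
   operands (ADDHN); the vr forms first add a rounding constant of
   1 << (narrow element width - 1) (RADDHN).  The _high variants place
   the narrowed result in the upper half of the return value, with the
   first argument supplying the lower half (ADDHN2/RADDHN2).  */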
1369
1370 __extension__ extern __inline float32x2_t
1371 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1372 vdiv_f32 (float32x2_t __a, float32x2_t __b)
1373 {
1374 return __a / __b;
1375 }
1376
1377 __extension__ extern __inline float64x1_t
1378 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1379 vdiv_f64 (float64x1_t __a, float64x1_t __b)
1380 {
1381 return __a / __b;
1382 }
1383
1384 __extension__ extern __inline float32x4_t
1385 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1386 vdivq_f32 (float32x4_t __a, float32x4_t __b)
1387 {
1388 return __a / __b;
1389 }
1390
1391 __extension__ extern __inline float64x2_t
1392 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1393 vdivq_f64 (float64x2_t __a, float64x2_t __b)
1394 {
1395 return __a / __b;
1396 }
1397
1398 __extension__ extern __inline int8x8_t
1399 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1400 vmul_s8 (int8x8_t __a, int8x8_t __b)
1401 {
1402 return __a * __b;
1403 }
1404
1405 __extension__ extern __inline int16x4_t
1406 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1407 vmul_s16 (int16x4_t __a, int16x4_t __b)
1408 {
1409 return __a * __b;
1410 }
1411
1412 __extension__ extern __inline int32x2_t
1413 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1414 vmul_s32 (int32x2_t __a, int32x2_t __b)
1415 {
1416 return __a * __b;
1417 }
1418
1419 __extension__ extern __inline float32x2_t
1420 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1421 vmul_f32 (float32x2_t __a, float32x2_t __b)
1422 {
1423 return __a * __b;
1424 }
1425
1426 __extension__ extern __inline float64x1_t
1427 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1428 vmul_f64 (float64x1_t __a, float64x1_t __b)
1429 {
1430 return __a * __b;
1431 }
1432
1433 __extension__ extern __inline uint8x8_t
1434 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1435 vmul_u8 (uint8x8_t __a, uint8x8_t __b)
1436 {
1437 return __a * __b;
1438 }
1439
1440 __extension__ extern __inline uint16x4_t
1441 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1442 vmul_u16 (uint16x4_t __a, uint16x4_t __b)
1443 {
1444 return __a * __b;
1445 }
1446
1447 __extension__ extern __inline uint32x2_t
1448 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1449 vmul_u32 (uint32x2_t __a, uint32x2_t __b)
1450 {
1451 return __a * __b;
1452 }
1453
1454 __extension__ extern __inline poly8x8_t
1455 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1456 vmul_p8 (poly8x8_t __a, poly8x8_t __b)
1457 {
1458 return (poly8x8_t) __builtin_aarch64_pmulv8qi ((int8x8_t) __a,
1459 (int8x8_t) __b);
1460 }
1461
1462 __extension__ extern __inline int8x16_t
1463 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1464 vmulq_s8 (int8x16_t __a, int8x16_t __b)
1465 {
1466 return __a * __b;
1467 }
1468
1469 __extension__ extern __inline int16x8_t
1470 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1471 vmulq_s16 (int16x8_t __a, int16x8_t __b)
1472 {
1473 return __a * __b;
1474 }
1475
1476 __extension__ extern __inline int32x4_t
1477 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1478 vmulq_s32 (int32x4_t __a, int32x4_t __b)
1479 {
1480 return __a * __b;
1481 }
1482
1483 __extension__ extern __inline float32x4_t
1484 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1485 vmulq_f32 (float32x4_t __a, float32x4_t __b)
1486 {
1487 return __a * __b;
1488 }
1489
1490 __extension__ extern __inline float64x2_t
1491 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1492 vmulq_f64 (float64x2_t __a, float64x2_t __b)
1493 {
1494 return __a * __b;
1495 }
1496
1497 __extension__ extern __inline uint8x16_t
1498 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1499 vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
1500 {
1501 return __a * __b;
1502 }
1503
1504 __extension__ extern __inline uint16x8_t
1505 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1506 vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
1507 {
1508 return __a * __b;
1509 }
1510
1511 __extension__ extern __inline uint32x4_t
1512 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1513 vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
1514 {
1515 return __a * __b;
1516 }
1517
1518 __extension__ extern __inline poly8x16_t
1519 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1520 vmulq_p8 (poly8x16_t __a, poly8x16_t __b)
1521 {
1522 return (poly8x16_t) __builtin_aarch64_pmulv16qi ((int8x16_t) __a,
1523 (int8x16_t) __b);
1524 }
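
/* vmul_p8/vmulq_p8 perform a polynomial (carry-less) multiplication of
   each pair of 8-bit elements over GF(2), keeping the low 8 bits of each
   product (PMUL).  */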
1525
1526 __extension__ extern __inline int8x8_t
1527 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1528 vand_s8 (int8x8_t __a, int8x8_t __b)
1529 {
1530 return __a & __b;
1531 }
1532
1533 __extension__ extern __inline int16x4_t
1534 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1535 vand_s16 (int16x4_t __a, int16x4_t __b)
1536 {
1537 return __a & __b;
1538 }
1539
1540 __extension__ extern __inline int32x2_t
1541 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1542 vand_s32 (int32x2_t __a, int32x2_t __b)
1543 {
1544 return __a & __b;
1545 }
1546
1547 __extension__ extern __inline uint8x8_t
1548 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1549 vand_u8 (uint8x8_t __a, uint8x8_t __b)
1550 {
1551 return __a & __b;
1552 }
1553
1554 __extension__ extern __inline uint16x4_t
1555 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1556 vand_u16 (uint16x4_t __a, uint16x4_t __b)
1557 {
1558 return __a & __b;
1559 }
1560
1561 __extension__ extern __inline uint32x2_t
1562 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1563 vand_u32 (uint32x2_t __a, uint32x2_t __b)
1564 {
1565 return __a & __b;
1566 }
1567
1568 __extension__ extern __inline int64x1_t
1569 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1570 vand_s64 (int64x1_t __a, int64x1_t __b)
1571 {
1572 return __a & __b;
1573 }
1574
1575 __extension__ extern __inline uint64x1_t
1576 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1577 vand_u64 (uint64x1_t __a, uint64x1_t __b)
1578 {
1579 return __a & __b;
1580 }
1581
1582 __extension__ extern __inline int8x16_t
1583 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1584 vandq_s8 (int8x16_t __a, int8x16_t __b)
1585 {
1586 return __a & __b;
1587 }
1588
1589 __extension__ extern __inline int16x8_t
1590 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1591 vandq_s16 (int16x8_t __a, int16x8_t __b)
1592 {
1593 return __a & __b;
1594 }
1595
1596 __extension__ extern __inline int32x4_t
1597 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1598 vandq_s32 (int32x4_t __a, int32x4_t __b)
1599 {
1600 return __a & __b;
1601 }
1602
1603 __extension__ extern __inline int64x2_t
1604 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1605 vandq_s64 (int64x2_t __a, int64x2_t __b)
1606 {
1607 return __a & __b;
1608 }
1609
1610 __extension__ extern __inline uint8x16_t
1611 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1612 vandq_u8 (uint8x16_t __a, uint8x16_t __b)
1613 {
1614 return __a & __b;
1615 }
1616
1617 __extension__ extern __inline uint16x8_t
1618 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1619 vandq_u16 (uint16x8_t __a, uint16x8_t __b)
1620 {
1621 return __a & __b;
1622 }
1623
1624 __extension__ extern __inline uint32x4_t
1625 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1626 vandq_u32 (uint32x4_t __a, uint32x4_t __b)
1627 {
1628 return __a & __b;
1629 }
1630
1631 __extension__ extern __inline uint64x2_t
1632 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1633 vandq_u64 (uint64x2_t __a, uint64x2_t __b)
1634 {
1635 return __a & __b;
1636 }
1637
1638 __extension__ extern __inline int8x8_t
1639 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1640 vorr_s8 (int8x8_t __a, int8x8_t __b)
1641 {
1642 return __a | __b;
1643 }
1644
1645 __extension__ extern __inline int16x4_t
1646 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1647 vorr_s16 (int16x4_t __a, int16x4_t __b)
1648 {
1649 return __a | __b;
1650 }
1651
1652 __extension__ extern __inline int32x2_t
1653 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1654 vorr_s32 (int32x2_t __a, int32x2_t __b)
1655 {
1656 return __a | __b;
1657 }
1658
1659 __extension__ extern __inline uint8x8_t
1660 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1661 vorr_u8 (uint8x8_t __a, uint8x8_t __b)
1662 {
1663 return __a | __b;
1664 }
1665
1666 __extension__ extern __inline uint16x4_t
1667 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1668 vorr_u16 (uint16x4_t __a, uint16x4_t __b)
1669 {
1670 return __a | __b;
1671 }
1672
1673 __extension__ extern __inline uint32x2_t
1674 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1675 vorr_u32 (uint32x2_t __a, uint32x2_t __b)
1676 {
1677 return __a | __b;
1678 }
1679
1680 __extension__ extern __inline int64x1_t
1681 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1682 vorr_s64 (int64x1_t __a, int64x1_t __b)
1683 {
1684 return __a | __b;
1685 }
1686
1687 __extension__ extern __inline uint64x1_t
1688 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1689 vorr_u64 (uint64x1_t __a, uint64x1_t __b)
1690 {
1691 return __a | __b;
1692 }
1693
1694 __extension__ extern __inline int8x16_t
1695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1696 vorrq_s8 (int8x16_t __a, int8x16_t __b)
1697 {
1698 return __a | __b;
1699 }
1700
1701 __extension__ extern __inline int16x8_t
1702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1703 vorrq_s16 (int16x8_t __a, int16x8_t __b)
1704 {
1705 return __a | __b;
1706 }
1707
1708 __extension__ extern __inline int32x4_t
1709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1710 vorrq_s32 (int32x4_t __a, int32x4_t __b)
1711 {
1712 return __a | __b;
1713 }
1714
1715 __extension__ extern __inline int64x2_t
1716 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1717 vorrq_s64 (int64x2_t __a, int64x2_t __b)
1718 {
1719 return __a | __b;
1720 }
1721
1722 __extension__ extern __inline uint8x16_t
1723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1724 vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
1725 {
1726 return __a | __b;
1727 }
1728
1729 __extension__ extern __inline uint16x8_t
1730 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1731 vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
1732 {
1733 return __a | __b;
1734 }
1735
1736 __extension__ extern __inline uint32x4_t
1737 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1738 vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
1739 {
1740 return __a | __b;
1741 }
1742
1743 __extension__ extern __inline uint64x2_t
1744 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1745 vorrq_u64 (uint64x2_t __a, uint64x2_t __b)
1746 {
1747 return __a | __b;
1748 }
1749
1750 __extension__ extern __inline int8x8_t
1751 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1752 veor_s8 (int8x8_t __a, int8x8_t __b)
1753 {
1754 return __a ^ __b;
1755 }
1756
1757 __extension__ extern __inline int16x4_t
1758 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1759 veor_s16 (int16x4_t __a, int16x4_t __b)
1760 {
1761 return __a ^ __b;
1762 }
1763
1764 __extension__ extern __inline int32x2_t
1765 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1766 veor_s32 (int32x2_t __a, int32x2_t __b)
1767 {
1768 return __a ^ __b;
1769 }
1770
1771 __extension__ extern __inline uint8x8_t
1772 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1773 veor_u8 (uint8x8_t __a, uint8x8_t __b)
1774 {
1775 return __a ^ __b;
1776 }
1777
1778 __extension__ extern __inline uint16x4_t
1779 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1780 veor_u16 (uint16x4_t __a, uint16x4_t __b)
1781 {
1782 return __a ^ __b;
1783 }
1784
1785 __extension__ extern __inline uint32x2_t
1786 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1787 veor_u32 (uint32x2_t __a, uint32x2_t __b)
1788 {
1789 return __a ^ __b;
1790 }
1791
1792 __extension__ extern __inline int64x1_t
1793 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1794 veor_s64 (int64x1_t __a, int64x1_t __b)
1795 {
1796 return __a ^ __b;
1797 }
1798
1799 __extension__ extern __inline uint64x1_t
1800 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1801 veor_u64 (uint64x1_t __a, uint64x1_t __b)
1802 {
1803 return __a ^ __b;
1804 }
1805
1806 __extension__ extern __inline int8x16_t
1807 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1808 veorq_s8 (int8x16_t __a, int8x16_t __b)
1809 {
1810 return __a ^ __b;
1811 }
1812
1813 __extension__ extern __inline int16x8_t
1814 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1815 veorq_s16 (int16x8_t __a, int16x8_t __b)
1816 {
1817 return __a ^ __b;
1818 }
1819
1820 __extension__ extern __inline int32x4_t
1821 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1822 veorq_s32 (int32x4_t __a, int32x4_t __b)
1823 {
1824 return __a ^ __b;
1825 }
1826
1827 __extension__ extern __inline int64x2_t
1828 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1829 veorq_s64 (int64x2_t __a, int64x2_t __b)
1830 {
1831 return __a ^ __b;
1832 }
1833
1834 __extension__ extern __inline uint8x16_t
1835 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1836 veorq_u8 (uint8x16_t __a, uint8x16_t __b)
1837 {
1838 return __a ^ __b;
1839 }
1840
1841 __extension__ extern __inline uint16x8_t
1842 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1843 veorq_u16 (uint16x8_t __a, uint16x8_t __b)
1844 {
1845 return __a ^ __b;
1846 }
1847
1848 __extension__ extern __inline uint32x4_t
1849 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1850 veorq_u32 (uint32x4_t __a, uint32x4_t __b)
1851 {
1852 return __a ^ __b;
1853 }
1854
1855 __extension__ extern __inline uint64x2_t
1856 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1857 veorq_u64 (uint64x2_t __a, uint64x2_t __b)
1858 {
1859 return __a ^ __b;
1860 }
1861
1862 __extension__ extern __inline int8x8_t
1863 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1864 vbic_s8 (int8x8_t __a, int8x8_t __b)
1865 {
1866 return __a & ~__b;
1867 }
1868
1869 __extension__ extern __inline int16x4_t
1870 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1871 vbic_s16 (int16x4_t __a, int16x4_t __b)
1872 {
1873 return __a & ~__b;
1874 }
1875
1876 __extension__ extern __inline int32x2_t
1877 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1878 vbic_s32 (int32x2_t __a, int32x2_t __b)
1879 {
1880 return __a & ~__b;
1881 }
1882
1883 __extension__ extern __inline uint8x8_t
1884 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1885 vbic_u8 (uint8x8_t __a, uint8x8_t __b)
1886 {
1887 return __a & ~__b;
1888 }
1889
1890 __extension__ extern __inline uint16x4_t
1891 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1892 vbic_u16 (uint16x4_t __a, uint16x4_t __b)
1893 {
1894 return __a & ~__b;
1895 }
1896
1897 __extension__ extern __inline uint32x2_t
1898 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1899 vbic_u32 (uint32x2_t __a, uint32x2_t __b)
1900 {
1901 return __a & ~__b;
1902 }
1903
1904 __extension__ extern __inline int64x1_t
1905 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1906 vbic_s64 (int64x1_t __a, int64x1_t __b)
1907 {
1908 return __a & ~__b;
1909 }
1910
1911 __extension__ extern __inline uint64x1_t
1912 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1913 vbic_u64 (uint64x1_t __a, uint64x1_t __b)
1914 {
1915 return __a & ~__b;
1916 }
1917
1918 __extension__ extern __inline int8x16_t
1919 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1920 vbicq_s8 (int8x16_t __a, int8x16_t __b)
1921 {
1922 return __a & ~__b;
1923 }
1924
1925 __extension__ extern __inline int16x8_t
1926 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1927 vbicq_s16 (int16x8_t __a, int16x8_t __b)
1928 {
1929 return __a & ~__b;
1930 }
1931
1932 __extension__ extern __inline int32x4_t
1933 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1934 vbicq_s32 (int32x4_t __a, int32x4_t __b)
1935 {
1936 return __a & ~__b;
1937 }
1938
1939 __extension__ extern __inline int64x2_t
1940 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1941 vbicq_s64 (int64x2_t __a, int64x2_t __b)
1942 {
1943 return __a & ~__b;
1944 }
1945
1946 __extension__ extern __inline uint8x16_t
1947 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1948 vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
1949 {
1950 return __a & ~__b;
1951 }
1952
1953 __extension__ extern __inline uint16x8_t
1954 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1955 vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
1956 {
1957 return __a & ~__b;
1958 }
1959
1960 __extension__ extern __inline uint32x4_t
1961 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1962 vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
1963 {
1964 return __a & ~__b;
1965 }
1966
1967 __extension__ extern __inline uint64x2_t
1968 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1969 vbicq_u64 (uint64x2_t __a, uint64x2_t __b)
1970 {
1971 return __a & ~__b;
1972 }
1973
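/* vorn: bitwise inclusive OR with complement; each lane is __a | ~__b.  */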
1974 __extension__ extern __inline int8x8_t
1975 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1976 vorn_s8 (int8x8_t __a, int8x8_t __b)
1977 {
1978 return __a | ~__b;
1979 }
1980
1981 __extension__ extern __inline int16x4_t
1982 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1983 vorn_s16 (int16x4_t __a, int16x4_t __b)
1984 {
1985 return __a | ~__b;
1986 }
1987
1988 __extension__ extern __inline int32x2_t
1989 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1990 vorn_s32 (int32x2_t __a, int32x2_t __b)
1991 {
1992 return __a | ~__b;
1993 }
1994
1995 __extension__ extern __inline uint8x8_t
1996 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
1997 vorn_u8 (uint8x8_t __a, uint8x8_t __b)
1998 {
1999 return __a | ~__b;
2000 }
2001
2002 __extension__ extern __inline uint16x4_t
2003 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2004 vorn_u16 (uint16x4_t __a, uint16x4_t __b)
2005 {
2006 return __a | ~__b;
2007 }
2008
2009 __extension__ extern __inline uint32x2_t
2010 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2011 vorn_u32 (uint32x2_t __a, uint32x2_t __b)
2012 {
2013 return __a | ~__b;
2014 }
2015
2016 __extension__ extern __inline int64x1_t
2017 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2018 vorn_s64 (int64x1_t __a, int64x1_t __b)
2019 {
2020 return __a | ~__b;
2021 }
2022
2023 __extension__ extern __inline uint64x1_t
2024 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2025 vorn_u64 (uint64x1_t __a, uint64x1_t __b)
2026 {
2027 return __a | ~__b;
2028 }
2029
2030 __extension__ extern __inline int8x16_t
2031 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2032 vornq_s8 (int8x16_t __a, int8x16_t __b)
2033 {
2034 return __a | ~__b;
2035 }
2036
2037 __extension__ extern __inline int16x8_t
2038 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2039 vornq_s16 (int16x8_t __a, int16x8_t __b)
2040 {
2041 return __a | ~__b;
2042 }
2043
2044 __extension__ extern __inline int32x4_t
2045 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2046 vornq_s32 (int32x4_t __a, int32x4_t __b)
2047 {
2048 return __a | ~__b;
2049 }
2050
2051 __extension__ extern __inline int64x2_t
2052 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2053 vornq_s64 (int64x2_t __a, int64x2_t __b)
2054 {
2055 return __a | ~__b;
2056 }
2057
2058 __extension__ extern __inline uint8x16_t
2059 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2060 vornq_u8 (uint8x16_t __a, uint8x16_t __b)
2061 {
2062 return __a | ~__b;
2063 }
2064
2065 __extension__ extern __inline uint16x8_t
2066 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2067 vornq_u16 (uint16x8_t __a, uint16x8_t __b)
2068 {
2069 return __a | ~__b;
2070 }
2071
2072 __extension__ extern __inline uint32x4_t
2073 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2074 vornq_u32 (uint32x4_t __a, uint32x4_t __b)
2075 {
2076 return __a | ~__b;
2077 }
2078
2079 __extension__ extern __inline uint64x2_t
2080 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2081 vornq_u64 (uint64x2_t __a, uint64x2_t __b)
2082 {
2083 return __a | ~__b;
2084 }
2085
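/* vsub: lane-wise subtraction.  */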
2086 __extension__ extern __inline int8x8_t
2087 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2088 vsub_s8 (int8x8_t __a, int8x8_t __b)
2089 {
2090 return __a - __b;
2091 }
2092
2093 __extension__ extern __inline int16x4_t
2094 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2095 vsub_s16 (int16x4_t __a, int16x4_t __b)
2096 {
2097 return __a - __b;
2098 }
2099
2100 __extension__ extern __inline int32x2_t
2101 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2102 vsub_s32 (int32x2_t __a, int32x2_t __b)
2103 {
2104 return __a - __b;
2105 }
2106
2107 __extension__ extern __inline float32x2_t
2108 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2109 vsub_f32 (float32x2_t __a, float32x2_t __b)
2110 {
2111 return __a - __b;
2112 }
2113
2114 __extension__ extern __inline float64x1_t
2115 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2116 vsub_f64 (float64x1_t __a, float64x1_t __b)
2117 {
2118 return __a - __b;
2119 }
2120
2121 __extension__ extern __inline uint8x8_t
2122 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2123 vsub_u8 (uint8x8_t __a, uint8x8_t __b)
2124 {
2125 return __a - __b;
2126 }
2127
2128 __extension__ extern __inline uint16x4_t
2129 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2130 vsub_u16 (uint16x4_t __a, uint16x4_t __b)
2131 {
2132 return __a - __b;
2133 }
2134
2135 __extension__ extern __inline uint32x2_t
2136 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2137 vsub_u32 (uint32x2_t __a, uint32x2_t __b)
2138 {
2139 return __a - __b;
2140 }
2141
2142 __extension__ extern __inline int64x1_t
2143 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2144 vsub_s64 (int64x1_t __a, int64x1_t __b)
2145 {
2146 return __a - __b;
2147 }
2148
2149 __extension__ extern __inline uint64x1_t
2150 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2151 vsub_u64 (uint64x1_t __a, uint64x1_t __b)
2152 {
2153 return __a - __b;
2154 }
2155
2156 __extension__ extern __inline int8x16_t
2157 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2158 vsubq_s8 (int8x16_t __a, int8x16_t __b)
2159 {
2160 return __a - __b;
2161 }
2162
2163 __extension__ extern __inline int16x8_t
2164 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2165 vsubq_s16 (int16x8_t __a, int16x8_t __b)
2166 {
2167 return __a - __b;
2168 }
2169
2170 __extension__ extern __inline int32x4_t
2171 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2172 vsubq_s32 (int32x4_t __a, int32x4_t __b)
2173 {
2174 return __a - __b;
2175 }
2176
2177 __extension__ extern __inline int64x2_t
2178 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2179 vsubq_s64 (int64x2_t __a, int64x2_t __b)
2180 {
2181 return __a - __b;
2182 }
2183
2184 __extension__ extern __inline float32x4_t
2185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2186 vsubq_f32 (float32x4_t __a, float32x4_t __b)
2187 {
2188 return __a - __b;
2189 }
2190
2191 __extension__ extern __inline float64x2_t
2192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2193 vsubq_f64 (float64x2_t __a, float64x2_t __b)
2194 {
2195 return __a - __b;
2196 }
2197
2198 __extension__ extern __inline uint8x16_t
2199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2200 vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
2201 {
2202 return __a - __b;
2203 }
2204
2205 __extension__ extern __inline uint16x8_t
2206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2207 vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
2208 {
2209 return __a - __b;
2210 }
2211
2212 __extension__ extern __inline uint32x4_t
2213 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2214 vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
2215 {
2216 return __a - __b;
2217 }
2218
2219 __extension__ extern __inline uint64x2_t
2220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2221 vsubq_u64 (uint64x2_t __a, uint64x2_t __b)
2222 {
2223 return __a - __b;
2224 }
2225
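/* vsubl/vsubl_high: widening subtraction; source lanes are sign- or
   zero-extended to twice their width before subtracting.  The _high
   forms operate on the upper halves of the quadword sources.  */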
2226 __extension__ extern __inline int16x8_t
2227 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2228 vsubl_s8 (int8x8_t __a, int8x8_t __b)
2229 {
2230 return (int16x8_t) __builtin_aarch64_ssublv8qi (__a, __b);
2231 }
2232
2233 __extension__ extern __inline int32x4_t
2234 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2235 vsubl_s16 (int16x4_t __a, int16x4_t __b)
2236 {
2237 return (int32x4_t) __builtin_aarch64_ssublv4hi (__a, __b);
2238 }
2239
2240 __extension__ extern __inline int64x2_t
2241 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2242 vsubl_s32 (int32x2_t __a, int32x2_t __b)
2243 {
2244 return (int64x2_t) __builtin_aarch64_ssublv2si (__a, __b);
2245 }
2246
2247 __extension__ extern __inline uint16x8_t
2248 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2249 vsubl_u8 (uint8x8_t __a, uint8x8_t __b)
2250 {
2251 return (uint16x8_t) __builtin_aarch64_usublv8qi ((int8x8_t) __a,
2252 (int8x8_t) __b);
2253 }
2254
2255 __extension__ extern __inline uint32x4_t
2256 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2257 vsubl_u16 (uint16x4_t __a, uint16x4_t __b)
2258 {
2259 return (uint32x4_t) __builtin_aarch64_usublv4hi ((int16x4_t) __a,
2260 (int16x4_t) __b);
2261 }
2262
2263 __extension__ extern __inline uint64x2_t
2264 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2265 vsubl_u32 (uint32x2_t __a, uint32x2_t __b)
2266 {
2267 return (uint64x2_t) __builtin_aarch64_usublv2si ((int32x2_t) __a,
2268 (int32x2_t) __b);
2269 }
2270
2271 __extension__ extern __inline int16x8_t
2272 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2273 vsubl_high_s8 (int8x16_t __a, int8x16_t __b)
2274 {
2275 return (int16x8_t) __builtin_aarch64_ssubl2v16qi (__a, __b);
2276 }
2277
2278 __extension__ extern __inline int32x4_t
2279 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2280 vsubl_high_s16 (int16x8_t __a, int16x8_t __b)
2281 {
2282 return (int32x4_t) __builtin_aarch64_ssubl2v8hi (__a, __b);
2283 }
2284
2285 __extension__ extern __inline int64x2_t
2286 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2287 vsubl_high_s32 (int32x4_t __a, int32x4_t __b)
2288 {
2289 return (int64x2_t) __builtin_aarch64_ssubl2v4si (__a, __b);
2290 }
2291
2292 __extension__ extern __inline uint16x8_t
2293 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2294 vsubl_high_u8 (uint8x16_t __a, uint8x16_t __b)
2295 {
2296 return (uint16x8_t) __builtin_aarch64_usubl2v16qi ((int8x16_t) __a,
2297 (int8x16_t) __b);
2298 }
2299
2300 __extension__ extern __inline uint32x4_t
2301 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2302 vsubl_high_u16 (uint16x8_t __a, uint16x8_t __b)
2303 {
2304 return (uint32x4_t) __builtin_aarch64_usubl2v8hi ((int16x8_t) __a,
2305 (int16x8_t) __b);
2306 }
2307
2308 __extension__ extern __inline uint64x2_t
2309 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2310 vsubl_high_u32 (uint32x4_t __a, uint32x4_t __b)
2311 {
2312 return (uint64x2_t) __builtin_aarch64_usubl2v4si ((int32x4_t) __a,
2313 (int32x4_t) __b);
2314 }
2315
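/* vsubw/vsubw_high: wide subtraction; only the narrow operand __b is
   widened before being subtracted from __a, the _high forms using the
   upper half of a quadword __b.  */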
2316 __extension__ extern __inline int16x8_t
2317 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2318 vsubw_s8 (int16x8_t __a, int8x8_t __b)
2319 {
2320 return (int16x8_t) __builtin_aarch64_ssubwv8qi (__a, __b);
2321 }
2322
2323 __extension__ extern __inline int32x4_t
2324 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2325 vsubw_s16 (int32x4_t __a, int16x4_t __b)
2326 {
2327 return (int32x4_t) __builtin_aarch64_ssubwv4hi (__a, __b);
2328 }
2329
2330 __extension__ extern __inline int64x2_t
2331 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2332 vsubw_s32 (int64x2_t __a, int32x2_t __b)
2333 {
2334 return (int64x2_t) __builtin_aarch64_ssubwv2si (__a, __b);
2335 }
2336
2337 __extension__ extern __inline uint16x8_t
2338 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2339 vsubw_u8 (uint16x8_t __a, uint8x8_t __b)
2340 {
2341 return (uint16x8_t) __builtin_aarch64_usubwv8qi ((int16x8_t) __a,
2342 (int8x8_t) __b);
2343 }
2344
2345 __extension__ extern __inline uint32x4_t
2346 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2347 vsubw_u16 (uint32x4_t __a, uint16x4_t __b)
2348 {
2349 return (uint32x4_t) __builtin_aarch64_usubwv4hi ((int32x4_t) __a,
2350 (int16x4_t) __b);
2351 }
2352
2353 __extension__ extern __inline uint64x2_t
2354 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2355 vsubw_u32 (uint64x2_t __a, uint32x2_t __b)
2356 {
2357 return (uint64x2_t) __builtin_aarch64_usubwv2si ((int64x2_t) __a,
2358 (int32x2_t) __b);
2359 }
2360
2361 __extension__ extern __inline int16x8_t
2362 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2363 vsubw_high_s8 (int16x8_t __a, int8x16_t __b)
2364 {
2365 return (int16x8_t) __builtin_aarch64_ssubw2v16qi (__a, __b);
2366 }
2367
2368 __extension__ extern __inline int32x4_t
2369 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2370 vsubw_high_s16 (int32x4_t __a, int16x8_t __b)
2371 {
2372 return (int32x4_t) __builtin_aarch64_ssubw2v8hi (__a, __b);
2373 }
2374
2375 __extension__ extern __inline int64x2_t
2376 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2377 vsubw_high_s32 (int64x2_t __a, int32x4_t __b)
2378 {
2379 return (int64x2_t) __builtin_aarch64_ssubw2v4si (__a, __b);
2380 }
2381
2382 __extension__ extern __inline uint16x8_t
2383 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2384 vsubw_high_u8 (uint16x8_t __a, uint8x16_t __b)
2385 {
2386 return (uint16x8_t) __builtin_aarch64_usubw2v16qi ((int16x8_t) __a,
2387 (int8x16_t) __b);
2388 }
2389
2390 __extension__ extern __inline uint32x4_t
2391 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2392 vsubw_high_u16 (uint32x4_t __a, uint16x8_t __b)
2393 {
2394 return (uint32x4_t) __builtin_aarch64_usubw2v8hi ((int32x4_t) __a,
2395 (int16x8_t) __b);
2396 }
2397
2398 __extension__ extern __inline uint64x2_t
2399 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2400 vsubw_high_u32 (uint64x2_t __a, uint32x4_t __b)
2401 {
2402 return (uint64x2_t) __builtin_aarch64_usubw2v4si ((int64x2_t) __a,
2403 (int32x4_t) __b);
2404 }
2405
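/* vqadd: saturating addition.  */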
2406 __extension__ extern __inline int8x8_t
2407 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2408 vqadd_s8 (int8x8_t __a, int8x8_t __b)
2409 {
2410 return (int8x8_t) __builtin_aarch64_sqaddv8qi (__a, __b);
2411 }
2412
2413 __extension__ extern __inline int16x4_t
2414 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2415 vqadd_s16 (int16x4_t __a, int16x4_t __b)
2416 {
2417 return (int16x4_t) __builtin_aarch64_sqaddv4hi (__a, __b);
2418 }
2419
2420 __extension__ extern __inline int32x2_t
2421 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2422 vqadd_s32 (int32x2_t __a, int32x2_t __b)
2423 {
2424 return (int32x2_t) __builtin_aarch64_sqaddv2si (__a, __b);
2425 }
2426
2427 __extension__ extern __inline int64x1_t
2428 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2429 vqadd_s64 (int64x1_t __a, int64x1_t __b)
2430 {
2431 return (int64x1_t) {__builtin_aarch64_sqadddi (__a[0], __b[0])};
2432 }
2433
2434 __extension__ extern __inline uint8x8_t
2435 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2436 vqadd_u8 (uint8x8_t __a, uint8x8_t __b)
2437 {
2438 return __builtin_aarch64_uqaddv8qi_uuu (__a, __b);
2439 }
2440
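/* vhsub: halving subtraction; each result lane is (__a - __b) >> 1,
   taken from the widened difference so it cannot overflow.  */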
2441 __extension__ extern __inline int8x8_t
2442 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2443 vhsub_s8 (int8x8_t __a, int8x8_t __b)
2444 {
2445 return (int8x8_t) __builtin_aarch64_shsubv8qi (__a, __b);
2446 }
2447
2448 __extension__ extern __inline int16x4_t
2449 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2450 vhsub_s16 (int16x4_t __a, int16x4_t __b)
2451 {
2452 return (int16x4_t) __builtin_aarch64_shsubv4hi (__a, __b);
2453 }
2454
2455 __extension__ extern __inline int32x2_t
2456 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2457 vhsub_s32 (int32x2_t __a, int32x2_t __b)
2458 {
2459 return (int32x2_t) __builtin_aarch64_shsubv2si (__a, __b);
2460 }
2461
2462 __extension__ extern __inline uint8x8_t
2463 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2464 vhsub_u8 (uint8x8_t __a, uint8x8_t __b)
2465 {
2466 return (uint8x8_t) __builtin_aarch64_uhsubv8qi ((int8x8_t) __a,
2467 (int8x8_t) __b);
2468 }
2469
2470 __extension__ extern __inline uint16x4_t
2471 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2472 vhsub_u16 (uint16x4_t __a, uint16x4_t __b)
2473 {
2474 return (uint16x4_t) __builtin_aarch64_uhsubv4hi ((int16x4_t) __a,
2475 (int16x4_t) __b);
2476 }
2477
2478 __extension__ extern __inline uint32x2_t
2479 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2480 vhsub_u32 (uint32x2_t __a, uint32x2_t __b)
2481 {
2482 return (uint32x2_t) __builtin_aarch64_uhsubv2si ((int32x2_t) __a,
2483 (int32x2_t) __b);
2484 }
2485
2486 __extension__ extern __inline int8x16_t
2487 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2488 vhsubq_s8 (int8x16_t __a, int8x16_t __b)
2489 {
2490 return (int8x16_t) __builtin_aarch64_shsubv16qi (__a, __b);
2491 }
2492
2493 __extension__ extern __inline int16x8_t
2494 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2495 vhsubq_s16 (int16x8_t __a, int16x8_t __b)
2496 {
2497 return (int16x8_t) __builtin_aarch64_shsubv8hi (__a, __b);
2498 }
2499
2500 __extension__ extern __inline int32x4_t
2501 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2502 vhsubq_s32 (int32x4_t __a, int32x4_t __b)
2503 {
2504 return (int32x4_t) __builtin_aarch64_shsubv4si (__a, __b);
2505 }
2506
2507 __extension__ extern __inline uint8x16_t
2508 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2509 vhsubq_u8 (uint8x16_t __a, uint8x16_t __b)
2510 {
2511 return (uint8x16_t) __builtin_aarch64_uhsubv16qi ((int8x16_t) __a,
2512 (int8x16_t) __b);
2513 }
2514
2515 __extension__ extern __inline uint16x8_t
2516 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2517 vhsubq_u16 (uint16x8_t __a, uint16x8_t __b)
2518 {
2519 return (uint16x8_t) __builtin_aarch64_uhsubv8hi ((int16x8_t) __a,
2520 (int16x8_t) __b);
2521 }
2522
2523 __extension__ extern __inline uint32x4_t
2524 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2525 vhsubq_u32 (uint32x4_t __a, uint32x4_t __b)
2526 {
2527 return (uint32x4_t) __builtin_aarch64_uhsubv4si ((int32x4_t) __a,
2528 (int32x4_t) __b);
2529 }
2530
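/* vsubhn: subtract and narrow; each result lane is the most
   significant half of the full-width difference.  */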
2531 __extension__ extern __inline int8x8_t
2532 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2533 vsubhn_s16 (int16x8_t __a, int16x8_t __b)
2534 {
2535 return (int8x8_t) __builtin_aarch64_subhnv8hi (__a, __b);
2536 }
2537
2538 __extension__ extern __inline int16x4_t
2539 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2540 vsubhn_s32 (int32x4_t __a, int32x4_t __b)
2541 {
2542 return (int16x4_t) __builtin_aarch64_subhnv4si (__a, __b);
2543 }
2544
2545 __extension__ extern __inline int32x2_t
2546 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2547 vsubhn_s64 (int64x2_t __a, int64x2_t __b)
2548 {
2549 return (int32x2_t) __builtin_aarch64_subhnv2di (__a, __b);
2550 }
2551
2552 __extension__ extern __inline uint8x8_t
2553 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2554 vsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
2555 {
2556 return (uint8x8_t) __builtin_aarch64_subhnv8hi ((int16x8_t) __a,
2557 (int16x8_t) __b);
2558 }
2559
2560 __extension__ extern __inline uint16x4_t
2561 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2562 vsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
2563 {
2564 return (uint16x4_t) __builtin_aarch64_subhnv4si ((int32x4_t) __a,
2565 (int32x4_t) __b);
2566 }
2567
2568 __extension__ extern __inline uint32x2_t
2569 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2570 vsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
2571 {
2572 return (uint32x2_t) __builtin_aarch64_subhnv2di ((int64x2_t) __a,
2573 (int64x2_t) __b);
2574 }
2575
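/* vrsubhn: rounding subtract and narrow; the difference is rounded
   before its most significant half is taken.  */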
2576 __extension__ extern __inline int8x8_t
2577 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2578 vrsubhn_s16 (int16x8_t __a, int16x8_t __b)
2579 {
2580 return (int8x8_t) __builtin_aarch64_rsubhnv8hi (__a, __b);
2581 }
2582
2583 __extension__ extern __inline int16x4_t
2584 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2585 vrsubhn_s32 (int32x4_t __a, int32x4_t __b)
2586 {
2587 return (int16x4_t) __builtin_aarch64_rsubhnv4si (__a, __b);
2588 }
2589
2590 __extension__ extern __inline int32x2_t
2591 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2592 vrsubhn_s64 (int64x2_t __a, int64x2_t __b)
2593 {
2594 return (int32x2_t) __builtin_aarch64_rsubhnv2di (__a, __b);
2595 }
2596
2597 __extension__ extern __inline uint8x8_t
2598 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2599 vrsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
2600 {
2601 return (uint8x8_t) __builtin_aarch64_rsubhnv8hi ((int16x8_t) __a,
2602 (int16x8_t) __b);
2603 }
2604
2605 __extension__ extern __inline uint16x4_t
2606 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2607 vrsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
2608 {
2609 return (uint16x4_t) __builtin_aarch64_rsubhnv4si ((int32x4_t) __a,
2610 (int32x4_t) __b);
2611 }
2612
2613 __extension__ extern __inline uint32x2_t
2614 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2615 vrsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
2616 {
2617 return (uint32x2_t) __builtin_aarch64_rsubhnv2di ((int64x2_t) __a,
2618 (int64x2_t) __b);
2619 }
2620
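/* vrsubhn_high: as vrsubhn, but the narrowed result is packed into the
   upper half of the return vector, with __a supplying the lower half.  */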
2621 __extension__ extern __inline int8x16_t
2622 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2623 vrsubhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
2624 {
2625 return (int8x16_t) __builtin_aarch64_rsubhn2v8hi (__a, __b, __c);
2626 }
2627
2628 __extension__ extern __inline int16x8_t
2629 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2630 vrsubhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
2631 {
2632 return (int16x8_t) __builtin_aarch64_rsubhn2v4si (__a, __b, __c);
2633 }
2634
2635 __extension__ extern __inline int32x4_t
2636 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2637 vrsubhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
2638 {
2639 return (int32x4_t) __builtin_aarch64_rsubhn2v2di (__a, __b, __c);
2640 }
2641
2642 __extension__ extern __inline uint8x16_t
2643 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2644 vrsubhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
2645 {
2646 return (uint8x16_t) __builtin_aarch64_rsubhn2v8hi ((int8x8_t) __a,
2647 (int16x8_t) __b,
2648 (int16x8_t) __c);
2649 }
2650
2651 __extension__ extern __inline uint16x8_t
2652 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2653 vrsubhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
2654 {
2655 return (uint16x8_t) __builtin_aarch64_rsubhn2v4si ((int16x4_t) __a,
2656 (int32x4_t) __b,
2657 (int32x4_t) __c);
2658 }
2659
2660 __extension__ extern __inline uint32x4_t
2661 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2662 vrsubhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
2663 {
2664 return (uint32x4_t) __builtin_aarch64_rsubhn2v2di ((int32x2_t) __a,
2665 (int64x2_t) __b,
2666 (int64x2_t) __c);
2667 }
2668
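/* vsubhn_high: as vsubhn, but the narrowed result forms the upper half
   of the return vector, with __a supplying the lower half.  */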
2669 __extension__ extern __inline int8x16_t
2670 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2671 vsubhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
2672 {
2673 return (int8x16_t) __builtin_aarch64_subhn2v8hi (__a, __b, __c);
2674 }
2675
2676 __extension__ extern __inline int16x8_t
2677 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2678 vsubhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
2679 {
2680 return (int16x8_t) __builtin_aarch64_subhn2v4si (__a, __b, __c);
2681 }
2682
2683 __extension__ extern __inline int32x4_t
2684 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2685 vsubhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
2686 {
2687 return (int32x4_t) __builtin_aarch64_subhn2v2di (__a, __b, __c);
2688 }
2689
2690 __extension__ extern __inline uint8x16_t
2691 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2692 vsubhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
2693 {
2694 return (uint8x16_t) __builtin_aarch64_subhn2v8hi ((int8x8_t) __a,
2695 (int16x8_t) __b,
2696 (int16x8_t) __c);
2697 }
2698
2699 __extension__ extern __inline uint16x8_t
2700 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2701 vsubhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
2702 {
2703 return (uint16x8_t) __builtin_aarch64_subhn2v4si ((int16x4_t) __a,
2704 (int32x4_t) __b,
2705 (int32x4_t) __c);
2706 }
2707
2708 __extension__ extern __inline uint32x4_t
2709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2710 vsubhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
2711 {
2712 return (uint32x4_t) __builtin_aarch64_subhn2v2di ((int32x2_t) __a,
2713 (int64x2_t) __b,
2714 (int64x2_t) __c);
2715 }
2716
2717 __extension__ extern __inline uint16x4_t
2718 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2719 vqadd_u16 (uint16x4_t __a, uint16x4_t __b)
2720 {
2721 return __builtin_aarch64_uqaddv4hi_uuu (__a, __b);
2722 }
2723
2724 __extension__ extern __inline uint32x2_t
2725 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2726 vqadd_u32 (uint32x2_t __a, uint32x2_t __b)
2727 {
2728 return __builtin_aarch64_uqaddv2si_uuu (__a, __b);
2729 }
2730
2731 __extension__ extern __inline uint64x1_t
2732 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2733 vqadd_u64 (uint64x1_t __a, uint64x1_t __b)
2734 {
2735 return (uint64x1_t) {__builtin_aarch64_uqadddi_uuu (__a[0], __b[0])};
2736 }
2737
2738 __extension__ extern __inline int8x16_t
2739 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2740 vqaddq_s8 (int8x16_t __a, int8x16_t __b)
2741 {
2742 return (int8x16_t) __builtin_aarch64_sqaddv16qi (__a, __b);
2743 }
2744
2745 __extension__ extern __inline int16x8_t
2746 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2747 vqaddq_s16 (int16x8_t __a, int16x8_t __b)
2748 {
2749 return (int16x8_t) __builtin_aarch64_sqaddv8hi (__a, __b);
2750 }
2751
2752 __extension__ extern __inline int32x4_t
2753 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2754 vqaddq_s32 (int32x4_t __a, int32x4_t __b)
2755 {
2756 return (int32x4_t) __builtin_aarch64_sqaddv4si (__a, __b);
2757 }
2758
2759 __extension__ extern __inline int64x2_t
2760 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2761 vqaddq_s64 (int64x2_t __a, int64x2_t __b)
2762 {
2763 return (int64x2_t) __builtin_aarch64_sqaddv2di (__a, __b);
2764 }
2765
2766 __extension__ extern __inline uint8x16_t
2767 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2768 vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
2769 {
2770 return __builtin_aarch64_uqaddv16qi_uuu (__a, __b);
2771 }
2772
2773 __extension__ extern __inline uint16x8_t
2774 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2775 vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
2776 {
2777 return __builtin_aarch64_uqaddv8hi_uuu (__a, __b);
2778 }
2779
2780 __extension__ extern __inline uint32x4_t
2781 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2782 vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
2783 {
2784 return __builtin_aarch64_uqaddv4si_uuu (__a, __b);
2785 }
2786
2787 __extension__ extern __inline uint64x2_t
2788 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2789 vqaddq_u64 (uint64x2_t __a, uint64x2_t __b)
2790 {
2791 return __builtin_aarch64_uqaddv2di_uuu (__a, __b);
2792 }
2793
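/* vqsub: saturating subtraction.  */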
2794 __extension__ extern __inline int8x8_t
2795 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2796 vqsub_s8 (int8x8_t __a, int8x8_t __b)
2797 {
2798 return (int8x8_t) __builtin_aarch64_sqsubv8qi (__a, __b);
2799 }
2800
2801 __extension__ extern __inline int16x4_t
2802 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2803 vqsub_s16 (int16x4_t __a, int16x4_t __b)
2804 {
2805 return (int16x4_t) __builtin_aarch64_sqsubv4hi (__a, __b);
2806 }
2807
2808 __extension__ extern __inline int32x2_t
2809 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2810 vqsub_s32 (int32x2_t __a, int32x2_t __b)
2811 {
2812 return (int32x2_t) __builtin_aarch64_sqsubv2si (__a, __b);
2813 }
2814
2815 __extension__ extern __inline int64x1_t
2816 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2817 vqsub_s64 (int64x1_t __a, int64x1_t __b)
2818 {
2819 return (int64x1_t) {__builtin_aarch64_sqsubdi (__a[0], __b[0])};
2820 }
2821
2822 __extension__ extern __inline uint8x8_t
2823 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2824 vqsub_u8 (uint8x8_t __a, uint8x8_t __b)
2825 {
2826 return __builtin_aarch64_uqsubv8qi_uuu (__a, __b);
2827 }
2828
2829 __extension__ extern __inline uint16x4_t
2830 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2831 vqsub_u16 (uint16x4_t __a, uint16x4_t __b)
2832 {
2833 return __builtin_aarch64_uqsubv4hi_uuu (__a, __b);
2834 }
2835
2836 __extension__ extern __inline uint32x2_t
2837 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2838 vqsub_u32 (uint32x2_t __a, uint32x2_t __b)
2839 {
2840 return __builtin_aarch64_uqsubv2si_uuu (__a, __b);
2841 }
2842
2843 __extension__ extern __inline uint64x1_t
2844 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2845 vqsub_u64 (uint64x1_t __a, uint64x1_t __b)
2846 {
2847 return (uint64x1_t) {__builtin_aarch64_uqsubdi_uuu (__a[0], __b[0])};
2848 }
2849
2850 __extension__ extern __inline int8x16_t
2851 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2852 vqsubq_s8 (int8x16_t __a, int8x16_t __b)
2853 {
2854 return (int8x16_t) __builtin_aarch64_sqsubv16qi (__a, __b);
2855 }
2856
2857 __extension__ extern __inline int16x8_t
2858 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2859 vqsubq_s16 (int16x8_t __a, int16x8_t __b)
2860 {
2861 return (int16x8_t) __builtin_aarch64_sqsubv8hi (__a, __b);
2862 }
2863
2864 __extension__ extern __inline int32x4_t
2865 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2866 vqsubq_s32 (int32x4_t __a, int32x4_t __b)
2867 {
2868 return (int32x4_t) __builtin_aarch64_sqsubv4si (__a, __b);
2869 }
2870
2871 __extension__ extern __inline int64x2_t
2872 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2873 vqsubq_s64 (int64x2_t __a, int64x2_t __b)
2874 {
2875 return (int64x2_t) __builtin_aarch64_sqsubv2di (__a, __b);
2876 }
2877
2878 __extension__ extern __inline uint8x16_t
2879 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2880 vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
2881 {
2882 return __builtin_aarch64_uqsubv16qi_uuu (__a, __b);
2883 }
2884
2885 __extension__ extern __inline uint16x8_t
2886 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2887 vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
2888 {
2889 return __builtin_aarch64_uqsubv8hi_uuu (__a, __b);
2890 }
2891
2892 __extension__ extern __inline uint32x4_t
2893 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2894 vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
2895 {
2896 return __builtin_aarch64_uqsubv4si_uuu (__a, __b);
2897 }
2898
2899 __extension__ extern __inline uint64x2_t
2900 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2901 vqsubq_u64 (uint64x2_t __a, uint64x2_t __b)
2902 {
2903 return __builtin_aarch64_uqsubv2di_uuu (__a, __b);
2904 }
2905
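/* vqneg: saturating negation; the most negative value saturates to the
   most positive.  */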
2906 __extension__ extern __inline int8x8_t
2907 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2908 vqneg_s8 (int8x8_t __a)
2909 {
2910 return (int8x8_t) __builtin_aarch64_sqnegv8qi (__a);
2911 }
2912
2913 __extension__ extern __inline int16x4_t
2914 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2915 vqneg_s16 (int16x4_t __a)
2916 {
2917 return (int16x4_t) __builtin_aarch64_sqnegv4hi (__a);
2918 }
2919
2920 __extension__ extern __inline int32x2_t
2921 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2922 vqneg_s32 (int32x2_t __a)
2923 {
2924 return (int32x2_t) __builtin_aarch64_sqnegv2si (__a);
2925 }
2926
2927 __extension__ extern __inline int64x1_t
2928 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2929 vqneg_s64 (int64x1_t __a)
2930 {
2931 return (int64x1_t) {__builtin_aarch64_sqnegdi (__a[0])};
2932 }
2933
2934 __extension__ extern __inline int8x16_t
2935 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2936 vqnegq_s8 (int8x16_t __a)
2937 {
2938 return (int8x16_t) __builtin_aarch64_sqnegv16qi (__a);
2939 }
2940
2941 __extension__ extern __inline int16x8_t
2942 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2943 vqnegq_s16 (int16x8_t __a)
2944 {
2945 return (int16x8_t) __builtin_aarch64_sqnegv8hi (__a);
2946 }
2947
2948 __extension__ extern __inline int32x4_t
2949 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2950 vqnegq_s32 (int32x4_t __a)
2951 {
2952 return (int32x4_t) __builtin_aarch64_sqnegv4si (__a);
2953 }
2954
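/* vqabs: saturating absolute value; the most negative value saturates
   to the most positive.  */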
2955 __extension__ extern __inline int8x8_t
2956 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2957 vqabs_s8 (int8x8_t __a)
2958 {
2959 return (int8x8_t) __builtin_aarch64_sqabsv8qi (__a);
2960 }
2961
2962 __extension__ extern __inline int16x4_t
2963 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2964 vqabs_s16 (int16x4_t __a)
2965 {
2966 return (int16x4_t) __builtin_aarch64_sqabsv4hi (__a);
2967 }
2968
2969 __extension__ extern __inline int32x2_t
2970 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2971 vqabs_s32 (int32x2_t __a)
2972 {
2973 return (int32x2_t) __builtin_aarch64_sqabsv2si (__a);
2974 }
2975
2976 __extension__ extern __inline int64x1_t
2977 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2978 vqabs_s64 (int64x1_t __a)
2979 {
2980 return (int64x1_t) {__builtin_aarch64_sqabsdi (__a[0])};
2981 }
2982
2983 __extension__ extern __inline int8x16_t
2984 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2985 vqabsq_s8 (int8x16_t __a)
2986 {
2987 return (int8x16_t) __builtin_aarch64_sqabsv16qi (__a);
2988 }
2989
2990 __extension__ extern __inline int16x8_t
2991 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2992 vqabsq_s16 (int16x8_t __a)
2993 {
2994 return (int16x8_t) __builtin_aarch64_sqabsv8hi (__a);
2995 }
2996
2997 __extension__ extern __inline int32x4_t
2998 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2999 vqabsq_s32 (int32x4_t __a)
3000 {
3001 return (int32x4_t) __builtin_aarch64_sqabsv4si (__a);
3002 }
3003
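/* vqdmulh: saturating doubling multiply returning the high half of
   2 * __a * __b per lane.  */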
3004 __extension__ extern __inline int16x4_t
3005 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3006 vqdmulh_s16 (int16x4_t __a, int16x4_t __b)
3007 {
3008 return (int16x4_t) __builtin_aarch64_sqdmulhv4hi (__a, __b);
3009 }
3010
3011 __extension__ extern __inline int32x2_t
3012 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3013 vqdmulh_s32 (int32x2_t __a, int32x2_t __b)
3014 {
3015 return (int32x2_t) __builtin_aarch64_sqdmulhv2si (__a, __b);
3016 }
3017
3018 __extension__ extern __inline int16x8_t
3019 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3020 vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
3021 {
3022 return (int16x8_t) __builtin_aarch64_sqdmulhv8hi (__a, __b);
3023 }
3024
3025 __extension__ extern __inline int32x4_t
3026 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3027 vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
3028 {
3029 return (int32x4_t) __builtin_aarch64_sqdmulhv4si (__a, __b);
3030 }
3031
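/* vqrdmulh: rounding variant of vqdmulh.  */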
3032 __extension__ extern __inline int16x4_t
3033 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3034 vqrdmulh_s16 (int16x4_t __a, int16x4_t __b)
3035 {
3036 return (int16x4_t) __builtin_aarch64_sqrdmulhv4hi (__a, __b);
3037 }
3038
3039 __extension__ extern __inline int32x2_t
3040 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3041 vqrdmulh_s32 (int32x2_t __a, int32x2_t __b)
3042 {
3043 return (int32x2_t) __builtin_aarch64_sqrdmulhv2si (__a, __b);
3044 }
3045
3046 __extension__ extern __inline int16x8_t
3047 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3048 vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
3049 {
3050 return (int16x8_t) __builtin_aarch64_sqrdmulhv8hi (__a, __b);
3051 }
3052
3053 __extension__ extern __inline int32x4_t
3054 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3055 vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
3056 {
3057 return (int32x4_t) __builtin_aarch64_sqrdmulhv4si (__a, __b);
3058 }
3059
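/* vcreate: build a 64-bit vector from the bit pattern of a uint64_t.  */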
3060 __extension__ extern __inline int8x8_t
3061 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3062 vcreate_s8 (uint64_t __a)
3063 {
3064 return (int8x8_t) __a;
3065 }
3066
3067 __extension__ extern __inline int16x4_t
3068 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3069 vcreate_s16 (uint64_t __a)
3070 {
3071 return (int16x4_t) __a;
3072 }
3073
3074 __extension__ extern __inline int32x2_t
3075 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3076 vcreate_s32 (uint64_t __a)
3077 {
3078 return (int32x2_t) __a;
3079 }
3080
3081 __extension__ extern __inline int64x1_t
3082 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3083 vcreate_s64 (uint64_t __a)
3084 {
3085 return (int64x1_t) {__a};
3086 }
3087
3088 __extension__ extern __inline float16x4_t
3089 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3090 vcreate_f16 (uint64_t __a)
3091 {
3092 return (float16x4_t) __a;
3093 }
3094
3095 __extension__ extern __inline float32x2_t
3096 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3097 vcreate_f32 (uint64_t __a)
3098 {
3099 return (float32x2_t) __a;
3100 }
3101
3102 __extension__ extern __inline uint8x8_t
3103 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3104 vcreate_u8 (uint64_t __a)
3105 {
3106 return (uint8x8_t) __a;
3107 }
3108
3109 __extension__ extern __inline uint16x4_t
3110 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3111 vcreate_u16 (uint64_t __a)
3112 {
3113 return (uint16x4_t) __a;
3114 }
3115
3116 __extension__ extern __inline uint32x2_t
3117 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3118 vcreate_u32 (uint64_t __a)
3119 {
3120 return (uint32x2_t) __a;
3121 }
3122
3123 __extension__ extern __inline uint64x1_t
3124 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3125 vcreate_u64 (uint64_t __a)
3126 {
3127 return (uint64x1_t) {__a};
3128 }
3129
3130 __extension__ extern __inline float64x1_t
3131 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3132 vcreate_f64 (uint64_t __a)
3133 {
3134 return (float64x1_t) __a;
3135 }
3136
3137 __extension__ extern __inline poly8x8_t
3138 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3139 vcreate_p8 (uint64_t __a)
3140 {
3141 return (poly8x8_t) __a;
3142 }
3143
3144 __extension__ extern __inline poly16x4_t
3145 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3146 vcreate_p16 (uint64_t __a)
3147 {
3148 return (poly16x4_t) __a;
3149 }
3150
3151 __extension__ extern __inline poly64x1_t
3152 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3153 vcreate_p64 (uint64_t __a)
3154 {
3155 return (poly64x1_t) __a;
3156 }
3157
3158 /* vget_lane */
3159
3160 __extension__ extern __inline float16_t
3161 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3162 vget_lane_f16 (float16x4_t __a, const int __b)
3163 {
3164 return __aarch64_vget_lane_any (__a, __b);
3165 }
3166
3167 __extension__ extern __inline float32_t
3168 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3169 vget_lane_f32 (float32x2_t __a, const int __b)
3170 {
3171 return __aarch64_vget_lane_any (__a, __b);
3172 }
3173
3174 __extension__ extern __inline float64_t
3175 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3176 vget_lane_f64 (float64x1_t __a, const int __b)
3177 {
3178 return __aarch64_vget_lane_any (__a, __b);
3179 }
3180
3181 __extension__ extern __inline poly8_t
3182 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3183 vget_lane_p8 (poly8x8_t __a, const int __b)
3184 {
3185 return __aarch64_vget_lane_any (__a, __b);
3186 }
3187
3188 __extension__ extern __inline poly16_t
3189 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3190 vget_lane_p16 (poly16x4_t __a, const int __b)
3191 {
3192 return __aarch64_vget_lane_any (__a, __b);
3193 }
3194
3195 __extension__ extern __inline poly64_t
3196 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3197 vget_lane_p64 (poly64x1_t __a, const int __b)
3198 {
3199 return __aarch64_vget_lane_any (__a, __b);
3200 }
3201
3202 __extension__ extern __inline int8_t
3203 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3204 vget_lane_s8 (int8x8_t __a, const int __b)
3205 {
3206 return __aarch64_vget_lane_any (__a, __b);
3207 }
3208
3209 __extension__ extern __inline int16_t
3210 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3211 vget_lane_s16 (int16x4_t __a, const int __b)
3212 {
3213 return __aarch64_vget_lane_any (__a, __b);
3214 }
3215
3216 __extension__ extern __inline int32_t
3217 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3218 vget_lane_s32 (int32x2_t __a, const int __b)
3219 {
3220 return __aarch64_vget_lane_any (__a, __b);
3221 }
3222
3223 __extension__ extern __inline int64_t
3224 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3225 vget_lane_s64 (int64x1_t __a, const int __b)
3226 {
3227 return __aarch64_vget_lane_any (__a, __b);
3228 }
3229
3230 __extension__ extern __inline uint8_t
3231 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3232 vget_lane_u8 (uint8x8_t __a, const int __b)
3233 {
3234 return __aarch64_vget_lane_any (__a, __b);
3235 }
3236
3237 __extension__ extern __inline uint16_t
3238 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3239 vget_lane_u16 (uint16x4_t __a, const int __b)
3240 {
3241 return __aarch64_vget_lane_any (__a, __b);
3242 }
3243
3244 __extension__ extern __inline uint32_t
3245 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3246 vget_lane_u32 (uint32x2_t __a, const int __b)
3247 {
3248 return __aarch64_vget_lane_any (__a, __b);
3249 }
3250
3251 __extension__ extern __inline uint64_t
3252 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3253 vget_lane_u64 (uint64x1_t __a, const int __b)
3254 {
3255 return __aarch64_vget_lane_any (__a, __b);
3256 }
3257
3258 /* vgetq_lane */
3259
3260 __extension__ extern __inline float16_t
3261 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3262 vgetq_lane_f16 (float16x8_t __a, const int __b)
3263 {
3264 return __aarch64_vget_lane_any (__a, __b);
3265 }
3266
3267 __extension__ extern __inline float32_t
3268 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3269 vgetq_lane_f32 (float32x4_t __a, const int __b)
3270 {
3271 return __aarch64_vget_lane_any (__a, __b);
3272 }
3273
3274 __extension__ extern __inline float64_t
3275 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3276 vgetq_lane_f64 (float64x2_t __a, const int __b)
3277 {
3278 return __aarch64_vget_lane_any (__a, __b);
3279 }
3280
3281 __extension__ extern __inline poly8_t
3282 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3283 vgetq_lane_p8 (poly8x16_t __a, const int __b)
3284 {
3285 return __aarch64_vget_lane_any (__a, __b);
3286 }
3287
3288 __extension__ extern __inline poly16_t
3289 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3290 vgetq_lane_p16 (poly16x8_t __a, const int __b)
3291 {
3292 return __aarch64_vget_lane_any (__a, __b);
3293 }
3294
3295 __extension__ extern __inline poly64_t
3296 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3297 vgetq_lane_p64 (poly64x2_t __a, const int __b)
3298 {
3299 return __aarch64_vget_lane_any (__a, __b);
3300 }
3301
3302 __extension__ extern __inline int8_t
3303 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3304 vgetq_lane_s8 (int8x16_t __a, const int __b)
3305 {
3306 return __aarch64_vget_lane_any (__a, __b);
3307 }
3308
3309 __extension__ extern __inline int16_t
3310 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3311 vgetq_lane_s16 (int16x8_t __a, const int __b)
3312 {
3313 return __aarch64_vget_lane_any (__a, __b);
3314 }
3315
3316 __extension__ extern __inline int32_t
3317 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3318 vgetq_lane_s32 (int32x4_t __a, const int __b)
3319 {
3320 return __aarch64_vget_lane_any (__a, __b);
3321 }
3322
3323 __extension__ extern __inline int64_t
3324 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3325 vgetq_lane_s64 (int64x2_t __a, const int __b)
3326 {
3327 return __aarch64_vget_lane_any (__a, __b);
3328 }
3329
3330 __extension__ extern __inline uint8_t
3331 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3332 vgetq_lane_u8 (uint8x16_t __a, const int __b)
3333 {
3334 return __aarch64_vget_lane_any (__a, __b);
3335 }
3336
3337 __extension__ extern __inline uint16_t
3338 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3339 vgetq_lane_u16 (uint16x8_t __a, const int __b)
3340 {
3341 return __aarch64_vget_lane_any (__a, __b);
3342 }
3343
3344 __extension__ extern __inline uint32_t
3345 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3346 vgetq_lane_u32 (uint32x4_t __a, const int __b)
3347 {
3348 return __aarch64_vget_lane_any (__a, __b);
3349 }
3350
3351 __extension__ extern __inline uint64_t
3352 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3353 vgetq_lane_u64 (uint64x2_t __a, const int __b)
3354 {
3355 return __aarch64_vget_lane_any (__a, __b);
3356 }
3357
3358 /* vreinterpret */
3359
3360 __extension__ extern __inline poly8x8_t
3361 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3362 vreinterpret_p8_f16 (float16x4_t __a)
3363 {
3364 return (poly8x8_t) __a;
3365 }
3366
3367 __extension__ extern __inline poly8x8_t
3368 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3369 vreinterpret_p8_f64 (float64x1_t __a)
3370 {
3371 return (poly8x8_t) __a;
3372 }
3373
3374 __extension__ extern __inline poly8x8_t
3375 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3376 vreinterpret_p8_s8 (int8x8_t __a)
3377 {
3378 return (poly8x8_t) __a;
3379 }
3380
3381 __extension__ extern __inline poly8x8_t
3382 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3383 vreinterpret_p8_s16 (int16x4_t __a)
3384 {
3385 return (poly8x8_t) __a;
3386 }
3387
3388 __extension__ extern __inline poly8x8_t
3389 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3390 vreinterpret_p8_s32 (int32x2_t __a)
3391 {
3392 return (poly8x8_t) __a;
3393 }
3394
3395 __extension__ extern __inline poly8x8_t
3396 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3397 vreinterpret_p8_s64 (int64x1_t __a)
3398 {
3399 return (poly8x8_t) __a;
3400 }
3401
3402 __extension__ extern __inline poly8x8_t
3403 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3404 vreinterpret_p8_f32 (float32x2_t __a)
3405 {
3406 return (poly8x8_t) __a;
3407 }
3408
3409 __extension__ extern __inline poly8x8_t
3410 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3411 vreinterpret_p8_u8 (uint8x8_t __a)
3412 {
3413 return (poly8x8_t) __a;
3414 }
3415
3416 __extension__ extern __inline poly8x8_t
3417 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3418 vreinterpret_p8_u16 (uint16x4_t __a)
3419 {
3420 return (poly8x8_t) __a;
3421 }
3422
3423 __extension__ extern __inline poly8x8_t
3424 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3425 vreinterpret_p8_u32 (uint32x2_t __a)
3426 {
3427 return (poly8x8_t) __a;
3428 }
3429
3430 __extension__ extern __inline poly8x8_t
3431 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3432 vreinterpret_p8_u64 (uint64x1_t __a)
3433 {
3434 return (poly8x8_t) __a;
3435 }
3436
3437 __extension__ extern __inline poly8x8_t
3438 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3439 vreinterpret_p8_p16 (poly16x4_t __a)
3440 {
3441 return (poly8x8_t) __a;
3442 }
3443
3444 __extension__ extern __inline poly8x8_t
3445 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3446 vreinterpret_p8_p64 (poly64x1_t __a)
3447 {
3448 return (poly8x8_t) __a;
3449 }
3450
3451 __extension__ extern __inline poly8x16_t
3452 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3453 vreinterpretq_p8_f64 (float64x2_t __a)
3454 {
3455 return (poly8x16_t) __a;
3456 }
3457
3458 __extension__ extern __inline poly8x16_t
3459 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3460 vreinterpretq_p8_s8 (int8x16_t __a)
3461 {
3462 return (poly8x16_t) __a;
3463 }
3464
3465 __extension__ extern __inline poly8x16_t
3466 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3467 vreinterpretq_p8_s16 (int16x8_t __a)
3468 {
3469 return (poly8x16_t) __a;
3470 }
3471
3472 __extension__ extern __inline poly8x16_t
3473 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3474 vreinterpretq_p8_s32 (int32x4_t __a)
3475 {
3476 return (poly8x16_t) __a;
3477 }
3478
3479 __extension__ extern __inline poly8x16_t
3480 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3481 vreinterpretq_p8_s64 (int64x2_t __a)
3482 {
3483 return (poly8x16_t) __a;
3484 }
3485
3486 __extension__ extern __inline poly8x16_t
3487 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3488 vreinterpretq_p8_f16 (float16x8_t __a)
3489 {
3490 return (poly8x16_t) __a;
3491 }
3492
3493 __extension__ extern __inline poly8x16_t
3494 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3495 vreinterpretq_p8_f32 (float32x4_t __a)
3496 {
3497 return (poly8x16_t) __a;
3498 }
3499
3500 __extension__ extern __inline poly8x16_t
3501 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3502 vreinterpretq_p8_u8 (uint8x16_t __a)
3503 {
3504 return (poly8x16_t) __a;
3505 }
3506
3507 __extension__ extern __inline poly8x16_t
3508 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3509 vreinterpretq_p8_u16 (uint16x8_t __a)
3510 {
3511 return (poly8x16_t) __a;
3512 }
3513
3514 __extension__ extern __inline poly8x16_t
3515 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3516 vreinterpretq_p8_u32 (uint32x4_t __a)
3517 {
3518 return (poly8x16_t) __a;
3519 }
3520
3521 __extension__ extern __inline poly8x16_t
3522 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3523 vreinterpretq_p8_u64 (uint64x2_t __a)
3524 {
3525 return (poly8x16_t) __a;
3526 }
3527
3528 __extension__ extern __inline poly8x16_t
3529 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3530 vreinterpretq_p8_p16 (poly16x8_t __a)
3531 {
3532 return (poly8x16_t) __a;
3533 }
3534
3535 __extension__ extern __inline poly8x16_t
3536 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3537 vreinterpretq_p8_p64 (poly64x2_t __a)
3538 {
3539 return (poly8x16_t) __a;
3540 }
3541
3542 __extension__ extern __inline poly8x16_t
3543 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3544 vreinterpretq_p8_p128 (poly128_t __a)
3545 {
3546 return (poly8x16_t) __a;
3547 }
3548
3549 __extension__ extern __inline poly16x4_t
3550 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3551 vreinterpret_p16_f16 (float16x4_t __a)
3552 {
3553 return (poly16x4_t) __a;
3554 }
3555
3556 __extension__ extern __inline poly16x4_t
3557 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3558 vreinterpret_p16_f64 (float64x1_t __a)
3559 {
3560 return (poly16x4_t) __a;
3561 }
3562
3563 __extension__ extern __inline poly16x4_t
3564 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3565 vreinterpret_p16_s8 (int8x8_t __a)
3566 {
3567 return (poly16x4_t) __a;
3568 }
3569
3570 __extension__ extern __inline poly16x4_t
3571 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3572 vreinterpret_p16_s16 (int16x4_t __a)
3573 {
3574 return (poly16x4_t) __a;
3575 }
3576
3577 __extension__ extern __inline poly16x4_t
3578 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3579 vreinterpret_p16_s32 (int32x2_t __a)
3580 {
3581 return (poly16x4_t) __a;
3582 }
3583
3584 __extension__ extern __inline poly16x4_t
3585 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3586 vreinterpret_p16_s64 (int64x1_t __a)
3587 {
3588 return (poly16x4_t) __a;
3589 }
3590
3591 __extension__ extern __inline poly16x4_t
3592 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3593 vreinterpret_p16_f32 (float32x2_t __a)
3594 {
3595 return (poly16x4_t) __a;
3596 }
3597
3598 __extension__ extern __inline poly16x4_t
3599 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3600 vreinterpret_p16_u8 (uint8x8_t __a)
3601 {
3602 return (poly16x4_t) __a;
3603 }
3604
3605 __extension__ extern __inline poly16x4_t
3606 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3607 vreinterpret_p16_u16 (uint16x4_t __a)
3608 {
3609 return (poly16x4_t) __a;
3610 }
3611
3612 __extension__ extern __inline poly16x4_t
3613 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3614 vreinterpret_p16_u32 (uint32x2_t __a)
3615 {
3616 return (poly16x4_t) __a;
3617 }
3618
3619 __extension__ extern __inline poly16x4_t
3620 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3621 vreinterpret_p16_u64 (uint64x1_t __a)
3622 {
3623 return (poly16x4_t) __a;
3624 }
3625
3626 __extension__ extern __inline poly16x4_t
3627 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3628 vreinterpret_p16_p8 (poly8x8_t __a)
3629 {
3630 return (poly16x4_t) __a;
3631 }
3632
3633 __extension__ extern __inline poly16x4_t
3634 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3635 vreinterpret_p16_p64 (poly64x1_t __a)
3636 {
3637 return (poly16x4_t) __a;
3638 }
3639
3640 __extension__ extern __inline poly16x8_t
3641 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3642 vreinterpretq_p16_f64 (float64x2_t __a)
3643 {
3644 return (poly16x8_t) __a;
3645 }
3646
3647 __extension__ extern __inline poly16x8_t
3648 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3649 vreinterpretq_p16_s8 (int8x16_t __a)
3650 {
3651 return (poly16x8_t) __a;
3652 }
3653
3654 __extension__ extern __inline poly16x8_t
3655 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3656 vreinterpretq_p16_s16 (int16x8_t __a)
3657 {
3658 return (poly16x8_t) __a;
3659 }
3660
3661 __extension__ extern __inline poly16x8_t
3662 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3663 vreinterpretq_p16_s32 (int32x4_t __a)
3664 {
3665 return (poly16x8_t) __a;
3666 }
3667
3668 __extension__ extern __inline poly16x8_t
3669 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3670 vreinterpretq_p16_s64 (int64x2_t __a)
3671 {
3672 return (poly16x8_t) __a;
3673 }
3674
3675 __extension__ extern __inline poly16x8_t
3676 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3677 vreinterpretq_p16_f16 (float16x8_t __a)
3678 {
3679 return (poly16x8_t) __a;
3680 }
3681
3682 __extension__ extern __inline poly16x8_t
3683 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3684 vreinterpretq_p16_f32 (float32x4_t __a)
3685 {
3686 return (poly16x8_t) __a;
3687 }
3688
3689 __extension__ extern __inline poly16x8_t
3690 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3691 vreinterpretq_p16_u8 (uint8x16_t __a)
3692 {
3693 return (poly16x8_t) __a;
3694 }
3695
3696 __extension__ extern __inline poly16x8_t
3697 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3698 vreinterpretq_p16_u16 (uint16x8_t __a)
3699 {
3700 return (poly16x8_t) __a;
3701 }
3702
3703 __extension__ extern __inline poly16x8_t
3704 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3705 vreinterpretq_p16_u32 (uint32x4_t __a)
3706 {
3707 return (poly16x8_t) __a;
3708 }
3709
3710 __extension__ extern __inline poly16x8_t
3711 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3712 vreinterpretq_p16_u64 (uint64x2_t __a)
3713 {
3714 return (poly16x8_t) __a;
3715 }
3716
3717 __extension__ extern __inline poly16x8_t
3718 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3719 vreinterpretq_p16_p8 (poly8x16_t __a)
3720 {
3721 return (poly16x8_t) __a;
3722 }
3723
3724 __extension__ extern __inline poly16x8_t
3725 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3726 vreinterpretq_p16_p64 (poly64x2_t __a)
3727 {
3728 return (poly16x8_t) __a;
3729 }
3730
3731 __extension__ extern __inline poly16x8_t
3732 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3733 vreinterpretq_p16_p128 (poly128_t __a)
3734 {
3735 return (poly16x8_t) __a;
3736 }
3737
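/* Reinterpret casts returning poly64x1_t and poly64x2_t.  */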
3738 __extension__ extern __inline poly64x1_t
3739 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3740 vreinterpret_p64_f16 (float16x4_t __a)
3741 {
3742 return (poly64x1_t) __a;
3743 }
3744
3745 __extension__ extern __inline poly64x1_t
3746 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3747 vreinterpret_p64_f64 (float64x1_t __a)
3748 {
3749 return (poly64x1_t) __a;
3750 }
3751
3752 __extension__ extern __inline poly64x1_t
3753 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3754 vreinterpret_p64_s8 (int8x8_t __a)
3755 {
3756 return (poly64x1_t) __a;
3757 }
3758
3759 __extension__ extern __inline poly64x1_t
3760 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3761 vreinterpret_p64_s16 (int16x4_t __a)
3762 {
3763 return (poly64x1_t) __a;
3764 }
3765
3766 __extension__ extern __inline poly64x1_t
3767 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3768 vreinterpret_p64_s32 (int32x2_t __a)
3769 {
3770 return (poly64x1_t) __a;
3771 }
3772
3773 __extension__ extern __inline poly64x1_t
3774 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3775 vreinterpret_p64_s64 (int64x1_t __a)
3776 {
3777 return (poly64x1_t) __a;
3778 }
3779
3780 __extension__ extern __inline poly64x1_t
3781 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3782 vreinterpret_p64_f32 (float32x2_t __a)
3783 {
3784 return (poly64x1_t) __a;
3785 }
3786
3787 __extension__ extern __inline poly64x1_t
3788 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3789 vreinterpret_p64_u8 (uint8x8_t __a)
3790 {
3791 return (poly64x1_t) __a;
3792 }
3793
3794 __extension__ extern __inline poly64x1_t
3795 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3796 vreinterpret_p64_u16 (uint16x4_t __a)
3797 {
3798 return (poly64x1_t) __a;
3799 }
3800
3801 __extension__ extern __inline poly64x1_t
3802 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3803 vreinterpret_p64_u32 (uint32x2_t __a)
3804 {
3805 return (poly64x1_t) __a;
3806 }
3807
3808 __extension__ extern __inline poly64x1_t
3809 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3810 vreinterpret_p64_u64 (uint64x1_t __a)
3811 {
3812 return (poly64x1_t) __a;
3813 }
3814
3815 __extension__ extern __inline poly64x1_t
3816 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3817 vreinterpret_p64_p8 (poly8x8_t __a)
3818 {
3819 return (poly64x1_t) __a;
3820 }
3821
3822 __extension__ extern __inline poly64x1_t
3823 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3824 vreinterpret_p64_p16 (poly16x4_t __a)
3825 {
3826 return (poly64x1_t) __a;
3827 }
3828
3829 __extension__ extern __inline poly64x2_t
3830 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3831 vreinterpretq_p64_f64 (float64x2_t __a)
3832 {
3833 return (poly64x2_t) __a;
3834 }
3835
3836 __extension__ extern __inline poly64x2_t
3837 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3838 vreinterpretq_p64_s8 (int8x16_t __a)
3839 {
3840 return (poly64x2_t) __a;
3841 }
3842
3843 __extension__ extern __inline poly64x2_t
3844 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3845 vreinterpretq_p64_s16 (int16x8_t __a)
3846 {
3847 return (poly64x2_t) __a;
3848 }
3849
3850 __extension__ extern __inline poly64x2_t
3851 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3852 vreinterpretq_p64_s32 (int32x4_t __a)
3853 {
3854 return (poly64x2_t) __a;
3855 }
3856
3857 __extension__ extern __inline poly64x2_t
3858 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3859 vreinterpretq_p64_s64 (int64x2_t __a)
3860 {
3861 return (poly64x2_t) __a;
3862 }
3863
3864 __extension__ extern __inline poly64x2_t
3865 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3866 vreinterpretq_p64_f16 (float16x8_t __a)
3867 {
3868 return (poly64x2_t) __a;
3869 }
3870
3871 __extension__ extern __inline poly64x2_t
3872 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3873 vreinterpretq_p64_f32 (float32x4_t __a)
3874 {
3875 return (poly64x2_t) __a;
3876 }
3877
3878 __extension__ extern __inline poly64x2_t
3879 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3880 vreinterpretq_p64_p128 (poly128_t __a)
3881 {
3882 return (poly64x2_t) __a;
3883 }
3884
3885 __extension__ extern __inline poly64x2_t
3886 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3887 vreinterpretq_p64_u8 (uint8x16_t __a)
3888 {
3889 return (poly64x2_t) __a;
3890 }
3891
3892 __extension__ extern __inline poly64x2_t
3893 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3894 vreinterpretq_p64_u16 (uint16x8_t __a)
3895 {
3896 return (poly64x2_t) __a;
3897 }
3898
3899 __extension__ extern __inline poly64x2_t
3900 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3901 vreinterpretq_p64_p16 (poly16x8_t __a)
3902 {
3903 return (poly64x2_t) __a;
3904 }
3905
3906 __extension__ extern __inline poly64x2_t
3907 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3908 vreinterpretq_p64_u32 (uint32x4_t __a)
3909 {
3910 return (poly64x2_t) __a;
3911 }
3912
3913 __extension__ extern __inline poly64x2_t
3914 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3915 vreinterpretq_p64_u64 (uint64x2_t __a)
3916 {
3917 return (poly64x2_t) __a;
3918 }
3919
3920 __extension__ extern __inline poly64x2_t
3921 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3922 vreinterpretq_p64_p8 (poly8x16_t __a)
3923 {
3924 return (poly64x2_t) __a;
3925 }
3926
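/* Reinterpret casts returning poly128_t (128-bit "q" forms only).  */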
3927 __extension__ extern __inline poly128_t
3928 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3929 vreinterpretq_p128_p8 (poly8x16_t __a)
3930 {
3931 return (poly128_t) __a;
3932 }
3933
3934 __extension__ extern __inline poly128_t
3935 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3936 vreinterpretq_p128_p16 (poly16x8_t __a)
3937 {
3938 return (poly128_t) __a;
3939 }
3940
3941 __extension__ extern __inline poly128_t
3942 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3943 vreinterpretq_p128_f16 (float16x8_t __a)
3944 {
3945 return (poly128_t) __a;
3946 }
3947
3948 __extension__ extern __inline poly128_t
3949 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3950 vreinterpretq_p128_f32 (float32x4_t __a)
3951 {
3952 return (poly128_t) __a;
3953 }
3954
3955 __extension__ extern __inline poly128_t
3956 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3957 vreinterpretq_p128_p64 (poly64x2_t __a)
3958 {
3959 return (poly128_t) __a;
3960 }
3961
3962 __extension__ extern __inline poly128_t
3963 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3964 vreinterpretq_p128_s64 (int64x2_t __a)
3965 {
3966 return (poly128_t) __a;
3967 }
3968
3969 __extension__ extern __inline poly128_t
3970 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3971 vreinterpretq_p128_u64 (uint64x2_t __a)
3972 {
3973 return (poly128_t) __a;
3974 }
3975
3976 __extension__ extern __inline poly128_t
3977 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3978 vreinterpretq_p128_s8 (int8x16_t __a)
3979 {
3980 return (poly128_t) __a;
3981 }
3982
3983 __extension__ extern __inline poly128_t
3984 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3985 vreinterpretq_p128_s16 (int16x8_t __a)
3986 {
3987 return (poly128_t) __a;
3988 }
3989
3990 __extension__ extern __inline poly128_t
3991 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3992 vreinterpretq_p128_s32 (int32x4_t __a)
3993 {
3994 return (poly128_t) __a;
3995 }
3996
3997 __extension__ extern __inline poly128_t
3998 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
3999 vreinterpretq_p128_u8 (uint8x16_t __a)
4000 {
4001 return (poly128_t) __a;
4002 }
4003
4004 __extension__ extern __inline poly128_t
4005 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4006 vreinterpretq_p128_u16 (uint16x8_t __a)
4007 {
4008 return (poly128_t) __a;
4009 }
4010
4011 __extension__ extern __inline poly128_t
4012 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4013 vreinterpretq_p128_u32 (uint32x4_t __a)
4014 {
4015 return (poly128_t) __a;
4016 }
4017
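/* Reinterpret casts returning float16x4_t and float16x8_t.  */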
4018 __extension__ extern __inline float16x4_t
4019 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4020 vreinterpret_f16_f64 (float64x1_t __a)
4021 {
4022 return (float16x4_t) __a;
4023 }
4024
4025 __extension__ extern __inline float16x4_t
4026 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4027 vreinterpret_f16_s8 (int8x8_t __a)
4028 {
4029 return (float16x4_t) __a;
4030 }
4031
4032 __extension__ extern __inline float16x4_t
4033 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4034 vreinterpret_f16_s16 (int16x4_t __a)
4035 {
4036 return (float16x4_t) __a;
4037 }
4038
4039 __extension__ extern __inline float16x4_t
4040 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4041 vreinterpret_f16_s32 (int32x2_t __a)
4042 {
4043 return (float16x4_t) __a;
4044 }
4045
4046 __extension__ extern __inline float16x4_t
4047 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4048 vreinterpret_f16_s64 (int64x1_t __a)
4049 {
4050 return (float16x4_t) __a;
4051 }
4052
4053 __extension__ extern __inline float16x4_t
4054 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4055 vreinterpret_f16_f32 (float32x2_t __a)
4056 {
4057 return (float16x4_t) __a;
4058 }
4059
4060 __extension__ extern __inline float16x4_t
4061 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4062 vreinterpret_f16_u8 (uint8x8_t __a)
4063 {
4064 return (float16x4_t) __a;
4065 }
4066
4067 __extension__ extern __inline float16x4_t
4068 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4069 vreinterpret_f16_u16 (uint16x4_t __a)
4070 {
4071 return (float16x4_t) __a;
4072 }
4073
4074 __extension__ extern __inline float16x4_t
4075 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4076 vreinterpret_f16_u32 (uint32x2_t __a)
4077 {
4078 return (float16x4_t) __a;
4079 }
4080
4081 __extension__ extern __inline float16x4_t
4082 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4083 vreinterpret_f16_u64 (uint64x1_t __a)
4084 {
4085 return (float16x4_t) __a;
4086 }
4087
4088 __extension__ extern __inline float16x4_t
4089 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4090 vreinterpret_f16_p8 (poly8x8_t __a)
4091 {
4092 return (float16x4_t) __a;
4093 }
4094
4095 __extension__ extern __inline float16x4_t
4096 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4097 vreinterpret_f16_p16 (poly16x4_t __a)
4098 {
4099 return (float16x4_t) __a;
4100 }
4101
4102 __extension__ extern __inline float16x4_t
4103 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4104 vreinterpret_f16_p64 (poly64x1_t __a)
4105 {
4106 return (float16x4_t) __a;
4107 }
4108
4109 __extension__ extern __inline float16x8_t
4110 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4111 vreinterpretq_f16_f64 (float64x2_t __a)
4112 {
4113 return (float16x8_t) __a;
4114 }
4115
4116 __extension__ extern __inline float16x8_t
4117 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4118 vreinterpretq_f16_s8 (int8x16_t __a)
4119 {
4120 return (float16x8_t) __a;
4121 }
4122
4123 __extension__ extern __inline float16x8_t
4124 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4125 vreinterpretq_f16_s16 (int16x8_t __a)
4126 {
4127 return (float16x8_t) __a;
4128 }
4129
4130 __extension__ extern __inline float16x8_t
4131 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4132 vreinterpretq_f16_s32 (int32x4_t __a)
4133 {
4134 return (float16x8_t) __a;
4135 }
4136
4137 __extension__ extern __inline float16x8_t
4138 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4139 vreinterpretq_f16_s64 (int64x2_t __a)
4140 {
4141 return (float16x8_t) __a;
4142 }
4143
4144 __extension__ extern __inline float16x8_t
4145 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4146 vreinterpretq_f16_f32 (float32x4_t __a)
4147 {
4148 return (float16x8_t) __a;
4149 }
4150
4151 __extension__ extern __inline float16x8_t
4152 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4153 vreinterpretq_f16_u8 (uint8x16_t __a)
4154 {
4155 return (float16x8_t) __a;
4156 }
4157
4158 __extension__ extern __inline float16x8_t
4159 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4160 vreinterpretq_f16_u16 (uint16x8_t __a)
4161 {
4162 return (float16x8_t) __a;
4163 }
4164
4165 __extension__ extern __inline float16x8_t
4166 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4167 vreinterpretq_f16_u32 (uint32x4_t __a)
4168 {
4169 return (float16x8_t) __a;
4170 }
4171
4172 __extension__ extern __inline float16x8_t
4173 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4174 vreinterpretq_f16_u64 (uint64x2_t __a)
4175 {
4176 return (float16x8_t) __a;
4177 }
4178
4179 __extension__ extern __inline float16x8_t
4180 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4181 vreinterpretq_f16_p8 (poly8x16_t __a)
4182 {
4183 return (float16x8_t) __a;
4184 }
4185
4186 __extension__ extern __inline float16x8_t
4187 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4188 vreinterpretq_f16_p128 (poly128_t __a)
4189 {
4190 return (float16x8_t) __a;
4191 }
4192
4193 __extension__ extern __inline float16x8_t
4194 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4195 vreinterpretq_f16_p16 (poly16x8_t __a)
4196 {
4197 return (float16x8_t) __a;
4198 }
4199
4200 __extension__ extern __inline float16x8_t
4201 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4202 vreinterpretq_f16_p64 (poly64x2_t __a)
4203 {
4204 return (float16x8_t) __a;
4205 }
4206
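/* Reinterpret casts returning float32x2_t and float32x4_t.  */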
4207 __extension__ extern __inline float32x2_t
4208 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4209 vreinterpret_f32_f16 (float16x4_t __a)
4210 {
4211 return (float32x2_t) __a;
4212 }
4213
4214 __extension__ extern __inline float32x2_t
4215 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4216 vreinterpret_f32_f64 (float64x1_t __a)
4217 {
4218 return (float32x2_t) __a;
4219 }
4220
4221 __extension__ extern __inline float32x2_t
4222 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4223 vreinterpret_f32_s8 (int8x8_t __a)
4224 {
4225 return (float32x2_t) __a;
4226 }
4227
4228 __extension__ extern __inline float32x2_t
4229 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4230 vreinterpret_f32_s16 (int16x4_t __a)
4231 {
4232 return (float32x2_t) __a;
4233 }
4234
4235 __extension__ extern __inline float32x2_t
4236 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4237 vreinterpret_f32_s32 (int32x2_t __a)
4238 {
4239 return (float32x2_t) __a;
4240 }
4241
4242 __extension__ extern __inline float32x2_t
4243 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4244 vreinterpret_f32_s64 (int64x1_t __a)
4245 {
4246 return (float32x2_t) __a;
4247 }
4248
4249 __extension__ extern __inline float32x2_t
4250 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4251 vreinterpret_f32_u8 (uint8x8_t __a)
4252 {
4253 return (float32x2_t) __a;
4254 }
4255
4256 __extension__ extern __inline float32x2_t
4257 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4258 vreinterpret_f32_u16 (uint16x4_t __a)
4259 {
4260 return (float32x2_t) __a;
4261 }
4262
4263 __extension__ extern __inline float32x2_t
4264 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4265 vreinterpret_f32_u32 (uint32x2_t __a)
4266 {
4267 return (float32x2_t) __a;
4268 }
4269
4270 __extension__ extern __inline float32x2_t
4271 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4272 vreinterpret_f32_u64 (uint64x1_t __a)
4273 {
4274 return (float32x2_t) __a;
4275 }
4276
4277 __extension__ extern __inline float32x2_t
4278 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4279 vreinterpret_f32_p8 (poly8x8_t __a)
4280 {
4281 return (float32x2_t) __a;
4282 }
4283
4284 __extension__ extern __inline float32x2_t
4285 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4286 vreinterpret_f32_p16 (poly16x4_t __a)
4287 {
4288 return (float32x2_t) __a;
4289 }
4290
4291 __extension__ extern __inline float32x2_t
4292 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4293 vreinterpret_f32_p64 (poly64x1_t __a)
4294 {
4295 return (float32x2_t) __a;
4296 }
4297
4298 __extension__ extern __inline float32x4_t
4299 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4300 vreinterpretq_f32_f16 (float16x8_t __a)
4301 {
4302 return (float32x4_t) __a;
4303 }
4304
4305 __extension__ extern __inline float32x4_t
4306 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4307 vreinterpretq_f32_f64 (float64x2_t __a)
4308 {
4309 return (float32x4_t) __a;
4310 }
4311
4312 __extension__ extern __inline float32x4_t
4313 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4314 vreinterpretq_f32_s8 (int8x16_t __a)
4315 {
4316 return (float32x4_t) __a;
4317 }
4318
4319 __extension__ extern __inline float32x4_t
4320 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4321 vreinterpretq_f32_s16 (int16x8_t __a)
4322 {
4323 return (float32x4_t) __a;
4324 }
4325
4326 __extension__ extern __inline float32x4_t
4327 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4328 vreinterpretq_f32_s32 (int32x4_t __a)
4329 {
4330 return (float32x4_t) __a;
4331 }
4332
4333 __extension__ extern __inline float32x4_t
4334 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4335 vreinterpretq_f32_s64 (int64x2_t __a)
4336 {
4337 return (float32x4_t) __a;
4338 }
4339
4340 __extension__ extern __inline float32x4_t
4341 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4342 vreinterpretq_f32_u8 (uint8x16_t __a)
4343 {
4344 return (float32x4_t) __a;
4345 }
4346
4347 __extension__ extern __inline float32x4_t
4348 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4349 vreinterpretq_f32_u16 (uint16x8_t __a)
4350 {
4351 return (float32x4_t) __a;
4352 }
4353
4354 __extension__ extern __inline float32x4_t
4355 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4356 vreinterpretq_f32_u32 (uint32x4_t __a)
4357 {
4358 return (float32x4_t) __a;
4359 }
4360
4361 __extension__ extern __inline float32x4_t
4362 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4363 vreinterpretq_f32_u64 (uint64x2_t __a)
4364 {
4365 return (float32x4_t) __a;
4366 }
4367
4368 __extension__ extern __inline float32x4_t
4369 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4370 vreinterpretq_f32_p8 (poly8x16_t __a)
4371 {
4372 return (float32x4_t) __a;
4373 }
4374
4375 __extension__ extern __inline float32x4_t
4376 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4377 vreinterpretq_f32_p16 (poly16x8_t __a)
4378 {
4379 return (float32x4_t) __a;
4380 }
4381
4382 __extension__ extern __inline float32x4_t
4383 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4384 vreinterpretq_f32_p64 (poly64x2_t __a)
4385 {
4386 return (float32x4_t) __a;
4387 }
4388
4389 __extension__ extern __inline float32x4_t
4390 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4391 vreinterpretq_f32_p128 (poly128_t __a)
4392 {
4393 return (float32x4_t) __a;
4394 }
4395
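/* Reinterpret casts returning float64x1_t and float64x2_t.  */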
4397 __extension__ extern __inline float64x1_t
4398 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4399 vreinterpret_f64_f16 (float16x4_t __a)
4400 {
4401 return (float64x1_t) __a;
4402 }
4403
4404 __extension__ extern __inline float64x1_t
4405 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4406 vreinterpret_f64_f32 (float32x2_t __a)
4407 {
4408 return (float64x1_t) __a;
4409 }
4410
4411 __extension__ extern __inline float64x1_t
4412 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4413 vreinterpret_f64_p8 (poly8x8_t __a)
4414 {
4415 return (float64x1_t) __a;
4416 }
4417
4418 __extension__ extern __inline float64x1_t
4419 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4420 vreinterpret_f64_p16 (poly16x4_t __a)
4421 {
4422 return (float64x1_t) __a;
4423 }
4424
4425 __extension__ extern __inline float64x1_t
4426 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4427 vreinterpret_f64_p64 (poly64x1_t __a)
4428 {
4429 return (float64x1_t) __a;
4430 }
4431
4432 __extension__ extern __inline float64x1_t
4433 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4434 vreinterpret_f64_s8 (int8x8_t __a)
4435 {
4436 return (float64x1_t) __a;
4437 }
4438
4439 __extension__ extern __inline float64x1_t
4440 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4441 vreinterpret_f64_s16 (int16x4_t __a)
4442 {
4443 return (float64x1_t) __a;
4444 }
4445
4446 __extension__ extern __inline float64x1_t
4447 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4448 vreinterpret_f64_s32 (int32x2_t __a)
4449 {
4450 return (float64x1_t) __a;
4451 }
4452
4453 __extension__ extern __inline float64x1_t
4454 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4455 vreinterpret_f64_s64 (int64x1_t __a)
4456 {
4457 return (float64x1_t) __a;
4458 }
4459
4460 __extension__ extern __inline float64x1_t
4461 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4462 vreinterpret_f64_u8 (uint8x8_t __a)
4463 {
4464 return (float64x1_t) __a;
4465 }
4466
4467 __extension__ extern __inline float64x1_t
4468 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4469 vreinterpret_f64_u16 (uint16x4_t __a)
4470 {
4471 return (float64x1_t) __a;
4472 }
4473
4474 __extension__ extern __inline float64x1_t
4475 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4476 vreinterpret_f64_u32 (uint32x2_t __a)
4477 {
4478 return (float64x1_t) __a;
4479 }
4480
4481 __extension__ extern __inline float64x1_t
4482 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4483 vreinterpret_f64_u64 (uint64x1_t __a)
4484 {
4485 return (float64x1_t) __a;
4486 }
4487
4488 __extension__ extern __inline float64x2_t
4489 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4490 vreinterpretq_f64_f16 (float16x8_t __a)
4491 {
4492 return (float64x2_t) __a;
4493 }
4494
4495 __extension__ extern __inline float64x2_t
4496 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4497 vreinterpretq_f64_f32 (float32x4_t __a)
4498 {
4499 return (float64x2_t) __a;
4500 }
4501
4502 __extension__ extern __inline float64x2_t
4503 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4504 vreinterpretq_f64_p8 (poly8x16_t __a)
4505 {
4506 return (float64x2_t) __a;
4507 }
4508
4509 __extension__ extern __inline float64x2_t
4510 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4511 vreinterpretq_f64_p16 (poly16x8_t __a)
4512 {
4513 return (float64x2_t) __a;
4514 }
4515
4516 __extension__ extern __inline float64x2_t
4517 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4518 vreinterpretq_f64_p64 (poly64x2_t __a)
4519 {
4520 return (float64x2_t) __a;
4521 }
4522
4523 __extension__ extern __inline float64x2_t
4524 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4525 vreinterpretq_f64_s8 (int8x16_t __a)
4526 {
4527 return (float64x2_t) __a;
4528 }
4529
4530 __extension__ extern __inline float64x2_t
4531 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4532 vreinterpretq_f64_s16 (int16x8_t __a)
4533 {
4534 return (float64x2_t) __a;
4535 }
4536
4537 __extension__ extern __inline float64x2_t
4538 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4539 vreinterpretq_f64_s32 (int32x4_t __a)
4540 {
4541 return (float64x2_t) __a;
4542 }
4543
4544 __extension__ extern __inline float64x2_t
4545 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4546 vreinterpretq_f64_s64 (int64x2_t __a)
4547 {
4548 return (float64x2_t) __a;
4549 }
4550
4551 __extension__ extern __inline float64x2_t
4552 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4553 vreinterpretq_f64_u8 (uint8x16_t __a)
4554 {
4555 return (float64x2_t) __a;
4556 }
4557
4558 __extension__ extern __inline float64x2_t
4559 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4560 vreinterpretq_f64_u16 (uint16x8_t __a)
4561 {
4562 return (float64x2_t) __a;
4563 }
4564
4565 __extension__ extern __inline float64x2_t
4566 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4567 vreinterpretq_f64_u32 (uint32x4_t __a)
4568 {
4569 return (float64x2_t) __a;
4570 }
4571
4572 __extension__ extern __inline float64x2_t
4573 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4574 vreinterpretq_f64_u64 (uint64x2_t __a)
4575 {
4576 return (float64x2_t) __a;
4577 }
4578
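/* Reinterpret casts returning int64x1_t and int64x2_t.  */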
4579 __extension__ extern __inline int64x1_t
4580 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4581 vreinterpret_s64_f16 (float16x4_t __a)
4582 {
4583 return (int64x1_t) __a;
4584 }
4585
4586 __extension__ extern __inline int64x1_t
4587 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4588 vreinterpret_s64_f64 (float64x1_t __a)
4589 {
4590 return (int64x1_t) __a;
4591 }
4592
4593 __extension__ extern __inline int64x1_t
4594 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4595 vreinterpret_s64_s8 (int8x8_t __a)
4596 {
4597 return (int64x1_t) __a;
4598 }
4599
4600 __extension__ extern __inline int64x1_t
4601 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4602 vreinterpret_s64_s16 (int16x4_t __a)
4603 {
4604 return (int64x1_t) __a;
4605 }
4606
4607 __extension__ extern __inline int64x1_t
4608 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4609 vreinterpret_s64_s32 (int32x2_t __a)
4610 {
4611 return (int64x1_t) __a;
4612 }
4613
4614 __extension__ extern __inline int64x1_t
4615 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4616 vreinterpret_s64_f32 (float32x2_t __a)
4617 {
4618 return (int64x1_t) __a;
4619 }
4620
4621 __extension__ extern __inline int64x1_t
4622 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4623 vreinterpret_s64_u8 (uint8x8_t __a)
4624 {
4625 return (int64x1_t) __a;
4626 }
4627
4628 __extension__ extern __inline int64x1_t
4629 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4630 vreinterpret_s64_u16 (uint16x4_t __a)
4631 {
4632 return (int64x1_t) __a;
4633 }
4634
4635 __extension__ extern __inline int64x1_t
4636 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4637 vreinterpret_s64_u32 (uint32x2_t __a)
4638 {
4639 return (int64x1_t) __a;
4640 }
4641
4642 __extension__ extern __inline int64x1_t
4643 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4644 vreinterpret_s64_u64 (uint64x1_t __a)
4645 {
4646 return (int64x1_t) __a;
4647 }
4648
4649 __extension__ extern __inline int64x1_t
4650 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4651 vreinterpret_s64_p8 (poly8x8_t __a)
4652 {
4653 return (int64x1_t) __a;
4654 }
4655
4656 __extension__ extern __inline int64x1_t
4657 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4658 vreinterpret_s64_p16 (poly16x4_t __a)
4659 {
4660 return (int64x1_t) __a;
4661 }
4662
4663 __extension__ extern __inline int64x1_t
4664 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4665 vreinterpret_s64_p64 (poly64x1_t __a)
4666 {
4667 return (int64x1_t) __a;
4668 }
4669
4670 __extension__ extern __inline int64x2_t
4671 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4672 vreinterpretq_s64_f64 (float64x2_t __a)
4673 {
4674 return (int64x2_t) __a;
4675 }
4676
4677 __extension__ extern __inline int64x2_t
4678 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4679 vreinterpretq_s64_s8 (int8x16_t __a)
4680 {
4681 return (int64x2_t) __a;
4682 }
4683
4684 __extension__ extern __inline int64x2_t
4685 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4686 vreinterpretq_s64_s16 (int16x8_t __a)
4687 {
4688 return (int64x2_t) __a;
4689 }
4690
4691 __extension__ extern __inline int64x2_t
4692 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4693 vreinterpretq_s64_s32 (int32x4_t __a)
4694 {
4695 return (int64x2_t) __a;
4696 }
4697
4698 __extension__ extern __inline int64x2_t
4699 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4700 vreinterpretq_s64_f16 (float16x8_t __a)
4701 {
4702 return (int64x2_t) __a;
4703 }
4704
4705 __extension__ extern __inline int64x2_t
4706 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4707 vreinterpretq_s64_f32 (float32x4_t __a)
4708 {
4709 return (int64x2_t) __a;
4710 }
4711
4712 __extension__ extern __inline int64x2_t
4713 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4714 vreinterpretq_s64_u8 (uint8x16_t __a)
4715 {
4716 return (int64x2_t) __a;
4717 }
4718
4719 __extension__ extern __inline int64x2_t
4720 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4721 vreinterpretq_s64_u16 (uint16x8_t __a)
4722 {
4723 return (int64x2_t) __a;
4724 }
4725
4726 __extension__ extern __inline int64x2_t
4727 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4728 vreinterpretq_s64_u32 (uint32x4_t __a)
4729 {
4730 return (int64x2_t) __a;
4731 }
4732
4733 __extension__ extern __inline int64x2_t
4734 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4735 vreinterpretq_s64_u64 (uint64x2_t __a)
4736 {
4737 return (int64x2_t) __a;
4738 }
4739
4740 __extension__ extern __inline int64x2_t
4741 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4742 vreinterpretq_s64_p8 (poly8x16_t __a)
4743 {
4744 return (int64x2_t) __a;
4745 }
4746
4747 __extension__ extern __inline int64x2_t
4748 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4749 vreinterpretq_s64_p16 (poly16x8_t __a)
4750 {
4751 return (int64x2_t) __a;
4752 }
4753
4754 __extension__ extern __inline int64x2_t
4755 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4756 vreinterpretq_s64_p64 (poly64x2_t __a)
4757 {
4758 return (int64x2_t) __a;
4759 }
4760
4761 __extension__ extern __inline int64x2_t
4762 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4763 vreinterpretq_s64_p128 (poly128_t __a)
4764 {
4765 return (int64x2_t) __a;
4766 }
4767
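/* Reinterpret casts returning uint64x1_t and uint64x2_t.  */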
4768 __extension__ extern __inline uint64x1_t
4769 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4770 vreinterpret_u64_f16 (float16x4_t __a)
4771 {
4772 return (uint64x1_t) __a;
4773 }
4774
4775 __extension__ extern __inline uint64x1_t
4776 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4777 vreinterpret_u64_f64 (float64x1_t __a)
4778 {
4779 return (uint64x1_t) __a;
4780 }
4781
4782 __extension__ extern __inline uint64x1_t
4783 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4784 vreinterpret_u64_s8 (int8x8_t __a)
4785 {
4786 return (uint64x1_t) __a;
4787 }
4788
4789 __extension__ extern __inline uint64x1_t
4790 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4791 vreinterpret_u64_s16 (int16x4_t __a)
4792 {
4793 return (uint64x1_t) __a;
4794 }
4795
4796 __extension__ extern __inline uint64x1_t
4797 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4798 vreinterpret_u64_s32 (int32x2_t __a)
4799 {
4800 return (uint64x1_t) __a;
4801 }
4802
4803 __extension__ extern __inline uint64x1_t
4804 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4805 vreinterpret_u64_s64 (int64x1_t __a)
4806 {
4807 return (uint64x1_t) __a;
4808 }
4809
4810 __extension__ extern __inline uint64x1_t
4811 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4812 vreinterpret_u64_f32 (float32x2_t __a)
4813 {
4814 return (uint64x1_t) __a;
4815 }
4816
4817 __extension__ extern __inline uint64x1_t
4818 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4819 vreinterpret_u64_u8 (uint8x8_t __a)
4820 {
4821 return (uint64x1_t) __a;
4822 }
4823
4824 __extension__ extern __inline uint64x1_t
4825 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4826 vreinterpret_u64_u16 (uint16x4_t __a)
4827 {
4828 return (uint64x1_t) __a;
4829 }
4830
4831 __extension__ extern __inline uint64x1_t
4832 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4833 vreinterpret_u64_u32 (uint32x2_t __a)
4834 {
4835 return (uint64x1_t) __a;
4836 }
4837
4838 __extension__ extern __inline uint64x1_t
4839 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4840 vreinterpret_u64_p8 (poly8x8_t __a)
4841 {
4842 return (uint64x1_t) __a;
4843 }
4844
4845 __extension__ extern __inline uint64x1_t
4846 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4847 vreinterpret_u64_p16 (poly16x4_t __a)
4848 {
4849 return (uint64x1_t) __a;
4850 }
4851
4852 __extension__ extern __inline uint64x1_t
4853 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4854 vreinterpret_u64_p64 (poly64x1_t __a)
4855 {
4856 return (uint64x1_t) __a;
4857 }
4858
4859 __extension__ extern __inline uint64x2_t
4860 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4861 vreinterpretq_u64_f64 (float64x2_t __a)
4862 {
4863 return (uint64x2_t) __a;
4864 }
4865
4866 __extension__ extern __inline uint64x2_t
4867 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4868 vreinterpretq_u64_s8 (int8x16_t __a)
4869 {
4870 return (uint64x2_t) __a;
4871 }
4872
4873 __extension__ extern __inline uint64x2_t
4874 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4875 vreinterpretq_u64_s16 (int16x8_t __a)
4876 {
4877 return (uint64x2_t) __a;
4878 }
4879
4880 __extension__ extern __inline uint64x2_t
4881 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4882 vreinterpretq_u64_s32 (int32x4_t __a)
4883 {
4884 return (uint64x2_t) __a;
4885 }
4886
4887 __extension__ extern __inline uint64x2_t
4888 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4889 vreinterpretq_u64_s64 (int64x2_t __a)
4890 {
4891 return (uint64x2_t) __a;
4892 }
4893
4894 __extension__ extern __inline uint64x2_t
4895 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4896 vreinterpretq_u64_f16 (float16x8_t __a)
4897 {
4898 return (uint64x2_t) __a;
4899 }
4900
4901 __extension__ extern __inline uint64x2_t
4902 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4903 vreinterpretq_u64_f32 (float32x4_t __a)
4904 {
4905 return (uint64x2_t) __a;
4906 }
4907
4908 __extension__ extern __inline uint64x2_t
4909 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4910 vreinterpretq_u64_u8 (uint8x16_t __a)
4911 {
4912 return (uint64x2_t) __a;
4913 }
4914
4915 __extension__ extern __inline uint64x2_t
4916 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4917 vreinterpretq_u64_u16 (uint16x8_t __a)
4918 {
4919 return (uint64x2_t) __a;
4920 }
4921
4922 __extension__ extern __inline uint64x2_t
4923 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4924 vreinterpretq_u64_u32 (uint32x4_t __a)
4925 {
4926 return (uint64x2_t) __a;
4927 }
4928
4929 __extension__ extern __inline uint64x2_t
4930 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4931 vreinterpretq_u64_p8 (poly8x16_t __a)
4932 {
4933 return (uint64x2_t) __a;
4934 }
4935
4936 __extension__ extern __inline uint64x2_t
4937 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4938 vreinterpretq_u64_p16 (poly16x8_t __a)
4939 {
4940 return (uint64x2_t) __a;
4941 }
4942
4943 __extension__ extern __inline uint64x2_t
4944 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4945 vreinterpretq_u64_p64 (poly64x2_t __a)
4946 {
4947 return (uint64x2_t) __a;
4948 }
4949
4950 __extension__ extern __inline uint64x2_t
4951 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4952 vreinterpretq_u64_p128 (poly128_t __a)
4953 {
4954 return (uint64x2_t) __a;
4955 }
4956
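/* Reinterpret casts returning int8x8_t and int8x16_t.  */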
4957 __extension__ extern __inline int8x8_t
4958 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4959 vreinterpret_s8_f16 (float16x4_t __a)
4960 {
4961 return (int8x8_t) __a;
4962 }
4963
4964 __extension__ extern __inline int8x8_t
4965 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4966 vreinterpret_s8_f64 (float64x1_t __a)
4967 {
4968 return (int8x8_t) __a;
4969 }
4970
4971 __extension__ extern __inline int8x8_t
4972 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4973 vreinterpret_s8_s16 (int16x4_t __a)
4974 {
4975 return (int8x8_t) __a;
4976 }
4977
4978 __extension__ extern __inline int8x8_t
4979 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4980 vreinterpret_s8_s32 (int32x2_t __a)
4981 {
4982 return (int8x8_t) __a;
4983 }
4984
4985 __extension__ extern __inline int8x8_t
4986 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4987 vreinterpret_s8_s64 (int64x1_t __a)
4988 {
4989 return (int8x8_t) __a;
4990 }
4991
4992 __extension__ extern __inline int8x8_t
4993 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
4994 vreinterpret_s8_f32 (float32x2_t __a)
4995 {
4996 return (int8x8_t) __a;
4997 }
4998
4999 __extension__ extern __inline int8x8_t
5000 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5001 vreinterpret_s8_u8 (uint8x8_t __a)
5002 {
5003 return (int8x8_t) __a;
5004 }
5005
5006 __extension__ extern __inline int8x8_t
5007 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5008 vreinterpret_s8_u16 (uint16x4_t __a)
5009 {
5010 return (int8x8_t) __a;
5011 }
5012
5013 __extension__ extern __inline int8x8_t
5014 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5015 vreinterpret_s8_u32 (uint32x2_t __a)
5016 {
5017 return (int8x8_t) __a;
5018 }
5019
5020 __extension__ extern __inline int8x8_t
5021 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5022 vreinterpret_s8_u64 (uint64x1_t __a)
5023 {
5024 return (int8x8_t) __a;
5025 }
5026
5027 __extension__ extern __inline int8x8_t
5028 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5029 vreinterpret_s8_p8 (poly8x8_t __a)
5030 {
5031 return (int8x8_t) __a;
5032 }
5033
5034 __extension__ extern __inline int8x8_t
5035 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5036 vreinterpret_s8_p16 (poly16x4_t __a)
5037 {
5038 return (int8x8_t) __a;
5039 }
5040
5041 __extension__ extern __inline int8x8_t
5042 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5043 vreinterpret_s8_p64 (poly64x1_t __a)
5044 {
5045 return (int8x8_t) __a;
5046 }
5047
5048 __extension__ extern __inline int8x16_t
5049 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5050 vreinterpretq_s8_f64 (float64x2_t __a)
5051 {
5052 return (int8x16_t) __a;
5053 }
5054
5055 __extension__ extern __inline int8x16_t
5056 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5057 vreinterpretq_s8_s16 (int16x8_t __a)
5058 {
5059 return (int8x16_t) __a;
5060 }
5061
5062 __extension__ extern __inline int8x16_t
5063 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5064 vreinterpretq_s8_s32 (int32x4_t __a)
5065 {
5066 return (int8x16_t) __a;
5067 }
5068
5069 __extension__ extern __inline int8x16_t
5070 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5071 vreinterpretq_s8_s64 (int64x2_t __a)
5072 {
5073 return (int8x16_t) __a;
5074 }
5075
5076 __extension__ extern __inline int8x16_t
5077 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5078 vreinterpretq_s8_f16 (float16x8_t __a)
5079 {
5080 return (int8x16_t) __a;
5081 }
5082
5083 __extension__ extern __inline int8x16_t
5084 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5085 vreinterpretq_s8_f32 (float32x4_t __a)
5086 {
5087 return (int8x16_t) __a;
5088 }
5089
5090 __extension__ extern __inline int8x16_t
5091 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5092 vreinterpretq_s8_u8 (uint8x16_t __a)
5093 {
5094 return (int8x16_t) __a;
5095 }
5096
5097 __extension__ extern __inline int8x16_t
5098 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5099 vreinterpretq_s8_u16 (uint16x8_t __a)
5100 {
5101 return (int8x16_t) __a;
5102 }
5103
5104 __extension__ extern __inline int8x16_t
5105 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5106 vreinterpretq_s8_u32 (uint32x4_t __a)
5107 {
5108 return (int8x16_t) __a;
5109 }
5110
5111 __extension__ extern __inline int8x16_t
5112 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5113 vreinterpretq_s8_u64 (uint64x2_t __a)
5114 {
5115 return (int8x16_t) __a;
5116 }
5117
5118 __extension__ extern __inline int8x16_t
5119 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5120 vreinterpretq_s8_p8 (poly8x16_t __a)
5121 {
5122 return (int8x16_t) __a;
5123 }
5124
5125 __extension__ extern __inline int8x16_t
5126 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5127 vreinterpretq_s8_p16 (poly16x8_t __a)
5128 {
5129 return (int8x16_t) __a;
5130 }
5131
5132 __extension__ extern __inline int8x16_t
5133 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5134 vreinterpretq_s8_p64 (poly64x2_t __a)
5135 {
5136 return (int8x16_t) __a;
5137 }
5138
5139 __extension__ extern __inline int8x16_t
5140 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5141 vreinterpretq_s8_p128 (poly128_t __a)
5142 {
5143 return (int8x16_t) __a;
5144 }
5145
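/* Reinterpret casts returning int16x4_t and int16x8_t.  */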
5146 __extension__ extern __inline int16x4_t
5147 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5148 vreinterpret_s16_f16 (float16x4_t __a)
5149 {
5150 return (int16x4_t) __a;
5151 }
5152
5153 __extension__ extern __inline int16x4_t
5154 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5155 vreinterpret_s16_f64 (float64x1_t __a)
5156 {
5157 return (int16x4_t) __a;
5158 }
5159
5160 __extension__ extern __inline int16x4_t
5161 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5162 vreinterpret_s16_s8 (int8x8_t __a)
5163 {
5164 return (int16x4_t) __a;
5165 }
5166
5167 __extension__ extern __inline int16x4_t
5168 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5169 vreinterpret_s16_s32 (int32x2_t __a)
5170 {
5171 return (int16x4_t) __a;
5172 }
5173
5174 __extension__ extern __inline int16x4_t
5175 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5176 vreinterpret_s16_s64 (int64x1_t __a)
5177 {
5178 return (int16x4_t) __a;
5179 }
5180
5181 __extension__ extern __inline int16x4_t
5182 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5183 vreinterpret_s16_f32 (float32x2_t __a)
5184 {
5185 return (int16x4_t) __a;
5186 }
5187
5188 __extension__ extern __inline int16x4_t
5189 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5190 vreinterpret_s16_u8 (uint8x8_t __a)
5191 {
5192 return (int16x4_t) __a;
5193 }
5194
5195 __extension__ extern __inline int16x4_t
5196 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5197 vreinterpret_s16_u16 (uint16x4_t __a)
5198 {
5199 return (int16x4_t) __a;
5200 }
5201
5202 __extension__ extern __inline int16x4_t
5203 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5204 vreinterpret_s16_u32 (uint32x2_t __a)
5205 {
5206 return (int16x4_t) __a;
5207 }
5208
5209 __extension__ extern __inline int16x4_t
5210 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5211 vreinterpret_s16_u64 (uint64x1_t __a)
5212 {
5213 return (int16x4_t) __a;
5214 }
5215
5216 __extension__ extern __inline int16x4_t
5217 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5218 vreinterpret_s16_p8 (poly8x8_t __a)
5219 {
5220 return (int16x4_t) __a;
5221 }
5222
5223 __extension__ extern __inline int16x4_t
5224 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5225 vreinterpret_s16_p16 (poly16x4_t __a)
5226 {
5227 return (int16x4_t) __a;
5228 }
5229
5230 __extension__ extern __inline int16x4_t
5231 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5232 vreinterpret_s16_p64 (poly64x1_t __a)
5233 {
5234 return (int16x4_t) __a;
5235 }
5236
5237 __extension__ extern __inline int16x8_t
5238 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5239 vreinterpretq_s16_f64 (float64x2_t __a)
5240 {
5241 return (int16x8_t) __a;
5242 }
5243
5244 __extension__ extern __inline int16x8_t
5245 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5246 vreinterpretq_s16_s8 (int8x16_t __a)
5247 {
5248 return (int16x8_t) __a;
5249 }
5250
5251 __extension__ extern __inline int16x8_t
5252 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5253 vreinterpretq_s16_s32 (int32x4_t __a)
5254 {
5255 return (int16x8_t) __a;
5256 }
5257
5258 __extension__ extern __inline int16x8_t
5259 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5260 vreinterpretq_s16_s64 (int64x2_t __a)
5261 {
5262 return (int16x8_t) __a;
5263 }
5264
5265 __extension__ extern __inline int16x8_t
5266 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5267 vreinterpretq_s16_f16 (float16x8_t __a)
5268 {
5269 return (int16x8_t) __a;
5270 }
5271
5272 __extension__ extern __inline int16x8_t
5273 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5274 vreinterpretq_s16_f32 (float32x4_t __a)
5275 {
5276 return (int16x8_t) __a;
5277 }
5278
5279 __extension__ extern __inline int16x8_t
5280 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5281 vreinterpretq_s16_u8 (uint8x16_t __a)
5282 {
5283 return (int16x8_t) __a;
5284 }
5285
5286 __extension__ extern __inline int16x8_t
5287 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5288 vreinterpretq_s16_u16 (uint16x8_t __a)
5289 {
5290 return (int16x8_t) __a;
5291 }
5292
5293 __extension__ extern __inline int16x8_t
5294 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5295 vreinterpretq_s16_u32 (uint32x4_t __a)
5296 {
5297 return (int16x8_t) __a;
5298 }
5299
5300 __extension__ extern __inline int16x8_t
5301 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5302 vreinterpretq_s16_u64 (uint64x2_t __a)
5303 {
5304 return (int16x8_t) __a;
5305 }
5306
5307 __extension__ extern __inline int16x8_t
5308 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5309 vreinterpretq_s16_p8 (poly8x16_t __a)
5310 {
5311 return (int16x8_t) __a;
5312 }
5313
5314 __extension__ extern __inline int16x8_t
5315 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5316 vreinterpretq_s16_p16 (poly16x8_t __a)
5317 {
5318 return (int16x8_t) __a;
5319 }
5320
5321 __extension__ extern __inline int16x8_t
5322 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5323 vreinterpretq_s16_p64 (poly64x2_t __a)
5324 {
5325 return (int16x8_t) __a;
5326 }
5327
5328 __extension__ extern __inline int16x8_t
5329 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5330 vreinterpretq_s16_p128 (poly128_t __a)
5331 {
5332 return (int16x8_t) __a;
5333 }
5334
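/* Reinterpret casts returning int32x2_t and int32x4_t.  */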
5335 __extension__ extern __inline int32x2_t
5336 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5337 vreinterpret_s32_f16 (float16x4_t __a)
5338 {
5339 return (int32x2_t) __a;
5340 }
5341
5342 __extension__ extern __inline int32x2_t
5343 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5344 vreinterpret_s32_f64 (float64x1_t __a)
5345 {
5346 return (int32x2_t) __a;
5347 }
5348
5349 __extension__ extern __inline int32x2_t
5350 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5351 vreinterpret_s32_s8 (int8x8_t __a)
5352 {
5353 return (int32x2_t) __a;
5354 }
5355
5356 __extension__ extern __inline int32x2_t
5357 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5358 vreinterpret_s32_s16 (int16x4_t __a)
5359 {
5360 return (int32x2_t) __a;
5361 }
5362
5363 __extension__ extern __inline int32x2_t
5364 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5365 vreinterpret_s32_s64 (int64x1_t __a)
5366 {
5367 return (int32x2_t) __a;
5368 }
5369
5370 __extension__ extern __inline int32x2_t
5371 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5372 vreinterpret_s32_f32 (float32x2_t __a)
5373 {
5374 return (int32x2_t) __a;
5375 }
5376
5377 __extension__ extern __inline int32x2_t
5378 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5379 vreinterpret_s32_u8 (uint8x8_t __a)
5380 {
5381 return (int32x2_t) __a;
5382 }
5383
5384 __extension__ extern __inline int32x2_t
5385 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5386 vreinterpret_s32_u16 (uint16x4_t __a)
5387 {
5388 return (int32x2_t) __a;
5389 }
5390
5391 __extension__ extern __inline int32x2_t
5392 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5393 vreinterpret_s32_u32 (uint32x2_t __a)
5394 {
5395 return (int32x2_t) __a;
5396 }
5397
5398 __extension__ extern __inline int32x2_t
5399 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5400 vreinterpret_s32_u64 (uint64x1_t __a)
5401 {
5402 return (int32x2_t) __a;
5403 }
5404
5405 __extension__ extern __inline int32x2_t
5406 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5407 vreinterpret_s32_p8 (poly8x8_t __a)
5408 {
5409 return (int32x2_t) __a;
5410 }
5411
5412 __extension__ extern __inline int32x2_t
5413 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5414 vreinterpret_s32_p16 (poly16x4_t __a)
5415 {
5416 return (int32x2_t) __a;
5417 }
5418
5419 __extension__ extern __inline int32x2_t
5420 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5421 vreinterpret_s32_p64 (poly64x1_t __a)
5422 {
5423 return (int32x2_t) __a;
5424 }
5425
5426 __extension__ extern __inline int32x4_t
5427 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5428 vreinterpretq_s32_f64 (float64x2_t __a)
5429 {
5430 return (int32x4_t) __a;
5431 }
5432
5433 __extension__ extern __inline int32x4_t
5434 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5435 vreinterpretq_s32_s8 (int8x16_t __a)
5436 {
5437 return (int32x4_t) __a;
5438 }
5439
5440 __extension__ extern __inline int32x4_t
5441 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5442 vreinterpretq_s32_s16 (int16x8_t __a)
5443 {
5444 return (int32x4_t) __a;
5445 }
5446
5447 __extension__ extern __inline int32x4_t
5448 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5449 vreinterpretq_s32_s64 (int64x2_t __a)
5450 {
5451 return (int32x4_t) __a;
5452 }
5453
5454 __extension__ extern __inline int32x4_t
5455 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5456 vreinterpretq_s32_f16 (float16x8_t __a)
5457 {
5458 return (int32x4_t) __a;
5459 }
5460
5461 __extension__ extern __inline int32x4_t
5462 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5463 vreinterpretq_s32_f32 (float32x4_t __a)
5464 {
5465 return (int32x4_t) __a;
5466 }
5467
5468 __extension__ extern __inline int32x4_t
5469 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5470 vreinterpretq_s32_u8 (uint8x16_t __a)
5471 {
5472 return (int32x4_t) __a;
5473 }
5474
5475 __extension__ extern __inline int32x4_t
5476 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5477 vreinterpretq_s32_u16 (uint16x8_t __a)
5478 {
5479 return (int32x4_t) __a;
5480 }
5481
5482 __extension__ extern __inline int32x4_t
5483 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5484 vreinterpretq_s32_u32 (uint32x4_t __a)
5485 {
5486 return (int32x4_t) __a;
5487 }
5488
5489 __extension__ extern __inline int32x4_t
5490 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5491 vreinterpretq_s32_u64 (uint64x2_t __a)
5492 {
5493 return (int32x4_t) __a;
5494 }
5495
5496 __extension__ extern __inline int32x4_t
5497 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5498 vreinterpretq_s32_p8 (poly8x16_t __a)
5499 {
5500 return (int32x4_t) __a;
5501 }
5502
5503 __extension__ extern __inline int32x4_t
5504 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5505 vreinterpretq_s32_p16 (poly16x8_t __a)
5506 {
5507 return (int32x4_t) __a;
5508 }
5509
5510 __extension__ extern __inline int32x4_t
5511 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5512 vreinterpretq_s32_p64 (poly64x2_t __a)
5513 {
5514 return (int32x4_t) __a;
5515 }
5516
5517 __extension__ extern __inline int32x4_t
5518 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5519 vreinterpretq_s32_p128 (poly128_t __a)
5520 {
5521 return (int32x4_t) __a;
5522 }
5523
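/* Reinterpret casts returning uint8x8_t and uint8x16_t.  */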
5524 __extension__ extern __inline uint8x8_t
5525 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5526 vreinterpret_u8_f16 (float16x4_t __a)
5527 {
5528 return (uint8x8_t) __a;
5529 }
5530
5531 __extension__ extern __inline uint8x8_t
5532 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5533 vreinterpret_u8_f64 (float64x1_t __a)
5534 {
5535 return (uint8x8_t) __a;
5536 }
5537
5538 __extension__ extern __inline uint8x8_t
5539 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5540 vreinterpret_u8_s8 (int8x8_t __a)
5541 {
5542 return (uint8x8_t) __a;
5543 }
5544
5545 __extension__ extern __inline uint8x8_t
5546 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5547 vreinterpret_u8_s16 (int16x4_t __a)
5548 {
5549 return (uint8x8_t) __a;
5550 }
5551
5552 __extension__ extern __inline uint8x8_t
5553 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5554 vreinterpret_u8_s32 (int32x2_t __a)
5555 {
5556 return (uint8x8_t) __a;
5557 }
5558
5559 __extension__ extern __inline uint8x8_t
5560 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5561 vreinterpret_u8_s64 (int64x1_t __a)
5562 {
5563 return (uint8x8_t) __a;
5564 }
5565
5566 __extension__ extern __inline uint8x8_t
5567 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5568 vreinterpret_u8_f32 (float32x2_t __a)
5569 {
5570 return (uint8x8_t) __a;
5571 }
5572
5573 __extension__ extern __inline uint8x8_t
5574 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5575 vreinterpret_u8_u16 (uint16x4_t __a)
5576 {
5577 return (uint8x8_t) __a;
5578 }
5579
5580 __extension__ extern __inline uint8x8_t
5581 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5582 vreinterpret_u8_u32 (uint32x2_t __a)
5583 {
5584 return (uint8x8_t) __a;
5585 }
5586
5587 __extension__ extern __inline uint8x8_t
5588 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5589 vreinterpret_u8_u64 (uint64x1_t __a)
5590 {
5591 return (uint8x8_t) __a;
5592 }
5593
5594 __extension__ extern __inline uint8x8_t
5595 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5596 vreinterpret_u8_p8 (poly8x8_t __a)
5597 {
5598 return (uint8x8_t) __a;
5599 }
5600
5601 __extension__ extern __inline uint8x8_t
5602 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5603 vreinterpret_u8_p16 (poly16x4_t __a)
5604 {
5605 return (uint8x8_t) __a;
5606 }
5607
5608 __extension__ extern __inline uint8x8_t
5609 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5610 vreinterpret_u8_p64 (poly64x1_t __a)
5611 {
5612 return (uint8x8_t) __a;
5613 }
5614
5615 __extension__ extern __inline uint8x16_t
5616 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5617 vreinterpretq_u8_f64 (float64x2_t __a)
5618 {
5619 return (uint8x16_t) __a;
5620 }
5621
5622 __extension__ extern __inline uint8x16_t
5623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5624 vreinterpretq_u8_s8 (int8x16_t __a)
5625 {
5626 return (uint8x16_t) __a;
5627 }
5628
5629 __extension__ extern __inline uint8x16_t
5630 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5631 vreinterpretq_u8_s16 (int16x8_t __a)
5632 {
5633 return (uint8x16_t) __a;
5634 }
5635
5636 __extension__ extern __inline uint8x16_t
5637 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5638 vreinterpretq_u8_s32 (int32x4_t __a)
5639 {
5640 return (uint8x16_t) __a;
5641 }
5642
5643 __extension__ extern __inline uint8x16_t
5644 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5645 vreinterpretq_u8_s64 (int64x2_t __a)
5646 {
5647 return (uint8x16_t) __a;
5648 }
5649
5650 __extension__ extern __inline uint8x16_t
5651 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5652 vreinterpretq_u8_f16 (float16x8_t __a)
5653 {
5654 return (uint8x16_t) __a;
5655 }
5656
5657 __extension__ extern __inline uint8x16_t
5658 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5659 vreinterpretq_u8_f32 (float32x4_t __a)
5660 {
5661 return (uint8x16_t) __a;
5662 }
5663
5664 __extension__ extern __inline uint8x16_t
5665 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5666 vreinterpretq_u8_u16 (uint16x8_t __a)
5667 {
5668 return (uint8x16_t) __a;
5669 }
5670
5671 __extension__ extern __inline uint8x16_t
5672 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5673 vreinterpretq_u8_u32 (uint32x4_t __a)
5674 {
5675 return (uint8x16_t) __a;
5676 }
5677
5678 __extension__ extern __inline uint8x16_t
5679 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5680 vreinterpretq_u8_u64 (uint64x2_t __a)
5681 {
5682 return (uint8x16_t) __a;
5683 }
5684
5685 __extension__ extern __inline uint8x16_t
5686 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5687 vreinterpretq_u8_p8 (poly8x16_t __a)
5688 {
5689 return (uint8x16_t) __a;
5690 }
5691
5692 __extension__ extern __inline uint8x16_t
5693 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5694 vreinterpretq_u8_p16 (poly16x8_t __a)
5695 {
5696 return (uint8x16_t) __a;
5697 }
5698
5699 __extension__ extern __inline uint8x16_t
5700 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5701 vreinterpretq_u8_p64 (poly64x2_t __a)
5702 {
5703 return (uint8x16_t) __a;
5704 }
5705
5706 __extension__ extern __inline uint8x16_t
5707 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5708 vreinterpretq_u8_p128 (poly128_t __a)
5709 {
5710 return (uint8x16_t) __a;
5711 }
5712
5713 __extension__ extern __inline uint16x4_t
5714 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5715 vreinterpret_u16_f16 (float16x4_t __a)
5716 {
5717 return (uint16x4_t) __a;
5718 }
5719
5720 __extension__ extern __inline uint16x4_t
5721 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5722 vreinterpret_u16_f64 (float64x1_t __a)
5723 {
5724 return (uint16x4_t) __a;
5725 }
5726
5727 __extension__ extern __inline uint16x4_t
5728 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5729 vreinterpret_u16_s8 (int8x8_t __a)
5730 {
5731 return (uint16x4_t) __a;
5732 }
5733
5734 __extension__ extern __inline uint16x4_t
5735 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5736 vreinterpret_u16_s16 (int16x4_t __a)
5737 {
5738 return (uint16x4_t) __a;
5739 }
5740
5741 __extension__ extern __inline uint16x4_t
5742 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5743 vreinterpret_u16_s32 (int32x2_t __a)
5744 {
5745 return (uint16x4_t) __a;
5746 }
5747
5748 __extension__ extern __inline uint16x4_t
5749 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5750 vreinterpret_u16_s64 (int64x1_t __a)
5751 {
5752 return (uint16x4_t) __a;
5753 }
5754
5755 __extension__ extern __inline uint16x4_t
5756 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5757 vreinterpret_u16_f32 (float32x2_t __a)
5758 {
5759 return (uint16x4_t) __a;
5760 }
5761
5762 __extension__ extern __inline uint16x4_t
5763 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5764 vreinterpret_u16_u8 (uint8x8_t __a)
5765 {
5766 return (uint16x4_t) __a;
5767 }
5768
5769 __extension__ extern __inline uint16x4_t
5770 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5771 vreinterpret_u16_u32 (uint32x2_t __a)
5772 {
5773 return (uint16x4_t) __a;
5774 }
5775
5776 __extension__ extern __inline uint16x4_t
5777 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5778 vreinterpret_u16_u64 (uint64x1_t __a)
5779 {
5780 return (uint16x4_t) __a;
5781 }
5782
5783 __extension__ extern __inline uint16x4_t
5784 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5785 vreinterpret_u16_p8 (poly8x8_t __a)
5786 {
5787 return (uint16x4_t) __a;
5788 }
5789
5790 __extension__ extern __inline uint16x4_t
5791 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5792 vreinterpret_u16_p16 (poly16x4_t __a)
5793 {
5794 return (uint16x4_t) __a;
5795 }
5796
5797 __extension__ extern __inline uint16x4_t
5798 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5799 vreinterpret_u16_p64 (poly64x1_t __a)
5800 {
5801 return (uint16x4_t) __a;
5802 }
5803
5804 __extension__ extern __inline uint16x8_t
5805 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5806 vreinterpretq_u16_f64 (float64x2_t __a)
5807 {
5808 return (uint16x8_t) __a;
5809 }
5810
5811 __extension__ extern __inline uint16x8_t
5812 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5813 vreinterpretq_u16_s8 (int8x16_t __a)
5814 {
5815 return (uint16x8_t) __a;
5816 }
5817
5818 __extension__ extern __inline uint16x8_t
5819 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5820 vreinterpretq_u16_s16 (int16x8_t __a)
5821 {
5822 return (uint16x8_t) __a;
5823 }
5824
5825 __extension__ extern __inline uint16x8_t
5826 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5827 vreinterpretq_u16_s32 (int32x4_t __a)
5828 {
5829 return (uint16x8_t) __a;
5830 }
5831
5832 __extension__ extern __inline uint16x8_t
5833 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5834 vreinterpretq_u16_s64 (int64x2_t __a)
5835 {
5836 return (uint16x8_t) __a;
5837 }
5838
5839 __extension__ extern __inline uint16x8_t
5840 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5841 vreinterpretq_u16_f16 (float16x8_t __a)
5842 {
5843 return (uint16x8_t) __a;
5844 }
5845
5846 __extension__ extern __inline uint16x8_t
5847 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5848 vreinterpretq_u16_f32 (float32x4_t __a)
5849 {
5850 return (uint16x8_t) __a;
5851 }
5852
5853 __extension__ extern __inline uint16x8_t
5854 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5855 vreinterpretq_u16_u8 (uint8x16_t __a)
5856 {
5857 return (uint16x8_t) __a;
5858 }
5859
5860 __extension__ extern __inline uint16x8_t
5861 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5862 vreinterpretq_u16_u32 (uint32x4_t __a)
5863 {
5864 return (uint16x8_t) __a;
5865 }
5866
5867 __extension__ extern __inline uint16x8_t
5868 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5869 vreinterpretq_u16_u64 (uint64x2_t __a)
5870 {
5871 return (uint16x8_t) __a;
5872 }
5873
5874 __extension__ extern __inline uint16x8_t
5875 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5876 vreinterpretq_u16_p8 (poly8x16_t __a)
5877 {
5878 return (uint16x8_t) __a;
5879 }
5880
5881 __extension__ extern __inline uint16x8_t
5882 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5883 vreinterpretq_u16_p16 (poly16x8_t __a)
5884 {
5885 return (uint16x8_t) __a;
5886 }
5887
5888 __extension__ extern __inline uint16x8_t
5889 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5890 vreinterpretq_u16_p64 (poly64x2_t __a)
5891 {
5892 return (uint16x8_t) __a;
5893 }
5894
5895 __extension__ extern __inline uint16x8_t
5896 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5897 vreinterpretq_u16_p128 (poly128_t __a)
5898 {
5899 return (uint16x8_t) __a;
5900 }
5901
5902 __extension__ extern __inline uint32x2_t
5903 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5904 vreinterpret_u32_f16 (float16x4_t __a)
5905 {
5906 return (uint32x2_t) __a;
5907 }
5908
5909 __extension__ extern __inline uint32x2_t
5910 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5911 vreinterpret_u32_f64 (float64x1_t __a)
5912 {
5913 return (uint32x2_t) __a;
5914 }
5915
5916 __extension__ extern __inline uint32x2_t
5917 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5918 vreinterpret_u32_s8 (int8x8_t __a)
5919 {
5920 return (uint32x2_t) __a;
5921 }
5922
5923 __extension__ extern __inline uint32x2_t
5924 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5925 vreinterpret_u32_s16 (int16x4_t __a)
5926 {
5927 return (uint32x2_t) __a;
5928 }
5929
5930 __extension__ extern __inline uint32x2_t
5931 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5932 vreinterpret_u32_s32 (int32x2_t __a)
5933 {
5934 return (uint32x2_t) __a;
5935 }
5936
5937 __extension__ extern __inline uint32x2_t
5938 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5939 vreinterpret_u32_s64 (int64x1_t __a)
5940 {
5941 return (uint32x2_t) __a;
5942 }
5943
5944 __extension__ extern __inline uint32x2_t
5945 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5946 vreinterpret_u32_f32 (float32x2_t __a)
5947 {
5948 return (uint32x2_t) __a;
5949 }
5950
5951 __extension__ extern __inline uint32x2_t
5952 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5953 vreinterpret_u32_u8 (uint8x8_t __a)
5954 {
5955 return (uint32x2_t) __a;
5956 }
5957
5958 __extension__ extern __inline uint32x2_t
5959 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5960 vreinterpret_u32_u16 (uint16x4_t __a)
5961 {
5962 return (uint32x2_t) __a;
5963 }
5964
5965 __extension__ extern __inline uint32x2_t
5966 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5967 vreinterpret_u32_u64 (uint64x1_t __a)
5968 {
5969 return (uint32x2_t) __a;
5970 }
5971
5972 __extension__ extern __inline uint32x2_t
5973 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5974 vreinterpret_u32_p8 (poly8x8_t __a)
5975 {
5976 return (uint32x2_t) __a;
5977 }
5978
5979 __extension__ extern __inline uint32x2_t
5980 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5981 vreinterpret_u32_p16 (poly16x4_t __a)
5982 {
5983 return (uint32x2_t) __a;
5984 }
5985
5986 __extension__ extern __inline uint32x2_t
5987 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5988 vreinterpret_u32_p64 (poly64x1_t __a)
5989 {
5990 return (uint32x2_t) __a;
5991 }
5992
5993 __extension__ extern __inline uint32x4_t
5994 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5995 vreinterpretq_u32_f64 (float64x2_t __a)
5996 {
5997 return (uint32x4_t) __a;
5998 }
5999
6000 __extension__ extern __inline uint32x4_t
6001 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6002 vreinterpretq_u32_s8 (int8x16_t __a)
6003 {
6004 return (uint32x4_t) __a;
6005 }
6006
6007 __extension__ extern __inline uint32x4_t
6008 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6009 vreinterpretq_u32_s16 (int16x8_t __a)
6010 {
6011 return (uint32x4_t) __a;
6012 }
6013
6014 __extension__ extern __inline uint32x4_t
6015 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6016 vreinterpretq_u32_s32 (int32x4_t __a)
6017 {
6018 return (uint32x4_t) __a;
6019 }
6020
6021 __extension__ extern __inline uint32x4_t
6022 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6023 vreinterpretq_u32_s64 (int64x2_t __a)
6024 {
6025 return (uint32x4_t) __a;
6026 }
6027
6028 __extension__ extern __inline uint32x4_t
6029 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6030 vreinterpretq_u32_f16 (float16x8_t __a)
6031 {
6032 return (uint32x4_t) __a;
6033 }
6034
6035 __extension__ extern __inline uint32x4_t
6036 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6037 vreinterpretq_u32_f32 (float32x4_t __a)
6038 {
6039 return (uint32x4_t) __a;
6040 }
6041
6042 __extension__ extern __inline uint32x4_t
6043 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6044 vreinterpretq_u32_u8 (uint8x16_t __a)
6045 {
6046 return (uint32x4_t) __a;
6047 }
6048
6049 __extension__ extern __inline uint32x4_t
6050 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6051 vreinterpretq_u32_u16 (uint16x8_t __a)
6052 {
6053 return (uint32x4_t) __a;
6054 }
6055
6056 __extension__ extern __inline uint32x4_t
6057 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6058 vreinterpretq_u32_u64 (uint64x2_t __a)
6059 {
6060 return (uint32x4_t) __a;
6061 }
6062
6063 __extension__ extern __inline uint32x4_t
6064 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6065 vreinterpretq_u32_p8 (poly8x16_t __a)
6066 {
6067 return (uint32x4_t) __a;
6068 }
6069
6070 __extension__ extern __inline uint32x4_t
6071 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6072 vreinterpretq_u32_p16 (poly16x8_t __a)
6073 {
6074 return (uint32x4_t) __a;
6075 }
6076
6077 __extension__ extern __inline uint32x4_t
6078 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6079 vreinterpretq_u32_p64 (poly64x2_t __a)
6080 {
6081 return (uint32x4_t) __a;
6082 }
6083
6084 __extension__ extern __inline uint32x4_t
6085 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6086 vreinterpretq_u32_p128 (poly128_t __a)
6087 {
6088 return (uint32x4_t) __a;
6089 }
6090
6091 __extension__ extern __inline float64x2_t
6092 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6093 vreinterpretq_f64_p128 (poly128_t __a)
6094 {
6095 return (float64x2_t) __a;
6096 }
6097
6098 __extension__ extern __inline poly128_t
6099 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6100 vreinterpretq_p128_f64 (float64x2_t __a)
6101 {
6102 return (poly128_t) __a;
6103 }
6104
6105 /* vset_lane */
6106
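/* vset_lane_<type> (__elem, __vec, __index) returns a copy of __vec with
   lane __index replaced by __elem; __index is expected to be a constant
   lane index in range for the vector.  A hedged usage sketch with
   illustrative values (vdup_n_f32 is declared elsewhere in this header):

     float32x2_t v = vdup_n_f32 (0.0f);
     v = vset_lane_f32 (1.5f, v, 1);   /* lane 1 becomes 1.5f */
*/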
6107 __extension__ extern __inline float16x4_t
6108 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6109 vset_lane_f16 (float16_t __elem, float16x4_t __vec, const int __index)
6110 {
6111 return __aarch64_vset_lane_any (__elem, __vec, __index);
6112 }
6113
6114 __extension__ extern __inline float32x2_t
6115 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6116 vset_lane_f32 (float32_t __elem, float32x2_t __vec, const int __index)
6117 {
6118 return __aarch64_vset_lane_any (__elem, __vec, __index);
6119 }
6120
6121 __extension__ extern __inline float64x1_t
6122 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6123 vset_lane_f64 (float64_t __elem, float64x1_t __vec, const int __index)
6124 {
6125 return __aarch64_vset_lane_any (__elem, __vec, __index);
6126 }
6127
6128 __extension__ extern __inline poly8x8_t
6129 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6130 vset_lane_p8 (poly8_t __elem, poly8x8_t __vec, const int __index)
6131 {
6132 return __aarch64_vset_lane_any (__elem, __vec, __index);
6133 }
6134
6135 __extension__ extern __inline poly16x4_t
6136 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6137 vset_lane_p16 (poly16_t __elem, poly16x4_t __vec, const int __index)
6138 {
6139 return __aarch64_vset_lane_any (__elem, __vec, __index);
6140 }
6141
6142 __extension__ extern __inline poly64x1_t
6143 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6144 vset_lane_p64 (poly64_t __elem, poly64x1_t __vec, const int __index)
6145 {
6146 return __aarch64_vset_lane_any (__elem, __vec, __index);
6147 }
6148
6149 __extension__ extern __inline int8x8_t
6150 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6151 vset_lane_s8 (int8_t __elem, int8x8_t __vec, const int __index)
6152 {
6153 return __aarch64_vset_lane_any (__elem, __vec, __index);
6154 }
6155
6156 __extension__ extern __inline int16x4_t
6157 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6158 vset_lane_s16 (int16_t __elem, int16x4_t __vec, const int __index)
6159 {
6160 return __aarch64_vset_lane_any (__elem, __vec, __index);
6161 }
6162
6163 __extension__ extern __inline int32x2_t
6164 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6165 vset_lane_s32 (int32_t __elem, int32x2_t __vec, const int __index)
6166 {
6167 return __aarch64_vset_lane_any (__elem, __vec, __index);
6168 }
6169
6170 __extension__ extern __inline int64x1_t
6171 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6172 vset_lane_s64 (int64_t __elem, int64x1_t __vec, const int __index)
6173 {
6174 return __aarch64_vset_lane_any (__elem, __vec, __index);
6175 }
6176
6177 __extension__ extern __inline uint8x8_t
6178 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6179 vset_lane_u8 (uint8_t __elem, uint8x8_t __vec, const int __index)
6180 {
6181 return __aarch64_vset_lane_any (__elem, __vec, __index);
6182 }
6183
6184 __extension__ extern __inline uint16x4_t
6185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6186 vset_lane_u16 (uint16_t __elem, uint16x4_t __vec, const int __index)
6187 {
6188 return __aarch64_vset_lane_any (__elem, __vec, __index);
6189 }
6190
6191 __extension__ extern __inline uint32x2_t
6192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6193 vset_lane_u32 (uint32_t __elem, uint32x2_t __vec, const int __index)
6194 {
6195 return __aarch64_vset_lane_any (__elem, __vec, __index);
6196 }
6197
6198 __extension__ extern __inline uint64x1_t
6199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6200 vset_lane_u64 (uint64_t __elem, uint64x1_t __vec, const int __index)
6201 {
6202 return __aarch64_vset_lane_any (__elem, __vec, __index);
6203 }
6204
6205 /* vsetq_lane */
6206
6207 __extension__ extern __inline float16x8_t
6208 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6209 vsetq_lane_f16 (float16_t __elem, float16x8_t __vec, const int __index)
6210 {
6211 return __aarch64_vset_lane_any (__elem, __vec, __index);
6212 }
6213
6214 __extension__ extern __inline float32x4_t
6215 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6216 vsetq_lane_f32 (float32_t __elem, float32x4_t __vec, const int __index)
6217 {
6218 return __aarch64_vset_lane_any (__elem, __vec, __index);
6219 }
6220
6221 __extension__ extern __inline float64x2_t
6222 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6223 vsetq_lane_f64 (float64_t __elem, float64x2_t __vec, const int __index)
6224 {
6225 return __aarch64_vset_lane_any (__elem, __vec, __index);
6226 }
6227
6228 __extension__ extern __inline poly8x16_t
6229 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6230 vsetq_lane_p8 (poly8_t __elem, poly8x16_t __vec, const int __index)
6231 {
6232 return __aarch64_vset_lane_any (__elem, __vec, __index);
6233 }
6234
6235 __extension__ extern __inline poly16x8_t
6236 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6237 vsetq_lane_p16 (poly16_t __elem, poly16x8_t __vec, const int __index)
6238 {
6239 return __aarch64_vset_lane_any (__elem, __vec, __index);
6240 }
6241
6242 __extension__ extern __inline poly64x2_t
6243 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6244 vsetq_lane_p64 (poly64_t __elem, poly64x2_t __vec, const int __index)
6245 {
6246 return __aarch64_vset_lane_any (__elem, __vec, __index);
6247 }
6248
6249 __extension__ extern __inline int8x16_t
6250 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6251 vsetq_lane_s8 (int8_t __elem, int8x16_t __vec, const int __index)
6252 {
6253 return __aarch64_vset_lane_any (__elem, __vec, __index);
6254 }
6255
6256 __extension__ extern __inline int16x8_t
6257 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6258 vsetq_lane_s16 (int16_t __elem, int16x8_t __vec, const int __index)
6259 {
6260 return __aarch64_vset_lane_any (__elem, __vec, __index);
6261 }
6262
6263 __extension__ extern __inline int32x4_t
6264 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6265 vsetq_lane_s32 (int32_t __elem, int32x4_t __vec, const int __index)
6266 {
6267 return __aarch64_vset_lane_any (__elem, __vec, __index);
6268 }
6269
6270 __extension__ extern __inline int64x2_t
6271 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6272 vsetq_lane_s64 (int64_t __elem, int64x2_t __vec, const int __index)
6273 {
6274 return __aarch64_vset_lane_any (__elem, __vec, __index);
6275 }
6276
6277 __extension__ extern __inline uint8x16_t
6278 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6279 vsetq_lane_u8 (uint8_t __elem, uint8x16_t __vec, const int __index)
6280 {
6281 return __aarch64_vset_lane_any (__elem, __vec, __index);
6282 }
6283
6284 __extension__ extern __inline uint16x8_t
6285 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6286 vsetq_lane_u16 (uint16_t __elem, uint16x8_t __vec, const int __index)
6287 {
6288 return __aarch64_vset_lane_any (__elem, __vec, __index);
6289 }
6290
6291 __extension__ extern __inline uint32x4_t
6292 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6293 vsetq_lane_u32 (uint32_t __elem, uint32x4_t __vec, const int __index)
6294 {
6295 return __aarch64_vset_lane_any (__elem, __vec, __index);
6296 }
6297
6298 __extension__ extern __inline uint64x2_t
6299 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6300 vsetq_lane_u64 (uint64_t __elem, uint64x2_t __vec, const int __index)
6301 {
6302 return __aarch64_vset_lane_any (__elem, __vec, __index);
6303 }
6304
6305 #define __GET_LOW(__TYPE) \
6306 uint64x2_t tmp = vreinterpretq_u64_##__TYPE (__a); \
6307 uint64x1_t lo = vcreate_u64 (vgetq_lane_u64 (tmp, 0)); \
6308 return vreinterpret_##__TYPE##_u64 (lo);
6309
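/* __GET_LOW views the 128-bit argument as two 64-bit lanes, extracts
   lane 0 and reinterprets that half back to the requested element type.
   For example, __GET_LOW (s16) inside vget_low_s16 expands to the
   equivalent of:

     uint64x2_t tmp = vreinterpretq_u64_s16 (__a);
     uint64x1_t lo = vcreate_u64 (vgetq_lane_u64 (tmp, 0));
     return vreinterpret_s16_u64 (lo);

   __GET_HIGH further down is identical except that it extracts lane 1.  */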
6310 __extension__ extern __inline float16x4_t
6311 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6312 vget_low_f16 (float16x8_t __a)
6313 {
6314 __GET_LOW (f16);
6315 }
6316
6317 __extension__ extern __inline float32x2_t
6318 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6319 vget_low_f32 (float32x4_t __a)
6320 {
6321 __GET_LOW (f32);
6322 }
6323
6324 __extension__ extern __inline float64x1_t
6325 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6326 vget_low_f64 (float64x2_t __a)
6327 {
6328 return (float64x1_t) {vgetq_lane_f64 (__a, 0)};
6329 }
6330
6331 __extension__ extern __inline poly8x8_t
6332 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6333 vget_low_p8 (poly8x16_t __a)
6334 {
6335 __GET_LOW (p8);
6336 }
6337
6338 __extension__ extern __inline poly16x4_t
6339 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6340 vget_low_p16 (poly16x8_t __a)
6341 {
6342 __GET_LOW (p16);
6343 }
6344
6345 __extension__ extern __inline poly64x1_t
6346 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6347 vget_low_p64 (poly64x2_t __a)
6348 {
6349 __GET_LOW (p64);
6350 }
6351
6352 __extension__ extern __inline int8x8_t
6353 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6354 vget_low_s8 (int8x16_t __a)
6355 {
6356 __GET_LOW (s8);
6357 }
6358
6359 __extension__ extern __inline int16x4_t
6360 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6361 vget_low_s16 (int16x8_t __a)
6362 {
6363 __GET_LOW (s16);
6364 }
6365
6366 __extension__ extern __inline int32x2_t
6367 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6368 vget_low_s32 (int32x4_t __a)
6369 {
6370 __GET_LOW (s32);
6371 }
6372
6373 __extension__ extern __inline int64x1_t
6374 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6375 vget_low_s64 (int64x2_t __a)
6376 {
6377 __GET_LOW (s64);
6378 }
6379
6380 __extension__ extern __inline uint8x8_t
6381 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6382 vget_low_u8 (uint8x16_t __a)
6383 {
6384 __GET_LOW (u8);
6385 }
6386
6387 __extension__ extern __inline uint16x4_t
6388 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6389 vget_low_u16 (uint16x8_t __a)
6390 {
6391 __GET_LOW (u16);
6392 }
6393
6394 __extension__ extern __inline uint32x2_t
6395 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6396 vget_low_u32 (uint32x4_t __a)
6397 {
6398 __GET_LOW (u32);
6399 }
6400
6401 __extension__ extern __inline uint64x1_t
6402 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6403 vget_low_u64 (uint64x2_t __a)
6404 {
6405 return vcreate_u64 (vgetq_lane_u64 (__a, 0));
6406 }
6407
6408 #undef __GET_LOW
6409
6410 #define __GET_HIGH(__TYPE) \
6411 uint64x2_t tmp = vreinterpretq_u64_##__TYPE (__a); \
6412 uint64x1_t hi = vcreate_u64 (vgetq_lane_u64 (tmp, 1)); \
6413 return vreinterpret_##__TYPE##_u64 (hi);
6414
6415 __extension__ extern __inline float16x4_t
6416 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6417 vget_high_f16 (float16x8_t __a)
6418 {
6419 __GET_HIGH (f16);
6420 }
6421
6422 __extension__ extern __inline float32x2_t
6423 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6424 vget_high_f32 (float32x4_t __a)
6425 {
6426 __GET_HIGH (f32);
6427 }
6428
6429 __extension__ extern __inline float64x1_t
6430 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6431 vget_high_f64 (float64x2_t __a)
6432 {
6433 __GET_HIGH (f64);
6434 }
6435
6436 __extension__ extern __inline poly8x8_t
6437 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6438 vget_high_p8 (poly8x16_t __a)
6439 {
6440 __GET_HIGH (p8);
6441 }
6442
6443 __extension__ extern __inline poly16x4_t
6444 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6445 vget_high_p16 (poly16x8_t __a)
6446 {
6447 __GET_HIGH (p16);
6448 }
6449
6450 __extension__ extern __inline poly64x1_t
6451 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6452 vget_high_p64 (poly64x2_t __a)
6453 {
6454 __GET_HIGH (p64);
6455 }
6456
6457 __extension__ extern __inline int8x8_t
6458 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6459 vget_high_s8 (int8x16_t __a)
6460 {
6461 __GET_HIGH (s8);
6462 }
6463
6464 __extension__ extern __inline int16x4_t
6465 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6466 vget_high_s16 (int16x8_t __a)
6467 {
6468 __GET_HIGH (s16);
6469 }
6470
6471 __extension__ extern __inline int32x2_t
6472 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6473 vget_high_s32 (int32x4_t __a)
6474 {
6475 __GET_HIGH (s32);
6476 }
6477
6478 __extension__ extern __inline int64x1_t
6479 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6480 vget_high_s64 (int64x2_t __a)
6481 {
6482 __GET_HIGH (s64);
6483 }
6484
6485 __extension__ extern __inline uint8x8_t
6486 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6487 vget_high_u8 (uint8x16_t __a)
6488 {
6489 __GET_HIGH (u8);
6490 }
6491
6492 __extension__ extern __inline uint16x4_t
6493 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6494 vget_high_u16 (uint16x8_t __a)
6495 {
6496 __GET_HIGH (u16);
6497 }
6498
6499 __extension__ extern __inline uint32x2_t
6500 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6501 vget_high_u32 (uint32x4_t __a)
6502 {
6503 __GET_HIGH (u32);
6504 }
6505
6506 #undef __GET_HIGH
6507
6508 __extension__ extern __inline uint64x1_t
6509 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6510 vget_high_u64 (uint64x2_t __a)
6511 {
6512 return vcreate_u64 (vgetq_lane_u64 (__a, 1));
6513 }
6514
6515 __extension__ extern __inline int8x16_t
6516 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6517 vcombine_s8 (int8x8_t __a, int8x8_t __b)
6518 {
6519 return (int8x16_t) __builtin_aarch64_combinev8qi (__a, __b);
6520 }
6521
6522 __extension__ extern __inline int16x8_t
6523 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6524 vcombine_s16 (int16x4_t __a, int16x4_t __b)
6525 {
6526 return (int16x8_t) __builtin_aarch64_combinev4hi (__a, __b);
6527 }
6528
6529 __extension__ extern __inline int32x4_t
6530 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6531 vcombine_s32 (int32x2_t __a, int32x2_t __b)
6532 {
6533 return (int32x4_t) __builtin_aarch64_combinev2si (__a, __b);
6534 }
6535
6536 __extension__ extern __inline int64x2_t
6537 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6538 vcombine_s64 (int64x1_t __a, int64x1_t __b)
6539 {
6540 return __builtin_aarch64_combinedi (__a[0], __b[0]);
6541 }
6542
6543 __extension__ extern __inline float16x8_t
6544 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6545 vcombine_f16 (float16x4_t __a, float16x4_t __b)
6546 {
6547 return __builtin_aarch64_combinev4hf (__a, __b);
6548 }
6549
6550 __extension__ extern __inline float32x4_t
6551 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6552 vcombine_f32 (float32x2_t __a, float32x2_t __b)
6553 {
6554 return (float32x4_t) __builtin_aarch64_combinev2sf (__a, __b);
6555 }
6556
6557 __extension__ extern __inline uint8x16_t
6558 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6559 vcombine_u8 (uint8x8_t __a, uint8x8_t __b)
6560 {
6561 return (uint8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a,
6562 (int8x8_t) __b);
6563 }
6564
6565 __extension__ extern __inline uint16x8_t
6566 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6567 vcombine_u16 (uint16x4_t __a, uint16x4_t __b)
6568 {
6569 return (uint16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a,
6570 (int16x4_t) __b);
6571 }
6572
6573 __extension__ extern __inline uint32x4_t
6574 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6575 vcombine_u32 (uint32x2_t __a, uint32x2_t __b)
6576 {
6577 return (uint32x4_t) __builtin_aarch64_combinev2si ((int32x2_t) __a,
6578 (int32x2_t) __b);
6579 }
6580
6581 __extension__ extern __inline uint64x2_t
6582 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6583 vcombine_u64 (uint64x1_t __a, uint64x1_t __b)
6584 {
6585 return (uint64x2_t) __builtin_aarch64_combinedi (__a[0], __b[0]);
6586 }
6587
6588 __extension__ extern __inline float64x2_t
6589 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6590 vcombine_f64 (float64x1_t __a, float64x1_t __b)
6591 {
6592 return __builtin_aarch64_combinedf (__a[0], __b[0]);
6593 }
6594
6595 __extension__ extern __inline poly8x16_t
6596 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6597 vcombine_p8 (poly8x8_t __a, poly8x8_t __b)
6598 {
6599 return (poly8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a,
6600 (int8x8_t) __b);
6601 }
6602
6603 __extension__ extern __inline poly16x8_t
6604 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6605 vcombine_p16 (poly16x4_t __a, poly16x4_t __b)
6606 {
6607 return (poly16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a,
6608 (int16x4_t) __b);
6609 }
6610
6611 __extension__ extern __inline poly64x2_t
6612 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6613 vcombine_p64 (poly64x1_t __a, poly64x1_t __b)
6614 {
6615 return (poly64x2_t) __builtin_aarch64_combinedi_ppp (__a[0], __b[0]);
6616 }
6617
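/* vcombine_<type> (__a, __b) concatenates two 64-bit vectors into one
   128-bit vector, with __a in the lower half and __b in the upper half.
   A hedged usage sketch with illustrative values (vdup_n_s8 is declared
   elsewhere in this header):

     int8x8_t lo = vdup_n_s8 (1);
     int8x8_t hi = vdup_n_s8 (2);
     int8x16_t all = vcombine_s8 (lo, hi);
*/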
6618 /* Start of temporary inline asm implementations. */
6619
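/* Several of the intrinsics that follow are still written with GCC
   extended inline asm rather than compiler builtins.  In those asm
   statements, "w" denotes any FP/Advanced SIMD register, "x" restricts the
   operand to V0-V15 (which the by-element forms of the 16-bit multiply
   instructions require), "0" ties an input to output operand 0, and "i"
   (used in the lane macros further down) requires an integer constant.  */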
6620 __extension__ extern __inline int8x8_t
6621 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6622 vaba_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
6623 {
6624 return __builtin_aarch64_sabav8qi (__a, __b, __c);
6625 }
6626
6627 __extension__ extern __inline int16x4_t
6628 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6629 vaba_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
6630 {
6631 return __builtin_aarch64_sabav4hi (__a, __b, __c);
6632 }
6633
6634 __extension__ extern __inline int32x2_t
6635 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6636 vaba_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
6637 {
6638 return __builtin_aarch64_sabav2si (__a, __b, __c);
6639 }
6640
6641 __extension__ extern __inline uint8x8_t
6642 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6643 vaba_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
6644 {
6645 return __builtin_aarch64_uabav8qi_uuuu (__a, __b, __c);
6646 }
6647
6648 __extension__ extern __inline uint16x4_t
6649 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6650 vaba_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
6651 {
6652 return __builtin_aarch64_uabav4hi_uuuu (__a, __b, __c);
6653 }
6654
6655 __extension__ extern __inline uint32x2_t
6656 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6657 vaba_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
6658 {
6659 return __builtin_aarch64_uabav2si_uuuu (__a, __b, __c);
6660 }
6661
6662 __extension__ extern __inline int16x8_t
6663 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6664 vabal_high_s8 (int16x8_t __a, int8x16_t __b, int8x16_t __c)
6665 {
6666 int16x8_t __result;
6667 __asm__ ("sabal2 %0.8h,%2.16b,%3.16b"
6668 : "=w"(__result)
6669 : "0"(__a), "w"(__b), "w"(__c)
6670 : /* No clobbers */);
6671 return __result;
6672 }
6673
6674 __extension__ extern __inline int32x4_t
6675 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6676 vabal_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
6677 {
6678 int32x4_t __result;
6679 __asm__ ("sabal2 %0.4s,%2.8h,%3.8h"
6680 : "=w"(__result)
6681 : "0"(__a), "w"(__b), "w"(__c)
6682 : /* No clobbers */);
6683 return __result;
6684 }
6685
6686 __extension__ extern __inline int64x2_t
6687 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6688 vabal_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
6689 {
6690 int64x2_t __result;
6691 __asm__ ("sabal2 %0.2d,%2.4s,%3.4s"
6692 : "=w"(__result)
6693 : "0"(__a), "w"(__b), "w"(__c)
6694 : /* No clobbers */);
6695 return __result;
6696 }
6697
6698 __extension__ extern __inline uint16x8_t
6699 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6700 vabal_high_u8 (uint16x8_t __a, uint8x16_t __b, uint8x16_t __c)
6701 {
6702 uint16x8_t __result;
6703 __asm__ ("uabal2 %0.8h,%2.16b,%3.16b"
6704 : "=w"(__result)
6705 : "0"(__a), "w"(__b), "w"(__c)
6706 : /* No clobbers */);
6707 return __result;
6708 }
6709
6710 __extension__ extern __inline uint32x4_t
6711 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6712 vabal_high_u16 (uint32x4_t __a, uint16x8_t __b, uint16x8_t __c)
6713 {
6714 uint32x4_t __result;
6715 __asm__ ("uabal2 %0.4s,%2.8h,%3.8h"
6716 : "=w"(__result)
6717 : "0"(__a), "w"(__b), "w"(__c)
6718 : /* No clobbers */);
6719 return __result;
6720 }
6721
6722 __extension__ extern __inline uint64x2_t
6723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6724 vabal_high_u32 (uint64x2_t __a, uint32x4_t __b, uint32x4_t __c)
6725 {
6726 uint64x2_t __result;
6727 __asm__ ("uabal2 %0.2d,%2.4s,%3.4s"
6728 : "=w"(__result)
6729 : "0"(__a), "w"(__b), "w"(__c)
6730 : /* No clobbers */);
6731 return __result;
6732 }
6733
6734 __extension__ extern __inline int16x8_t
6735 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6736 vabal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
6737 {
6738 int16x8_t __result;
6739 __asm__ ("sabal %0.8h,%2.8b,%3.8b"
6740 : "=w"(__result)
6741 : "0"(__a), "w"(__b), "w"(__c)
6742 : /* No clobbers */);
6743 return __result;
6744 }
6745
6746 __extension__ extern __inline int32x4_t
6747 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6748 vabal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
6749 {
6750 int32x4_t __result;
6751 __asm__ ("sabal %0.4s,%2.4h,%3.4h"
6752 : "=w"(__result)
6753 : "0"(__a), "w"(__b), "w"(__c)
6754 : /* No clobbers */);
6755 return __result;
6756 }
6757
6758 __extension__ extern __inline int64x2_t
6759 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6760 vabal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
6761 {
6762 int64x2_t __result;
6763 __asm__ ("sabal %0.2d,%2.2s,%3.2s"
6764 : "=w"(__result)
6765 : "0"(__a), "w"(__b), "w"(__c)
6766 : /* No clobbers */);
6767 return __result;
6768 }
6769
6770 __extension__ extern __inline uint16x8_t
6771 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6772 vabal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
6773 {
6774 uint16x8_t __result;
6775 __asm__ ("uabal %0.8h,%2.8b,%3.8b"
6776 : "=w"(__result)
6777 : "0"(__a), "w"(__b), "w"(__c)
6778 : /* No clobbers */);
6779 return __result;
6780 }
6781
6782 __extension__ extern __inline uint32x4_t
6783 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6784 vabal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
6785 {
6786 uint32x4_t __result;
6787 __asm__ ("uabal %0.4s,%2.4h,%3.4h"
6788 : "=w"(__result)
6789 : "0"(__a), "w"(__b), "w"(__c)
6790 : /* No clobbers */);
6791 return __result;
6792 }
6793
6794 __extension__ extern __inline uint64x2_t
6795 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6796 vabal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
6797 {
6798 uint64x2_t __result;
6799 __asm__ ("uabal %0.2d,%2.2s,%3.2s"
6800 : "=w"(__result)
6801 : "0"(__a), "w"(__b), "w"(__c)
6802 : /* No clobbers */);
6803 return __result;
6804 }
6805
6806 __extension__ extern __inline int8x16_t
6807 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6808 vabaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
6809 {
6810 return __builtin_aarch64_sabav16qi (__a, __b, __c);
6811 }
6812
6813 __extension__ extern __inline int16x8_t
6814 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6815 vabaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
6816 {
6817 return __builtin_aarch64_sabav8hi (__a, __b, __c);
6818 }
6819
6820 __extension__ extern __inline int32x4_t
6821 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6822 vabaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
6823 {
6824 return __builtin_aarch64_sabav4si (__a, __b, __c);
6825 }
6826
6827 __extension__ extern __inline uint8x16_t
6828 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6829 vabaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
6830 {
6831 return __builtin_aarch64_uabav16qi_uuuu (__a, __b, __c);
6832 }
6833
6834 __extension__ extern __inline uint16x8_t
6835 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6836 vabaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
6837 {
6838 return __builtin_aarch64_uabav8hi_uuuu (__a, __b, __c);
6839 }
6840
6841 __extension__ extern __inline uint32x4_t
6842 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6843 vabaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
6844 {
6845 return __builtin_aarch64_uabav4si_uuuu (__a, __b, __c);
6846 }
6847
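/* vabd_<type> (__a, __b) computes the lane-wise absolute difference
   |__a - __b|; vaba_<type> above additionally accumulates that difference
   into its first argument, and the vabdl/vabal forms widen the result to
   double the element width.  A hedged usage sketch with illustrative
   values (vdup_n_u8 is declared elsewhere in this header):

     uint8x8_t x = vdup_n_u8 (10);
     uint8x8_t y = vdup_n_u8 (250);
     uint8x8_t d = vabd_u8 (x, y);   /* each lane becomes 240 */
*/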
6848 __extension__ extern __inline int8x8_t
6849 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6850 vabd_s8 (int8x8_t __a, int8x8_t __b)
6851 {
6852 return __builtin_aarch64_sabdv8qi (__a, __b);
6853 }
6854
6855 __extension__ extern __inline int16x4_t
6856 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6857 vabd_s16 (int16x4_t __a, int16x4_t __b)
6858 {
6859 return __builtin_aarch64_sabdv4hi (__a, __b);
6860 }
6861
6862 __extension__ extern __inline int32x2_t
6863 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6864 vabd_s32 (int32x2_t __a, int32x2_t __b)
6865 {
6866 return __builtin_aarch64_sabdv2si (__a, __b);
6867 }
6868
6869 __extension__ extern __inline uint8x8_t
6870 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6871 vabd_u8 (uint8x8_t __a, uint8x8_t __b)
6872 {
6873 return __builtin_aarch64_uabdv8qi_uuu (__a, __b);
6874 }
6875
6876 __extension__ extern __inline uint16x4_t
6877 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6878 vabd_u16 (uint16x4_t __a, uint16x4_t __b)
6879 {
6880 return __builtin_aarch64_uabdv4hi_uuu (__a, __b);
6881 }
6882
6883 __extension__ extern __inline uint32x2_t
6884 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6885 vabd_u32 (uint32x2_t __a, uint32x2_t __b)
6886 {
6887 return __builtin_aarch64_uabdv2si_uuu (__a, __b);
6888 }
6889
6890 __extension__ extern __inline int16x8_t
6891 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6892 vabdl_high_s8 (int8x16_t __a, int8x16_t __b)
6893 {
6894 int16x8_t __result;
6895 __asm__ ("sabdl2 %0.8h,%1.16b,%2.16b"
6896 : "=w"(__result)
6897 : "w"(__a), "w"(__b)
6898 : /* No clobbers */);
6899 return __result;
6900 }
6901
6902 __extension__ extern __inline int32x4_t
6903 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6904 vabdl_high_s16 (int16x8_t __a, int16x8_t __b)
6905 {
6906 int32x4_t __result;
6907 __asm__ ("sabdl2 %0.4s,%1.8h,%2.8h"
6908 : "=w"(__result)
6909 : "w"(__a), "w"(__b)
6910 : /* No clobbers */);
6911 return __result;
6912 }
6913
6914 __extension__ extern __inline int64x2_t
6915 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6916 vabdl_high_s32 (int32x4_t __a, int32x4_t __b)
6917 {
6918 int64x2_t __result;
6919 __asm__ ("sabdl2 %0.2d,%1.4s,%2.4s"
6920 : "=w"(__result)
6921 : "w"(__a), "w"(__b)
6922 : /* No clobbers */);
6923 return __result;
6924 }
6925
6926 __extension__ extern __inline uint16x8_t
6927 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6928 vabdl_high_u8 (uint8x16_t __a, uint8x16_t __b)
6929 {
6930 uint16x8_t __result;
6931 __asm__ ("uabdl2 %0.8h,%1.16b,%2.16b"
6932 : "=w"(__result)
6933 : "w"(__a), "w"(__b)
6934 : /* No clobbers */);
6935 return __result;
6936 }
6937
6938 __extension__ extern __inline uint32x4_t
6939 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6940 vabdl_high_u16 (uint16x8_t __a, uint16x8_t __b)
6941 {
6942 uint32x4_t __result;
6943 __asm__ ("uabdl2 %0.4s,%1.8h,%2.8h"
6944 : "=w"(__result)
6945 : "w"(__a), "w"(__b)
6946 : /* No clobbers */);
6947 return __result;
6948 }
6949
6950 __extension__ extern __inline uint64x2_t
6951 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6952 vabdl_high_u32 (uint32x4_t __a, uint32x4_t __b)
6953 {
6954 uint64x2_t __result;
6955 __asm__ ("uabdl2 %0.2d,%1.4s,%2.4s"
6956 : "=w"(__result)
6957 : "w"(__a), "w"(__b)
6958 : /* No clobbers */);
6959 return __result;
6960 }
6961
6962 __extension__ extern __inline int16x8_t
6963 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6964 vabdl_s8 (int8x8_t __a, int8x8_t __b)
6965 {
6966 int16x8_t __result;
6967 __asm__ ("sabdl %0.8h, %1.8b, %2.8b"
6968 : "=w"(__result)
6969 : "w"(__a), "w"(__b)
6970 : /* No clobbers */);
6971 return __result;
6972 }
6973
6974 __extension__ extern __inline int32x4_t
6975 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6976 vabdl_s16 (int16x4_t __a, int16x4_t __b)
6977 {
6978 int32x4_t __result;
6979 __asm__ ("sabdl %0.4s, %1.4h, %2.4h"
6980 : "=w"(__result)
6981 : "w"(__a), "w"(__b)
6982 : /* No clobbers */);
6983 return __result;
6984 }
6985
6986 __extension__ extern __inline int64x2_t
6987 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
6988 vabdl_s32 (int32x2_t __a, int32x2_t __b)
6989 {
6990 int64x2_t __result;
6991 __asm__ ("sabdl %0.2d, %1.2s, %2.2s"
6992 : "=w"(__result)
6993 : "w"(__a), "w"(__b)
6994 : /* No clobbers */);
6995 return __result;
6996 }
6997
6998 __extension__ extern __inline uint16x8_t
6999 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7000 vabdl_u8 (uint8x8_t __a, uint8x8_t __b)
7001 {
7002 uint16x8_t __result;
7003 __asm__ ("uabdl %0.8h, %1.8b, %2.8b"
7004 : "=w"(__result)
7005 : "w"(__a), "w"(__b)
7006 : /* No clobbers */);
7007 return __result;
7008 }
7009
7010 __extension__ extern __inline uint32x4_t
7011 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7012 vabdl_u16 (uint16x4_t __a, uint16x4_t __b)
7013 {
7014 uint32x4_t __result;
7015 __asm__ ("uabdl %0.4s, %1.4h, %2.4h"
7016 : "=w"(__result)
7017 : "w"(__a), "w"(__b)
7018 : /* No clobbers */);
7019 return __result;
7020 }
7021
7022 __extension__ extern __inline uint64x2_t
7023 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7024 vabdl_u32 (uint32x2_t __a, uint32x2_t __b)
7025 {
7026 uint64x2_t __result;
7027 __asm__ ("uabdl %0.2d, %1.2s, %2.2s"
7028 : "=w"(__result)
7029 : "w"(__a), "w"(__b)
7030 : /* No clobbers */);
7031 return __result;
7032 }
7033
7034 __extension__ extern __inline int8x16_t
7035 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7036 vabdq_s8 (int8x16_t __a, int8x16_t __b)
7037 {
7038 return __builtin_aarch64_sabdv16qi (__a, __b);
7039 }
7040
7041 __extension__ extern __inline int16x8_t
7042 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7043 vabdq_s16 (int16x8_t __a, int16x8_t __b)
7044 {
7045 return __builtin_aarch64_sabdv8hi (__a, __b);
7046 }
7047
7048 __extension__ extern __inline int32x4_t
7049 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7050 vabdq_s32 (int32x4_t __a, int32x4_t __b)
7051 {
7052 return __builtin_aarch64_sabdv4si (__a, __b);
7053 }
7054
7055 __extension__ extern __inline uint8x16_t
7056 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7057 vabdq_u8 (uint8x16_t __a, uint8x16_t __b)
7058 {
7059 return __builtin_aarch64_uabdv16qi_uuu (__a, __b);
7060 }
7061
7062 __extension__ extern __inline uint16x8_t
7063 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7064 vabdq_u16 (uint16x8_t __a, uint16x8_t __b)
7065 {
7066 return __builtin_aarch64_uabdv8hi_uuu (__a, __b);
7067 }
7068
7069 __extension__ extern __inline uint32x4_t
7070 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7071 vabdq_u32 (uint32x4_t __a, uint32x4_t __b)
7072 {
7073 return __builtin_aarch64_uabdv4si_uuu (__a, __b);
7074 }
7075
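/* The vaddlv* intrinsics sum every lane of the source vector into a single
   scalar of twice the element width, so an 8-byte input can accumulate up
   to 8 * 255 without overflow.  A hedged usage sketch with illustrative
   values (vdup_n_u8 is declared elsewhere in this header):

     uint8x8_t bytes = vdup_n_u8 (200);
     uint16_t sum = vaddlv_u8 (bytes);   /* 8 * 200 = 1600 */
*/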
7076 __extension__ extern __inline int16_t
7077 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7078 vaddlv_s8 (int8x8_t __a)
7079 {
7080 int16_t __result;
7081 __asm__ ("saddlv %h0,%1.8b"
7082 : "=w"(__result)
7083 : "w"(__a)
7084 : /* No clobbers */);
7085 return __result;
7086 }
7087
7088 __extension__ extern __inline int32_t
7089 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7090 vaddlv_s16 (int16x4_t __a)
7091 {
7092 int32_t __result;
7093 __asm__ ("saddlv %s0,%1.4h"
7094 : "=w"(__result)
7095 : "w"(__a)
7096 : /* No clobbers */);
7097 return __result;
7098 }
7099
7100 __extension__ extern __inline uint16_t
7101 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7102 vaddlv_u8 (uint8x8_t __a)
7103 {
7104 uint16_t __result;
7105 __asm__ ("uaddlv %h0,%1.8b"
7106 : "=w"(__result)
7107 : "w"(__a)
7108 : /* No clobbers */);
7109 return __result;
7110 }
7111
7112 __extension__ extern __inline uint32_t
7113 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7114 vaddlv_u16 (uint16x4_t __a)
7115 {
7116 uint32_t __result;
7117 __asm__ ("uaddlv %s0,%1.4h"
7118 : "=w"(__result)
7119 : "w"(__a)
7120 : /* No clobbers */);
7121 return __result;
7122 }
7123
7124 __extension__ extern __inline int16_t
7125 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7126 vaddlvq_s8 (int8x16_t __a)
7127 {
7128 int16_t __result;
7129 __asm__ ("saddlv %h0,%1.16b"
7130 : "=w"(__result)
7131 : "w"(__a)
7132 : /* No clobbers */);
7133 return __result;
7134 }
7135
7136 __extension__ extern __inline int32_t
7137 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7138 vaddlvq_s16 (int16x8_t __a)
7139 {
7140 int32_t __result;
7141 __asm__ ("saddlv %s0,%1.8h"
7142 : "=w"(__result)
7143 : "w"(__a)
7144 : /* No clobbers */);
7145 return __result;
7146 }
7147
7148 __extension__ extern __inline int64_t
7149 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7150 vaddlvq_s32 (int32x4_t __a)
7151 {
7152 int64_t __result;
7153 __asm__ ("saddlv %d0,%1.4s"
7154 : "=w"(__result)
7155 : "w"(__a)
7156 : /* No clobbers */);
7157 return __result;
7158 }
7159
7160 __extension__ extern __inline uint16_t
7161 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7162 vaddlvq_u8 (uint8x16_t __a)
7163 {
7164 uint16_t __result;
7165 __asm__ ("uaddlv %h0,%1.16b"
7166 : "=w"(__result)
7167 : "w"(__a)
7168 : /* No clobbers */);
7169 return __result;
7170 }
7171
7172 __extension__ extern __inline uint32_t
7173 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7174 vaddlvq_u16 (uint16x8_t __a)
7175 {
7176 uint32_t __result;
7177 __asm__ ("uaddlv %s0,%1.8h"
7178 : "=w"(__result)
7179 : "w"(__a)
7180 : /* No clobbers */);
7181 return __result;
7182 }
7183
7184 __extension__ extern __inline uint64_t
7185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7186 vaddlvq_u32 (uint32x4_t __a)
7187 {
7188 uint64_t __result;
7189 __asm__ ("uaddlv %d0,%1.4s"
7190 : "=w"(__result)
7191 : "w"(__a)
7192 : /* No clobbers */);
7193 return __result;
7194 }
7195
7196 __extension__ extern __inline float32x2_t
7197 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7198 vcvtx_f32_f64 (float64x2_t __a)
7199 {
7200 float32x2_t __result;
7201 __asm__ ("fcvtxn %0.2s,%1.2d"
7202 : "=w"(__result)
7203 : "w"(__a)
7204 : /* No clobbers */);
7205 return __result;
7206 }
7207
7208 __extension__ extern __inline float32x4_t
7209 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7210 vcvtx_high_f32_f64 (float32x2_t __a, float64x2_t __b)
7211 {
7212 float32x4_t __result;
7213 __asm__ ("fcvtxn2 %0.4s,%1.2d"
7214 : "=w"(__result)
7215 : "w"(__b), "0"(__a)
7216 : /* No clobbers */);
7217 return __result;
7218 }
7219
7220 __extension__ extern __inline float32_t
7221 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7222 vcvtxd_f32_f64 (float64_t __a)
7223 {
7224 float32_t __result;
7225 __asm__ ("fcvtxn %s0,%d1"
7226 : "=w"(__result)
7227 : "w"(__a)
7228 : /* No clobbers */);
7229 return __result;
7230 }
7231
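/* vcvtx*_f32_f64 use FCVTXN, which narrows double to single precision with
   the round-to-odd rounding mode.  Round-to-odd avoids double-rounding
   errors if the result is narrowed again afterwards (for example to half
   precision), which is the usual reason to prefer it over the ordinary
   vcvt_f32_f64 narrowing conversion.  */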
7232 __extension__ extern __inline float32x2_t
7233 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7234 vmla_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
7235 {
7236 float32x2_t __result;
7237 float32x2_t __t1;
7238 __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fadd %0.2s, %0.2s, %1.2s"
7239 : "=w"(__result), "=w"(__t1)
7240 : "0"(__a), "w"(__b), "w"(__c)
7241 : /* No clobbers */);
7242 return __result;
7243 }
7244
7245 __extension__ extern __inline int16x4_t
7246 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7247 vmla_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c)
7248 {
7249 int16x4_t __result;
7250 __asm__ ("mla %0.4h,%2.4h,%3.h[0]"
7251 : "=w"(__result)
7252 : "0"(__a), "w"(__b), "x"(__c)
7253 : /* No clobbers */);
7254 return __result;
7255 }
7256
7257 __extension__ extern __inline int32x2_t
7258 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7259 vmla_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c)
7260 {
7261 int32x2_t __result;
7262 __asm__ ("mla %0.2s,%2.2s,%3.s[0]"
7263 : "=w"(__result)
7264 : "0"(__a), "w"(__b), "w"(__c)
7265 : /* No clobbers */);
7266 return __result;
7267 }
7268
7269 __extension__ extern __inline uint16x4_t
7270 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7271 vmla_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c)
7272 {
7273 uint16x4_t __result;
7274 __asm__ ("mla %0.4h,%2.4h,%3.h[0]"
7275 : "=w"(__result)
7276 : "0"(__a), "w"(__b), "x"(__c)
7277 : /* No clobbers */);
7278 return __result;
7279 }
7280
7281 __extension__ extern __inline uint32x2_t
7282 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7283 vmla_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c)
7284 {
7285 uint32x2_t __result;
7286 __asm__ ("mla %0.2s,%2.2s,%3.s[0]"
7287 : "=w"(__result)
7288 : "0"(__a), "w"(__b), "w"(__c)
7289 : /* No clobbers */);
7290 return __result;
7291 }
7292
7293 __extension__ extern __inline int8x8_t
7294 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7295 vmla_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
7296 {
7297 return __builtin_aarch64_mlav8qi (__a, __b, __c);
7298 }
7299
7300 __extension__ extern __inline int16x4_t
7301 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7302 vmla_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
7303 {
7304 return __builtin_aarch64_mlav4hi (__a, __b, __c);
7305 }
7306
7307 __extension__ extern __inline int32x2_t
7308 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7309 vmla_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
7310 {
7311 return __builtin_aarch64_mlav2si (__a, __b, __c);
7312 }
7313
7314 __extension__ extern __inline uint8x8_t
7315 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7316 vmla_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
7317 {
7318 return (uint8x8_t) __builtin_aarch64_mlav8qi ((int8x8_t) __a,
7319 (int8x8_t) __b,
7320 (int8x8_t) __c);
7321 }
7322
7323 __extension__ extern __inline uint16x4_t
7324 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7325 vmla_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
7326 {
7327 return (uint16x4_t) __builtin_aarch64_mlav4hi ((int16x4_t) __a,
7328 (int16x4_t) __b,
7329 (int16x4_t) __c);
7330 }
7331
7332 __extension__ extern __inline uint32x2_t
7333 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7334 vmla_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
7335 {
7336 return (uint32x2_t) __builtin_aarch64_mlav2si ((int32x2_t) __a,
7337 (int32x2_t) __b,
7338 (int32x2_t) __c);
7339 }
7340
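/* vmla_<type> (__a, __b, __c) computes __a + __b * __c lane-wise through
   compiler builtins, while the vmla_n_* variants above broadcast the
   scalar __c via a by-element multiply written in inline asm.  A hedged
   usage sketch with illustrative values (vdup_n_s16 is declared elsewhere
   in this header):

     int16x4_t acc = vdup_n_s16 (1);
     int16x4_t x = vdup_n_s16 (2);
     int16x4_t y = vdup_n_s16 (3);
     acc = vmla_s16 (acc, x, y);   /* each lane becomes 1 + 2*3 = 7 */
*/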
7341 #define vmlal_high_lane_s16(a, b, c, d) \
7342 __extension__ \
7343 ({ \
7344 int16x4_t c_ = (c); \
7345 int16x8_t b_ = (b); \
7346 int32x4_t a_ = (a); \
7347 int32x4_t result; \
7348 __asm__ ("smlal2 %0.4s, %2.8h, %3.h[%4]" \
7349 : "=w"(result) \
7350 : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
7351 : /* No clobbers */); \
7352 result; \
7353 })
7354
7355 #define vmlal_high_lane_s32(a, b, c, d) \
7356 __extension__ \
7357 ({ \
7358 int32x2_t c_ = (c); \
7359 int32x4_t b_ = (b); \
7360 int64x2_t a_ = (a); \
7361 int64x2_t result; \
7362 __asm__ ("smlal2 %0.2d, %2.4s, %3.s[%4]" \
7363 : "=w"(result) \
7364 : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
7365 : /* No clobbers */); \
7366 result; \
7367 })
7368
7369 #define vmlal_high_lane_u16(a, b, c, d) \
7370 __extension__ \
7371 ({ \
7372 uint16x4_t c_ = (c); \
7373 uint16x8_t b_ = (b); \
7374 uint32x4_t a_ = (a); \
7375 uint32x4_t result; \
7376 __asm__ ("umlal2 %0.4s, %2.8h, %3.h[%4]" \
7377 : "=w"(result) \
7378 : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
7379 : /* No clobbers */); \
7380 result; \
7381 })
7382
7383 #define vmlal_high_lane_u32(a, b, c, d) \
7384 __extension__ \
7385 ({ \
7386 uint32x2_t c_ = (c); \
7387 uint32x4_t b_ = (b); \
7388 uint64x2_t a_ = (a); \
7389 uint64x2_t result; \
7390 __asm__ ("umlal2 %0.2d, %2.4s, %3.s[%4]" \
7391 : "=w"(result) \
7392 : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
7393 : /* No clobbers */); \
7394 result; \
7395 })
7396
7397 #define vmlal_high_laneq_s16(a, b, c, d) \
7398 __extension__ \
7399 ({ \
7400 int16x8_t c_ = (c); \
7401 int16x8_t b_ = (b); \
7402 int32x4_t a_ = (a); \
7403 int32x4_t result; \
7404 __asm__ ("smlal2 %0.4s, %2.8h, %3.h[%4]" \
7405 : "=w"(result) \
7406 : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
7407 : /* No clobbers */); \
7408 result; \
7409 })
7410
7411 #define vmlal_high_laneq_s32(a, b, c, d) \
7412 __extension__ \
7413 ({ \
7414 int32x4_t c_ = (c); \
7415 int32x4_t b_ = (b); \
7416 int64x2_t a_ = (a); \
7417 int64x2_t result; \
7418 __asm__ ("smlal2 %0.2d, %2.4s, %3.s[%4]" \
7419 : "=w"(result) \
7420 : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
7421 : /* No clobbers */); \
7422 result; \
7423 })
7424
7425 #define vmlal_high_laneq_u16(a, b, c, d) \
7426 __extension__ \
7427 ({ \
7428 uint16x8_t c_ = (c); \
7429 uint16x8_t b_ = (b); \
7430 uint32x4_t a_ = (a); \
7431 uint32x4_t result; \
7432 __asm__ ("umlal2 %0.4s, %2.8h, %3.h[%4]" \
7433 : "=w"(result) \
7434 : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
7435 : /* No clobbers */); \
7436 result; \
7437 })
7438
7439 #define vmlal_high_laneq_u32(a, b, c, d) \
7440 __extension__ \
7441 ({ \
7442 uint32x4_t c_ = (c); \
7443 uint32x4_t b_ = (b); \
7444 uint64x2_t a_ = (a); \
7445 uint64x2_t result; \
7446 __asm__ ("umlal2 %0.2d, %2.4s, %3.s[%4]" \
7447 : "=w"(result) \
7448 : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
7449 : /* No clobbers */); \
7450 result; \
7451 })
7452
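/* The vmlal_high_n_* functions below are the scalar-operand form of the
   widening multiply-accumulate high: every element of the high half of
   __b is multiplied by the scalar __c, widened, and added to __a.  A
   rough per-lane sketch: __result[i] = __a[i] + (widened) __b[i + 4] * __c
   for the 16-bit variants; the 32-bit variants use i + 2.  */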
7453 __extension__ extern __inline int32x4_t
7454 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7455 vmlal_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
7456 {
7457 int32x4_t __result;
7458 __asm__ ("smlal2 %0.4s,%2.8h,%3.h[0]"
7459 : "=w"(__result)
7460 : "0"(__a), "w"(__b), "x"(__c)
7461 : /* No clobbers */);
7462 return __result;
7463 }
7464
7465 __extension__ extern __inline int64x2_t
7466 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7467 vmlal_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
7468 {
7469 int64x2_t __result;
7470 __asm__ ("smlal2 %0.2d,%2.4s,%3.s[0]"
7471 : "=w"(__result)
7472 : "0"(__a), "w"(__b), "w"(__c)
7473 : /* No clobbers */);
7474 return __result;
7475 }
7476
7477 __extension__ extern __inline uint32x4_t
7478 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7479 vmlal_high_n_u16 (uint32x4_t __a, uint16x8_t __b, uint16_t __c)
7480 {
7481 uint32x4_t __result;
7482 __asm__ ("umlal2 %0.4s,%2.8h,%3.h[0]"
7483 : "=w"(__result)
7484 : "0"(__a), "w"(__b), "x"(__c)
7485 : /* No clobbers */);
7486 return __result;
7487 }
7488
7489 __extension__ extern __inline uint64x2_t
7490 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7491 vmlal_high_n_u32 (uint64x2_t __a, uint32x4_t __b, uint32_t __c)
7492 {
7493 uint64x2_t __result;
7494 __asm__ ("umlal2 %0.2d,%2.4s,%3.s[0]"
7495 : "=w"(__result)
7496 : "0"(__a), "w"(__b), "w"(__c)
7497 : /* No clobbers */);
7498 return __result;
7499 }
7500
7501 __extension__ extern __inline int16x8_t
7502 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7503 vmlal_high_s8 (int16x8_t __a, int8x16_t __b, int8x16_t __c)
7504 {
7505 int16x8_t __result;
7506 __asm__ ("smlal2 %0.8h,%2.16b,%3.16b"
7507 : "=w"(__result)
7508 : "0"(__a), "w"(__b), "w"(__c)
7509 : /* No clobbers */);
7510 return __result;
7511 }
7512
7513 __extension__ extern __inline int32x4_t
7514 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7515 vmlal_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
7516 {
7517 int32x4_t __result;
7518 __asm__ ("smlal2 %0.4s,%2.8h,%3.8h"
7519 : "=w"(__result)
7520 : "0"(__a), "w"(__b), "w"(__c)
7521 : /* No clobbers */);
7522 return __result;
7523 }
7524
7525 __extension__ extern __inline int64x2_t
7526 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7527 vmlal_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
7528 {
7529 int64x2_t __result;
7530 __asm__ ("smlal2 %0.2d,%2.4s,%3.4s"
7531 : "=w"(__result)
7532 : "0"(__a), "w"(__b), "w"(__c)
7533 : /* No clobbers */);
7534 return __result;
7535 }
7536
7537 __extension__ extern __inline uint16x8_t
7538 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7539 vmlal_high_u8 (uint16x8_t __a, uint8x16_t __b, uint8x16_t __c)
7540 {
7541 uint16x8_t __result;
7542 __asm__ ("umlal2 %0.8h,%2.16b,%3.16b"
7543 : "=w"(__result)
7544 : "0"(__a), "w"(__b), "w"(__c)
7545 : /* No clobbers */);
7546 return __result;
7547 }
7548
7549 __extension__ extern __inline uint32x4_t
7550 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7551 vmlal_high_u16 (uint32x4_t __a, uint16x8_t __b, uint16x8_t __c)
7552 {
7553 uint32x4_t __result;
7554 __asm__ ("umlal2 %0.4s,%2.8h,%3.8h"
7555 : "=w"(__result)
7556 : "0"(__a), "w"(__b), "w"(__c)
7557 : /* No clobbers */);
7558 return __result;
7559 }
7560
7561 __extension__ extern __inline uint64x2_t
7562 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7563 vmlal_high_u32 (uint64x2_t __a, uint32x4_t __b, uint32x4_t __c)
7564 {
7565 uint64x2_t __result;
7566 __asm__ ("umlal2 %0.2d,%2.4s,%3.4s"
7567 : "=w"(__result)
7568 : "0"(__a), "w"(__b), "w"(__c)
7569 : /* No clobbers */);
7570 return __result;
7571 }
7572
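/* The vmlal_lane and vmlal_laneq intrinsics below expand through RTL
   builtins rather than inline asm: lane __c of __b is selected, widened,
   multiplied by each element of __a and accumulated into __acc.  Roughly,
   per lane:
     __acc[i] + (widened) __a[i] * (widened) __b[__c]
   The lane index must be a compile-time constant.  */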
7573 __extension__ extern __inline int32x4_t
7574 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7575 vmlal_lane_s16 (int32x4_t __acc, int16x4_t __a, int16x4_t __b, const int __c)
7576 {
7577 return __builtin_aarch64_vec_smlal_lane_v4hi (__acc, __a, __b, __c);
7578 }
7579
7580 __extension__ extern __inline int64x2_t
7581 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7582 vmlal_lane_s32 (int64x2_t __acc, int32x2_t __a, int32x2_t __b, const int __c)
7583 {
7584 return __builtin_aarch64_vec_smlal_lane_v2si (__acc, __a, __b, __c);
7585 }
7586
7587 __extension__ extern __inline uint32x4_t
7588 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7589 vmlal_lane_u16 (uint32x4_t __acc, uint16x4_t __a, uint16x4_t __b, const int __c)
7590 {
7591 return __builtin_aarch64_vec_umlal_lane_v4hi_uuuus (__acc, __a, __b, __c);
7592 }
7593
7594 __extension__ extern __inline uint64x2_t
7595 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7596 vmlal_lane_u32 (uint64x2_t __acc, uint32x2_t __a, uint32x2_t __b, const int __c)
7597 {
7598 return __builtin_aarch64_vec_umlal_lane_v2si_uuuus (__acc, __a, __b, __c);
7599 }
7600
7601 __extension__ extern __inline int32x4_t
7602 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7603 vmlal_laneq_s16 (int32x4_t __acc, int16x4_t __a, int16x8_t __b, const int __c)
7604 {
7605 return __builtin_aarch64_vec_smlal_laneq_v4hi (__acc, __a, __b, __c);
7606 }
7607
7608 __extension__ extern __inline int64x2_t
7609 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7610 vmlal_laneq_s32 (int64x2_t __acc, int32x2_t __a, int32x4_t __b, const int __c)
7611 {
7612 return __builtin_aarch64_vec_smlal_laneq_v2si (__acc, __a, __b, __c);
7613 }
7614
7615 __extension__ extern __inline uint32x4_t
7616 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7617 vmlal_laneq_u16 (uint32x4_t __acc, uint16x4_t __a, uint16x8_t __b, const int __c)
7618 {
7619 return __builtin_aarch64_vec_umlal_laneq_v4hi_uuuus (__acc, __a, __b, __c);
7620 }
7621
7622 __extension__ extern __inline uint64x2_t
7623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7624 vmlal_laneq_u32 (uint64x2_t __acc, uint32x2_t __a, uint32x4_t __b, const int __c)
7625 {
7626 return __builtin_aarch64_vec_umlal_laneq_v2si_uuuus (__acc, __a, __b, __c);
7627 }
7628
7629 __extension__ extern __inline int32x4_t
7630 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7631 vmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
7632 {
7633 int32x4_t __result;
7634 __asm__ ("smlal %0.4s,%2.4h,%3.h[0]"
7635 : "=w"(__result)
7636 : "0"(__a), "w"(__b), "x"(__c)
7637 : /* No clobbers */);
7638 return __result;
7639 }
7640
7641 __extension__ extern __inline int64x2_t
7642 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7643 vmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
7644 {
7645 int64x2_t __result;
7646 __asm__ ("smlal %0.2d,%2.2s,%3.s[0]"
7647 : "=w"(__result)
7648 : "0"(__a), "w"(__b), "w"(__c)
7649 : /* No clobbers */);
7650 return __result;
7651 }
7652
7653 __extension__ extern __inline uint32x4_t
7654 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7655 vmlal_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
7656 {
7657 uint32x4_t __result;
7658 __asm__ ("umlal %0.4s,%2.4h,%3.h[0]"
7659 : "=w"(__result)
7660 : "0"(__a), "w"(__b), "x"(__c)
7661 : /* No clobbers */);
7662 return __result;
7663 }
7664
7665 __extension__ extern __inline uint64x2_t
7666 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7667 vmlal_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
7668 {
7669 uint64x2_t __result;
7670 __asm__ ("umlal %0.2d,%2.2s,%3.s[0]"
7671 : "=w"(__result)
7672 : "0"(__a), "w"(__b), "w"(__c)
7673 : /* No clobbers */);
7674 return __result;
7675 }
7676
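/* The plain vmlal_* intrinsics below widen the low (and only) half of both
   multiplicands before accumulating.  A minimal usage sketch, with
   illustrative values only:

     int16x4_t x = vdup_n_s16 (3);
     int16x4_t y = vdup_n_s16 (4);
     int32x4_t acc = vdupq_n_s32 (5);
     acc = vmlal_s16 (acc, x, y);

   after which every lane of acc holds 5 + 3 * 4 = 17.  vdup_n_s16 and
   vdupq_n_s32 are the duplicate-scalar intrinsics defined elsewhere in
   this file.  */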
7677 __extension__ extern __inline int16x8_t
7678 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7679 vmlal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
7680 {
7681 int16x8_t __result;
7682 __asm__ ("smlal %0.8h,%2.8b,%3.8b"
7683 : "=w"(__result)
7684 : "0"(__a), "w"(__b), "w"(__c)
7685 : /* No clobbers */);
7686 return __result;
7687 }
7688
7689 __extension__ extern __inline int32x4_t
7690 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7691 vmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
7692 {
7693 int32x4_t __result;
7694 __asm__ ("smlal %0.4s,%2.4h,%3.4h"
7695 : "=w"(__result)
7696 : "0"(__a), "w"(__b), "w"(__c)
7697 : /* No clobbers */);
7698 return __result;
7699 }
7700
7701 __extension__ extern __inline int64x2_t
7702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7703 vmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
7704 {
7705 int64x2_t __result;
7706 __asm__ ("smlal %0.2d,%2.2s,%3.2s"
7707 : "=w"(__result)
7708 : "0"(__a), "w"(__b), "w"(__c)
7709 : /* No clobbers */);
7710 return __result;
7711 }
7712
7713 __extension__ extern __inline uint16x8_t
7714 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7715 vmlal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
7716 {
7717 uint16x8_t __result;
7718 __asm__ ("umlal %0.8h,%2.8b,%3.8b"
7719 : "=w"(__result)
7720 : "0"(__a), "w"(__b), "w"(__c)
7721 : /* No clobbers */);
7722 return __result;
7723 }
7724
7725 __extension__ extern __inline uint32x4_t
7726 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7727 vmlal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
7728 {
7729 uint32x4_t __result;
7730 __asm__ ("umlal %0.4s,%2.4h,%3.4h"
7731 : "=w"(__result)
7732 : "0"(__a), "w"(__b), "w"(__c)
7733 : /* No clobbers */);
7734 return __result;
7735 }
7736
7737 __extension__ extern __inline uint64x2_t
7738 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7739 vmlal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
7740 {
7741 uint64x2_t __result;
7742 __asm__ ("umlal %0.2d,%2.2s,%3.2s"
7743 : "=w"(__result)
7744 : "0"(__a), "w"(__b), "w"(__c)
7745 : /* No clobbers */);
7746 return __result;
7747 }
7748
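/* Note on vmlaq_n_f32 below: __a + __b * __c is emitted as a separate
   FMUL and FADD (two rounding steps) rather than as a fused multiply-add,
   roughly matching a plain C "a + b * c" evaluated without contraction;
   the vfma* intrinsics provide the fused form.  The integer vmlaq_n_*
   variants that follow map to a single MLA by element.  */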
7749 __extension__ extern __inline float32x4_t
7750 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7751 vmlaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
7752 {
7753 float32x4_t __result;
7754 float32x4_t __t1;
7755 __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fadd %0.4s, %0.4s, %1.4s"
7756 : "=w"(__result), "=w"(__t1)
7757 : "0"(__a), "w"(__b), "w"(__c)
7758 : /* No clobbers */);
7759 return __result;
7760 }
7761
7762 __extension__ extern __inline int16x8_t
7763 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7764 vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
7765 {
7766 int16x8_t __result;
7767 __asm__ ("mla %0.8h,%2.8h,%3.h[0]"
7768 : "=w"(__result)
7769 : "0"(__a), "w"(__b), "x"(__c)
7770 : /* No clobbers */);
7771 return __result;
7772 }
7773
7774 __extension__ extern __inline int32x4_t
7775 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7776 vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
7777 {
7778 int32x4_t __result;
7779 __asm__ ("mla %0.4s,%2.4s,%3.s[0]"
7780 : "=w"(__result)
7781 : "0"(__a), "w"(__b), "w"(__c)
7782 : /* No clobbers */);
7783 return __result;
7784 }
7785
7786 __extension__ extern __inline uint16x8_t
7787 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7788 vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
7789 {
7790 uint16x8_t __result;
7791 __asm__ ("mla %0.8h,%2.8h,%3.h[0]"
7792 : "=w"(__result)
7793 : "0"(__a), "w"(__b), "x"(__c)
7794 : /* No clobbers */);
7795 return __result;
7796 }
7797
7798 __extension__ extern __inline uint32x4_t
7799 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7800 vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
7801 {
7802 uint32x4_t __result;
7803 __asm__ ("mla %0.4s,%2.4s,%3.s[0]"
7804 : "=w"(__result)
7805 : "0"(__a), "w"(__b), "w"(__c)
7806 : /* No clobbers */);
7807 return __result;
7808 }
7809
7810 __extension__ extern __inline int8x16_t
7811 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7812 vmlaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
7813 {
7814 return __builtin_aarch64_mlav16qi (__a, __b, __c);
7815 }
7816
7817 __extension__ extern __inline int16x8_t
7818 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7819 vmlaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
7820 {
7821 return __builtin_aarch64_mlav8hi (__a, __b, __c);
7822 }
7823
7824 __extension__ extern __inline int32x4_t
7825 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7826 vmlaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
7827 {
7828 return __builtin_aarch64_mlav4si (__a, __b, __c);
7829 }
7830
7831 __extension__ extern __inline uint8x16_t
7832 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7833 vmlaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
7834 {
7835 return (uint8x16_t) __builtin_aarch64_mlav16qi ((int8x16_t) __a,
7836 (int8x16_t) __b,
7837 (int8x16_t) __c);
7838 }
7839
7840 __extension__ extern __inline uint16x8_t
7841 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7842 vmlaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
7843 {
7844 return (uint16x8_t) __builtin_aarch64_mlav8hi ((int16x8_t) __a,
7845 (int16x8_t) __b,
7846 (int16x8_t) __c);
7847 }
7848
7849 __extension__ extern __inline uint32x4_t
7850 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7851 vmlaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
7852 {
7853 return (uint32x4_t) __builtin_aarch64_mlav4si ((int32x4_t) __a,
7854 (int32x4_t) __b,
7855 (int32x4_t) __c);
7856 }
7857
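/* The vmls* family below is the multiply-subtract counterpart of vmla*:
   each result lane is __a[i] - __b[i] * __c (or __c[i] for the vector
   forms).  As with vmlaq_n_f32 above, the float variants use a separate
   FMUL/FSUB pair rather than a fused operation.  */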
7858 __extension__ extern __inline float32x2_t
7859 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7860 vmls_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
7861 {
7862 float32x2_t __result;
7863 float32x2_t __t1;
7864 __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fsub %0.2s, %0.2s, %1.2s"
7865 : "=w"(__result), "=w"(__t1)
7866 : "0"(__a), "w"(__b), "w"(__c)
7867 : /* No clobbers */);
7868 return __result;
7869 }
7870
7871 __extension__ extern __inline int16x4_t
7872 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7873 vmls_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c)
7874 {
7875 int16x4_t __result;
7876 __asm__ ("mls %0.4h, %2.4h, %3.h[0]"
7877 : "=w"(__result)
7878 : "0"(__a), "w"(__b), "x"(__c)
7879 : /* No clobbers */);
7880 return __result;
7881 }
7882
7883 __extension__ extern __inline int32x2_t
7884 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7885 vmls_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c)
7886 {
7887 int32x2_t __result;
7888 __asm__ ("mls %0.2s, %2.2s, %3.s[0]"
7889 : "=w"(__result)
7890 : "0"(__a), "w"(__b), "w"(__c)
7891 : /* No clobbers */);
7892 return __result;
7893 }
7894
7895 __extension__ extern __inline uint16x4_t
7896 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7897 vmls_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c)
7898 {
7899 uint16x4_t __result;
7900 __asm__ ("mls %0.4h, %2.4h, %3.h[0]"
7901 : "=w"(__result)
7902 : "0"(__a), "w"(__b), "x"(__c)
7903 : /* No clobbers */);
7904 return __result;
7905 }
7906
7907 __extension__ extern __inline uint32x2_t
7908 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7909 vmls_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c)
7910 {
7911 uint32x2_t __result;
7912 __asm__ ("mls %0.2s, %2.2s, %3.s[0]"
7913 : "=w"(__result)
7914 : "0"(__a), "w"(__b), "w"(__c)
7915 : /* No clobbers */);
7916 return __result;
7917 }
7918
7919 __extension__ extern __inline int8x8_t
7920 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7921 vmls_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
7922 {
7923 int8x8_t __result;
7924 __asm__ ("mls %0.8b,%2.8b,%3.8b"
7925 : "=w"(__result)
7926 : "0"(__a), "w"(__b), "w"(__c)
7927 : /* No clobbers */);
7928 return __result;
7929 }
7930
7931 __extension__ extern __inline int16x4_t
7932 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7933 vmls_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
7934 {
7935 int16x4_t __result;
7936 __asm__ ("mls %0.4h,%2.4h,%3.4h"
7937 : "=w"(__result)
7938 : "0"(__a), "w"(__b), "w"(__c)
7939 : /* No clobbers */);
7940 return __result;
7941 }
7942
7943 __extension__ extern __inline int32x2_t
7944 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7945 vmls_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
7946 {
7947 int32x2_t __result;
7948 __asm__ ("mls %0.2s,%2.2s,%3.2s"
7949 : "=w"(__result)
7950 : "0"(__a), "w"(__b), "w"(__c)
7951 : /* No clobbers */);
7952 return __result;
7953 }
7954
7955 __extension__ extern __inline uint8x8_t
7956 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7957 vmls_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
7958 {
7959 uint8x8_t __result;
7960 __asm__ ("mls %0.8b,%2.8b,%3.8b"
7961 : "=w"(__result)
7962 : "0"(__a), "w"(__b), "w"(__c)
7963 : /* No clobbers */);
7964 return __result;
7965 }
7966
7967 __extension__ extern __inline uint16x4_t
7968 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7969 vmls_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
7970 {
7971 uint16x4_t __result;
7972 __asm__ ("mls %0.4h,%2.4h,%3.4h"
7973 : "=w"(__result)
7974 : "0"(__a), "w"(__b), "w"(__c)
7975 : /* No clobbers */);
7976 return __result;
7977 }
7978
7979 __extension__ extern __inline uint32x2_t
7980 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
7981 vmls_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
7982 {
7983 uint32x2_t __result;
7984 __asm__ ("mls %0.2s,%2.2s,%3.2s"
7985 : "=w"(__result)
7986 : "0"(__a), "w"(__b), "w"(__c)
7987 : /* No clobbers */);
7988 return __result;
7989 }
7990
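/* The vmlsl_high_* macros and functions below mirror the vmlal_high_*
   group above, but subtract the widened product instead of adding it,
   roughly (illustrative pseudocode for the by-lane forms):
     result[i] = a_[i] - (widened) b_[i + lanes/2] * (widened) c_[d]  */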
7991 #define vmlsl_high_lane_s16(a, b, c, d) \
7992 __extension__ \
7993 ({ \
7994 int16x4_t c_ = (c); \
7995 int16x8_t b_ = (b); \
7996 int32x4_t a_ = (a); \
7997 int32x4_t result; \
7998 __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[%4]" \
7999 : "=w"(result) \
8000 : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
8001 : /* No clobbers */); \
8002 result; \
8003 })
8004
8005 #define vmlsl_high_lane_s32(a, b, c, d) \
8006 __extension__ \
8007 ({ \
8008 int32x2_t c_ = (c); \
8009 int32x4_t b_ = (b); \
8010 int64x2_t a_ = (a); \
8011 int64x2_t result; \
8012 __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[%4]" \
8013 : "=w"(result) \
8014 : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
8015 : /* No clobbers */); \
8016 result; \
8017 })
8018
8019 #define vmlsl_high_lane_u16(a, b, c, d) \
8020 __extension__ \
8021 ({ \
8022 uint16x4_t c_ = (c); \
8023 uint16x8_t b_ = (b); \
8024 uint32x4_t a_ = (a); \
8025 uint32x4_t result; \
8026 __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[%4]" \
8027 : "=w"(result) \
8028 : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
8029 : /* No clobbers */); \
8030 result; \
8031 })
8032
8033 #define vmlsl_high_lane_u32(a, b, c, d) \
8034 __extension__ \
8035 ({ \
8036 uint32x2_t c_ = (c); \
8037 uint32x4_t b_ = (b); \
8038 uint64x2_t a_ = (a); \
8039 uint64x2_t result; \
8040 __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[%4]" \
8041 : "=w"(result) \
8042 : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
8043 : /* No clobbers */); \
8044 result; \
8045 })
8046
8047 #define vmlsl_high_laneq_s16(a, b, c, d) \
8048 __extension__ \
8049 ({ \
8050 int16x8_t c_ = (c); \
8051 int16x8_t b_ = (b); \
8052 int32x4_t a_ = (a); \
8053 int32x4_t result; \
8054 __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[%4]" \
8055 : "=w"(result) \
8056 : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
8057 : /* No clobbers */); \
8058 result; \
8059 })
8060
8061 #define vmlsl_high_laneq_s32(a, b, c, d) \
8062 __extension__ \
8063 ({ \
8064 int32x4_t c_ = (c); \
8065 int32x4_t b_ = (b); \
8066 int64x2_t a_ = (a); \
8067 int64x2_t result; \
8068 __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[%4]" \
8069 : "=w"(result) \
8070 : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
8071 : /* No clobbers */); \
8072 result; \
8073 })
8074
8075 #define vmlsl_high_laneq_u16(a, b, c, d) \
8076 __extension__ \
8077 ({ \
8078 uint16x8_t c_ = (c); \
8079 uint16x8_t b_ = (b); \
8080 uint32x4_t a_ = (a); \
8081 uint32x4_t result; \
8082 __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[%4]" \
8083 : "=w"(result) \
8084 : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
8085 : /* No clobbers */); \
8086 result; \
8087 })
8088
8089 #define vmlsl_high_laneq_u32(a, b, c, d) \
8090 __extension__ \
8091 ({ \
8092 uint32x4_t c_ = (c); \
8093 uint32x4_t b_ = (b); \
8094 uint64x2_t a_ = (a); \
8095 uint64x2_t result; \
8096 __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[%4]" \
8097 : "=w"(result) \
8098 : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
8099 : /* No clobbers */); \
8100 result; \
8101 })
8102
8103 __extension__ extern __inline int32x4_t
8104 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8105 vmlsl_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
8106 {
8107 int32x4_t __result;
8108 __asm__ ("smlsl2 %0.4s, %2.8h, %3.h[0]"
8109 : "=w"(__result)
8110 : "0"(__a), "w"(__b), "x"(__c)
8111 : /* No clobbers */);
8112 return __result;
8113 }
8114
8115 __extension__ extern __inline int64x2_t
8116 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8117 vmlsl_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
8118 {
8119 int64x2_t __result;
8120 __asm__ ("smlsl2 %0.2d, %2.4s, %3.s[0]"
8121 : "=w"(__result)
8122 : "0"(__a), "w"(__b), "w"(__c)
8123 : /* No clobbers */);
8124 return __result;
8125 }
8126
8127 __extension__ extern __inline uint32x4_t
8128 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8129 vmlsl_high_n_u16 (uint32x4_t __a, uint16x8_t __b, uint16_t __c)
8130 {
8131 uint32x4_t __result;
8132 __asm__ ("umlsl2 %0.4s, %2.8h, %3.h[0]"
8133 : "=w"(__result)
8134 : "0"(__a), "w"(__b), "x"(__c)
8135 : /* No clobbers */);
8136 return __result;
8137 }
8138
8139 __extension__ extern __inline uint64x2_t
8140 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8141 vmlsl_high_n_u32 (uint64x2_t __a, uint32x4_t __b, uint32_t __c)
8142 {
8143 uint64x2_t __result;
8144 __asm__ ("umlsl2 %0.2d, %2.4s, %3.s[0]"
8145 : "=w"(__result)
8146 : "0"(__a), "w"(__b), "w"(__c)
8147 : /* No clobbers */);
8148 return __result;
8149 }
8150
8151 __extension__ extern __inline int16x8_t
8152 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8153 vmlsl_high_s8 (int16x8_t __a, int8x16_t __b, int8x16_t __c)
8154 {
8155 return __builtin_aarch64_smlsl_hiv16qi (__a, __b, __c);
8156 }
8157
8158 __extension__ extern __inline int32x4_t
8159 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8160 vmlsl_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
8161 {
8162 return __builtin_aarch64_smlsl_hiv8hi (__a, __b, __c);
8163 }
8164
8165 __extension__ extern __inline int64x2_t
8166 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8167 vmlsl_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
8168 {
8169 return __builtin_aarch64_smlsl_hiv4si (__a, __b, __c);
8170 }
8171
8172 __extension__ extern __inline uint16x8_t
8173 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8174 vmlsl_high_u8 (uint16x8_t __a, uint8x16_t __b, uint8x16_t __c)
8175 {
8176 return __builtin_aarch64_umlsl_hiv16qi_uuuu (__a, __b, __c);
8177 }
8178
8179 __extension__ extern __inline uint32x4_t
8180 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8181 vmlsl_high_u16 (uint32x4_t __a, uint16x8_t __b, uint16x8_t __c)
8182 {
8183 return __builtin_aarch64_umlsl_hiv8hi_uuuu (__a, __b, __c);
8184 }
8185
8186 __extension__ extern __inline uint64x2_t
8187 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8188 vmlsl_high_u32 (uint64x2_t __a, uint32x4_t __b, uint32x4_t __c)
8189 {
8190 return __builtin_aarch64_umlsl_hiv4si_uuuu (__a, __b, __c);
8191 }
8192
8193 #define vmlsl_lane_s16(a, b, c, d) \
8194 __extension__ \
8195 ({ \
8196 int16x4_t c_ = (c); \
8197 int16x4_t b_ = (b); \
8198 int32x4_t a_ = (a); \
8199 int32x4_t result; \
8200 __asm__ ("smlsl %0.4s, %2.4h, %3.h[%4]" \
8201 : "=w"(result) \
8202 : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
8203 : /* No clobbers */); \
8204 result; \
8205 })
8206
8207 #define vmlsl_lane_s32(a, b, c, d) \
8208 __extension__ \
8209 ({ \
8210 int32x2_t c_ = (c); \
8211 int32x2_t b_ = (b); \
8212 int64x2_t a_ = (a); \
8213 int64x2_t result; \
8214 __asm__ ("smlsl %0.2d, %2.2s, %3.s[%4]" \
8215 : "=w"(result) \
8216 : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
8217 : /* No clobbers */); \
8218 result; \
8219 })
8220
8221 #define vmlsl_lane_u16(a, b, c, d) \
8222 __extension__ \
8223 ({ \
8224 uint16x4_t c_ = (c); \
8225 uint16x4_t b_ = (b); \
8226 uint32x4_t a_ = (a); \
8227 uint32x4_t result; \
8228 __asm__ ("umlsl %0.4s, %2.4h, %3.h[%4]" \
8229 : "=w"(result) \
8230 : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
8231 : /* No clobbers */); \
8232 result; \
8233 })
8234
8235 #define vmlsl_lane_u32(a, b, c, d) \
8236 __extension__ \
8237 ({ \
8238 uint32x2_t c_ = (c); \
8239 uint32x2_t b_ = (b); \
8240 uint64x2_t a_ = (a); \
8241 uint64x2_t result; \
8242 __asm__ ("umlsl %0.2d, %2.2s, %3.s[%4]" \
8243 : "=w"(result) \
8244 : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
8245 : /* No clobbers */); \
8246 result; \
8247 })
8248
8249 #define vmlsl_laneq_s16(a, b, c, d) \
8250 __extension__ \
8251 ({ \
8252 int16x8_t c_ = (c); \
8253 int16x4_t b_ = (b); \
8254 int32x4_t a_ = (a); \
8255 int32x4_t result; \
8256 __asm__ ("smlsl %0.4s, %2.4h, %3.h[%4]" \
8257 : "=w"(result) \
8258 : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
8259 : /* No clobbers */); \
8260 result; \
8261 })
8262
8263 #define vmlsl_laneq_s32(a, b, c, d) \
8264 __extension__ \
8265 ({ \
8266 int32x4_t c_ = (c); \
8267 int32x2_t b_ = (b); \
8268 int64x2_t a_ = (a); \
8269 int64x2_t result; \
8270 __asm__ ("smlsl %0.2d, %2.2s, %3.s[%4]" \
8271 : "=w"(result) \
8272 : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
8273 : /* No clobbers */); \
8274 result; \
8275 })
8276
8277 #define vmlsl_laneq_u16(a, b, c, d) \
8278 __extension__ \
8279 ({ \
8280 uint16x8_t c_ = (c); \
8281 uint16x4_t b_ = (b); \
8282 uint32x4_t a_ = (a); \
8283 uint32x4_t result; \
8284 __asm__ ("umlsl %0.4s, %2.4h, %3.h[%4]" \
8285 : "=w"(result) \
8286 : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
8287 : /* No clobbers */); \
8288 result; \
8289 })
8290
8291 #define vmlsl_laneq_u32(a, b, c, d) \
8292 __extension__ \
8293 ({ \
8294 uint32x4_t c_ = (c); \
8295 uint32x2_t b_ = (b); \
8296 uint64x2_t a_ = (a); \
8297 uint64x2_t result; \
8298 __asm__ ("umlsl %0.2d, %2.2s, %3.s[%4]" \
8299 : "=w"(result) \
8300 : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
8301 : /* No clobbers */); \
8302 result; \
8303 })
8304
8305 __extension__ extern __inline int32x4_t
8306 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8307 vmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
8308 {
8309 int32x4_t __result;
8310 __asm__ ("smlsl %0.4s, %2.4h, %3.h[0]"
8311 : "=w"(__result)
8312 : "0"(__a), "w"(__b), "x"(__c)
8313 : /* No clobbers */);
8314 return __result;
8315 }
8316
8317 __extension__ extern __inline int64x2_t
8318 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8319 vmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
8320 {
8321 int64x2_t __result;
8322 __asm__ ("smlsl %0.2d, %2.2s, %3.s[0]"
8323 : "=w"(__result)
8324 : "0"(__a), "w"(__b), "w"(__c)
8325 : /* No clobbers */);
8326 return __result;
8327 }
8328
8329 __extension__ extern __inline uint32x4_t
8330 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8331 vmlsl_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
8332 {
8333 uint32x4_t __result;
8334 __asm__ ("umlsl %0.4s, %2.4h, %3.h[0]"
8335 : "=w"(__result)
8336 : "0"(__a), "w"(__b), "x"(__c)
8337 : /* No clobbers */);
8338 return __result;
8339 }
8340
8341 __extension__ extern __inline uint64x2_t
8342 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8343 vmlsl_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
8344 {
8345 uint64x2_t __result;
8346 __asm__ ("umlsl %0.2d, %2.2s, %3.s[0]"
8347 : "=w"(__result)
8348 : "0"(__a), "w"(__b), "w"(__c)
8349 : /* No clobbers */);
8350 return __result;
8351 }
8352
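/* The plain vmlsl_* intrinsics below widen both low-half multiplicands and
   subtract the product from the accumulator.  Sketch with illustrative
   values only:

     int16x4_t x = vdup_n_s16 (3);
     int16x4_t y = vdup_n_s16 (4);
     int32x4_t acc = vdupq_n_s32 (20);
     acc = vmlsl_s16 (acc, x, y);

   after which every lane of acc holds 20 - 3 * 4 = 8.  */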
8353 __extension__ extern __inline int16x8_t
8354 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8355 vmlsl_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
8356 {
8357 return __builtin_aarch64_smlslv8qi (__a, __b, __c);
8358 }
8359
8360 __extension__ extern __inline int32x4_t
8361 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8362 vmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
8363 {
8364 return __builtin_aarch64_smlslv4hi (__a, __b, __c);
8365 }
8366
8367 __extension__ extern __inline int64x2_t
8368 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8369 vmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
8370 {
8371 return __builtin_aarch64_smlslv2si (__a, __b, __c);
8372 }
8373
8374 __extension__ extern __inline uint16x8_t
8375 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8376 vmlsl_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
8377 {
8378 return __builtin_aarch64_umlslv8qi_uuuu (__a, __b, __c);
8379 }
8380
8381 __extension__ extern __inline uint32x4_t
8382 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8383 vmlsl_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
8384 {
8385 return __builtin_aarch64_umlslv4hi_uuuu (__a, __b, __c);
8386 }
8387
8388 __extension__ extern __inline uint64x2_t
8389 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8390 vmlsl_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
8391 {
8392 return __builtin_aarch64_umlslv2si_uuuu (__a, __b, __c);
8393 }
8394
8395 __extension__ extern __inline float32x4_t
8396 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8397 vmlsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
8398 {
8399 float32x4_t __result;
8400 float32x4_t __t1;
8401 __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fsub %0.4s, %0.4s, %1.4s"
8402 : "=w"(__result), "=w"(__t1)
8403 : "0"(__a), "w"(__b), "w"(__c)
8404 : /* No clobbers */);
8405 return __result;
8406 }
8407
8408 __extension__ extern __inline int16x8_t
8409 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8410 vmlsq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
8411 {
8412 int16x8_t __result;
8413 __asm__ ("mls %0.8h, %2.8h, %3.h[0]"
8414 : "=w"(__result)
8415 : "0"(__a), "w"(__b), "x"(__c)
8416 : /* No clobbers */);
8417 return __result;
8418 }
8419
8420 __extension__ extern __inline int32x4_t
8421 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8422 vmlsq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
8423 {
8424 int32x4_t __result;
8425 __asm__ ("mls %0.4s, %2.4s, %3.s[0]"
8426 : "=w"(__result)
8427 : "0"(__a), "w"(__b), "w"(__c)
8428 : /* No clobbers */);
8429 return __result;
8430 }
8431
8432 __extension__ extern __inline uint16x8_t
8433 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8434 vmlsq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
8435 {
8436 uint16x8_t __result;
8437 __asm__ ("mls %0.8h, %2.8h, %3.h[0]"
8438 : "=w"(__result)
8439 : "0"(__a), "w"(__b), "x"(__c)
8440 : /* No clobbers */);
8441 return __result;
8442 }
8443
8444 __extension__ extern __inline uint32x4_t
8445 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8446 vmlsq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
8447 {
8448 uint32x4_t __result;
8449 __asm__ ("mls %0.4s, %2.4s, %3.s[0]"
8450 : "=w"(__result)
8451 : "0"(__a), "w"(__b), "w"(__c)
8452 : /* No clobbers */);
8453 return __result;
8454 }
8455
8456 __extension__ extern __inline int8x16_t
8457 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8458 vmlsq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
8459 {
8460 int8x16_t __result;
8461 __asm__ ("mls %0.16b,%2.16b,%3.16b"
8462 : "=w"(__result)
8463 : "0"(__a), "w"(__b), "w"(__c)
8464 : /* No clobbers */);
8465 return __result;
8466 }
8467
8468 __extension__ extern __inline int16x8_t
8469 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8470 vmlsq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
8471 {
8472 int16x8_t __result;
8473 __asm__ ("mls %0.8h,%2.8h,%3.8h"
8474 : "=w"(__result)
8475 : "0"(__a), "w"(__b), "w"(__c)
8476 : /* No clobbers */);
8477 return __result;
8478 }
8479
8480 __extension__ extern __inline int32x4_t
8481 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8482 vmlsq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
8483 {
8484 int32x4_t __result;
8485 __asm__ ("mls %0.4s,%2.4s,%3.4s"
8486 : "=w"(__result)
8487 : "0"(__a), "w"(__b), "w"(__c)
8488 : /* No clobbers */);
8489 return __result;
8490 }
8491
8492 __extension__ extern __inline uint8x16_t
8493 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8494 vmlsq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
8495 {
8496 uint8x16_t __result;
8497 __asm__ ("mls %0.16b,%2.16b,%3.16b"
8498 : "=w"(__result)
8499 : "0"(__a), "w"(__b), "w"(__c)
8500 : /* No clobbers */);
8501 return __result;
8502 }
8503
8504 __extension__ extern __inline uint16x8_t
8505 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8506 vmlsq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
8507 {
8508 uint16x8_t __result;
8509 __asm__ ("mls %0.8h,%2.8h,%3.8h"
8510 : "=w"(__result)
8511 : "0"(__a), "w"(__b), "w"(__c)
8512 : /* No clobbers */);
8513 return __result;
8514 }
8515
8516 __extension__ extern __inline uint32x4_t
8517 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8518 vmlsq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
8519 {
8520 uint32x4_t __result;
8521 __asm__ ("mls %0.4s,%2.4s,%3.4s"
8522 : "=w"(__result)
8523 : "0"(__a), "w"(__b), "w"(__c)
8524 : /* No clobbers */);
8525 return __result;
8526 }
8527
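/* The vmovl_high_* functions below widen the *high* half of the input:
   each element of the upper half is sign-extended (signed variants,
   SSHLL2 #0) or zero-extended (unsigned variants, USHLL2 #0) to twice its
   width, e.g. vmovl_high_s8 gives result[i] = (int16_t) __a[i + 8].  */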
8528 __extension__ extern __inline int16x8_t
8529 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8530 vmovl_high_s8 (int8x16_t __a)
8531 {
8532 int16x8_t __result;
8533 __asm__ ("sshll2 %0.8h,%1.16b,#0"
8534 : "=w"(__result)
8535 : "w"(__a)
8536 : /* No clobbers */);
8537 return __result;
8538 }
8539
8540 __extension__ extern __inline int32x4_t
8541 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8542 vmovl_high_s16 (int16x8_t __a)
8543 {
8544 int32x4_t __result;
8545 __asm__ ("sshll2 %0.4s,%1.8h,#0"
8546 : "=w"(__result)
8547 : "w"(__a)
8548 : /* No clobbers */);
8549 return __result;
8550 }
8551
8552 __extension__ extern __inline int64x2_t
8553 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8554 vmovl_high_s32 (int32x4_t __a)
8555 {
8556 int64x2_t __result;
8557 __asm__ ("sshll2 %0.2d,%1.4s,#0"
8558 : "=w"(__result)
8559 : "w"(__a)
8560 : /* No clobbers */);
8561 return __result;
8562 }
8563
8564 __extension__ extern __inline uint16x8_t
8565 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8566 vmovl_high_u8 (uint8x16_t __a)
8567 {
8568 uint16x8_t __result;
8569 __asm__ ("ushll2 %0.8h,%1.16b,#0"
8570 : "=w"(__result)
8571 : "w"(__a)
8572 : /* No clobbers */);
8573 return __result;
8574 }
8575
8576 __extension__ extern __inline uint32x4_t
8577 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8578 vmovl_high_u16 (uint16x8_t __a)
8579 {
8580 uint32x4_t __result;
8581 __asm__ ("ushll2 %0.4s,%1.8h,#0"
8582 : "=w"(__result)
8583 : "w"(__a)
8584 : /* No clobbers */);
8585 return __result;
8586 }
8587
8588 __extension__ extern __inline uint64x2_t
8589 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8590 vmovl_high_u32 (uint32x4_t __a)
8591 {
8592 uint64x2_t __result;
8593 __asm__ ("ushll2 %0.2d,%1.4s,#0"
8594 : "=w"(__result)
8595 : "w"(__a)
8596 : /* No clobbers */);
8597 return __result;
8598 }
8599
8600 __extension__ extern __inline int16x8_t
8601 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8602 vmovl_s8 (int8x8_t __a)
8603 {
8604 return __builtin_aarch64_sxtlv8hi (__a);
8605 }
8606
8607 __extension__ extern __inline int32x4_t
8608 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8609 vmovl_s16 (int16x4_t __a)
8610 {
8611 return __builtin_aarch64_sxtlv4si (__a);
8612 }
8613
8614 __extension__ extern __inline int64x2_t
8615 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8616 vmovl_s32 (int32x2_t __a)
8617 {
8618 return __builtin_aarch64_sxtlv2di (__a);
8619 }
8620
8621 __extension__ extern __inline uint16x8_t
8622 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8623 vmovl_u8 (uint8x8_t __a)
8624 {
8625 return __builtin_aarch64_uxtlv8hi_uu (__a);
8626 }
8627
8628 __extension__ extern __inline uint32x4_t
8629 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8630 vmovl_u16 (uint16x4_t __a)
8631 {
8632 return __builtin_aarch64_uxtlv4si_uu (__a);
8633 }
8634
8635 __extension__ extern __inline uint64x2_t
8636 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8637 vmovl_u32 (uint32x2_t __a)
8638 {
8639 return __builtin_aarch64_uxtlv2di_uu (__a);
8640 }
8641
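/* The vmovn_high_* functions below narrow __b by truncating each element
   to half its width and place the narrowed elements in the high half of
   the result, keeping __a as the low half, e.g. for vmovn_high_s16:
     result[0..7]  = __a[0..7]
     result[8..15] = (int8_t) __b[0..7]
   The plain vmovn_* forms further down narrow a single vector.  */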
8642 __extension__ extern __inline int8x16_t
8643 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8644 vmovn_high_s16 (int8x8_t __a, int16x8_t __b)
8645 {
8646 return __builtin_aarch64_xtn2v8hi (__a, __b);
8647 }
8648
8649 __extension__ extern __inline int16x8_t
8650 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8651 vmovn_high_s32 (int16x4_t __a, int32x4_t __b)
8652 {
8653 return __builtin_aarch64_xtn2v4si (__a, __b);
8654 }
8655
8656 __extension__ extern __inline int32x4_t
8657 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8658 vmovn_high_s64 (int32x2_t __a, int64x2_t __b)
8659 {
8660 return __builtin_aarch64_xtn2v2di (__a, __b);
8661 }
8662
8663 __extension__ extern __inline uint8x16_t
8664 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8665 vmovn_high_u16 (uint8x8_t __a, uint16x8_t __b)
8666 {
8667 return (uint8x16_t)
8668 __builtin_aarch64_xtn2v8hi ((int8x8_t) __a, (int16x8_t) __b);
8669 }
8670
8671 __extension__ extern __inline uint16x8_t
8672 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8673 vmovn_high_u32 (uint16x4_t __a, uint32x4_t __b)
8674 {
8675 return (uint16x8_t)
8676 __builtin_aarch64_xtn2v4si ((int16x4_t) __a, (int32x4_t) __b);
8677 }
8678
8679 __extension__ extern __inline uint32x4_t
8680 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8681 vmovn_high_u64 (uint32x2_t __a, uint64x2_t __b)
8682 {
8683 return (uint32x4_t)
8684 __builtin_aarch64_xtn2v2di ((int32x2_t) __a, (int64x2_t) __b);
8685 }
8686
8687 __extension__ extern __inline int8x8_t
8688 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8689 vmovn_s16 (int16x8_t __a)
8690 {
8691 return __builtin_aarch64_xtnv8hi (__a);
8692 }
8693
8694 __extension__ extern __inline int16x4_t
8695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8696 vmovn_s32 (int32x4_t __a)
8697 {
8698 return __builtin_aarch64_xtnv4si (__a);
8699 }
8700
8701 __extension__ extern __inline int32x2_t
8702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8703 vmovn_s64 (int64x2_t __a)
8704 {
8705 return __builtin_aarch64_xtnv2di (__a);
8706 }
8707
8708 __extension__ extern __inline uint8x8_t
8709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8710 vmovn_u16 (uint16x8_t __a)
8711 {
8712 return (uint8x8_t) __builtin_aarch64_xtnv8hi ((int16x8_t) __a);
8713 }
8714
8715 __extension__ extern __inline uint16x4_t
8716 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8717 vmovn_u32 (uint32x4_t __a)
8718 {
8719 return (uint16x4_t) __builtin_aarch64_xtnv4si ((int32x4_t) __a);
8720 }
8721
8722 __extension__ extern __inline uint32x2_t
8723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8724 vmovn_u64 (uint64x2_t __a)
8725 {
8726 return (uint32x2_t) __builtin_aarch64_xtnv2di ((int64x2_t) __a);
8727 }
8728
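/* The vmull_high_lane_* and vmull_high_laneq_* macros below perform a
   widening multiply of the high half of A by a single lane of B, with no
   accumulator (illustrative pseudocode only):
     result[i] = (widened) a_[i + lanes/2] * (widened) b_[c]
   The _laneq forms take the lane from a 128-bit vector.  */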
8729 #define vmull_high_lane_s16(a, b, c) \
8730 __extension__ \
8731 ({ \
8732 int16x4_t b_ = (b); \
8733 int16x8_t a_ = (a); \
8734 int32x4_t result; \
8735 __asm__ ("smull2 %0.4s, %1.8h, %2.h[%3]" \
8736 : "=w"(result) \
8737 : "w"(a_), "x"(b_), "i"(c) \
8738 : /* No clobbers */); \
8739 result; \
8740 })
8741
8742 #define vmull_high_lane_s32(a, b, c) \
8743 __extension__ \
8744 ({ \
8745 int32x2_t b_ = (b); \
8746 int32x4_t a_ = (a); \
8747 int64x2_t result; \
8748 __asm__ ("smull2 %0.2d, %1.4s, %2.s[%3]" \
8749 : "=w"(result) \
8750 : "w"(a_), "w"(b_), "i"(c) \
8751 : /* No clobbers */); \
8752 result; \
8753 })
8754
8755 #define vmull_high_lane_u16(a, b, c) \
8756 __extension__ \
8757 ({ \
8758 uint16x4_t b_ = (b); \
8759 uint16x8_t a_ = (a); \
8760 uint32x4_t result; \
8761 __asm__ ("umull2 %0.4s, %1.8h, %2.h[%3]" \
8762 : "=w"(result) \
8763 : "w"(a_), "x"(b_), "i"(c) \
8764 : /* No clobbers */); \
8765 result; \
8766 })
8767
8768 #define vmull_high_lane_u32(a, b, c) \
8769 __extension__ \
8770 ({ \
8771 uint32x2_t b_ = (b); \
8772 uint32x4_t a_ = (a); \
8773 uint64x2_t result; \
8774 __asm__ ("umull2 %0.2d, %1.4s, %2.s[%3]" \
8775 : "=w"(result) \
8776 : "w"(a_), "w"(b_), "i"(c) \
8777 : /* No clobbers */); \
8778 result; \
8779 })
8780
8781 #define vmull_high_laneq_s16(a, b, c) \
8782 __extension__ \
8783 ({ \
8784 int16x8_t b_ = (b); \
8785 int16x8_t a_ = (a); \
8786 int32x4_t result; \
8787 __asm__ ("smull2 %0.4s, %1.8h, %2.h[%3]" \
8788 : "=w"(result) \
8789 : "w"(a_), "x"(b_), "i"(c) \
8790 : /* No clobbers */); \
8791 result; \
8792 })
8793
8794 #define vmull_high_laneq_s32(a, b, c) \
8795 __extension__ \
8796 ({ \
8797 int32x4_t b_ = (b); \
8798 int32x4_t a_ = (a); \
8799 int64x2_t result; \
8800 __asm__ ("smull2 %0.2d, %1.4s, %2.s[%3]" \
8801 : "=w"(result) \
8802 : "w"(a_), "w"(b_), "i"(c) \
8803 : /* No clobbers */); \
8804 result; \
8805 })
8806
8807 #define vmull_high_laneq_u16(a, b, c) \
8808 __extension__ \
8809 ({ \
8810 uint16x8_t b_ = (b); \
8811 uint16x8_t a_ = (a); \
8812 uint32x4_t result; \
8813 __asm__ ("umull2 %0.4s, %1.8h, %2.h[%3]" \
8814 : "=w"(result) \
8815 : "w"(a_), "x"(b_), "i"(c) \
8816 : /* No clobbers */); \
8817 result; \
8818 })
8819
8820 #define vmull_high_laneq_u32(a, b, c) \
8821 __extension__ \
8822 ({ \
8823 uint32x4_t b_ = (b); \
8824 uint32x4_t a_ = (a); \
8825 uint64x2_t result; \
8826 __asm__ ("umull2 %0.2d, %1.4s, %2.s[%3]" \
8827 : "=w"(result) \
8828 : "w"(a_), "w"(b_), "i"(c) \
8829 : /* No clobbers */); \
8830 result; \
8831 })
8832
8833 __extension__ extern __inline int32x4_t
8834 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8835 vmull_high_n_s16 (int16x8_t __a, int16_t __b)
8836 {
8837 int32x4_t __result;
8838 __asm__ ("smull2 %0.4s,%1.8h,%2.h[0]"
8839 : "=w"(__result)
8840 : "w"(__a), "x"(__b)
8841 : /* No clobbers */);
8842 return __result;
8843 }
8844
8845 __extension__ extern __inline int64x2_t
8846 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8847 vmull_high_n_s32 (int32x4_t __a, int32_t __b)
8848 {
8849 int64x2_t __result;
8850 __asm__ ("smull2 %0.2d,%1.4s,%2.s[0]"
8851 : "=w"(__result)
8852 : "w"(__a), "w"(__b)
8853 : /* No clobbers */);
8854 return __result;
8855 }
8856
8857 __extension__ extern __inline uint32x4_t
8858 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8859 vmull_high_n_u16 (uint16x8_t __a, uint16_t __b)
8860 {
8861 uint32x4_t __result;
8862 __asm__ ("umull2 %0.4s,%1.8h,%2.h[0]"
8863 : "=w"(__result)
8864 : "w"(__a), "x"(__b)
8865 : /* No clobbers */);
8866 return __result;
8867 }
8868
8869 __extension__ extern __inline uint64x2_t
8870 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8871 vmull_high_n_u32 (uint32x4_t __a, uint32_t __b)
8872 {
8873 uint64x2_t __result;
8874 __asm__ ("umull2 %0.2d,%1.4s,%2.s[0]"
8875 : "=w"(__result)
8876 : "w"(__a), "w"(__b)
8877 : /* No clobbers */);
8878 return __result;
8879 }
8880
8881 __extension__ extern __inline poly16x8_t
8882 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8883 vmull_high_p8 (poly8x16_t __a, poly8x16_t __b)
8884 {
8885 poly16x8_t __result;
8886 __asm__ ("pmull2 %0.8h,%1.16b,%2.16b"
8887 : "=w"(__result)
8888 : "w"(__a), "w"(__b)
8889 : /* No clobbers */);
8890 return __result;
8891 }
8892
8893 __extension__ extern __inline int16x8_t
8894 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8895 vmull_high_s8 (int8x16_t __a, int8x16_t __b)
8896 {
8897 return __builtin_aarch64_vec_widen_smult_hi_v16qi (__a, __b);
8898 }
8899
8900 __extension__ extern __inline int32x4_t
8901 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8902 vmull_high_s16 (int16x8_t __a, int16x8_t __b)
8903 {
8904 return __builtin_aarch64_vec_widen_smult_hi_v8hi (__a, __b);
8905 }
8906
8907 __extension__ extern __inline int64x2_t
8908 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8909 vmull_high_s32 (int32x4_t __a, int32x4_t __b)
8910 {
8911 return __builtin_aarch64_vec_widen_smult_hi_v4si (__a, __b);
8912 }
8913
8914 __extension__ extern __inline uint16x8_t
8915 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8916 vmull_high_u8 (uint8x16_t __a, uint8x16_t __b)
8917 {
8918 return __builtin_aarch64_vec_widen_umult_hi_v16qi_uuu (__a, __b);
8919 }
8920
8921 __extension__ extern __inline uint32x4_t
8922 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8923 vmull_high_u16 (uint16x8_t __a, uint16x8_t __b)
8924 {
8925 return __builtin_aarch64_vec_widen_umult_hi_v8hi_uuu (__a, __b);
8926 }
8927
8928 __extension__ extern __inline uint64x2_t
8929 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8930 vmull_high_u32 (uint32x4_t __a, uint32x4_t __b)
8931 {
8932 return __builtin_aarch64_vec_widen_umult_hi_v4si_uuu (__a, __b);
8933 }
8934
8935 __extension__ extern __inline int32x4_t
8936 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8937 vmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
8938 {
8939 return __builtin_aarch64_vec_smult_lane_v4hi (__a, __b, __c);
8940 }
8941
8942 __extension__ extern __inline int64x2_t
8943 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8944 vmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
8945 {
8946 return __builtin_aarch64_vec_smult_lane_v2si (__a, __b, __c);
8947 }
8948
8949 __extension__ extern __inline uint32x4_t
8950 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8951 vmull_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
8952 {
8953 return __builtin_aarch64_vec_umult_lane_v4hi_uuus (__a, __b, __c);
8954 }
8955
8956 __extension__ extern __inline uint64x2_t
8957 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8958 vmull_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
8959 {
8960 return __builtin_aarch64_vec_umult_lane_v2si_uuus (__a, __b, __c);
8961 }
8962
8963 __extension__ extern __inline int32x4_t
8964 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8965 vmull_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
8966 {
8967 return __builtin_aarch64_vec_smult_laneq_v4hi (__a, __b, __c);
8968 }
8969
8970 __extension__ extern __inline int64x2_t
8971 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8972 vmull_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
8973 {
8974 return __builtin_aarch64_vec_smult_laneq_v2si (__a, __b, __c);
8975 }
8976
8977 __extension__ extern __inline uint32x4_t
8978 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8979 vmull_laneq_u16 (uint16x4_t __a, uint16x8_t __b, const int __c)
8980 {
8981 return __builtin_aarch64_vec_umult_laneq_v4hi_uuus (__a, __b, __c);
8982 }
8983
8984 __extension__ extern __inline uint64x2_t
8985 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8986 vmull_laneq_u32 (uint32x2_t __a, uint32x4_t __b, const int __c)
8987 {
8988 return __builtin_aarch64_vec_umult_laneq_v2si_uuus (__a, __b, __c);
8989 }
8990
8991 __extension__ extern __inline int32x4_t
8992 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
8993 vmull_n_s16 (int16x4_t __a, int16_t __b)
8994 {
8995 int32x4_t __result;
8996 __asm__ ("smull %0.4s,%1.4h,%2.h[0]"
8997 : "=w"(__result)
8998 : "w"(__a), "x"(__b)
8999 : /* No clobbers */);
9000 return __result;
9001 }
9002
9003 __extension__ extern __inline int64x2_t
9004 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9005 vmull_n_s32 (int32x2_t __a, int32_t __b)
9006 {
9007 int64x2_t __result;
9008 __asm__ ("smull %0.2d,%1.2s,%2.s[0]"
9009 : "=w"(__result)
9010 : "w"(__a), "w"(__b)
9011 : /* No clobbers */);
9012 return __result;
9013 }
9014
9015 __extension__ extern __inline uint32x4_t
9016 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9017 vmull_n_u16 (uint16x4_t __a, uint16_t __b)
9018 {
9019 uint32x4_t __result;
9020 __asm__ ("umull %0.4s,%1.4h,%2.h[0]"
9021 : "=w"(__result)
9022 : "w"(__a), "x"(__b)
9023 : /* No clobbers */);
9024 return __result;
9025 }
9026
9027 __extension__ extern __inline uint64x2_t
9028 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9029 vmull_n_u32 (uint32x2_t __a, uint32_t __b)
9030 {
9031 uint64x2_t __result;
9032 __asm__ ("umull %0.2d,%1.2s,%2.s[0]"
9033 : "=w"(__result)
9034 : "w"(__a), "w"(__b)
9035 : /* No clobbers */);
9036 return __result;
9037 }
9038
9039 __extension__ extern __inline poly16x8_t
9040 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9041 vmull_p8 (poly8x8_t __a, poly8x8_t __b)
9042 {
9043 poly16x8_t __result;
9044 __asm__ ("pmull %0.8h, %1.8b, %2.8b"
9045 : "=w"(__result)
9046 : "w"(__a), "w"(__b)
9047 : /* No clobbers */);
9048 return __result;
9049 }
9050
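/* The plain vmull_* intrinsics below multiply corresponding elements and
   widen the result, with no accumulation.  Sketch with illustrative
   values only:

     int16x4_t x = vdup_n_s16 (300);
     int16x4_t y = vdup_n_s16 (300);
     int32x4_t p = vmull_s16 (x, y);

   every lane of p holds 90000, a value that would have overflowed a
   16-bit multiply.  */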
9051 __extension__ extern __inline int16x8_t
9052 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9053 vmull_s8 (int8x8_t __a, int8x8_t __b)
9054 {
9055 return __builtin_aarch64_intrinsic_vec_smult_lo_v8qi (__a, __b);
9056 }
9057
9058 __extension__ extern __inline int32x4_t
9059 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9060 vmull_s16 (int16x4_t __a, int16x4_t __b)
9061 {
9062 return __builtin_aarch64_intrinsic_vec_smult_lo_v4hi (__a, __b);
9063 }
9064
9065 __extension__ extern __inline int64x2_t
9066 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9067 vmull_s32 (int32x2_t __a, int32x2_t __b)
9068 {
9069 return __builtin_aarch64_intrinsic_vec_smult_lo_v2si (__a, __b);
9070 }
9071
9072 __extension__ extern __inline uint16x8_t
9073 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9074 vmull_u8 (uint8x8_t __a, uint8x8_t __b)
9075 {
9076 return __builtin_aarch64_intrinsic_vec_umult_lo_v8qi_uuu (__a, __b);
9077 }
9078
9079 __extension__ extern __inline uint32x4_t
9080 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9081 vmull_u16 (uint16x4_t __a, uint16x4_t __b)
9082 {
9083 return __builtin_aarch64_intrinsic_vec_umult_lo_v4hi_uuu (__a, __b);
9084 }
9085
9086 __extension__ extern __inline uint64x2_t
9087 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9088 vmull_u32 (uint32x2_t __a, uint32x2_t __b)
9089 {
9090 return __builtin_aarch64_intrinsic_vec_umult_lo_v2si_uuu (__a, __b);
9091 }
9092
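/* The vpadal_* (pairwise add and accumulate long) intrinsics below add
   each pair of adjacent elements of __b, widen the sum and accumulate it
   into the corresponding element of __a, roughly:
     result[i] = __a[i] + (widened) __b[2 * i] + (widened) __b[2 * i + 1]
   so the result has half as many elements as __b (the same as __a).  */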
9093 __extension__ extern __inline int16x4_t
9094 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9095 vpadal_s8 (int16x4_t __a, int8x8_t __b)
9096 {
9097 return __builtin_aarch64_sadalpv8qi (__a, __b);
9098 }
9099
9100 __extension__ extern __inline int32x2_t
9101 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9102 vpadal_s16 (int32x2_t __a, int16x4_t __b)
9103 {
9104 return __builtin_aarch64_sadalpv4hi (__a, __b);
9105 }
9106
9107 __extension__ extern __inline int64x1_t
9108 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9109 vpadal_s32 (int64x1_t __a, int32x2_t __b)
9110 {
9111 int64x1_t __result;
9112 __asm__ ("sadalp %0.1d,%2.2s"
9113 : "=w"(__result)
9114 : "0"(__a), "w"(__b)
9115 : /* No clobbers */);
9116 return __result;
9117 }
9118
9119 __extension__ extern __inline uint16x4_t
9120 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9121 vpadal_u8 (uint16x4_t __a, uint8x8_t __b)
9122 {
9123 return __builtin_aarch64_uadalpv8qi_uuu (__a, __b);
9124 }
9125
9126 __extension__ extern __inline uint32x2_t
9127 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9128 vpadal_u16 (uint32x2_t __a, uint16x4_t __b)
9129 {
9130 return __builtin_aarch64_uadalpv4hi_uuu (__a, __b);
9131 }
9132
9133 __extension__ extern __inline uint64x1_t
9134 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9135 vpadal_u32 (uint64x1_t __a, uint32x2_t __b)
9136 {
9137 uint64x1_t __result;
9138 __asm__ ("uadalp %0.1d,%2.2s"
9139 : "=w"(__result)
9140 : "0"(__a), "w"(__b)
9141 : /* No clobbers */);
9142 return __result;
9143 }
9144
9145 __extension__ extern __inline int16x8_t
9146 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9147 vpadalq_s8 (int16x8_t __a, int8x16_t __b)
9148 {
9149 return __builtin_aarch64_sadalpv16qi (__a, __b);
9150 }
9151
9152 __extension__ extern __inline int32x4_t
9153 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9154 vpadalq_s16 (int32x4_t __a, int16x8_t __b)
9155 {
9156 return __builtin_aarch64_sadalpv8hi (__a, __b);
9157 }
9158
9159 __extension__ extern __inline int64x2_t
9160 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9161 vpadalq_s32 (int64x2_t __a, int32x4_t __b)
9162 {
9163 return __builtin_aarch64_sadalpv4si (__a, __b);
9164 }
9165
9166 __extension__ extern __inline uint16x8_t
9167 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9168 vpadalq_u8 (uint16x8_t __a, uint8x16_t __b)
9169 {
9170 return __builtin_aarch64_uadalpv16qi_uuu (__a, __b);
9171 }
9172
9173 __extension__ extern __inline uint32x4_t
9174 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9175 vpadalq_u16 (uint32x4_t __a, uint16x8_t __b)
9176 {
9177 return __builtin_aarch64_uadalpv8hi_uuu (__a, __b);
9178 }
9179
9180 __extension__ extern __inline uint64x2_t
9181 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9182 vpadalq_u32 (uint64x2_t __a, uint32x4_t __b)
9183 {
9184 return __builtin_aarch64_uadalpv4si_uuu (__a, __b);
9185 }
9186
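/* The vpaddl_* (pairwise add long) intrinsics below add each pair of
   adjacent elements and widen the sum, without an accumulator.  For
   example, vpaddl_u8 applied to {1, 2, 3, 4, 5, 6, 7, 8} yields the
   uint16x4_t value {3, 7, 11, 15}.  */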
9187 __extension__ extern __inline int16x4_t
9188 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9189 vpaddl_s8 (int8x8_t __a)
9190 {
9191 int16x4_t __result;
9192 __asm__ ("saddlp %0.4h,%1.8b"
9193 : "=w"(__result)
9194 : "w"(__a)
9195 : /* No clobbers */);
9196 return __result;
9197 }
9198
9199 __extension__ extern __inline int32x2_t
9200 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9201 vpaddl_s16 (int16x4_t __a)
9202 {
9203 int32x2_t __result;
9204 __asm__ ("saddlp %0.2s,%1.4h"
9205 : "=w"(__result)
9206 : "w"(__a)
9207 : /* No clobbers */);
9208 return __result;
9209 }
9210
9211 __extension__ extern __inline int64x1_t
9212 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9213 vpaddl_s32 (int32x2_t __a)
9214 {
9215 int64x1_t __result;
9216 __asm__ ("saddlp %0.1d,%1.2s"
9217 : "=w"(__result)
9218 : "w"(__a)
9219 : /* No clobbers */);
9220 return __result;
9221 }
9222
9223 __extension__ extern __inline uint16x4_t
9224 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9225 vpaddl_u8 (uint8x8_t __a)
9226 {
9227 uint16x4_t __result;
9228 __asm__ ("uaddlp %0.4h,%1.8b"
9229 : "=w"(__result)
9230 : "w"(__a)
9231 : /* No clobbers */);
9232 return __result;
9233 }
9234
9235 __extension__ extern __inline uint32x2_t
9236 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9237 vpaddl_u16 (uint16x4_t __a)
9238 {
9239 uint32x2_t __result;
9240 __asm__ ("uaddlp %0.2s,%1.4h"
9241 : "=w"(__result)
9242 : "w"(__a)
9243 : /* No clobbers */);
9244 return __result;
9245 }
9246
9247 __extension__ extern __inline uint64x1_t
9248 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9249 vpaddl_u32 (uint32x2_t __a)
9250 {
9251 uint64x1_t __result;
9252 __asm__ ("uaddlp %0.1d,%1.2s"
9253 : "=w"(__result)
9254 : "w"(__a)
9255 : /* No clobbers */);
9256 return __result;
9257 }
9258
9259 __extension__ extern __inline int16x8_t
9260 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9261 vpaddlq_s8 (int8x16_t __a)
9262 {
9263 int16x8_t __result;
9264 __asm__ ("saddlp %0.8h,%1.16b"
9265 : "=w"(__result)
9266 : "w"(__a)
9267 : /* No clobbers */);
9268 return __result;
9269 }
9270
9271 __extension__ extern __inline int32x4_t
9272 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9273 vpaddlq_s16 (int16x8_t __a)
9274 {
9275 int32x4_t __result;
9276 __asm__ ("saddlp %0.4s,%1.8h"
9277 : "=w"(__result)
9278 : "w"(__a)
9279 : /* No clobbers */);
9280 return __result;
9281 }
9282
9283 __extension__ extern __inline int64x2_t
9284 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9285 vpaddlq_s32 (int32x4_t __a)
9286 {
9287 int64x2_t __result;
9288 __asm__ ("saddlp %0.2d,%1.4s"
9289 : "=w"(__result)
9290 : "w"(__a)
9291 : /* No clobbers */);
9292 return __result;
9293 }
9294
9295 __extension__ extern __inline uint16x8_t
9296 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9297 vpaddlq_u8 (uint8x16_t __a)
9298 {
9299 uint16x8_t __result;
9300 __asm__ ("uaddlp %0.8h,%1.16b"
9301 : "=w"(__result)
9302 : "w"(__a)
9303 : /* No clobbers */);
9304 return __result;
9305 }
9306
9307 __extension__ extern __inline uint32x4_t
9308 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9309 vpaddlq_u16 (uint16x8_t __a)
9310 {
9311 uint32x4_t __result;
9312 __asm__ ("uaddlp %0.4s,%1.8h"
9313 : "=w"(__result)
9314 : "w"(__a)
9315 : /* No clobbers */);
9316 return __result;
9317 }
9318
9319 __extension__ extern __inline uint64x2_t
9320 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9321 vpaddlq_u32 (uint32x4_t __a)
9322 {
9323 uint64x2_t __result;
9324 __asm__ ("uaddlp %0.2d,%1.4s"
9325 : "=w"(__result)
9326 : "w"(__a)
9327 : /* No clobbers */);
9328 return __result;
9329 }
9330
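/* Illustrative usage only, not part of this header: the vpaddl* intrinsics
   above pairwise-add adjacent lanes into lanes of twice the width, and the
   vpadal* intrinsics do the same while accumulating into an existing wider
   vector.  A minimal sketch (helper name is hypothetical) summing groups of
   four bytes into 32-bit accumulators:

     uint32x4_t
     accumulate_bytes (uint32x4_t __acc, uint8x16_t __bytes)
     {
       uint16x8_t __pairs = vpaddlq_u8 (__bytes);
       return vpadalq_u16 (__acc, __pairs);
     }
*/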
9331 __extension__ extern __inline int8x16_t
9332 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9333 vpaddq_s8 (int8x16_t __a, int8x16_t __b)
9334 {
9335 int8x16_t __result;
9336 __asm__ ("addp %0.16b,%1.16b,%2.16b"
9337 : "=w"(__result)
9338 : "w"(__a), "w"(__b)
9339 : /* No clobbers */);
9340 return __result;
9341 }
9342
9343 __extension__ extern __inline int16x8_t
9344 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9345 vpaddq_s16 (int16x8_t __a, int16x8_t __b)
9346 {
9347 int16x8_t __result;
9348 __asm__ ("addp %0.8h,%1.8h,%2.8h"
9349 : "=w"(__result)
9350 : "w"(__a), "w"(__b)
9351 : /* No clobbers */);
9352 return __result;
9353 }
9354
9355 __extension__ extern __inline int32x4_t
9356 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9357 vpaddq_s32 (int32x4_t __a, int32x4_t __b)
9358 {
9359 int32x4_t __result;
9360 __asm__ ("addp %0.4s,%1.4s,%2.4s"
9361 : "=w"(__result)
9362 : "w"(__a), "w"(__b)
9363 : /* No clobbers */);
9364 return __result;
9365 }
9366
9367 __extension__ extern __inline int64x2_t
9368 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9369 vpaddq_s64 (int64x2_t __a, int64x2_t __b)
9370 {
9371 int64x2_t __result;
9372 __asm__ ("addp %0.2d,%1.2d,%2.2d"
9373 : "=w"(__result)
9374 : "w"(__a), "w"(__b)
9375 : /* No clobbers */);
9376 return __result;
9377 }
9378
9379 __extension__ extern __inline uint8x16_t
9380 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9381 vpaddq_u8 (uint8x16_t __a, uint8x16_t __b)
9382 {
9383 uint8x16_t __result;
9384 __asm__ ("addp %0.16b,%1.16b,%2.16b"
9385 : "=w"(__result)
9386 : "w"(__a), "w"(__b)
9387 : /* No clobbers */);
9388 return __result;
9389 }
9390
9391 __extension__ extern __inline uint16x8_t
9392 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9393 vpaddq_u16 (uint16x8_t __a, uint16x8_t __b)
9394 {
9395 uint16x8_t __result;
9396 __asm__ ("addp %0.8h,%1.8h,%2.8h"
9397 : "=w"(__result)
9398 : "w"(__a), "w"(__b)
9399 : /* No clobbers */);
9400 return __result;
9401 }
9402
9403 __extension__ extern __inline uint32x4_t
9404 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9405 vpaddq_u32 (uint32x4_t __a, uint32x4_t __b)
9406 {
9407 uint32x4_t __result;
9408 __asm__ ("addp %0.4s,%1.4s,%2.4s"
9409 : "=w"(__result)
9410 : "w"(__a), "w"(__b)
9411 : /* No clobbers */);
9412 return __result;
9413 }
9414
9415 __extension__ extern __inline uint64x2_t
9416 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9417 vpaddq_u64 (uint64x2_t __a, uint64x2_t __b)
9418 {
9419 uint64x2_t __result;
9420 __asm__ ("addp %0.2d,%1.2d,%2.2d"
9421 : "=w"(__result)
9422 : "w"(__a), "w"(__b)
9423 : /* No clobbers */);
9424 return __result;
9425 }
9426
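/* Illustrative usage only, not part of this header: vpaddq_* adds adjacent
   lane pairs of both operands and concatenates the results, which makes it
   convenient for horizontal reductions.  A minimal sketch (helper name is
   hypothetical; vgetq_lane_s32 is defined elsewhere in this file) summing
   the four lanes of an int32x4_t:

     int32_t
     sum_lanes (int32x4_t __v)
     {
       int32x4_t __t = vpaddq_s32 (__v, __v);
       __t = vpaddq_s32 (__t, __t);
       return vgetq_lane_s32 (__t, 0);
     }
*/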
9427 __extension__ extern __inline int16x4_t
9428 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9429 vqdmulh_n_s16 (int16x4_t __a, int16_t __b)
9430 {
9431 int16x4_t __result;
9432 __asm__ ("sqdmulh %0.4h,%1.4h,%2.h[0]"
9433 : "=w"(__result)
9434 : "w"(__a), "x"(__b)
9435 : /* No clobbers */);
9436 return __result;
9437 }
9438
9439 __extension__ extern __inline int32x2_t
9440 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9441 vqdmulh_n_s32 (int32x2_t __a, int32_t __b)
9442 {
9443 int32x2_t __result;
9444 __asm__ ("sqdmulh %0.2s,%1.2s,%2.s[0]"
9445 : "=w"(__result)
9446 : "w"(__a), "w"(__b)
9447 : /* No clobbers */);
9448 return __result;
9449 }
9450
9451 __extension__ extern __inline int16x8_t
9452 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9453 vqdmulhq_n_s16 (int16x8_t __a, int16_t __b)
9454 {
9455 int16x8_t __result;
9456 __asm__ ("sqdmulh %0.8h,%1.8h,%2.h[0]"
9457 : "=w"(__result)
9458 : "w"(__a), "x"(__b)
9459 : /* No clobbers */);
9460 return __result;
9461 }
9462
9463 __extension__ extern __inline int32x4_t
9464 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9465 vqdmulhq_n_s32 (int32x4_t __a, int32_t __b)
9466 {
9467 int32x4_t __result;
9468 __asm__ ("sqdmulh %0.4s,%1.4s,%2.s[0]"
9469 : "=w"(__result)
9470 : "w"(__a), "w"(__b)
9471 : /* No clobbers */);
9472 return __result;
9473 }
9474
9475 __extension__ extern __inline int8x16_t
9476 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9477 vqmovn_high_s16 (int8x8_t __a, int16x8_t __b)
9478 {
9479 return __builtin_aarch64_sqxtn2v8hi (__a, __b);
9480 }
9481
9482 __extension__ extern __inline int16x8_t
9483 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9484 vqmovn_high_s32 (int16x4_t __a, int32x4_t __b)
9485 {
9486 return __builtin_aarch64_sqxtn2v4si (__a, __b);
9487 }
9488
9489 __extension__ extern __inline int32x4_t
9490 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9491 vqmovn_high_s64 (int32x2_t __a, int64x2_t __b)
9492 {
9493 return __builtin_aarch64_sqxtn2v2di (__a, __b);
9494 }
9495
9496 __extension__ extern __inline uint8x16_t
9497 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9498 vqmovn_high_u16 (uint8x8_t __a, uint16x8_t __b)
9499 {
9500 return __builtin_aarch64_uqxtn2v8hi_uuu (__a, __b);
9501 }
9502
9503 __extension__ extern __inline uint16x8_t
9504 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9505 vqmovn_high_u32 (uint16x4_t __a, uint32x4_t __b)
9506 {
9507 return __builtin_aarch64_uqxtn2v4si_uuu (__a, __b);
9508 }
9509
9510 __extension__ extern __inline uint32x4_t
9511 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9512 vqmovn_high_u64 (uint32x2_t __a, uint64x2_t __b)
9513 {
9514 return __builtin_aarch64_uqxtn2v2di_uuu (__a, __b);
9515 }
9516
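/* Illustrative usage only, not part of this header: the vqmovn_high_*
   intrinsics saturate-narrow __b into the upper half of the result while
   keeping the already-narrowed __a in the lower half.  A minimal sketch
   (helper name is hypothetical; vqmovn_s16 is defined elsewhere in this
   file) narrowing two int16x8_t vectors into one int8x16_t:

     int8x16_t
     narrow_two (int16x8_t __lo, int16x8_t __hi)
     {
       return vqmovn_high_s16 (vqmovn_s16 (__lo), __hi);
     }
*/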
9517 __extension__ extern __inline uint8x16_t
9518 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9519 vqmovun_high_s16 (uint8x8_t __a, int16x8_t __b)
9520 {
9521 uint8x16_t __result = vcombine_u8 (__a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
9522 __asm__ ("sqxtun2 %0.16b, %1.8h"
9523 : "+w"(__result)
9524 : "w"(__b)
9525 : /* No clobbers */);
9526 return __result;
9527 }
9528
9529 __extension__ extern __inline uint16x8_t
9530 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9531 vqmovun_high_s32 (uint16x4_t __a, int32x4_t __b)
9532 {
9533 uint16x8_t __result = vcombine_u16 (__a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
9534 __asm__ ("sqxtun2 %0.8h, %1.4s"
9535 : "+w"(__result)
9536 : "w"(__b)
9537 : /* No clobbers */);
9538 return __result;
9539 }
9540
9541 __extension__ extern __inline uint32x4_t
9542 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9543 vqmovun_high_s64 (uint32x2_t __a, int64x2_t __b)
9544 {
9545 uint32x4_t __result = vcombine_u32 (__a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
9546 __asm__ ("sqxtun2 %0.4s, %1.2d"
9547 : "+w"(__result)
9548 : "w"(__b)
9549 : /* No clobbers */);
9550 return __result;
9551 }
9552
9553 __extension__ extern __inline int16x4_t
9554 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9555 vqrdmulh_n_s16 (int16x4_t __a, int16_t __b)
9556 {
9557 int16x4_t __result;
9558 __asm__ ("sqrdmulh %0.4h,%1.4h,%2.h[0]"
9559 : "=w"(__result)
9560 : "w"(__a), "x"(__b)
9561 : /* No clobbers */);
9562 return __result;
9563 }
9564
9565 __extension__ extern __inline int32x2_t
9566 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9567 vqrdmulh_n_s32 (int32x2_t __a, int32_t __b)
9568 {
9569 int32x2_t __result;
9570 __asm__ ("sqrdmulh %0.2s,%1.2s,%2.s[0]"
9571 : "=w"(__result)
9572 : "w"(__a), "w"(__b)
9573 : /* No clobbers */);
9574 return __result;
9575 }
9576
9577 __extension__ extern __inline int16x8_t
9578 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9579 vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b)
9580 {
9581 int16x8_t __result;
9582 __asm__ ("sqrdmulh %0.8h,%1.8h,%2.h[0]"
9583 : "=w"(__result)
9584 : "w"(__a), "x"(__b)
9585 : /* No clobbers */);
9586 return __result;
9587 }
9588
9589 __extension__ extern __inline int32x4_t
9590 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9591 vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b)
9592 {
9593 int32x4_t __result;
9594 __asm__ ("sqrdmulh %0.4s,%1.4s,%2.s[0]"
9595 : "=w"(__result)
9596 : "w"(__a), "w"(__b)
9597 : /* No clobbers */);
9598 return __result;
9599 }
9600
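/* Illustrative usage only, not part of this header: vqdmulh_n_* and
   vqrdmulh_n_* multiply every lane by a scalar, double the product and
   return its (optionally rounded) high half with saturation, i.e. the
   usual Q15/Q31 fixed-point multiply.  A minimal sketch (helper name is
   hypothetical) scaling a vector of Q15 samples by a Q15 gain:

     int16x8_t
     scale_q15 (int16x8_t __samples, int16_t __gain)
     {
       return vqrdmulhq_n_s16 (__samples, __gain);
     }
*/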
9601 __extension__ extern __inline int8x16_t
9602 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9603 vqrshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
9604 {
9605 return __builtin_aarch64_sqrshrn2_nv8hi (__a, __b, __c);
9606 }
9607
9608 __extension__ extern __inline int16x8_t
9609 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9610 vqrshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
9611 {
9612 return __builtin_aarch64_sqrshrn2_nv4si (__a, __b, __c);
9613 }
9614
9615 __extension__ extern __inline int32x4_t
9616 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9617 vqrshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
9618 {
9619 return __builtin_aarch64_sqrshrn2_nv2di (__a, __b, __c);
9620 }
9621
9622 __extension__ extern __inline uint8x16_t
9623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9624 vqrshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
9625 {
9626 return __builtin_aarch64_uqrshrn2_nv8hi_uuus (__a, __b, __c);
9627 }
9628
9629 __extension__ extern __inline uint16x8_t
9630 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9631 vqrshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
9632 {
9633 return __builtin_aarch64_uqrshrn2_nv4si_uuus (__a, __b, __c);
9634 }
9635
9636 __extension__ extern __inline uint32x4_t
9637 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9638 vqrshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
9639 {
9640 return __builtin_aarch64_uqrshrn2_nv2di_uuus (__a, __b, __c);
9641 }
9642
9643 __extension__ extern __inline uint8x16_t
9644 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9645 vqrshrun_high_n_s16 (uint8x8_t __a, int16x8_t __b, const int __c)
9646 {
9647 return __builtin_aarch64_sqrshrun2_nv8hi_uuss (__a, __b, __c);
9648 }
9649
9650 __extension__ extern __inline uint16x8_t
9651 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9652 vqrshrun_high_n_s32 (uint16x4_t __a, int32x4_t __b, const int __c)
9653 {
9654 return __builtin_aarch64_sqrshrun2_nv4si_uuss (__a, __b, __c);
9655 }
9656
9657 __extension__ extern __inline uint32x4_t
9658 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9659 vqrshrun_high_n_s64 (uint32x2_t __a, int64x2_t __b, const int __c)
9660 {
9661 return __builtin_aarch64_sqrshrun2_nv2di_uuss (__a, __b, __c);
9662 }
9663
9664 __extension__ extern __inline int8x16_t
9665 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9666 vqshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
9667 {
9668 return __builtin_aarch64_sqshrn2_nv8hi (__a, __b, __c);
9669 }
9670
9671 __extension__ extern __inline int16x8_t
9672 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9673 vqshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
9674 {
9675 return __builtin_aarch64_sqshrn2_nv4si (__a, __b, __c);
9676 }
9677
9678 __extension__ extern __inline int32x4_t
9679 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9680 vqshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
9681 {
9682 return __builtin_aarch64_sqshrn2_nv2di (__a, __b, __c);
9683 }
9684
9685 __extension__ extern __inline uint8x16_t
9686 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9687 vqshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
9688 {
9689 return __builtin_aarch64_uqshrn2_nv8hi_uuus (__a, __b, __c);
9690 }
9691
9692 __extension__ extern __inline uint16x8_t
9693 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9694 vqshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
9695 {
9696 return __builtin_aarch64_uqshrn2_nv4si_uuus (__a, __b, __c);
9697 }
9698
9699 __extension__ extern __inline uint32x4_t
9700 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9701 vqshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
9702 {
9703 return __builtin_aarch64_uqshrn2_nv2di_uuus (__a, __b, __c);
9704 }
9705
9706 __extension__ extern __inline uint8x16_t
9707 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9708 vqshrun_high_n_s16 (uint8x8_t __a, int16x8_t __b, const int __c)
9709 {
9710 return __builtin_aarch64_sqshrun2_nv8hi_uuss (__a, __b, __c);
9711 }
9712
9713 __extension__ extern __inline uint16x8_t
9714 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9715 vqshrun_high_n_s32 (uint16x4_t __a, int32x4_t __b, const int __c)
9716 {
9717 return __builtin_aarch64_sqshrun2_nv4si_uuss (__a, __b, __c);
9718 }
9719
9720 __extension__ extern __inline uint32x4_t
9721 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9722 vqshrun_high_n_s64 (uint32x2_t __a, int64x2_t __b, const int __c)
9723 {
9724 return __builtin_aarch64_sqshrun2_nv2di_uuss (__a, __b, __c);
9725 }
9726
9727 #define vrshrn_high_n_s16(a, b, c) \
9728 __extension__ \
9729 ({ \
9730 int16x8_t b_ = (b); \
9731 int8x8_t a_ = (a); \
9732 int8x16_t result = vcombine_s8 \
9733 (a_, vcreate_s8 \
9734 (__AARCH64_UINT64_C (0x0))); \
9735 __asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
9736 : "+w"(result) \
9737 : "w"(b_), "i"(c) \
9738 : /* No clobbers */); \
9739 result; \
9740 })
9741
9742 #define vrshrn_high_n_s32(a, b, c) \
9743 __extension__ \
9744 ({ \
9745 int32x4_t b_ = (b); \
9746 int16x4_t a_ = (a); \
9747 int16x8_t result = vcombine_s16 \
9748 (a_, vcreate_s16 \
9749 (__AARCH64_UINT64_C (0x0))); \
9750 __asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
9751 : "+w"(result) \
9752 : "w"(b_), "i"(c) \
9753 : /* No clobbers */); \
9754 result; \
9755 })
9756
9757 #define vrshrn_high_n_s64(a, b, c) \
9758 __extension__ \
9759 ({ \
9760 int64x2_t b_ = (b); \
9761 int32x2_t a_ = (a); \
9762 int32x4_t result = vcombine_s32 \
9763 (a_, vcreate_s32 \
9764 (__AARCH64_UINT64_C (0x0))); \
9765 __asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
9766 : "+w"(result) \
9767 : "w"(b_), "i"(c) \
9768 : /* No clobbers */); \
9769 result; \
9770 })
9771
9772 #define vrshrn_high_n_u16(a, b, c) \
9773 __extension__ \
9774 ({ \
9775 uint16x8_t b_ = (b); \
9776 uint8x8_t a_ = (a); \
9777 uint8x16_t result = vcombine_u8 \
9778 (a_, vcreate_u8 \
9779 (__AARCH64_UINT64_C (0x0))); \
9780 __asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
9781 : "+w"(result) \
9782 : "w"(b_), "i"(c) \
9783 : /* No clobbers */); \
9784 result; \
9785 })
9786
9787 #define vrshrn_high_n_u32(a, b, c) \
9788 __extension__ \
9789 ({ \
9790 uint32x4_t b_ = (b); \
9791 uint16x4_t a_ = (a); \
9792 uint16x8_t result = vcombine_u16 \
9793 (a_, vcreate_u16 \
9794 (__AARCH64_UINT64_C (0x0))); \
9795 __asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
9796 : "+w"(result) \
9797 : "w"(b_), "i"(c) \
9798 : /* No clobbers */); \
9799 result; \
9800 })
9801
9802 #define vrshrn_high_n_u64(a, b, c) \
9803 __extension__ \
9804 ({ \
9805 uint64x2_t b_ = (b); \
9806 uint32x2_t a_ = (a); \
9807 uint32x4_t result = vcombine_u32 \
9808 (a_, vcreate_u32 \
9809 (__AARCH64_UINT64_C (0x0))); \
9810 __asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
9811 : "+w"(result) \
9812 : "w"(b_), "i"(c) \
9813 : /* No clobbers */); \
9814 result; \
9815 })
9816
9817 #define vrshrn_n_s16(a, b) \
9818 __extension__ \
9819 ({ \
9820 int16x8_t a_ = (a); \
9821 int8x8_t result; \
9822 __asm__ ("rshrn %0.8b,%1.8h,%2" \
9823 : "=w"(result) \
9824 : "w"(a_), "i"(b) \
9825 : /* No clobbers */); \
9826 result; \
9827 })
9828
9829 #define vrshrn_n_s32(a, b) \
9830 __extension__ \
9831 ({ \
9832 int32x4_t a_ = (a); \
9833 int16x4_t result; \
9834 __asm__ ("rshrn %0.4h,%1.4s,%2" \
9835 : "=w"(result) \
9836 : "w"(a_), "i"(b) \
9837 : /* No clobbers */); \
9838 result; \
9839 })
9840
9841 #define vrshrn_n_s64(a, b) \
9842 __extension__ \
9843 ({ \
9844 int64x2_t a_ = (a); \
9845 int32x2_t result; \
9846 __asm__ ("rshrn %0.2s,%1.2d,%2" \
9847 : "=w"(result) \
9848 : "w"(a_), "i"(b) \
9849 : /* No clobbers */); \
9850 result; \
9851 })
9852
9853 #define vrshrn_n_u16(a, b) \
9854 __extension__ \
9855 ({ \
9856 uint16x8_t a_ = (a); \
9857 uint8x8_t result; \
9858 __asm__ ("rshrn %0.8b,%1.8h,%2" \
9859 : "=w"(result) \
9860 : "w"(a_), "i"(b) \
9861 : /* No clobbers */); \
9862 result; \
9863 })
9864
9865 #define vrshrn_n_u32(a, b) \
9866 __extension__ \
9867 ({ \
9868 uint32x4_t a_ = (a); \
9869 uint16x4_t result; \
9870 __asm__ ("rshrn %0.4h,%1.4s,%2" \
9871 : "=w"(result) \
9872 : "w"(a_), "i"(b) \
9873 : /* No clobbers */); \
9874 result; \
9875 })
9876
9877 #define vrshrn_n_u64(a, b) \
9878 __extension__ \
9879 ({ \
9880 uint64x2_t a_ = (a); \
9881 uint32x2_t result; \
9882 __asm__ ("rshrn %0.2s,%1.2d,%2" \
9883 : "=w"(result) \
9884 : "w"(a_), "i"(b) \
9885 : /* No clobbers */); \
9886 result; \
9887 })
9888
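/* Illustrative usage only, not part of this header: the vrshrn_n_* macros
   shift each lane right by an immediate with rounding and narrow it to half
   width; the vrshrn_high_n_* macros place that narrowed result in the upper
   half of a full-width vector.  A minimal sketch (helper name is
   hypothetical) halving 16-bit pairwise sums, each at most 510, back down
   to bytes:

     uint8x16_t
     halve_to_bytes (uint16x8_t __lo, uint16x8_t __hi)
     {
       return vrshrn_high_n_u16 (vrshrn_n_u16 (__lo, 1), __hi, 1);
     }
*/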
9889 __extension__ extern __inline uint32x2_t
9890 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9891 vrsqrte_u32 (uint32x2_t __a)
9892 {
9893 uint32x2_t __result;
9894 __asm__ ("ursqrte %0.2s,%1.2s"
9895 : "=w"(__result)
9896 : "w"(__a)
9897 : /* No clobbers */);
9898 return __result;
9899 }
9900
9901 __extension__ extern __inline uint32x4_t
9902 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
9903 vrsqrteq_u32 (uint32x4_t __a)
9904 {
9905 uint32x4_t __result;
9906 __asm__ ("ursqrte %0.4s,%1.4s"
9907 : "=w"(__result)
9908 : "w"(__a)
9909 : /* No clobbers */);
9910 return __result;
9911 }
9912
9913 #define vshrn_high_n_s16(a, b, c) \
9914 __extension__ \
9915 ({ \
9916 int16x8_t b_ = (b); \
9917 int8x8_t a_ = (a); \
9918 int8x16_t result = vcombine_s8 \
9919 (a_, vcreate_s8 \
9920 (__AARCH64_UINT64_C (0x0))); \
9921 __asm__ ("shrn2 %0.16b,%1.8h,#%2" \
9922 : "+w"(result) \
9923 : "w"(b_), "i"(c) \
9924 : /* No clobbers */); \
9925 result; \
9926 })
9927
9928 #define vshrn_high_n_s32(a, b, c) \
9929 __extension__ \
9930 ({ \
9931 int32x4_t b_ = (b); \
9932 int16x4_t a_ = (a); \
9933 int16x8_t result = vcombine_s16 \
9934 (a_, vcreate_s16 \
9935 (__AARCH64_UINT64_C (0x0))); \
9936 __asm__ ("shrn2 %0.8h,%1.4s,#%2" \
9937 : "+w"(result) \
9938 : "w"(b_), "i"(c) \
9939 : /* No clobbers */); \
9940 result; \
9941 })
9942
9943 #define vshrn_high_n_s64(a, b, c) \
9944 __extension__ \
9945 ({ \
9946 int64x2_t b_ = (b); \
9947 int32x2_t a_ = (a); \
9948 int32x4_t result = vcombine_s32 \
9949 (a_, vcreate_s32 \
9950 (__AARCH64_UINT64_C (0x0))); \
9951 __asm__ ("shrn2 %0.4s,%1.2d,#%2" \
9952 : "+w"(result) \
9953 : "w"(b_), "i"(c) \
9954 : /* No clobbers */); \
9955 result; \
9956 })
9957
9958 #define vshrn_high_n_u16(a, b, c) \
9959 __extension__ \
9960 ({ \
9961 uint16x8_t b_ = (b); \
9962 uint8x8_t a_ = (a); \
9963 uint8x16_t result = vcombine_u8 \
9964 (a_, vcreate_u8 \
9965 (__AARCH64_UINT64_C (0x0))); \
9966 __asm__ ("shrn2 %0.16b,%1.8h,#%2" \
9967 : "+w"(result) \
9968 : "w"(b_), "i"(c) \
9969 : /* No clobbers */); \
9970 result; \
9971 })
9972
9973 #define vshrn_high_n_u32(a, b, c) \
9974 __extension__ \
9975 ({ \
9976 uint32x4_t b_ = (b); \
9977 uint16x4_t a_ = (a); \
9978 uint16x8_t result = vcombine_u16 \
9979 (a_, vcreate_u16 \
9980 (__AARCH64_UINT64_C (0x0))); \
9981 __asm__ ("shrn2 %0.8h,%1.4s,#%2" \
9982 : "+w"(result) \
9983 : "w"(b_), "i"(c) \
9984 : /* No clobbers */); \
9985 result; \
9986 })
9987
9988 #define vshrn_high_n_u64(a, b, c) \
9989 __extension__ \
9990 ({ \
9991 uint64x2_t b_ = (b); \
9992 uint32x2_t a_ = (a); \
9993 uint32x4_t result = vcombine_u32 \
9994 (a_, vcreate_u32 \
9995 (__AARCH64_UINT64_C (0x0))); \
9996 __asm__ ("shrn2 %0.4s,%1.2d,#%2" \
9997 : "+w"(result) \
9998 : "w"(b_), "i"(c) \
9999 : /* No clobbers */); \
10000 result; \
10001 })
10002
10003 #define vshrn_n_s16(a, b) \
10004 __extension__ \
10005 ({ \
10006 int16x8_t a_ = (a); \
10007 int8x8_t result; \
10008 __asm__ ("shrn %0.8b,%1.8h,%2" \
10009 : "=w"(result) \
10010 : "w"(a_), "i"(b) \
10011 : /* No clobbers */); \
10012 result; \
10013 })
10014
10015 #define vshrn_n_s32(a, b) \
10016 __extension__ \
10017 ({ \
10018 int32x4_t a_ = (a); \
10019 int16x4_t result; \
10020 __asm__ ("shrn %0.4h,%1.4s,%2" \
10021 : "=w"(result) \
10022 : "w"(a_), "i"(b) \
10023 : /* No clobbers */); \
10024 result; \
10025 })
10026
10027 #define vshrn_n_s64(a, b) \
10028 __extension__ \
10029 ({ \
10030 int64x2_t a_ = (a); \
10031 int32x2_t result; \
10032 __asm__ ("shrn %0.2s,%1.2d,%2" \
10033 : "=w"(result) \
10034 : "w"(a_), "i"(b) \
10035 : /* No clobbers */); \
10036 result; \
10037 })
10038
10039 #define vshrn_n_u16(a, b) \
10040 __extension__ \
10041 ({ \
10042 uint16x8_t a_ = (a); \
10043 uint8x8_t result; \
10044 __asm__ ("shrn %0.8b,%1.8h,%2" \
10045 : "=w"(result) \
10046 : "w"(a_), "i"(b) \
10047 : /* No clobbers */); \
10048 result; \
10049 })
10050
10051 #define vshrn_n_u32(a, b) \
10052 __extension__ \
10053 ({ \
10054 uint32x4_t a_ = (a); \
10055 uint16x4_t result; \
10056 __asm__ ("shrn %0.4h,%1.4s,%2" \
10057 : "=w"(result) \
10058 : "w"(a_), "i"(b) \
10059 : /* No clobbers */); \
10060 result; \
10061 })
10062
10063 #define vshrn_n_u64(a, b) \
10064 __extension__ \
10065 ({ \
10066 uint64x2_t a_ = (a); \
10067 uint32x2_t result; \
10068 __asm__ ("shrn %0.2s,%1.2d,%2" \
10069 : "=w"(result) \
10070 : "w"(a_), "i"(b) \
10071 : /* No clobbers */); \
10072 result; \
10073 })
10074
10075 #define vsli_n_p8(a, b, c) \
10076 __extension__ \
10077 ({ \
10078 poly8x8_t b_ = (b); \
10079 poly8x8_t a_ = (a); \
10080 poly8x8_t result; \
10081 __asm__ ("sli %0.8b,%2.8b,%3" \
10082 : "=w"(result) \
10083 : "0"(a_), "w"(b_), "i"(c) \
10084 : /* No clobbers */); \
10085 result; \
10086 })
10087
10088 #define vsli_n_p16(a, b, c) \
10089 __extension__ \
10090 ({ \
10091 poly16x4_t b_ = (b); \
10092 poly16x4_t a_ = (a); \
10093 poly16x4_t result; \
10094 __asm__ ("sli %0.4h,%2.4h,%3" \
10095 : "=w"(result) \
10096 : "0"(a_), "w"(b_), "i"(c) \
10097 : /* No clobbers */); \
10098 result; \
10099 })
10100
10101 #define vsliq_n_p8(a, b, c) \
10102 __extension__ \
10103 ({ \
10104 poly8x16_t b_ = (b); \
10105 poly8x16_t a_ = (a); \
10106 poly8x16_t result; \
10107 __asm__ ("sli %0.16b,%2.16b,%3" \
10108 : "=w"(result) \
10109 : "0"(a_), "w"(b_), "i"(c) \
10110 : /* No clobbers */); \
10111 result; \
10112 })
10113
10114 #define vsliq_n_p16(a, b, c) \
10115 __extension__ \
10116 ({ \
10117 poly16x8_t b_ = (b); \
10118 poly16x8_t a_ = (a); \
10119 poly16x8_t result; \
10120 __asm__ ("sli %0.8h,%2.8h,%3" \
10121 : "=w"(result) \
10122 : "0"(a_), "w"(b_), "i"(c) \
10123 : /* No clobbers */); \
10124 result; \
10125 })
10126
10127 #define vsri_n_p8(a, b, c) \
10128 __extension__ \
10129 ({ \
10130 poly8x8_t b_ = (b); \
10131 poly8x8_t a_ = (a); \
10132 poly8x8_t result; \
10133 __asm__ ("sri %0.8b,%2.8b,%3" \
10134 : "=w"(result) \
10135 : "0"(a_), "w"(b_), "i"(c) \
10136 : /* No clobbers */); \
10137 result; \
10138 })
10139
10140 #define vsri_n_p16(a, b, c) \
10141 __extension__ \
10142 ({ \
10143 poly16x4_t b_ = (b); \
10144 poly16x4_t a_ = (a); \
10145 poly16x4_t result; \
10146 __asm__ ("sri %0.4h,%2.4h,%3" \
10147 : "=w"(result) \
10148 : "0"(a_), "w"(b_), "i"(c) \
10149 : /* No clobbers */); \
10150 result; \
10151 })
10152
10153 #define vsri_n_p64(a, b, c) \
10154 __extension__ \
10155 ({ \
10156 poly64x1_t b_ = (b); \
10157 poly64x1_t a_ = (a); \
10158 poly64x1_t result; \
10159 __asm__ ("sri %d0,%d2,%3" \
10160 : "=w"(result) \
10161 : "0"(a_), "w"(b_), "i"(c) \
10162 : /* No clobbers. */); \
10163 result; \
10164 })
10165
10166 #define vsriq_n_p8(a, b, c) \
10167 __extension__ \
10168 ({ \
10169 poly8x16_t b_ = (b); \
10170 poly8x16_t a_ = (a); \
10171 poly8x16_t result; \
10172 __asm__ ("sri %0.16b,%2.16b,%3" \
10173 : "=w"(result) \
10174 : "0"(a_), "w"(b_), "i"(c) \
10175 : /* No clobbers */); \
10176 result; \
10177 })
10178
10179 #define vsriq_n_p16(a, b, c) \
10180 __extension__ \
10181 ({ \
10182 poly16x8_t b_ = (b); \
10183 poly16x8_t a_ = (a); \
10184 poly16x8_t result; \
10185 __asm__ ("sri %0.8h,%2.8h,%3" \
10186 : "=w"(result) \
10187 : "0"(a_), "w"(b_), "i"(c) \
10188 : /* No clobbers */); \
10189 result; \
10190 })
10191
10192 #define vsriq_n_p64(a, b, c) \
10193 __extension__ \
10194 ({ \
10195 poly64x2_t b_ = (b); \
10196 poly64x2_t a_ = (a); \
10197 poly64x2_t result; \
10198 __asm__ ("sri %0.2d,%2.2d,%3" \
10199 : "=w"(result) \
10200 : "0"(a_), "w"(b_), "i"(c) \
10201 : /* No clobbers. */); \
10202 result; \
10203 })
10204
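/* Illustrative usage only, not part of this header: vsli_n_* shifts the
   second operand left by the immediate and inserts it into the first,
   preserving the low bits of the destination, while vsri_n_* shifts right
   and preserves the high bits.  A minimal sketch (helper name is
   hypothetical) packing two 4-bit fields, each held in the low nibble of
   its lane, into one byte per lane:

     poly8x8_t
     pack_nibbles (poly8x8_t __low, poly8x8_t __high)
     {
       return vsli_n_p8 (__low, __high, 4);
     }
*/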
10205 __extension__ extern __inline uint8x8_t
10206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10207 vtst_p8 (poly8x8_t __a, poly8x8_t __b)
10208 {
10209 return (uint8x8_t) ((((uint8x8_t) __a) & ((uint8x8_t) __b))
10210 != 0);
10211 }
10212
10213 __extension__ extern __inline uint16x4_t
10214 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10215 vtst_p16 (poly16x4_t __a, poly16x4_t __b)
10216 {
10217 return (uint16x4_t) ((((uint16x4_t) __a) & ((uint16x4_t) __b))
10218 != 0);
10219 }
10220
10221 __extension__ extern __inline uint64x1_t
10222 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10223 vtst_p64 (poly64x1_t __a, poly64x1_t __b)
10224 {
10225 return (uint64x1_t) ((__a & __b) != __AARCH64_INT64_C (0));
10226 }
10227
10228 __extension__ extern __inline uint8x16_t
10229 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10230 vtstq_p8 (poly8x16_t __a, poly8x16_t __b)
10231 {
10232 return (uint8x16_t) ((((uint8x16_t) __a) & ((uint8x16_t) __b))
10233 != 0);
10234 }
10235
10236 __extension__ extern __inline uint16x8_t
10237 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10238 vtstq_p16 (poly16x8_t __a, poly16x8_t __b)
10239 {
10240 return (uint16x8_t) ((((uint16x8_t) __a) & ((uint16x8_t) __b))
10241 != 0);
10242 }
10243
10244 __extension__ extern __inline uint64x2_t
10245 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10246 vtstq_p64 (poly64x2_t __a, poly64x2_t __b)
10247 {
10248 return (uint64x2_t) ((((uint64x2_t) __a) & ((uint64x2_t) __b))
10249 != __AARCH64_INT64_C (0));
10250 }
10251
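/* Illustrative usage only, not part of this header: like their integer
   counterparts, the polynomial vtst* intrinsics set all bits of a result
   lane when the bitwise AND of the corresponding input lanes is non-zero.
   A minimal sketch (helper name is hypothetical; vdup_n_p8 is defined
   elsewhere in this file) building a per-lane mask of which bytes have
   bit 0 set:

     uint8x8_t
     flag_mask (poly8x8_t __flags)
     {
       return vtst_p8 (__flags, vdup_n_p8 (0x01));
     }
*/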
10252 /* End of temporary inline asm implementations. */
10253
10254 /* Start of temporary inline asm for vldn, vstn and friends. */
10255
10256 /* Create struct element types for duplicating loads.
10257
10258 Create 2-element structures of:
10259
10260 +------+----+----+----+----+
10261 | | 8 | 16 | 32 | 64 |
10262 +------+----+----+----+----+
10263 |int | Y | Y | N | N |
10264 +------+----+----+----+----+
10265 |uint | Y | Y | N | N |
10266 +------+----+----+----+----+
10267 |float | - | Y | N | N |
10268 +------+----+----+----+----+
10269 |poly | Y | Y | - | - |
10270 +------+----+----+----+----+
10271
10272 Create 3-element structures of:
10273
10274 +------+----+----+----+----+
10275 | | 8 | 16 | 32 | 64 |
10276 +------+----+----+----+----+
10277 |int | Y | Y | Y | Y |
10278 +------+----+----+----+----+
10279 |uint | Y | Y | Y | Y |
10280 +------+----+----+----+----+
10281 |float | - | Y | Y | Y |
10282 +------+----+----+----+----+
10283 |poly | Y | Y | - | - |
10284 +------+----+----+----+----+
10285
10286 Create 4-element structures of:
10287
10288 +------+----+----+----+----+
10289 | | 8 | 16 | 32 | 64 |
10290 +------+----+----+----+----+
10291 |int | Y | N | N | Y |
10292 +------+----+----+----+----+
10293 |uint | Y | N | N | Y |
10294 +------+----+----+----+----+
10295 |float | - | N | N | Y |
10296 +------+----+----+----+----+
10297 |poly | Y | N | - | - |
10298 +------+----+----+----+----+
10299
10300 This is required for casting memory references. */
10301 #define __STRUCTN(t, sz, nelem) \
10302 typedef struct t ## sz ## x ## nelem ## _t { \
10303 t ## sz ## _t val[nelem]; \
10304 } t ## sz ## x ## nelem ## _t;
10305
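/* For instance, __STRUCTN (int, 8, 2) expands to:

     typedef struct int8x2_t {
       int8_t val[2];
     } int8x2_t;  */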
10306 /* 2-element structs. */
10307 __STRUCTN (int, 8, 2)
10308 __STRUCTN (int, 16, 2)
10309 __STRUCTN (uint, 8, 2)
10310 __STRUCTN (uint, 16, 2)
10311 __STRUCTN (float, 16, 2)
10312 __STRUCTN (poly, 8, 2)
10313 __STRUCTN (poly, 16, 2)
10314 /* 3-element structs. */
10315 __STRUCTN (int, 8, 3)
10316 __STRUCTN (int, 16, 3)
10317 __STRUCTN (int, 32, 3)
10318 __STRUCTN (int, 64, 3)
10319 __STRUCTN (uint, 8, 3)
10320 __STRUCTN (uint, 16, 3)
10321 __STRUCTN (uint, 32, 3)
10322 __STRUCTN (uint, 64, 3)
10323 __STRUCTN (float, 16, 3)
10324 __STRUCTN (float, 32, 3)
10325 __STRUCTN (float, 64, 3)
10326 __STRUCTN (poly, 8, 3)
10327 __STRUCTN (poly, 16, 3)
10328 /* 4-element structs. */
10329 __STRUCTN (int, 8, 4)
10330 __STRUCTN (int, 64, 4)
10331 __STRUCTN (uint, 8, 4)
10332 __STRUCTN (uint, 64, 4)
10333 __STRUCTN (poly, 8, 4)
10334 __STRUCTN (float, 64, 4)
10335 #undef __STRUCTN
10336
10337
10338 #define __ST2_LANE_FUNC(intype, largetype, ptrtype, mode, \
10339 qmode, ptr_mode, funcsuffix, signedtype) \
10340 __extension__ extern __inline void \
10341 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
10342 vst2_lane_ ## funcsuffix (ptrtype *__ptr, \
10343 intype __b, const int __c) \
10344 { \
10345 __builtin_aarch64_simd_oi __o; \
10346 largetype __temp; \
10347 __temp.val[0] \
10348 = vcombine_##funcsuffix (__b.val[0], \
10349 vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
10350 __temp.val[1] \
10351 = vcombine_##funcsuffix (__b.val[1], \
10352 vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
10353 __o = __builtin_aarch64_set_qregoi##qmode (__o, \
10354 (signedtype) __temp.val[0], 0); \
10355 __o = __builtin_aarch64_set_qregoi##qmode (__o, \
10356 (signedtype) __temp.val[1], 1); \
10357 __builtin_aarch64_st2_lane##mode ((__builtin_aarch64_simd_ ## ptr_mode *) \
10358 __ptr, __o, __c); \
10359 }
10360
10361 __ST2_LANE_FUNC (float16x4x2_t, float16x8x2_t, float16_t, v4hf, v8hf, hf, f16,
10362 float16x8_t)
10363 __ST2_LANE_FUNC (float32x2x2_t, float32x4x2_t, float32_t, v2sf, v4sf, sf, f32,
10364 float32x4_t)
10365 __ST2_LANE_FUNC (float64x1x2_t, float64x2x2_t, float64_t, df, v2df, df, f64,
10366 float64x2_t)
10367 __ST2_LANE_FUNC (poly8x8x2_t, poly8x16x2_t, poly8_t, v8qi, v16qi, qi, p8,
10368 int8x16_t)
10369 __ST2_LANE_FUNC (poly16x4x2_t, poly16x8x2_t, poly16_t, v4hi, v8hi, hi, p16,
10370 int16x8_t)
10371 __ST2_LANE_FUNC (poly64x1x2_t, poly64x2x2_t, poly64_t, di, v2di_ssps, di, p64,
10372 poly64x2_t)
10373 __ST2_LANE_FUNC (int8x8x2_t, int8x16x2_t, int8_t, v8qi, v16qi, qi, s8,
10374 int8x16_t)
10375 __ST2_LANE_FUNC (int16x4x2_t, int16x8x2_t, int16_t, v4hi, v8hi, hi, s16,
10376 int16x8_t)
10377 __ST2_LANE_FUNC (int32x2x2_t, int32x4x2_t, int32_t, v2si, v4si, si, s32,
10378 int32x4_t)
10379 __ST2_LANE_FUNC (int64x1x2_t, int64x2x2_t, int64_t, di, v2di, di, s64,
10380 int64x2_t)
10381 __ST2_LANE_FUNC (uint8x8x2_t, uint8x16x2_t, uint8_t, v8qi, v16qi, qi, u8,
10382 int8x16_t)
10383 __ST2_LANE_FUNC (uint16x4x2_t, uint16x8x2_t, uint16_t, v4hi, v8hi, hi, u16,
10384 int16x8_t)
10385 __ST2_LANE_FUNC (uint32x2x2_t, uint32x4x2_t, uint32_t, v2si, v4si, si, u32,
10386 int32x4_t)
10387 __ST2_LANE_FUNC (uint64x1x2_t, uint64x2x2_t, uint64_t, di, v2di, di, u64,
10388 int64x2_t)
10389
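/* Illustrative usage only, not part of this header: each vst2_lane_*
   function generated above stores lane __c of both vectors in the pair to
   two consecutive elements in memory.  A minimal sketch (helper name is
   hypothetical) storing lane 1 of a float32x2x2_t pair:

     void
     store_second_lane (float32_t *__p, float32x2x2_t __v)
     {
       vst2_lane_f32 (__p, __v, 1);
     }
*/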
10390 #define __ST2Q_LANE_FUNC(intype, ptrtype, mode, ptr_mode, funcsuffix) \
10391 __extension__ extern __inline void \
10392 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
10393 vst2q_lane_ ## funcsuffix (ptrtype *__ptr, \
10394 intype __b, const int __c) \
10395 { \
10396 union { intype __i; \
10397 __builtin_aarch64_simd_oi __o; } __temp = { __b }; \
10398 __builtin_aarch64_st2_lane##mode ((__builtin_aarch64_simd_ ## ptr_mode *) \
10399 __ptr, __temp.__o, __c); \
10400 }
10401
10402 __ST2Q_LANE_FUNC (float16x8x2_t, float16_t, v8hf, hf, f16)
10403 __ST2Q_LANE_FUNC (float32x4x2_t, float32_t, v4sf, sf, f32)
10404 __ST2Q_LANE_FUNC (float64x2x2_t, float64_t, v2df, df, f64)
10405 __ST2Q_LANE_FUNC (poly8x16x2_t, poly8_t, v16qi, qi, p8)
10406 __ST2Q_LANE_FUNC (poly16x8x2_t, poly16_t, v8hi, hi, p16)
10407 __ST2Q_LANE_FUNC (poly64x2x2_t, poly64_t, v2di, di, p64)
10408 __ST2Q_LANE_FUNC (int8x16x2_t, int8_t, v16qi, qi, s8)
10409 __ST2Q_LANE_FUNC (int16x8x2_t, int16_t, v8hi, hi, s16)
10410 __ST2Q_LANE_FUNC (int32x4x2_t, int32_t, v4si, si, s32)
10411 __ST2Q_LANE_FUNC (int64x2x2_t, int64_t, v2di, di, s64)
10412 __ST2Q_LANE_FUNC (uint8x16x2_t, uint8_t, v16qi, qi, u8)
10413 __ST2Q_LANE_FUNC (uint16x8x2_t, uint16_t, v8hi, hi, u16)
10414 __ST2Q_LANE_FUNC (uint32x4x2_t, uint32_t, v4si, si, u32)
10415 __ST2Q_LANE_FUNC (uint64x2x2_t, uint64_t, v2di, di, u64)
10416
10417 #define __ST3_LANE_FUNC(intype, largetype, ptrtype, mode, \
10418 qmode, ptr_mode, funcsuffix, signedtype) \
10419 __extension__ extern __inline void \
10420 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
10421 vst3_lane_ ## funcsuffix (ptrtype *__ptr, \
10422 intype __b, const int __c) \
10423 { \
10424 __builtin_aarch64_simd_ci __o; \
10425 largetype __temp; \
10426 __temp.val[0] \
10427 = vcombine_##funcsuffix (__b.val[0], \
10428 vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
10429 __temp.val[1] \
10430 = vcombine_##funcsuffix (__b.val[1], \
10431 vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
10432 __temp.val[2] \
10433 = vcombine_##funcsuffix (__b.val[2], \
10434 vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
10435 __o = __builtin_aarch64_set_qregci##qmode (__o, \
10436 (signedtype) __temp.val[0], 0); \
10437 __o = __builtin_aarch64_set_qregci##qmode (__o, \
10438 (signedtype) __temp.val[1], 1); \
10439 __o = __builtin_aarch64_set_qregci##qmode (__o, \
10440 (signedtype) __temp.val[2], 2); \
10441 __builtin_aarch64_st3_lane##mode ((__builtin_aarch64_simd_ ## ptr_mode *) \
10442 __ptr, __o, __c); \
10443 }
10444
10445 __ST3_LANE_FUNC (float16x4x3_t, float16x8x3_t, float16_t, v4hf, v8hf, hf, f16,
10446 float16x8_t)
10447 __ST3_LANE_FUNC (float32x2x3_t, float32x4x3_t, float32_t, v2sf, v4sf, sf, f32,
10448 float32x4_t)
10449 __ST3_LANE_FUNC (float64x1x3_t, float64x2x3_t, float64_t, df, v2df, df, f64,
10450 float64x2_t)
10451 __ST3_LANE_FUNC (poly8x8x3_t, poly8x16x3_t, poly8_t, v8qi, v16qi, qi, p8,
10452 int8x16_t)
10453 __ST3_LANE_FUNC (poly16x4x3_t, poly16x8x3_t, poly16_t, v4hi, v8hi, hi, p16,
10454 int16x8_t)
10455 __ST3_LANE_FUNC (poly64x1x3_t, poly64x2x3_t, poly64_t, di, v2di_ssps, di, p64,
10456 poly64x2_t)
10457 __ST3_LANE_FUNC (int8x8x3_t, int8x16x3_t, int8_t, v8qi, v16qi, qi, s8,
10458 int8x16_t)
10459 __ST3_LANE_FUNC (int16x4x3_t, int16x8x3_t, int16_t, v4hi, v8hi, hi, s16,
10460 int16x8_t)
10461 __ST3_LANE_FUNC (int32x2x3_t, int32x4x3_t, int32_t, v2si, v4si, si, s32,
10462 int32x4_t)
10463 __ST3_LANE_FUNC (int64x1x3_t, int64x2x3_t, int64_t, di, v2di, di, s64,
10464 int64x2_t)
10465 __ST3_LANE_FUNC (uint8x8x3_t, uint8x16x3_t, uint8_t, v8qi, v16qi, qi, u8,
10466 int8x16_t)
10467 __ST3_LANE_FUNC (uint16x4x3_t, uint16x8x3_t, uint16_t, v4hi, v8hi, hi, u16,
10468 int16x8_t)
10469 __ST3_LANE_FUNC (uint32x2x3_t, uint32x4x3_t, uint32_t, v2si, v4si, si, u32,
10470 int32x4_t)
10471 __ST3_LANE_FUNC (uint64x1x3_t, uint64x2x3_t, uint64_t, di, v2di, di, u64,
10472 int64x2_t)
10473
10474 #define __ST3Q_LANE_FUNC(intype, ptrtype, mode, ptr_mode, funcsuffix) \
10475 __extension__ extern __inline void \
10476 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
10477 vst3q_lane_ ## funcsuffix (ptrtype *__ptr, \
10478 intype __b, const int __c) \
10479 { \
10480 union { intype __i; \
10481 __builtin_aarch64_simd_ci __o; } __temp = { __b }; \
10482 __builtin_aarch64_st3_lane##mode ((__builtin_aarch64_simd_ ## ptr_mode *) \
10483 __ptr, __temp.__o, __c); \
10484 }
10485
10486 __ST3Q_LANE_FUNC (float16x8x3_t, float16_t, v8hf, hf, f16)
10487 __ST3Q_LANE_FUNC (float32x4x3_t, float32_t, v4sf, sf, f32)
10488 __ST3Q_LANE_FUNC (float64x2x3_t, float64_t, v2df, df, f64)
10489 __ST3Q_LANE_FUNC (poly8x16x3_t, poly8_t, v16qi, qi, p8)
10490 __ST3Q_LANE_FUNC (poly16x8x3_t, poly16_t, v8hi, hi, p16)
10491 __ST3Q_LANE_FUNC (poly64x2x3_t, poly64_t, v2di, di, p64)
10492 __ST3Q_LANE_FUNC (int8x16x3_t, int8_t, v16qi, qi, s8)
10493 __ST3Q_LANE_FUNC (int16x8x3_t, int16_t, v8hi, hi, s16)
10494 __ST3Q_LANE_FUNC (int32x4x3_t, int32_t, v4si, si, s32)
10495 __ST3Q_LANE_FUNC (int64x2x3_t, int64_t, v2di, di, s64)
10496 __ST3Q_LANE_FUNC (uint8x16x3_t, uint8_t, v16qi, qi, u8)
10497 __ST3Q_LANE_FUNC (uint16x8x3_t, uint16_t, v8hi, hi, u16)
10498 __ST3Q_LANE_FUNC (uint32x4x3_t, uint32_t, v4si, si, u32)
10499 __ST3Q_LANE_FUNC (uint64x2x3_t, uint64_t, v2di, di, u64)
10500
10501 #define __ST4_LANE_FUNC(intype, largetype, ptrtype, mode, \
10502 qmode, ptr_mode, funcsuffix, signedtype) \
10503 __extension__ extern __inline void \
10504 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
10505 vst4_lane_ ## funcsuffix (ptrtype *__ptr, \
10506 intype __b, const int __c) \
10507 { \
10508 __builtin_aarch64_simd_xi __o; \
10509 largetype __temp; \
10510 __temp.val[0] \
10511 = vcombine_##funcsuffix (__b.val[0], \
10512 vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
10513 __temp.val[1] \
10514 = vcombine_##funcsuffix (__b.val[1], \
10515 vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
10516 __temp.val[2] \
10517 = vcombine_##funcsuffix (__b.val[2], \
10518 vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
10519 __temp.val[3] \
10520 = vcombine_##funcsuffix (__b.val[3], \
10521 vcreate_##funcsuffix (__AARCH64_UINT64_C (0))); \
10522 __o = __builtin_aarch64_set_qregxi##qmode (__o, \
10523 (signedtype) __temp.val[0], 0); \
10524 __o = __builtin_aarch64_set_qregxi##qmode (__o, \
10525 (signedtype) __temp.val[1], 1); \
10526 __o = __builtin_aarch64_set_qregxi##qmode (__o, \
10527 (signedtype) __temp.val[2], 2); \
10528 __o = __builtin_aarch64_set_qregxi##qmode (__o, \
10529 (signedtype) __temp.val[3], 3); \
10530 __builtin_aarch64_st4_lane##mode ((__builtin_aarch64_simd_ ## ptr_mode *) \
10531 __ptr, __o, __c); \
10532 }
10533
10534 __ST4_LANE_FUNC (float16x4x4_t, float16x8x4_t, float16_t, v4hf, v8hf, hf, f16,
10535 float16x8_t)
10536 __ST4_LANE_FUNC (float32x2x4_t, float32x4x4_t, float32_t, v2sf, v4sf, sf, f32,
10537 float32x4_t)
10538 __ST4_LANE_FUNC (float64x1x4_t, float64x2x4_t, float64_t, df, v2df, df, f64,
10539 float64x2_t)
10540 __ST4_LANE_FUNC (poly8x8x4_t, poly8x16x4_t, poly8_t, v8qi, v16qi, qi, p8,
10541 int8x16_t)
10542 __ST4_LANE_FUNC (poly16x4x4_t, poly16x8x4_t, poly16_t, v4hi, v8hi, hi, p16,
10543 int16x8_t)
10544 __ST4_LANE_FUNC (poly64x1x4_t, poly64x2x4_t, poly64_t, di, v2di_ssps, di, p64,
10545 poly64x2_t)
10546 __ST4_LANE_FUNC (int8x8x4_t, int8x16x4_t, int8_t, v8qi, v16qi, qi, s8,
10547 int8x16_t)
10548 __ST4_LANE_FUNC (int16x4x4_t, int16x8x4_t, int16_t, v4hi, v8hi, hi, s16,
10549 int16x8_t)
10550 __ST4_LANE_FUNC (int32x2x4_t, int32x4x4_t, int32_t, v2si, v4si, si, s32,
10551 int32x4_t)
10552 __ST4_LANE_FUNC (int64x1x4_t, int64x2x4_t, int64_t, di, v2di, di, s64,
10553 int64x2_t)
10554 __ST4_LANE_FUNC (uint8x8x4_t, uint8x16x4_t, uint8_t, v8qi, v16qi, qi, u8,
10555 int8x16_t)
10556 __ST4_LANE_FUNC (uint16x4x4_t, uint16x8x4_t, uint16_t, v4hi, v8hi, hi, u16,
10557 int16x8_t)
10558 __ST4_LANE_FUNC (uint32x2x4_t, uint32x4x4_t, uint32_t, v2si, v4si, si, u32,
10559 int32x4_t)
10560 __ST4_LANE_FUNC (uint64x1x4_t, uint64x2x4_t, uint64_t, di, v2di, di, u64,
10561 int64x2_t)
10562
10563 #define __ST4Q_LANE_FUNC(intype, ptrtype, mode, ptr_mode, funcsuffix) \
10564 __extension__ extern __inline void \
10565 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
10566 vst4q_lane_ ## funcsuffix (ptrtype *__ptr, \
10567 intype __b, const int __c) \
10568 { \
10569 union { intype __i; \
10570 __builtin_aarch64_simd_xi __o; } __temp = { __b }; \
10571 __builtin_aarch64_st4_lane##mode ((__builtin_aarch64_simd_ ## ptr_mode *) \
10572 __ptr, __temp.__o, __c); \
10573 }
10574
10575 __ST4Q_LANE_FUNC (float16x8x4_t, float16_t, v8hf, hf, f16)
10576 __ST4Q_LANE_FUNC (float32x4x4_t, float32_t, v4sf, sf, f32)
10577 __ST4Q_LANE_FUNC (float64x2x4_t, float64_t, v2df, df, f64)
10578 __ST4Q_LANE_FUNC (poly8x16x4_t, poly8_t, v16qi, qi, p8)
10579 __ST4Q_LANE_FUNC (poly16x8x4_t, poly16_t, v8hi, hi, p16)
10580 __ST4Q_LANE_FUNC (poly64x2x4_t, poly64_t, v2di, di, p64)
10581 __ST4Q_LANE_FUNC (int8x16x4_t, int8_t, v16qi, qi, s8)
10582 __ST4Q_LANE_FUNC (int16x8x4_t, int16_t, v8hi, hi, s16)
10583 __ST4Q_LANE_FUNC (int32x4x4_t, int32_t, v4si, si, s32)
10584 __ST4Q_LANE_FUNC (int64x2x4_t, int64_t, v2di, di, s64)
10585 __ST4Q_LANE_FUNC (uint8x16x4_t, uint8_t, v16qi, qi, u8)
10586 __ST4Q_LANE_FUNC (uint16x8x4_t, uint16_t, v8hi, hi, u16)
10587 __ST4Q_LANE_FUNC (uint32x4x4_t, uint32_t, v4si, si, u32)
10588 __ST4Q_LANE_FUNC (uint64x2x4_t, uint64_t, v2di, di, u64)
10589
10590 __extension__ extern __inline int64_t
10591 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10592 vaddlv_s32 (int32x2_t __a)
10593 {
10594 int64_t __result;
10595 __asm__ ("saddlp %0.1d, %1.2s" : "=w"(__result) : "w"(__a) : );
10596 return __result;
10597 }
10598
10599 __extension__ extern __inline uint64_t
10600 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10601 vaddlv_u32 (uint32x2_t __a)
10602 {
10603 uint64_t __result;
10604 __asm__ ("uaddlp %0.1d, %1.2s" : "=w"(__result) : "w"(__a) : );
10605 return __result;
10606 }
10607
10608 __extension__ extern __inline int16x4_t
10609 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10610 vqdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
10611 {
10612 return __builtin_aarch64_sqdmulh_laneqv4hi (__a, __b, __c);
10613 }
10614
10615 __extension__ extern __inline int32x2_t
10616 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10617 vqdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
10618 {
10619 return __builtin_aarch64_sqdmulh_laneqv2si (__a, __b, __c);
10620 }
10621
10622 __extension__ extern __inline int16x8_t
10623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10624 vqdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
10625 {
10626 return __builtin_aarch64_sqdmulh_laneqv8hi (__a, __b, __c);
10627 }
10628
10629 __extension__ extern __inline int32x4_t
10630 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10631 vqdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
10632 {
10633 return __builtin_aarch64_sqdmulh_laneqv4si (__a, __b, __c);
10634 }
10635
10636 __extension__ extern __inline int16x4_t
10637 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10638 vqrdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
10639 {
10640 return __builtin_aarch64_sqrdmulh_laneqv4hi (__a, __b, __c);
10641 }
10642
10643 __extension__ extern __inline int32x2_t
10644 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10645 vqrdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
10646 {
10647 return __builtin_aarch64_sqrdmulh_laneqv2si (__a, __b, __c);
10648 }
10649
10650 __extension__ extern __inline int16x8_t
10651 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10652 vqrdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
10653 {
10654 return __builtin_aarch64_sqrdmulh_laneqv8hi (__a, __b, __c);
10655 }
10656
10657 __extension__ extern __inline int32x4_t
10658 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10659 vqrdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
10660 {
10661 return __builtin_aarch64_sqrdmulh_laneqv4si (__a, __b, __c);
10662 }
10663
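/* Illustrative usage only, not part of this header: the _laneq forms above
   use lane __c of a 128-bit vector as the scalar multiplier.  A minimal
   sketch (helper name is hypothetical) multiplying Q15 samples by the third
   coefficient of a vector of coefficients:

     int16x8_t
     scale_by_coeff2 (int16x8_t __samples, int16x8_t __coeffs)
     {
       return vqrdmulhq_laneq_s16 (__samples, __coeffs, 2);
     }
*/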
10664 /* Table intrinsics. */
10665
10666 __extension__ extern __inline poly8x8_t
10667 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10668 vqtbl1_p8 (poly8x16_t __a, uint8x8_t __b)
10669 {
10670 poly8x8_t __result;
10671 __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
10672 : "=w"(__result)
10673 : "w"(__a), "w"(__b)
10674 : /* No clobbers */);
10675 return __result;
10676 }
10677
10678 __extension__ extern __inline int8x8_t
10679 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10680 vqtbl1_s8 (int8x16_t __a, uint8x8_t __b)
10681 {
10682 int8x8_t __result;
10683 __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
10684 : "=w"(__result)
10685 : "w"(__a), "w"(__b)
10686 : /* No clobbers */);
10687 return __result;
10688 }
10689
10690 __extension__ extern __inline uint8x8_t
10691 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10692 vqtbl1_u8 (uint8x16_t __a, uint8x8_t __b)
10693 {
10694 uint8x8_t __result;
10695 __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
10696 : "=w"(__result)
10697 : "w"(__a), "w"(__b)
10698 : /* No clobbers */);
10699 return __result;
10700 }
10701
10702 __extension__ extern __inline poly8x16_t
10703 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10704 vqtbl1q_p8 (poly8x16_t __a, uint8x16_t __b)
10705 {
10706 poly8x16_t __result;
10707 __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
10708 : "=w"(__result)
10709 : "w"(__a), "w"(__b)
10710 : /* No clobbers */);
10711 return __result;
10712 }
10713
10714 __extension__ extern __inline int8x16_t
10715 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10716 vqtbl1q_s8 (int8x16_t __a, uint8x16_t __b)
10717 {
10718 int8x16_t __result;
10719 __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
10720 : "=w"(__result)
10721 : "w"(__a), "w"(__b)
10722 : /* No clobbers */);
10723 return __result;
10724 }
10725
10726 __extension__ extern __inline uint8x16_t
10727 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10728 vqtbl1q_u8 (uint8x16_t __a, uint8x16_t __b)
10729 {
10730 uint8x16_t __result;
10731 __asm__ ("tbl %0.16b, {%1.16b}, %2.16b"
10732 : "=w"(__result)
10733 : "w"(__a), "w"(__b)
10734 : /* No clobbers */);
10735 return __result;
10736 }
10737
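/* Illustrative usage only, not part of this header: vqtbl1* performs a
   byte-wise table lookup into a 16-byte table, with out-of-range indices
   producing zero.  A minimal sketch (helper name is hypothetical) reversing
   the bytes of a vector:

     uint8x16_t
     reverse_bytes (uint8x16_t __v)
     {
       const uint8x16_t __idx = { 15, 14, 13, 12, 11, 10, 9, 8,
                                  7, 6, 5, 4, 3, 2, 1, 0 };
       return vqtbl1q_u8 (__v, __idx);
     }
*/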
10738 __extension__ extern __inline int8x8_t
10739 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10740 vqtbx1_s8 (int8x8_t __r, int8x16_t __tab, uint8x8_t __idx)
10741 {
10742 int8x8_t __result = __r;
10743 __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
10744 : "+w"(__result)
10745 : "w"(__tab), "w"(__idx)
10746 : /* No clobbers */);
10747 return __result;
10748 }
10749
10750 __extension__ extern __inline uint8x8_t
10751 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10752 vqtbx1_u8 (uint8x8_t __r, uint8x16_t __tab, uint8x8_t __idx)
10753 {
10754 uint8x8_t __result = __r;
10755 __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
10756 : "+w"(__result)
10757 : "w"(__tab), "w"(__idx)
10758 : /* No clobbers */);
10759 return __result;
10760 }
10761
10762 __extension__ extern __inline poly8x8_t
10763 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10764 vqtbx1_p8 (poly8x8_t __r, poly8x16_t __tab, uint8x8_t __idx)
10765 {
10766 poly8x8_t __result = __r;
10767 __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
10768 : "+w"(__result)
10769 : "w"(__tab), "w"(__idx)
10770 : /* No clobbers */);
10771 return __result;
10772 }
10773
10774 __extension__ extern __inline int8x16_t
10775 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10776 vqtbx1q_s8 (int8x16_t __r, int8x16_t __tab, uint8x16_t __idx)
10777 {
10778 int8x16_t __result = __r;
10779 __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
10780 : "+w"(__result)
10781 : "w"(__tab), "w"(__idx)
10782 : /* No clobbers */);
10783 return __result;
10784 }
10785
10786 __extension__ extern __inline uint8x16_t
10787 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10788 vqtbx1q_u8 (uint8x16_t __r, uint8x16_t __tab, uint8x16_t __idx)
10789 {
10790 uint8x16_t __result = __r;
10791 __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
10792 : "+w"(__result)
10793 : "w"(__tab), "w"(__idx)
10794 : /* No clobbers */);
10795 return __result;
10796 }
10797
10798 __extension__ extern __inline poly8x16_t
10799 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10800 vqtbx1q_p8 (poly8x16_t __r, poly8x16_t __tab, uint8x16_t __idx)
10801 {
10802 poly8x16_t __result = __r;
10803 __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
10804 : "+w"(__result)
10805 : "w"(__tab), "w"(__idx)
10806 : /* No clobbers */);
10807 return __result;
10808 }
10809
10810 /* V7 legacy table intrinsics. */
10811
10812 __extension__ extern __inline int8x8_t
10813 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10814 vtbl1_s8 (int8x8_t __tab, int8x8_t __idx)
10815 {
10816 int8x8_t __result;
10817 int8x16_t __temp = vcombine_s8 (__tab, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
10818 __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
10819 : "=w"(__result)
10820 : "w"(__temp), "w"(__idx)
10821 : /* No clobbers */);
10822 return __result;
10823 }
10824
10825 __extension__ extern __inline uint8x8_t
10826 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10827 vtbl1_u8 (uint8x8_t __tab, uint8x8_t __idx)
10828 {
10829 uint8x8_t __result;
10830 uint8x16_t __temp = vcombine_u8 (__tab, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
10831 __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
10832 : "=w"(__result)
10833 : "w"(__temp), "w"(__idx)
10834 : /* No clobbers */);
10835 return __result;
10836 }
10837
10838 __extension__ extern __inline poly8x8_t
10839 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10840 vtbl1_p8 (poly8x8_t __tab, uint8x8_t __idx)
10841 {
10842 poly8x8_t __result;
10843 poly8x16_t __temp = vcombine_p8 (__tab, vcreate_p8 (__AARCH64_UINT64_C (0x0)));
10844 __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
10845 : "=w"(__result)
10846 : "w"(__temp), "w"(__idx)
10847 : /* No clobbers */);
10848 return __result;
10849 }
10850
10851 __extension__ extern __inline int8x8_t
10852 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10853 vtbl2_s8 (int8x8x2_t __tab, int8x8_t __idx)
10854 {
10855 int8x8_t __result;
10856 int8x16_t __temp = vcombine_s8 (__tab.val[0], __tab.val[1]);
10857 __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
10858 : "=w"(__result)
10859 : "w"(__temp), "w"(__idx)
10860 : /* No clobbers */);
10861 return __result;
10862 }
10863
10864 __extension__ extern __inline uint8x8_t
10865 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10866 vtbl2_u8 (uint8x8x2_t __tab, uint8x8_t __idx)
10867 {
10868 uint8x8_t __result;
10869 uint8x16_t __temp = vcombine_u8 (__tab.val[0], __tab.val[1]);
10870 __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
10871 : "=w"(__result)
10872 : "w"(__temp), "w"(__idx)
10873 : /* No clobbers */);
10874 return __result;
10875 }
10876
10877 __extension__ extern __inline poly8x8_t
10878 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10879 vtbl2_p8 (poly8x8x2_t __tab, uint8x8_t __idx)
10880 {
10881 poly8x8_t __result;
10882 poly8x16_t __temp = vcombine_p8 (__tab.val[0], __tab.val[1]);
10883 __asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
10884 : "=w"(__result)
10885 : "w"(__temp), "w"(__idx)
10886 : /* No clobbers */);
10887 return __result;
10888 }
10889
10890 __extension__ extern __inline int8x8_t
10891 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10892 vtbl3_s8 (int8x8x3_t __tab, int8x8_t __idx)
10893 {
10894 int8x8_t __result;
10895 int8x16x2_t __temp;
10896 __builtin_aarch64_simd_oi __o;
10897 __temp.val[0] = vcombine_s8 (__tab.val[0], __tab.val[1]);
10898 __temp.val[1] = vcombine_s8 (__tab.val[2], vcreate_s8 (__AARCH64_UINT64_C (0x0)));
10899 __o = __builtin_aarch64_set_qregoiv16qi (__o,
10900 (int8x16_t) __temp.val[0], 0);
10901 __o = __builtin_aarch64_set_qregoiv16qi (__o,
10902 (int8x16_t) __temp.val[1], 1);
10903 __result = __builtin_aarch64_tbl3v8qi (__o, __idx);
10904 return __result;
10905 }
10906
10907 __extension__ extern __inline uint8x8_t
10908 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10909 vtbl3_u8 (uint8x8x3_t __tab, uint8x8_t __idx)
10910 {
10911 uint8x8_t __result;
10912 uint8x16x2_t __temp;
10913 __builtin_aarch64_simd_oi __o;
10914 __temp.val[0] = vcombine_u8 (__tab.val[0], __tab.val[1]);
10915 __temp.val[1] = vcombine_u8 (__tab.val[2], vcreate_u8 (__AARCH64_UINT64_C (0x0)));
10916 __o = __builtin_aarch64_set_qregoiv16qi (__o,
10917 (int8x16_t) __temp.val[0], 0);
10918 __o = __builtin_aarch64_set_qregoiv16qi (__o,
10919 (int8x16_t) __temp.val[1], 1);
10920 __result = (uint8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)__idx);
10921 return __result;
10922 }
10923
10924 __extension__ extern __inline poly8x8_t
10925 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10926 vtbl3_p8 (poly8x8x3_t __tab, uint8x8_t __idx)
10927 {
10928 poly8x8_t __result;
10929 poly8x16x2_t __temp;
10930 __builtin_aarch64_simd_oi __o;
10931 __temp.val[0] = vcombine_p8 (__tab.val[0], __tab.val[1]);
10932 __temp.val[1] = vcombine_p8 (__tab.val[2], vcreate_p8 (__AARCH64_UINT64_C (0x0)));
10933 __o = __builtin_aarch64_set_qregoiv16qi (__o,
10934 (int8x16_t) __temp.val[0], 0);
10935 __o = __builtin_aarch64_set_qregoiv16qi (__o,
10936 (int8x16_t) __temp.val[1], 1);
10937 __result = (poly8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)__idx);
10938 return __result;
10939 }
10940
10941 __extension__ extern __inline int8x8_t
10942 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10943 vtbl4_s8 (int8x8x4_t __tab, int8x8_t __idx)
10944 {
10945 int8x8_t __result;
10946 int8x16x2_t __temp;
10947 __builtin_aarch64_simd_oi __o;
10948 __temp.val[0] = vcombine_s8 (__tab.val[0], __tab.val[1]);
10949 __temp.val[1] = vcombine_s8 (__tab.val[2], __tab.val[3]);
10950 __o = __builtin_aarch64_set_qregoiv16qi (__o,
10951 (int8x16_t) __temp.val[0], 0);
10952 __o = __builtin_aarch64_set_qregoiv16qi (__o,
10953 (int8x16_t) __temp.val[1], 1);
10954 __result = __builtin_aarch64_tbl3v8qi (__o, __idx);
10955 return __result;
10956 }
10957
10958 __extension__ extern __inline uint8x8_t
10959 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10960 vtbl4_u8 (uint8x8x4_t __tab, uint8x8_t __idx)
10961 {
10962 uint8x8_t __result;
10963 uint8x16x2_t __temp;
10964 __builtin_aarch64_simd_oi __o;
10965 __temp.val[0] = vcombine_u8 (__tab.val[0], __tab.val[1]);
10966 __temp.val[1] = vcombine_u8 (__tab.val[2], __tab.val[3]);
10967 __o = __builtin_aarch64_set_qregoiv16qi (__o,
10968 (int8x16_t) __temp.val[0], 0);
10969 __o = __builtin_aarch64_set_qregoiv16qi (__o,
10970 (int8x16_t) __temp.val[1], 1);
10971 __result = (uint8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)__idx);
10972 return __result;
10973 }
10974
10975 __extension__ extern __inline poly8x8_t
10976 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10977 vtbl4_p8 (poly8x8x4_t __tab, uint8x8_t __idx)
10978 {
10979 poly8x8_t __result;
10980 poly8x16x2_t __temp;
10981 __builtin_aarch64_simd_oi __o;
10982 __temp.val[0] = vcombine_p8 (__tab.val[0], __tab.val[1]);
10983 __temp.val[1] = vcombine_p8 (__tab.val[2], __tab.val[3]);
10984 __o = __builtin_aarch64_set_qregoiv16qi (__o,
10985 (int8x16_t) __temp.val[0], 0);
10986 __o = __builtin_aarch64_set_qregoiv16qi (__o,
10987 (int8x16_t) __temp.val[1], 1);
10988 __result = (poly8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)__idx);
10989 return __result;
10990 }
10991
10992 __extension__ extern __inline int8x8_t
10993 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
10994 vtbx2_s8 (int8x8_t __r, int8x8x2_t __tab, int8x8_t __idx)
10995 {
10996 int8x8_t __result = __r;
10997 int8x16_t __temp = vcombine_s8 (__tab.val[0], __tab.val[1]);
10998 __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
10999 : "+w"(__result)
11000 : "w"(__temp), "w"(__idx)
11001 : /* No clobbers */);
11002 return __result;
11003 }
11004
11005 __extension__ extern __inline uint8x8_t
11006 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11007 vtbx2_u8 (uint8x8_t __r, uint8x8x2_t __tab, uint8x8_t __idx)
11008 {
11009 uint8x8_t __result = __r;
11010 uint8x16_t __temp = vcombine_u8 (__tab.val[0], __tab.val[1]);
11011 __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
11012 : "+w"(__result)
11013 : "w"(__temp), "w"(__idx)
11014 : /* No clobbers */);
11015 return __result;
11016 }
11017
11018 __extension__ extern __inline poly8x8_t
11019 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11020 vtbx2_p8 (poly8x8_t __r, poly8x8x2_t __tab, uint8x8_t __idx)
11021 {
11022 poly8x8_t __result = __r;
11023 poly8x16_t __temp = vcombine_p8 (__tab.val[0], __tab.val[1]);
11024 __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
11025 : "+w"(__result)
11026 : "w"(__temp), "w"(__idx)
11027 : /* No clobbers */);
11028 return __result;
11029 }
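
/* Illustrative usage sketch for the table-lookup intrinsics above (a
   hypothetical example, not part of the original header).  TBL-based
   lookups return 0 for out-of-range indices, while TBX-based lookups
   leave the corresponding destination byte unchanged:

     uint8x8x2_t __table = { { vdup_n_u8 (0x11), vdup_n_u8 (0x22) } };
     uint8x8_t __index = { 0, 8, 15, 16, 255, 1, 9, 2 };
     uint8x8_t __r1 = vtbl2_u8 (__table, __index);
       // lanes 3 and 4 (indices 16 and 255) are out of range -> 0
     uint8x8_t __r2 = vtbx2_u8 (vdup_n_u8 (0xff), __table, __index);
       // lanes 3 and 4 keep the 0xff fallback value  */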
11030
11031 /* End of temporary inline asm. */
11032
11033 /* Start of optimal implementations in approved order. */
11034
11035 /* vabd. */
11036
11037 __extension__ extern __inline float32_t
11038 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11039 vabds_f32 (float32_t __a, float32_t __b)
11040 {
11041 return __builtin_aarch64_fabdsf (__a, __b);
11042 }
11043
11044 __extension__ extern __inline float64_t
11045 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11046 vabdd_f64 (float64_t __a, float64_t __b)
11047 {
11048 return __builtin_aarch64_fabddf (__a, __b);
11049 }
11050
11051 __extension__ extern __inline float32x2_t
11052 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11053 vabd_f32 (float32x2_t __a, float32x2_t __b)
11054 {
11055 return __builtin_aarch64_fabdv2sf (__a, __b);
11056 }
11057
11058 __extension__ extern __inline float64x1_t
11059 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11060 vabd_f64 (float64x1_t __a, float64x1_t __b)
11061 {
11062 return (float64x1_t) {vabdd_f64 (vget_lane_f64 (__a, 0),
11063 vget_lane_f64 (__b, 0))};
11064 }
11065
11066 __extension__ extern __inline float32x4_t
11067 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11068 vabdq_f32 (float32x4_t __a, float32x4_t __b)
11069 {
11070 return __builtin_aarch64_fabdv4sf (__a, __b);
11071 }
11072
11073 __extension__ extern __inline float64x2_t
11074 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11075 vabdq_f64 (float64x2_t __a, float64x2_t __b)
11076 {
11077 return __builtin_aarch64_fabdv2df (__a, __b);
11078 }
11079
11080 /* vabs */
11081
11082 __extension__ extern __inline float32x2_t
11083 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11084 vabs_f32 (float32x2_t __a)
11085 {
11086 return __builtin_aarch64_absv2sf (__a);
11087 }
11088
11089 __extension__ extern __inline float64x1_t
11090 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11091 vabs_f64 (float64x1_t __a)
11092 {
11093 return (float64x1_t) {__builtin_fabs (__a[0])};
11094 }
11095
11096 __extension__ extern __inline int8x8_t
11097 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11098 vabs_s8 (int8x8_t __a)
11099 {
11100 return __builtin_aarch64_absv8qi (__a);
11101 }
11102
11103 __extension__ extern __inline int16x4_t
11104 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11105 vabs_s16 (int16x4_t __a)
11106 {
11107 return __builtin_aarch64_absv4hi (__a);
11108 }
11109
11110 __extension__ extern __inline int32x2_t
11111 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11112 vabs_s32 (int32x2_t __a)
11113 {
11114 return __builtin_aarch64_absv2si (__a);
11115 }
11116
11117 __extension__ extern __inline int64x1_t
11118 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11119 vabs_s64 (int64x1_t __a)
11120 {
11121 return (int64x1_t) {__builtin_aarch64_absdi (__a[0])};
11122 }
11123
11124 __extension__ extern __inline float32x4_t
11125 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11126 vabsq_f32 (float32x4_t __a)
11127 {
11128 return __builtin_aarch64_absv4sf (__a);
11129 }
11130
11131 __extension__ extern __inline float64x2_t
11132 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11133 vabsq_f64 (float64x2_t __a)
11134 {
11135 return __builtin_aarch64_absv2df (__a);
11136 }
11137
11138 __extension__ extern __inline int8x16_t
11139 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11140 vabsq_s8 (int8x16_t __a)
11141 {
11142 return __builtin_aarch64_absv16qi (__a);
11143 }
11144
11145 __extension__ extern __inline int16x8_t
11146 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11147 vabsq_s16 (int16x8_t __a)
11148 {
11149 return __builtin_aarch64_absv8hi (__a);
11150 }
11151
11152 __extension__ extern __inline int32x4_t
11153 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11154 vabsq_s32 (int32x4_t __a)
11155 {
11156 return __builtin_aarch64_absv4si (__a);
11157 }
11158
11159 __extension__ extern __inline int64x2_t
11160 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11161 vabsq_s64 (int64x2_t __a)
11162 {
11163 return __builtin_aarch64_absv2di (__a);
11164 }
11165
11166 /* Try to avoid moving between integer and vector registers.
11167 The cast to unsigned keeps the negation well defined even for
11168 INT64_MIN; for the full explanation see the vnegd_s64 intrinsic.
11169 The related testcase is gcc.target/aarch64/vabsd_s64.c. */
11170
11171 __extension__ extern __inline int64_t
11172 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11173 vabsd_s64 (int64_t __a)
11174 {
11175 return __a < 0 ? - (uint64_t) __a : __a;
11176 }
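
/* A minimal sketch of the INT64_MIN corner case the unsigned cast above
   protects against (hypothetical example, not part of the original header):

     int64_t __x = INT64_MIN;          // 0x8000000000000000
     int64_t __y = vabsd_s64 (__x);    // wraps: __y == INT64_MIN
     // Writing "- __x" in plain signed arithmetic would be undefined
     // behaviour; the cast performs the negation modulo 2^64 instead,
     // matching the wrapping behaviour of the ABS instruction.  */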
11177
11178 /* vadd */
11179
11180 __extension__ extern __inline int64_t
11181 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11182 vaddd_s64 (int64_t __a, int64_t __b)
11183 {
11184 return __a + __b;
11185 }
11186
11187 __extension__ extern __inline uint64_t
11188 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11189 vaddd_u64 (uint64_t __a, uint64_t __b)
11190 {
11191 return __a + __b;
11192 }
11193
11194 /* vaddv */
11195
11196 __extension__ extern __inline int8_t
11197 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11198 vaddv_s8 (int8x8_t __a)
11199 {
11200 return __builtin_aarch64_reduc_plus_scal_v8qi (__a);
11201 }
11202
11203 __extension__ extern __inline int16_t
11204 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11205 vaddv_s16 (int16x4_t __a)
11206 {
11207 return __builtin_aarch64_reduc_plus_scal_v4hi (__a);
11208 }
11209
11210 __extension__ extern __inline int32_t
11211 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11212 vaddv_s32 (int32x2_t __a)
11213 {
11214 return __builtin_aarch64_reduc_plus_scal_v2si (__a);
11215 }
11216
11217 __extension__ extern __inline uint8_t
11218 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11219 vaddv_u8 (uint8x8_t __a)
11220 {
11221 return (uint8_t) __builtin_aarch64_reduc_plus_scal_v8qi ((int8x8_t) __a);
11222 }
11223
11224 __extension__ extern __inline uint16_t
11225 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11226 vaddv_u16 (uint16x4_t __a)
11227 {
11228 return (uint16_t) __builtin_aarch64_reduc_plus_scal_v4hi ((int16x4_t) __a);
11229 }
11230
11231 __extension__ extern __inline uint32_t
11232 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11233 vaddv_u32 (uint32x2_t __a)
11234 {
11235 return (uint32_t) __builtin_aarch64_reduc_plus_scal_v2si ((int32x2_t) __a);
11236 }
11237
11238 __extension__ extern __inline int8_t
11239 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11240 vaddvq_s8 (int8x16_t __a)
11241 {
11242 return __builtin_aarch64_reduc_plus_scal_v16qi (__a);
11243 }
11244
11245 __extension__ extern __inline int16_t
11246 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11247 vaddvq_s16 (int16x8_t __a)
11248 {
11249 return __builtin_aarch64_reduc_plus_scal_v8hi (__a);
11250 }
11251
11252 __extension__ extern __inline int32_t
11253 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11254 vaddvq_s32 (int32x4_t __a)
11255 {
11256 return __builtin_aarch64_reduc_plus_scal_v4si (__a);
11257 }
11258
11259 __extension__ extern __inline int64_t
11260 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11261 vaddvq_s64 (int64x2_t __a)
11262 {
11263 return __builtin_aarch64_reduc_plus_scal_v2di (__a);
11264 }
11265
11266 __extension__ extern __inline uint8_t
11267 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11268 vaddvq_u8 (uint8x16_t __a)
11269 {
11270 return (uint8_t) __builtin_aarch64_reduc_plus_scal_v16qi ((int8x16_t) __a);
11271 }
11272
11273 __extension__ extern __inline uint16_t
11274 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11275 vaddvq_u16 (uint16x8_t __a)
11276 {
11277 return (uint16_t) __builtin_aarch64_reduc_plus_scal_v8hi ((int16x8_t) __a);
11278 }
11279
11280 __extension__ extern __inline uint32_t
11281 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11282 vaddvq_u32 (uint32x4_t __a)
11283 {
11284 return (uint32_t) __builtin_aarch64_reduc_plus_scal_v4si ((int32x4_t) __a);
11285 }
11286
11287 __extension__ extern __inline uint64_t
11288 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11289 vaddvq_u64 (uint64x2_t __a)
11290 {
11291 return (uint64_t) __builtin_aarch64_reduc_plus_scal_v2di ((int64x2_t) __a);
11292 }
11293
11294 __extension__ extern __inline float32_t
11295 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11296 vaddv_f32 (float32x2_t __a)
11297 {
11298 return __builtin_aarch64_reduc_plus_scal_v2sf (__a);
11299 }
11300
11301 __extension__ extern __inline float32_t
11302 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11303 vaddvq_f32 (float32x4_t __a)
11304 {
11305 return __builtin_aarch64_reduc_plus_scal_v4sf (__a);
11306 }
11307
11308 __extension__ extern __inline float64_t
11309 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11310 vaddvq_f64 (float64x2_t __a)
11311 {
11312 return __builtin_aarch64_reduc_plus_scal_v2df (__a);
11313 }
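
/* Usage sketch for the across-vector add reductions (hypothetical
   values, not part of the original header):

     int32x4_t __v = { 1, 2, 3, 4 };
     int32_t __sum = vaddvq_s32 (__v);   // 10: ADDV sums every lane
     float32x2_t __f = { 1.5f, 2.5f };
     float32_t __fs = vaddv_f32 (__f);   // 4.0f  */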
11314
11315 /* vbsl */
11316
11317 __extension__ extern __inline float16x4_t
11318 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11319 vbsl_f16 (uint16x4_t __a, float16x4_t __b, float16x4_t __c)
11320 {
11321 return __builtin_aarch64_simd_bslv4hf_suss (__a, __b, __c);
11322 }
11323
11324 __extension__ extern __inline float32x2_t
11325 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11326 vbsl_f32 (uint32x2_t __a, float32x2_t __b, float32x2_t __c)
11327 {
11328 return __builtin_aarch64_simd_bslv2sf_suss (__a, __b, __c);
11329 }
11330
11331 __extension__ extern __inline float64x1_t
11332 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11333 vbsl_f64 (uint64x1_t __a, float64x1_t __b, float64x1_t __c)
11334 {
11335 return (float64x1_t)
11336 { __builtin_aarch64_simd_bsldf_suss (__a[0], __b[0], __c[0]) };
11337 }
11338
11339 __extension__ extern __inline poly8x8_t
11340 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11341 vbsl_p8 (uint8x8_t __a, poly8x8_t __b, poly8x8_t __c)
11342 {
11343 return __builtin_aarch64_simd_bslv8qi_pupp (__a, __b, __c);
11344 }
11345
11346 __extension__ extern __inline poly16x4_t
11347 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11348 vbsl_p16 (uint16x4_t __a, poly16x4_t __b, poly16x4_t __c)
11349 {
11350 return __builtin_aarch64_simd_bslv4hi_pupp (__a, __b, __c);
11351 }
11352 __extension__ extern __inline poly64x1_t
11353 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11354 vbsl_p64 (uint64x1_t __a, poly64x1_t __b, poly64x1_t __c)
11355 {
11356 return (poly64x1_t)
11357 {__builtin_aarch64_simd_bsldi_pupp (__a[0], __b[0], __c[0])};
11358 }
11359
11360 __extension__ extern __inline int8x8_t
11361 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11362 vbsl_s8 (uint8x8_t __a, int8x8_t __b, int8x8_t __c)
11363 {
11364 return __builtin_aarch64_simd_bslv8qi_suss (__a, __b, __c);
11365 }
11366
11367 __extension__ extern __inline int16x4_t
11368 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11369 vbsl_s16 (uint16x4_t __a, int16x4_t __b, int16x4_t __c)
11370 {
11371 return __builtin_aarch64_simd_bslv4hi_suss (__a, __b, __c);
11372 }
11373
11374 __extension__ extern __inline int32x2_t
11375 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11376 vbsl_s32 (uint32x2_t __a, int32x2_t __b, int32x2_t __c)
11377 {
11378 return __builtin_aarch64_simd_bslv2si_suss (__a, __b, __c);
11379 }
11380
11381 __extension__ extern __inline int64x1_t
11382 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11383 vbsl_s64 (uint64x1_t __a, int64x1_t __b, int64x1_t __c)
11384 {
11385 return (int64x1_t)
11386 {__builtin_aarch64_simd_bsldi_suss (__a[0], __b[0], __c[0])};
11387 }
11388
11389 __extension__ extern __inline uint8x8_t
11390 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11391 vbsl_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
11392 {
11393 return __builtin_aarch64_simd_bslv8qi_uuuu (__a, __b, __c);
11394 }
11395
11396 __extension__ extern __inline uint16x4_t
11397 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11398 vbsl_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
11399 {
11400 return __builtin_aarch64_simd_bslv4hi_uuuu (__a, __b, __c);
11401 }
11402
11403 __extension__ extern __inline uint32x2_t
11404 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11405 vbsl_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
11406 {
11407 return __builtin_aarch64_simd_bslv2si_uuuu (__a, __b, __c);
11408 }
11409
11410 __extension__ extern __inline uint64x1_t
11411 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11412 vbsl_u64 (uint64x1_t __a, uint64x1_t __b, uint64x1_t __c)
11413 {
11414 return (uint64x1_t)
11415 {__builtin_aarch64_simd_bsldi_uuuu (__a[0], __b[0], __c[0])};
11416 }
11417
11418 __extension__ extern __inline float16x8_t
11419 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11420 vbslq_f16 (uint16x8_t __a, float16x8_t __b, float16x8_t __c)
11421 {
11422 return __builtin_aarch64_simd_bslv8hf_suss (__a, __b, __c);
11423 }
11424
11425 __extension__ extern __inline float32x4_t
11426 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11427 vbslq_f32 (uint32x4_t __a, float32x4_t __b, float32x4_t __c)
11428 {
11429 return __builtin_aarch64_simd_bslv4sf_suss (__a, __b, __c);
11430 }
11431
11432 __extension__ extern __inline float64x2_t
11433 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11434 vbslq_f64 (uint64x2_t __a, float64x2_t __b, float64x2_t __c)
11435 {
11436 return __builtin_aarch64_simd_bslv2df_suss (__a, __b, __c);
11437 }
11438
11439 __extension__ extern __inline poly8x16_t
11440 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11441 vbslq_p8 (uint8x16_t __a, poly8x16_t __b, poly8x16_t __c)
11442 {
11443 return __builtin_aarch64_simd_bslv16qi_pupp (__a, __b, __c);
11444 }
11445
11446 __extension__ extern __inline poly16x8_t
11447 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11448 vbslq_p16 (uint16x8_t __a, poly16x8_t __b, poly16x8_t __c)
11449 {
11450 return __builtin_aarch64_simd_bslv8hi_pupp (__a, __b, __c);
11451 }
11452
11453 __extension__ extern __inline int8x16_t
11454 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11455 vbslq_s8 (uint8x16_t __a, int8x16_t __b, int8x16_t __c)
11456 {
11457 return __builtin_aarch64_simd_bslv16qi_suss (__a, __b, __c);
11458 }
11459
11460 __extension__ extern __inline int16x8_t
11461 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11462 vbslq_s16 (uint16x8_t __a, int16x8_t __b, int16x8_t __c)
11463 {
11464 return __builtin_aarch64_simd_bslv8hi_suss (__a, __b, __c);
11465 }
11466
11467 __extension__ extern __inline poly64x2_t
11468 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11469 vbslq_p64 (uint64x2_t __a, poly64x2_t __b, poly64x2_t __c)
11470 {
11471 return __builtin_aarch64_simd_bslv2di_pupp (__a, __b, __c);
11472 }
11473
11474 __extension__ extern __inline int32x4_t
11475 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11476 vbslq_s32 (uint32x4_t __a, int32x4_t __b, int32x4_t __c)
11477 {
11478 return __builtin_aarch64_simd_bslv4si_suss (__a, __b, __c);
11479 }
11480
11481 __extension__ extern __inline int64x2_t
11482 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11483 vbslq_s64 (uint64x2_t __a, int64x2_t __b, int64x2_t __c)
11484 {
11485 return __builtin_aarch64_simd_bslv2di_suss (__a, __b, __c);
11486 }
11487
11488 __extension__ extern __inline uint8x16_t
11489 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11490 vbslq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
11491 {
11492 return __builtin_aarch64_simd_bslv16qi_uuuu (__a, __b, __c);
11493 }
11494
11495 __extension__ extern __inline uint16x8_t
11496 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11497 vbslq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
11498 {
11499 return __builtin_aarch64_simd_bslv8hi_uuuu (__a, __b, __c);
11500 }
11501
11502 __extension__ extern __inline uint32x4_t
11503 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11504 vbslq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
11505 {
11506 return __builtin_aarch64_simd_bslv4si_uuuu (__a, __b, __c);
11507 }
11508
11509 __extension__ extern __inline uint64x2_t
11510 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11511 vbslq_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
11512 {
11513 return __builtin_aarch64_simd_bslv2di_uuuu (__a, __b, __c);
11514 }
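
/* Bitwise-select usage sketch (hypothetical values, not part of the
   original header).  Each result bit comes from the second operand where
   the mask bit is set and from the third operand where it is clear:

     uint32x2_t __mask = { 0xffff0000u, 0x00000000u };
     uint32x2_t __hi = vdup_n_u32 (0xaaaaaaaau);
     uint32x2_t __lo = vdup_n_u32 (0x55555555u);
     uint32x2_t __sel = vbsl_u32 (__mask, __hi, __lo);
       // __sel == { 0xaaaa5555u, 0x55555555u }  */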
11515
11516 /* ARMv8.1-A intrinsics. */
11517 #pragma GCC push_options
11518 #pragma GCC target ("+nothing+rdma")
11519
11520 __extension__ extern __inline int16x4_t
11521 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11522 vqrdmlah_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
11523 {
11524 return __builtin_aarch64_sqrdmlahv4hi (__a, __b, __c);
11525 }
11526
11527 __extension__ extern __inline int32x2_t
11528 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11529 vqrdmlah_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
11530 {
11531 return __builtin_aarch64_sqrdmlahv2si (__a, __b, __c);
11532 }
11533
11534 __extension__ extern __inline int16x8_t
11535 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11536 vqrdmlahq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
11537 {
11538 return __builtin_aarch64_sqrdmlahv8hi (__a, __b, __c);
11539 }
11540
11541 __extension__ extern __inline int32x4_t
11542 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11543 vqrdmlahq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
11544 {
11545 return __builtin_aarch64_sqrdmlahv4si (__a, __b, __c);
11546 }
11547
11548 __extension__ extern __inline int16x4_t
11549 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11550 vqrdmlsh_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
11551 {
11552 return __builtin_aarch64_sqrdmlshv4hi (__a, __b, __c);
11553 }
11554
11555 __extension__ extern __inline int32x2_t
11556 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11557 vqrdmlsh_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
11558 {
11559 return __builtin_aarch64_sqrdmlshv2si (__a, __b, __c);
11560 }
11561
11562 __extension__ extern __inline int16x8_t
11563 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11564 vqrdmlshq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
11565 {
11566 return __builtin_aarch64_sqrdmlshv8hi (__a, __b, __c);
11567 }
11568
11569 __extension__ extern __inline int32x4_t
11570 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11571 vqrdmlshq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
11572 {
11573 return __builtin_aarch64_sqrdmlshv4si (__a, __b, __c);
11574 }
11575
11576 __extension__ extern __inline int16x4_t
11577 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11578 vqrdmlah_laneq_s16 (int16x4_t __a, int16x4_t __b, int16x8_t __c, const int __d)
11579 {
11580 return __builtin_aarch64_sqrdmlah_laneqv4hi (__a, __b, __c, __d);
11581 }
11582
11583 __extension__ extern __inline int32x2_t
11584 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11585 vqrdmlah_laneq_s32 (int32x2_t __a, int32x2_t __b, int32x4_t __c, const int __d)
11586 {
11587 return __builtin_aarch64_sqrdmlah_laneqv2si (__a, __b, __c, __d);
11588 }
11589
11590 __extension__ extern __inline int16x8_t
11591 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11592 vqrdmlahq_laneq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c, const int __d)
11593 {
11594 return __builtin_aarch64_sqrdmlah_laneqv8hi (__a, __b, __c, __d);
11595 }
11596
11597 __extension__ extern __inline int32x4_t
11598 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11599 vqrdmlahq_laneq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c, const int __d)
11600 {
11601 return __builtin_aarch64_sqrdmlah_laneqv4si (__a, __b, __c, __d);
11602 }
11603
11604 __extension__ extern __inline int16x4_t
11605 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11606 vqrdmlsh_laneq_s16 (int16x4_t __a, int16x4_t __b, int16x8_t __c, const int __d)
11607 {
11608 return __builtin_aarch64_sqrdmlsh_laneqv4hi (__a, __b, __c, __d);
11609 }
11610
11611 __extension__ extern __inline int32x2_t
11612 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11613 vqrdmlsh_laneq_s32 (int32x2_t __a, int32x2_t __b, int32x4_t __c, const int __d)
11614 {
11615 return __builtin_aarch64_sqrdmlsh_laneqv2si (__a, __b, __c, __d);
11616 }
11617
11618 __extension__ extern __inline int16x8_t
11619 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11620 vqrdmlshq_laneq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c, const int __d)
11621 {
11622 return __builtin_aarch64_sqrdmlsh_laneqv8hi (__a, __b, __c, __d);
11623 }
11624
11625 __extension__ extern __inline int32x4_t
11626 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11627 vqrdmlshq_laneq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c, const int __d)
11628 {
11629 return __builtin_aarch64_sqrdmlsh_laneqv4si (__a, __b, __c, __d);
11630 }
11631
11632 __extension__ extern __inline int16x4_t
11633 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11634 vqrdmlah_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
11635 {
11636 return __builtin_aarch64_sqrdmlah_lanev4hi (__a, __b, __c, __d);
11637 }
11638
11639 __extension__ extern __inline int32x2_t
11640 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11641 vqrdmlah_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
11642 {
11643 return __builtin_aarch64_sqrdmlah_lanev2si (__a, __b, __c, __d);
11644 }
11645
11646 __extension__ extern __inline int16x8_t
11647 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11648 vqrdmlahq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
11649 {
11650 return __builtin_aarch64_sqrdmlah_lanev8hi (__a, __b, __c, __d);
11651 }
11652
11653 __extension__ extern __inline int32x4_t
11654 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11655 vqrdmlahq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
11656 {
11657 return __builtin_aarch64_sqrdmlah_lanev4si (__a, __b, __c, __d);
11658 }
11659
11660 __extension__ extern __inline int16_t
11661 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11662 vqrdmlahh_s16 (int16_t __a, int16_t __b, int16_t __c)
11663 {
11664 return (int16_t) __builtin_aarch64_sqrdmlahhi (__a, __b, __c);
11665 }
11666
11667 __extension__ extern __inline int16_t
11668 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11669 vqrdmlahh_lane_s16 (int16_t __a, int16_t __b, int16x4_t __c, const int __d)
11670 {
11671 return __builtin_aarch64_sqrdmlah_lanehi (__a, __b, __c, __d);
11672 }
11673
11674 __extension__ extern __inline int16_t
11675 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11676 vqrdmlahh_laneq_s16 (int16_t __a, int16_t __b, int16x8_t __c, const int __d)
11677 {
11678 return __builtin_aarch64_sqrdmlah_laneqhi (__a, __b, __c, __d);
11679 }
11680
11681 __extension__ extern __inline int32_t
11682 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11683 vqrdmlahs_s32 (int32_t __a, int32_t __b, int32_t __c)
11684 {
11685 return (int32_t) __builtin_aarch64_sqrdmlahsi (__a, __b, __c);
11686 }
11687
11688 __extension__ extern __inline int32_t
11689 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11690 vqrdmlahs_lane_s32 (int32_t __a, int32_t __b, int32x2_t __c, const int __d)
11691 {
11692 return __builtin_aarch64_sqrdmlah_lanesi (__a, __b, __c, __d);
11693 }
11694
11695 __extension__ extern __inline int32_t
11696 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11697 vqrdmlahs_laneq_s32 (int32_t __a, int32_t __b, int32x4_t __c, const int __d)
11698 {
11699 return __builtin_aarch64_sqrdmlah_laneqsi (__a, __b, __c, __d);
11700 }
11701
11702 __extension__ extern __inline int16x4_t
11703 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11704 vqrdmlsh_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
11705 {
11706 return __builtin_aarch64_sqrdmlsh_lanev4hi (__a, __b, __c, __d);
11707 }
11708
11709 __extension__ extern __inline int32x2_t
11710 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11711 vqrdmlsh_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
11712 {
11713 return __builtin_aarch64_sqrdmlsh_lanev2si (__a, __b, __c, __d);
11714 }
11715
11716 __extension__ extern __inline int16x8_t
11717 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11718 vqrdmlshq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
11719 {
11720 return __builtin_aarch64_sqrdmlsh_lanev8hi (__a, __b, __c, __d);
11721 }
11722
11723 __extension__ extern __inline int32x4_t
11724 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11725 vqrdmlshq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
11726 {
11727 return __builtin_aarch64_sqrdmlsh_lanev4si (__a, __b, __c, __d);
11728 }
11729
11730 __extension__ extern __inline int16_t
11731 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11732 vqrdmlshh_s16 (int16_t __a, int16_t __b, int16_t __c)
11733 {
11734 return (int16_t) __builtin_aarch64_sqrdmlshhi (__a, __b, __c);
11735 }
11736
11737 __extension__ extern __inline int16_t
11738 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11739 vqrdmlshh_lane_s16 (int16_t __a, int16_t __b, int16x4_t __c, const int __d)
11740 {
11741 return __builtin_aarch64_sqrdmlsh_lanehi (__a, __b, __c, __d);
11742 }
11743
11744 __extension__ extern __inline int16_t
11745 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11746 vqrdmlshh_laneq_s16 (int16_t __a, int16_t __b, int16x8_t __c, const int __d)
11747 {
11748 return __builtin_aarch64_sqrdmlsh_laneqhi (__a, __b, __c, __d);
11749 }
11750
11751 __extension__ extern __inline int32_t
11752 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11753 vqrdmlshs_s32 (int32_t __a, int32_t __b, int32_t __c)
11754 {
11755 return (int32_t) __builtin_aarch64_sqrdmlshsi (__a, __b, __c);
11756 }
11757
11758 __extension__ extern __inline int32_t
11759 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11760 vqrdmlshs_lane_s32 (int32_t __a, int32_t __b, int32x2_t __c, const int __d)
11761 {
11762 return __builtin_aarch64_sqrdmlsh_lanesi (__a, __b, __c, __d);
11763 }
11764
11765 __extension__ extern __inline int32_t
11766 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11767 vqrdmlshs_laneq_s32 (int32_t __a, int32_t __b, int32x4_t __c, const int __d)
11768 {
11769 return __builtin_aarch64_sqrdmlsh_laneqsi (__a, __b, __c, __d);
11770 }
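
/* Usage sketch for the ARMv8.1-A rounding-doubling multiply-accumulate
   intrinsics above (hypothetical values, not part of the original
   header).  Each form fuses the rounding doubling multiply-high step
   with a saturating accumulate into (or, for the vqrdmlsh forms, a
   saturating subtract from) the first operand:

     int16x4_t __acc = vdup_n_s16 (100);
     int16x4_t __b = vdup_n_s16 (0x4000);   // 0.5 in Q15
     int16x4_t __c = vdup_n_s16 (0x2000);   // 0.25 in Q15
     int16x4_t __r = vqrdmlah_s16 (__acc, __b, __c);
       // each lane: 100 + 0x1000 (0.125 in Q15)  */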
11771 #pragma GCC pop_options
11772
11773 #pragma GCC push_options
11774 #pragma GCC target ("+nothing+crypto")
11775 /* vaes */
11776
11777 __extension__ extern __inline uint8x16_t
11778 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11779 vaeseq_u8 (uint8x16_t __data, uint8x16_t __key)
11780 {
11781 return __builtin_aarch64_crypto_aesev16qi_uuu (__data, __key);
11782 }
11783
11784 __extension__ extern __inline uint8x16_t
11785 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11786 vaesdq_u8 (uint8x16_t __data, uint8x16_t __key)
11787 {
11788 return __builtin_aarch64_crypto_aesdv16qi_uuu (__data, __key);
11789 }
11790
11791 __extension__ extern __inline uint8x16_t
11792 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11793 vaesmcq_u8 (uint8x16_t __data)
11794 {
11795 return __builtin_aarch64_crypto_aesmcv16qi_uu (__data);
11796 }
11797
11798 __extension__ extern __inline uint8x16_t
11799 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11800 vaesimcq_u8 (uint8x16_t __data)
11801 {
11802 return __builtin_aarch64_crypto_aesimcv16qi_uu (__data);
11803 }
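
/* Usage sketch (hypothetical, not part of the original header): one full
   AES encryption round is the AESE step followed by AESMC, e.g.

     uint8x16_t __state = vdupq_n_u8 (0);
     uint8x16_t __rkey = vdupq_n_u8 (0);
     __state = vaesmcq_u8 (vaeseq_u8 (__state, __rkey));

   vaeseq_u8 performs AddRoundKey, SubBytes and ShiftRows; vaesmcq_u8
   performs MixColumns.  The final round omits the MixColumns step.  */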
11804 #pragma GCC pop_options
11805
11806 /* vcage */
11807
11808 __extension__ extern __inline uint64x1_t
11809 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11810 vcage_f64 (float64x1_t __a, float64x1_t __b)
11811 {
11812 return vabs_f64 (__a) >= vabs_f64 (__b);
11813 }
11814
11815 __extension__ extern __inline uint32_t
11816 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11817 vcages_f32 (float32_t __a, float32_t __b)
11818 {
11819 return __builtin_fabsf (__a) >= __builtin_fabsf (__b) ? -1 : 0;
11820 }
11821
11822 __extension__ extern __inline uint32x2_t
11823 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11824 vcage_f32 (float32x2_t __a, float32x2_t __b)
11825 {
11826 return vabs_f32 (__a) >= vabs_f32 (__b);
11827 }
11828
11829 __extension__ extern __inline uint32x4_t
11830 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11831 vcageq_f32 (float32x4_t __a, float32x4_t __b)
11832 {
11833 return vabsq_f32 (__a) >= vabsq_f32 (__b);
11834 }
11835
11836 __extension__ extern __inline uint64_t
11837 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11838 vcaged_f64 (float64_t __a, float64_t __b)
11839 {
11840 return __builtin_fabs (__a) >= __builtin_fabs (__b) ? -1 : 0;
11841 }
11842
11843 __extension__ extern __inline uint64x2_t
11844 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11845 vcageq_f64 (float64x2_t __a, float64x2_t __b)
11846 {
11847 return vabsq_f64 (__a) >= vabsq_f64 (__b);
11848 }
11849
11850 /* vcagt */
11851
11852 __extension__ extern __inline uint32_t
11853 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11854 vcagts_f32 (float32_t __a, float32_t __b)
11855 {
11856 return __builtin_fabsf (__a) > __builtin_fabsf (__b) ? -1 : 0;
11857 }
11858
11859 __extension__ extern __inline uint32x2_t
11860 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11861 vcagt_f32 (float32x2_t __a, float32x2_t __b)
11862 {
11863 return vabs_f32 (__a) > vabs_f32 (__b);
11864 }
11865
11866 __extension__ extern __inline uint64x1_t
11867 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11868 vcagt_f64 (float64x1_t __a, float64x1_t __b)
11869 {
11870 return vabs_f64 (__a) > vabs_f64 (__b);
11871 }
11872
11873 __extension__ extern __inline uint32x4_t
11874 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11875 vcagtq_f32 (float32x4_t __a, float32x4_t __b)
11876 {
11877 return vabsq_f32 (__a) > vabsq_f32 (__b);
11878 }
11879
11880 __extension__ extern __inline uint64_t
11881 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11882 vcagtd_f64 (float64_t __a, float64_t __b)
11883 {
11884 return __builtin_fabs (__a) > __builtin_fabs (__b) ? -1 : 0;
11885 }
11886
11887 __extension__ extern __inline uint64x2_t
11888 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11889 vcagtq_f64 (float64x2_t __a, float64x2_t __b)
11890 {
11891 return vabsq_f64 (__a) > vabsq_f64 (__b);
11892 }
11893
11894 /* vcale */
11895
11896 __extension__ extern __inline uint32x2_t
11897 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11898 vcale_f32 (float32x2_t __a, float32x2_t __b)
11899 {
11900 return vabs_f32 (__a) <= vabs_f32 (__b);
11901 }
11902
11903 __extension__ extern __inline uint64x1_t
11904 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11905 vcale_f64 (float64x1_t __a, float64x1_t __b)
11906 {
11907 return vabs_f64 (__a) <= vabs_f64 (__b);
11908 }
11909
11910 __extension__ extern __inline uint64_t
11911 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11912 vcaled_f64 (float64_t __a, float64_t __b)
11913 {
11914 return __builtin_fabs (__a) <= __builtin_fabs (__b) ? -1 : 0;
11915 }
11916
11917 __extension__ extern __inline uint32_t
11918 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11919 vcales_f32 (float32_t __a, float32_t __b)
11920 {
11921 return __builtin_fabsf (__a) <= __builtin_fabsf (__b) ? -1 : 0;
11922 }
11923
11924 __extension__ extern __inline uint32x4_t
11925 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11926 vcaleq_f32 (float32x4_t __a, float32x4_t __b)
11927 {
11928 return vabsq_f32 (__a) <= vabsq_f32 (__b);
11929 }
11930
11931 __extension__ extern __inline uint64x2_t
11932 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11933 vcaleq_f64 (float64x2_t __a, float64x2_t __b)
11934 {
11935 return vabsq_f64 (__a) <= vabsq_f64 (__b);
11936 }
11937
11938 /* vcalt */
11939
11940 __extension__ extern __inline uint32x2_t
11941 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11942 vcalt_f32 (float32x2_t __a, float32x2_t __b)
11943 {
11944 return vabs_f32 (__a) < vabs_f32 (__b);
11945 }
11946
11947 __extension__ extern __inline uint64x1_t
11948 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11949 vcalt_f64 (float64x1_t __a, float64x1_t __b)
11950 {
11951 return vabs_f64 (__a) < vabs_f64 (__b);
11952 }
11953
11954 __extension__ extern __inline uint64_t
11955 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11956 vcaltd_f64 (float64_t __a, float64_t __b)
11957 {
11958 return __builtin_fabs (__a) < __builtin_fabs (__b) ? -1 : 0;
11959 }
11960
11961 __extension__ extern __inline uint32x4_t
11962 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11963 vcaltq_f32 (float32x4_t __a, float32x4_t __b)
11964 {
11965 return vabsq_f32 (__a) < vabsq_f32 (__b);
11966 }
11967
11968 __extension__ extern __inline uint64x2_t
11969 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11970 vcaltq_f64 (float64x2_t __a, float64x2_t __b)
11971 {
11972 return vabsq_f64 (__a) < vabsq_f64 (__b);
11973 }
11974
11975 __extension__ extern __inline uint32_t
11976 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11977 vcalts_f32 (float32_t __a, float32_t __b)
11978 {
11979 return __builtin_fabsf (__a) < __builtin_fabsf (__b) ? -1 : 0;
11980 }
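
/* Usage sketch for the absolute-compare intrinsics (hypothetical values,
   not part of the original header).  Lanes where the comparison of the
   absolute values holds become all-ones, the rest all-zeros:

     float32x2_t __a = { -3.0f, 1.0f };
     float32x2_t __b = { 2.0f, -2.0f };
     uint32x2_t __m = vcagt_f32 (__a, __b);
       // { 0xffffffffu, 0x00000000u }: |-3| > |2|, but |1| <= |-2|  */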
11981
11982 /* vceq - vector. */
11983
11984 __extension__ extern __inline uint32x2_t
11985 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11986 vceq_f32 (float32x2_t __a, float32x2_t __b)
11987 {
11988 return (uint32x2_t) (__a == __b);
11989 }
11990
11991 __extension__ extern __inline uint64x1_t
11992 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
11993 vceq_f64 (float64x1_t __a, float64x1_t __b)
11994 {
11995 return (uint64x1_t) (__a == __b);
11996 }
11997
11998 __extension__ extern __inline uint8x8_t
11999 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12000 vceq_p8 (poly8x8_t __a, poly8x8_t __b)
12001 {
12002 return (uint8x8_t) (__a == __b);
12003 }
12004
12005 __extension__ extern __inline uint64x1_t
12006 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12007 vceq_p64 (poly64x1_t __a, poly64x1_t __b)
12008 {
12009 return (uint64x1_t) (__a == __b);
12010 }
12011
12012 __extension__ extern __inline uint8x8_t
12013 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12014 vceq_s8 (int8x8_t __a, int8x8_t __b)
12015 {
12016 return (uint8x8_t) (__a == __b);
12017 }
12018
12019 __extension__ extern __inline uint16x4_t
12020 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12021 vceq_s16 (int16x4_t __a, int16x4_t __b)
12022 {
12023 return (uint16x4_t) (__a == __b);
12024 }
12025
12026 __extension__ extern __inline uint32x2_t
12027 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12028 vceq_s32 (int32x2_t __a, int32x2_t __b)
12029 {
12030 return (uint32x2_t) (__a == __b);
12031 }
12032
12033 __extension__ extern __inline uint64x1_t
12034 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12035 vceq_s64 (int64x1_t __a, int64x1_t __b)
12036 {
12037 return (uint64x1_t) (__a == __b);
12038 }
12039
12040 __extension__ extern __inline uint8x8_t
12041 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12042 vceq_u8 (uint8x8_t __a, uint8x8_t __b)
12043 {
12044 return (__a == __b);
12045 }
12046
12047 __extension__ extern __inline uint16x4_t
12048 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12049 vceq_u16 (uint16x4_t __a, uint16x4_t __b)
12050 {
12051 return (__a == __b);
12052 }
12053
12054 __extension__ extern __inline uint32x2_t
12055 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12056 vceq_u32 (uint32x2_t __a, uint32x2_t __b)
12057 {
12058 return (__a == __b);
12059 }
12060
12061 __extension__ extern __inline uint64x1_t
12062 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12063 vceq_u64 (uint64x1_t __a, uint64x1_t __b)
12064 {
12065 return (__a == __b);
12066 }
12067
12068 __extension__ extern __inline uint32x4_t
12069 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12070 vceqq_f32 (float32x4_t __a, float32x4_t __b)
12071 {
12072 return (uint32x4_t) (__a == __b);
12073 }
12074
12075 __extension__ extern __inline uint64x2_t
12076 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12077 vceqq_f64 (float64x2_t __a, float64x2_t __b)
12078 {
12079 return (uint64x2_t) (__a == __b);
12080 }
12081
12082 __extension__ extern __inline uint8x16_t
12083 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12084 vceqq_p8 (poly8x16_t __a, poly8x16_t __b)
12085 {
12086 return (uint8x16_t) (__a == __b);
12087 }
12088
12089 __extension__ extern __inline uint8x16_t
12090 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12091 vceqq_s8 (int8x16_t __a, int8x16_t __b)
12092 {
12093 return (uint8x16_t) (__a == __b);
12094 }
12095
12096 __extension__ extern __inline uint16x8_t
12097 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12098 vceqq_s16 (int16x8_t __a, int16x8_t __b)
12099 {
12100 return (uint16x8_t) (__a == __b);
12101 }
12102
12103 __extension__ extern __inline uint32x4_t
12104 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12105 vceqq_s32 (int32x4_t __a, int32x4_t __b)
12106 {
12107 return (uint32x4_t) (__a == __b);
12108 }
12109
12110 __extension__ extern __inline uint64x2_t
12111 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12112 vceqq_s64 (int64x2_t __a, int64x2_t __b)
12113 {
12114 return (uint64x2_t) (__a == __b);
12115 }
12116
12117 __extension__ extern __inline uint8x16_t
12118 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12119 vceqq_u8 (uint8x16_t __a, uint8x16_t __b)
12120 {
12121 return (__a == __b);
12122 }
12123
12124 __extension__ extern __inline uint16x8_t
12125 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12126 vceqq_u16 (uint16x8_t __a, uint16x8_t __b)
12127 {
12128 return (__a == __b);
12129 }
12130
12131 __extension__ extern __inline uint32x4_t
12132 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12133 vceqq_u32 (uint32x4_t __a, uint32x4_t __b)
12134 {
12135 return (__a == __b);
12136 }
12137
12138 __extension__ extern __inline uint64x2_t
12139 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12140 vceqq_u64 (uint64x2_t __a, uint64x2_t __b)
12141 {
12142 return (__a == __b);
12143 }
12144
12145 __extension__ extern __inline uint64x2_t
12146 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12147 vceqq_p64 (poly64x2_t __a, poly64x2_t __b)
12148 {
12149 return (__a == __b);
12150 }
12151
12152 /* vceq - scalar. */
12153
12154 __extension__ extern __inline uint32_t
12155 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12156 vceqs_f32 (float32_t __a, float32_t __b)
12157 {
12158 return __a == __b ? -1 : 0;
12159 }
12160
12161 __extension__ extern __inline uint64_t
12162 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12163 vceqd_s64 (int64_t __a, int64_t __b)
12164 {
12165 return __a == __b ? -1ll : 0ll;
12166 }
12167
12168 __extension__ extern __inline uint64_t
12169 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12170 vceqd_u64 (uint64_t __a, uint64_t __b)
12171 {
12172 return __a == __b ? -1ll : 0ll;
12173 }
12174
12175 __extension__ extern __inline uint64_t
12176 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12177 vceqd_f64 (float64_t __a, float64_t __b)
12178 {
12179 return __a == __b ? -1ll : 0ll;
12180 }
12181
12182 /* vceqz - vector. */
12183
12184 __extension__ extern __inline uint32x2_t
12185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12186 vceqz_f32 (float32x2_t __a)
12187 {
12188 return (uint32x2_t) (__a == 0.0f);
12189 }
12190
12191 __extension__ extern __inline uint64x1_t
12192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12193 vceqz_f64 (float64x1_t __a)
12194 {
12195 return (uint64x1_t) (__a == (float64x1_t) {0.0});
12196 }
12197
12198 __extension__ extern __inline uint8x8_t
12199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12200 vceqz_p8 (poly8x8_t __a)
12201 {
12202 return (uint8x8_t) (__a == 0);
12203 }
12204
12205 __extension__ extern __inline uint8x8_t
12206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12207 vceqz_s8 (int8x8_t __a)
12208 {
12209 return (uint8x8_t) (__a == 0);
12210 }
12211
12212 __extension__ extern __inline uint16x4_t
12213 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12214 vceqz_s16 (int16x4_t __a)
12215 {
12216 return (uint16x4_t) (__a == 0);
12217 }
12218
12219 __extension__ extern __inline uint32x2_t
12220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12221 vceqz_s32 (int32x2_t __a)
12222 {
12223 return (uint32x2_t) (__a == 0);
12224 }
12225
12226 __extension__ extern __inline uint64x1_t
12227 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12228 vceqz_s64 (int64x1_t __a)
12229 {
12230 return (uint64x1_t) (__a == __AARCH64_INT64_C (0));
12231 }
12232
12233 __extension__ extern __inline uint8x8_t
12234 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12235 vceqz_u8 (uint8x8_t __a)
12236 {
12237 return (__a == 0);
12238 }
12239
12240 __extension__ extern __inline uint16x4_t
12241 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12242 vceqz_u16 (uint16x4_t __a)
12243 {
12244 return (__a == 0);
12245 }
12246
12247 __extension__ extern __inline uint32x2_t
12248 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12249 vceqz_u32 (uint32x2_t __a)
12250 {
12251 return (__a == 0);
12252 }
12253
12254 __extension__ extern __inline uint64x1_t
12255 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12256 vceqz_u64 (uint64x1_t __a)
12257 {
12258 return (__a == __AARCH64_UINT64_C (0));
12259 }
12260
12261 __extension__ extern __inline uint64x1_t
12262 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12263 vceqz_p64 (poly64x1_t __a)
12264 {
12265 return (__a == __AARCH64_UINT64_C (0));
12266 }
12267
12268 __extension__ extern __inline uint32x4_t
12269 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12270 vceqzq_f32 (float32x4_t __a)
12271 {
12272 return (uint32x4_t) (__a == 0.0f);
12273 }
12274
12275 __extension__ extern __inline uint64x2_t
12276 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12277 vceqzq_f64 (float64x2_t __a)
12278 {
12279 return (uint64x2_t) (__a == 0.0);
12280 }
12281
12282 __extension__ extern __inline uint8x16_t
12283 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12284 vceqzq_p8 (poly8x16_t __a)
12285 {
12286 return (uint8x16_t) (__a == 0);
12287 }
12288
12289 __extension__ extern __inline uint8x16_t
12290 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12291 vceqzq_s8 (int8x16_t __a)
12292 {
12293 return (uint8x16_t) (__a == 0);
12294 }
12295
12296 __extension__ extern __inline uint16x8_t
12297 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12298 vceqzq_s16 (int16x8_t __a)
12299 {
12300 return (uint16x8_t) (__a == 0);
12301 }
12302
12303 __extension__ extern __inline uint32x4_t
12304 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12305 vceqzq_s32 (int32x4_t __a)
12306 {
12307 return (uint32x4_t) (__a == 0);
12308 }
12309
12310 __extension__ extern __inline uint64x2_t
12311 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12312 vceqzq_s64 (int64x2_t __a)
12313 {
12314 return (uint64x2_t) (__a == __AARCH64_INT64_C (0));
12315 }
12316
12317 __extension__ extern __inline uint8x16_t
12318 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12319 vceqzq_u8 (uint8x16_t __a)
12320 {
12321 return (__a == 0);
12322 }
12323
12324 __extension__ extern __inline uint16x8_t
12325 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12326 vceqzq_u16 (uint16x8_t __a)
12327 {
12328 return (__a == 0);
12329 }
12330
12331 __extension__ extern __inline uint32x4_t
12332 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12333 vceqzq_u32 (uint32x4_t __a)
12334 {
12335 return (__a == 0);
12336 }
12337
12338 __extension__ extern __inline uint64x2_t
12339 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12340 vceqzq_u64 (uint64x2_t __a)
12341 {
12342 return (__a == __AARCH64_UINT64_C (0));
12343 }
12344
12345 __extension__ extern __inline uint64x2_t
12346 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12347 vceqzq_p64 (poly64x2_t __a)
12348 {
12349 return (__a == __AARCH64_UINT64_C (0));
12350 }
12351
12352 /* vceqz - scalar. */
12353
12354 __extension__ extern __inline uint32_t
12355 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12356 vceqzs_f32 (float32_t __a)
12357 {
12358 return __a == 0.0f ? -1 : 0;
12359 }
12360
12361 __extension__ extern __inline uint64_t
12362 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12363 vceqzd_s64 (int64_t __a)
12364 {
12365 return __a == 0 ? -1ll : 0ll;
12366 }
12367
12368 __extension__ extern __inline uint64_t
12369 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12370 vceqzd_u64 (uint64_t __a)
12371 {
12372 return __a == 0 ? -1ll : 0ll;
12373 }
12374
12375 __extension__ extern __inline uint64_t
12376 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12377 vceqzd_f64 (float64_t __a)
12378 {
12379 return __a == 0.0 ? -1ll : 0ll;
12380 }
12381
12382 /* vcge - vector. */
12383
12384 __extension__ extern __inline uint32x2_t
12385 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12386 vcge_f32 (float32x2_t __a, float32x2_t __b)
12387 {
12388 return (uint32x2_t) (__a >= __b);
12389 }
12390
12391 __extension__ extern __inline uint64x1_t
12392 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12393 vcge_f64 (float64x1_t __a, float64x1_t __b)
12394 {
12395 return (uint64x1_t) (__a >= __b);
12396 }
12397
12398 __extension__ extern __inline uint8x8_t
12399 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12400 vcge_s8 (int8x8_t __a, int8x8_t __b)
12401 {
12402 return (uint8x8_t) (__a >= __b);
12403 }
12404
12405 __extension__ extern __inline uint16x4_t
12406 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12407 vcge_s16 (int16x4_t __a, int16x4_t __b)
12408 {
12409 return (uint16x4_t) (__a >= __b);
12410 }
12411
12412 __extension__ extern __inline uint32x2_t
12413 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12414 vcge_s32 (int32x2_t __a, int32x2_t __b)
12415 {
12416 return (uint32x2_t) (__a >= __b);
12417 }
12418
12419 __extension__ extern __inline uint64x1_t
12420 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12421 vcge_s64 (int64x1_t __a, int64x1_t __b)
12422 {
12423 return (uint64x1_t) (__a >= __b);
12424 }
12425
12426 __extension__ extern __inline uint8x8_t
12427 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12428 vcge_u8 (uint8x8_t __a, uint8x8_t __b)
12429 {
12430 return (__a >= __b);
12431 }
12432
12433 __extension__ extern __inline uint16x4_t
12434 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12435 vcge_u16 (uint16x4_t __a, uint16x4_t __b)
12436 {
12437 return (__a >= __b);
12438 }
12439
12440 __extension__ extern __inline uint32x2_t
12441 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12442 vcge_u32 (uint32x2_t __a, uint32x2_t __b)
12443 {
12444 return (__a >= __b);
12445 }
12446
12447 __extension__ extern __inline uint64x1_t
12448 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12449 vcge_u64 (uint64x1_t __a, uint64x1_t __b)
12450 {
12451 return (__a >= __b);
12452 }
12453
12454 __extension__ extern __inline uint32x4_t
12455 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12456 vcgeq_f32 (float32x4_t __a, float32x4_t __b)
12457 {
12458 return (uint32x4_t) (__a >= __b);
12459 }
12460
12461 __extension__ extern __inline uint64x2_t
12462 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12463 vcgeq_f64 (float64x2_t __a, float64x2_t __b)
12464 {
12465 return (uint64x2_t) (__a >= __b);
12466 }
12467
12468 __extension__ extern __inline uint8x16_t
12469 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12470 vcgeq_s8 (int8x16_t __a, int8x16_t __b)
12471 {
12472 return (uint8x16_t) (__a >= __b);
12473 }
12474
12475 __extension__ extern __inline uint16x8_t
12476 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12477 vcgeq_s16 (int16x8_t __a, int16x8_t __b)
12478 {
12479 return (uint16x8_t) (__a >= __b);
12480 }
12481
12482 __extension__ extern __inline uint32x4_t
12483 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12484 vcgeq_s32 (int32x4_t __a, int32x4_t __b)
12485 {
12486 return (uint32x4_t) (__a >= __b);
12487 }
12488
12489 __extension__ extern __inline uint64x2_t
12490 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12491 vcgeq_s64 (int64x2_t __a, int64x2_t __b)
12492 {
12493 return (uint64x2_t) (__a >= __b);
12494 }
12495
12496 __extension__ extern __inline uint8x16_t
12497 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12498 vcgeq_u8 (uint8x16_t __a, uint8x16_t __b)
12499 {
12500 return (__a >= __b);
12501 }
12502
12503 __extension__ extern __inline uint16x8_t
12504 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12505 vcgeq_u16 (uint16x8_t __a, uint16x8_t __b)
12506 {
12507 return (__a >= __b);
12508 }
12509
12510 __extension__ extern __inline uint32x4_t
12511 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12512 vcgeq_u32 (uint32x4_t __a, uint32x4_t __b)
12513 {
12514 return (__a >= __b);
12515 }
12516
12517 __extension__ extern __inline uint64x2_t
12518 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12519 vcgeq_u64 (uint64x2_t __a, uint64x2_t __b)
12520 {
12521 return (__a >= __b);
12522 }
12523
12524 /* vcge - scalar. */
12525
12526 __extension__ extern __inline uint32_t
12527 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12528 vcges_f32 (float32_t __a, float32_t __b)
12529 {
12530 return __a >= __b ? -1 : 0;
12531 }
12532
12533 __extension__ extern __inline uint64_t
12534 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12535 vcged_s64 (int64_t __a, int64_t __b)
12536 {
12537 return __a >= __b ? -1ll : 0ll;
12538 }
12539
12540 __extension__ extern __inline uint64_t
12541 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12542 vcged_u64 (uint64_t __a, uint64_t __b)
12543 {
12544 return __a >= __b ? -1ll : 0ll;
12545 }
12546
12547 __extension__ extern __inline uint64_t
12548 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12549 vcged_f64 (float64_t __a, float64_t __b)
12550 {
12551 return __a >= __b ? -1ll : 0ll;
12552 }
12553
12554 /* vcgez - vector. */
12555
12556 __extension__ extern __inline uint32x2_t
12557 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12558 vcgez_f32 (float32x2_t __a)
12559 {
12560 return (uint32x2_t) (__a >= 0.0f);
12561 }
12562
12563 __extension__ extern __inline uint64x1_t
12564 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12565 vcgez_f64 (float64x1_t __a)
12566 {
12567 return (uint64x1_t) (__a >= (float64x1_t) {0.0});
12568 }
12569
12570 __extension__ extern __inline uint8x8_t
12571 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12572 vcgez_s8 (int8x8_t __a)
12573 {
12574 return (uint8x8_t) (__a >= 0);
12575 }
12576
12577 __extension__ extern __inline uint16x4_t
12578 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12579 vcgez_s16 (int16x4_t __a)
12580 {
12581 return (uint16x4_t) (__a >= 0);
12582 }
12583
12584 __extension__ extern __inline uint32x2_t
12585 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12586 vcgez_s32 (int32x2_t __a)
12587 {
12588 return (uint32x2_t) (__a >= 0);
12589 }
12590
12591 __extension__ extern __inline uint64x1_t
12592 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12593 vcgez_s64 (int64x1_t __a)
12594 {
12595 return (uint64x1_t) (__a >= __AARCH64_INT64_C (0));
12596 }
12597
12598 __extension__ extern __inline uint32x4_t
12599 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12600 vcgezq_f32 (float32x4_t __a)
12601 {
12602 return (uint32x4_t) (__a >= 0.0f);
12603 }
12604
12605 __extension__ extern __inline uint64x2_t
12606 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12607 vcgezq_f64 (float64x2_t __a)
12608 {
12609 return (uint64x2_t) (__a >= 0.0);
12610 }
12611
12612 __extension__ extern __inline uint8x16_t
12613 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12614 vcgezq_s8 (int8x16_t __a)
12615 {
12616 return (uint8x16_t) (__a >= 0);
12617 }
12618
12619 __extension__ extern __inline uint16x8_t
12620 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12621 vcgezq_s16 (int16x8_t __a)
12622 {
12623 return (uint16x8_t) (__a >= 0);
12624 }
12625
12626 __extension__ extern __inline uint32x4_t
12627 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12628 vcgezq_s32 (int32x4_t __a)
12629 {
12630 return (uint32x4_t) (__a >= 0);
12631 }
12632
12633 __extension__ extern __inline uint64x2_t
12634 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12635 vcgezq_s64 (int64x2_t __a)
12636 {
12637 return (uint64x2_t) (__a >= __AARCH64_INT64_C (0));
12638 }
12639
12640 /* vcgez - scalar. */
12641
12642 __extension__ extern __inline uint32_t
12643 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12644 vcgezs_f32 (float32_t __a)
12645 {
12646 return __a >= 0.0f ? -1 : 0;
12647 }
12648
12649 __extension__ extern __inline uint64_t
12650 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12651 vcgezd_s64 (int64_t __a)
12652 {
12653 return __a >= 0 ? -1ll : 0ll;
12654 }
12655
12656 __extension__ extern __inline uint64_t
12657 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12658 vcgezd_f64 (float64_t __a)
12659 {
12660 return __a >= 0.0 ? -1ll : 0ll;
12661 }
12662
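/* Usage sketch (illustrative, compiled out): the scalar compare forms
   above return an all-ones or all-zero integer mask, which allows
   branch-free selection.  The function name is hypothetical; written as
   user code including <arm_neon.h>.  */
#if 0
#include <arm_neon.h>

/* Keep payload only when x is non-negative, otherwise return 0,
   without a conditional branch.  */
static inline uint64_t
example_keep_if_nonnegative (int64_t x, uint64_t payload)
{
  return payload & vcgezd_s64 (x);   /* vcgezd_s64 yields ~0ull or 0 */
}
#endif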
12663 /* vcgt - vector. */
12664
12665 __extension__ extern __inline uint32x2_t
12666 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12667 vcgt_f32 (float32x2_t __a, float32x2_t __b)
12668 {
12669 return (uint32x2_t) (__a > __b);
12670 }
12671
12672 __extension__ extern __inline uint64x1_t
12673 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12674 vcgt_f64 (float64x1_t __a, float64x1_t __b)
12675 {
12676 return (uint64x1_t) (__a > __b);
12677 }
12678
12679 __extension__ extern __inline uint8x8_t
12680 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12681 vcgt_s8 (int8x8_t __a, int8x8_t __b)
12682 {
12683 return (uint8x8_t) (__a > __b);
12684 }
12685
12686 __extension__ extern __inline uint16x4_t
12687 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12688 vcgt_s16 (int16x4_t __a, int16x4_t __b)
12689 {
12690 return (uint16x4_t) (__a > __b);
12691 }
12692
12693 __extension__ extern __inline uint32x2_t
12694 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12695 vcgt_s32 (int32x2_t __a, int32x2_t __b)
12696 {
12697 return (uint32x2_t) (__a > __b);
12698 }
12699
12700 __extension__ extern __inline uint64x1_t
12701 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12702 vcgt_s64 (int64x1_t __a, int64x1_t __b)
12703 {
12704 return (uint64x1_t) (__a > __b);
12705 }
12706
12707 __extension__ extern __inline uint8x8_t
12708 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12709 vcgt_u8 (uint8x8_t __a, uint8x8_t __b)
12710 {
12711 return (__a > __b);
12712 }
12713
12714 __extension__ extern __inline uint16x4_t
12715 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12716 vcgt_u16 (uint16x4_t __a, uint16x4_t __b)
12717 {
12718 return (__a > __b);
12719 }
12720
12721 __extension__ extern __inline uint32x2_t
12722 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12723 vcgt_u32 (uint32x2_t __a, uint32x2_t __b)
12724 {
12725 return (__a > __b);
12726 }
12727
12728 __extension__ extern __inline uint64x1_t
12729 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12730 vcgt_u64 (uint64x1_t __a, uint64x1_t __b)
12731 {
12732 return (__a > __b);
12733 }
12734
12735 __extension__ extern __inline uint32x4_t
12736 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12737 vcgtq_f32 (float32x4_t __a, float32x4_t __b)
12738 {
12739 return (uint32x4_t) (__a > __b);
12740 }
12741
12742 __extension__ extern __inline uint64x2_t
12743 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12744 vcgtq_f64 (float64x2_t __a, float64x2_t __b)
12745 {
12746 return (uint64x2_t) (__a > __b);
12747 }
12748
12749 __extension__ extern __inline uint8x16_t
12750 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12751 vcgtq_s8 (int8x16_t __a, int8x16_t __b)
12752 {
12753 return (uint8x16_t) (__a > __b);
12754 }
12755
12756 __extension__ extern __inline uint16x8_t
12757 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12758 vcgtq_s16 (int16x8_t __a, int16x8_t __b)
12759 {
12760 return (uint16x8_t) (__a > __b);
12761 }
12762
12763 __extension__ extern __inline uint32x4_t
12764 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12765 vcgtq_s32 (int32x4_t __a, int32x4_t __b)
12766 {
12767 return (uint32x4_t) (__a > __b);
12768 }
12769
12770 __extension__ extern __inline uint64x2_t
12771 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12772 vcgtq_s64 (int64x2_t __a, int64x2_t __b)
12773 {
12774 return (uint64x2_t) (__a > __b);
12775 }
12776
12777 __extension__ extern __inline uint8x16_t
12778 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12779 vcgtq_u8 (uint8x16_t __a, uint8x16_t __b)
12780 {
12781 return (__a > __b);
12782 }
12783
12784 __extension__ extern __inline uint16x8_t
12785 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12786 vcgtq_u16 (uint16x8_t __a, uint16x8_t __b)
12787 {
12788 return (__a > __b);
12789 }
12790
12791 __extension__ extern __inline uint32x4_t
12792 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12793 vcgtq_u32 (uint32x4_t __a, uint32x4_t __b)
12794 {
12795 return (__a > __b);
12796 }
12797
12798 __extension__ extern __inline uint64x2_t
12799 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12800 vcgtq_u64 (uint64x2_t __a, uint64x2_t __b)
12801 {
12802 return (__a > __b);
12803 }
12804
12805 /* vcgt - scalar. */
12806
12807 __extension__ extern __inline uint32_t
12808 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12809 vcgts_f32 (float32_t __a, float32_t __b)
12810 {
12811 return __a > __b ? -1 : 0;
12812 }
12813
12814 __extension__ extern __inline uint64_t
12815 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12816 vcgtd_s64 (int64_t __a, int64_t __b)
12817 {
12818 return __a > __b ? -1ll : 0ll;
12819 }
12820
12821 __extension__ extern __inline uint64_t
12822 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12823 vcgtd_u64 (uint64_t __a, uint64_t __b)
12824 {
12825 return __a > __b ? -1ll : 0ll;
12826 }
12827
12828 __extension__ extern __inline uint64_t
12829 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12830 vcgtd_f64 (float64_t __a, float64_t __b)
12831 {
12832 return __a > __b ? -1ll : 0ll;
12833 }
12834
12835 /* vcgtz - vector. */
12836
12837 __extension__ extern __inline uint32x2_t
12838 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12839 vcgtz_f32 (float32x2_t __a)
12840 {
12841 return (uint32x2_t) (__a > 0.0f);
12842 }
12843
12844 __extension__ extern __inline uint64x1_t
12845 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12846 vcgtz_f64 (float64x1_t __a)
12847 {
12848 return (uint64x1_t) (__a > (float64x1_t) {0.0});
12849 }
12850
12851 __extension__ extern __inline uint8x8_t
12852 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12853 vcgtz_s8 (int8x8_t __a)
12854 {
12855 return (uint8x8_t) (__a > 0);
12856 }
12857
12858 __extension__ extern __inline uint16x4_t
12859 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12860 vcgtz_s16 (int16x4_t __a)
12861 {
12862 return (uint16x4_t) (__a > 0);
12863 }
12864
12865 __extension__ extern __inline uint32x2_t
12866 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12867 vcgtz_s32 (int32x2_t __a)
12868 {
12869 return (uint32x2_t) (__a > 0);
12870 }
12871
12872 __extension__ extern __inline uint64x1_t
12873 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12874 vcgtz_s64 (int64x1_t __a)
12875 {
12876 return (uint64x1_t) (__a > __AARCH64_INT64_C (0));
12877 }
12878
12879 __extension__ extern __inline uint32x4_t
12880 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12881 vcgtzq_f32 (float32x4_t __a)
12882 {
12883 return (uint32x4_t) (__a > 0.0f);
12884 }
12885
12886 __extension__ extern __inline uint64x2_t
12887 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12888 vcgtzq_f64 (float64x2_t __a)
12889 {
12890 return (uint64x2_t) (__a > 0.0);
12891 }
12892
12893 __extension__ extern __inline uint8x16_t
12894 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12895 vcgtzq_s8 (int8x16_t __a)
12896 {
12897 return (uint8x16_t) (__a > 0);
12898 }
12899
12900 __extension__ extern __inline uint16x8_t
12901 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12902 vcgtzq_s16 (int16x8_t __a)
12903 {
12904 return (uint16x8_t) (__a > 0);
12905 }
12906
12907 __extension__ extern __inline uint32x4_t
12908 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12909 vcgtzq_s32 (int32x4_t __a)
12910 {
12911 return (uint32x4_t) (__a > 0);
12912 }
12913
12914 __extension__ extern __inline uint64x2_t
12915 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12916 vcgtzq_s64 (int64x2_t __a)
12917 {
12918 return (uint64x2_t) (__a > __AARCH64_INT64_C (0));
12919 }
12920
12921 /* vcgtz - scalar. */
12922
12923 __extension__ extern __inline uint32_t
12924 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12925 vcgtzs_f32 (float32_t __a)
12926 {
12927 return __a > 0.0f ? -1 : 0;
12928 }
12929
12930 __extension__ extern __inline uint64_t
12931 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12932 vcgtzd_s64 (int64_t __a)
12933 {
12934 return __a > 0 ? -1ll : 0ll;
12935 }
12936
12937 __extension__ extern __inline uint64_t
12938 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12939 vcgtzd_f64 (float64_t __a)
12940 {
12941 return __a > 0.0 ? -1ll : 0ll;
12942 }
12943
12944 /* vcle - vector. */
12945
12946 __extension__ extern __inline uint32x2_t
12947 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12948 vcle_f32 (float32x2_t __a, float32x2_t __b)
12949 {
12950 return (uint32x2_t) (__a <= __b);
12951 }
12952
12953 __extension__ extern __inline uint64x1_t
12954 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12955 vcle_f64 (float64x1_t __a, float64x1_t __b)
12956 {
12957 return (uint64x1_t) (__a <= __b);
12958 }
12959
12960 __extension__ extern __inline uint8x8_t
12961 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12962 vcle_s8 (int8x8_t __a, int8x8_t __b)
12963 {
12964 return (uint8x8_t) (__a <= __b);
12965 }
12966
12967 __extension__ extern __inline uint16x4_t
12968 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12969 vcle_s16 (int16x4_t __a, int16x4_t __b)
12970 {
12971 return (uint16x4_t) (__a <= __b);
12972 }
12973
12974 __extension__ extern __inline uint32x2_t
12975 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12976 vcle_s32 (int32x2_t __a, int32x2_t __b)
12977 {
12978 return (uint32x2_t) (__a <= __b);
12979 }
12980
12981 __extension__ extern __inline uint64x1_t
12982 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12983 vcle_s64 (int64x1_t __a, int64x1_t __b)
12984 {
12985 return (uint64x1_t) (__a <= __b);
12986 }
12987
12988 __extension__ extern __inline uint8x8_t
12989 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12990 vcle_u8 (uint8x8_t __a, uint8x8_t __b)
12991 {
12992 return (__a <= __b);
12993 }
12994
12995 __extension__ extern __inline uint16x4_t
12996 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
12997 vcle_u16 (uint16x4_t __a, uint16x4_t __b)
12998 {
12999 return (__a <= __b);
13000 }
13001
13002 __extension__ extern __inline uint32x2_t
13003 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13004 vcle_u32 (uint32x2_t __a, uint32x2_t __b)
13005 {
13006 return (__a <= __b);
13007 }
13008
13009 __extension__ extern __inline uint64x1_t
13010 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13011 vcle_u64 (uint64x1_t __a, uint64x1_t __b)
13012 {
13013 return (__a <= __b);
13014 }
13015
13016 __extension__ extern __inline uint32x4_t
13017 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13018 vcleq_f32 (float32x4_t __a, float32x4_t __b)
13019 {
13020 return (uint32x4_t) (__a <= __b);
13021 }
13022
13023 __extension__ extern __inline uint64x2_t
13024 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13025 vcleq_f64 (float64x2_t __a, float64x2_t __b)
13026 {
13027 return (uint64x2_t) (__a <= __b);
13028 }
13029
13030 __extension__ extern __inline uint8x16_t
13031 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13032 vcleq_s8 (int8x16_t __a, int8x16_t __b)
13033 {
13034 return (uint8x16_t) (__a <= __b);
13035 }
13036
13037 __extension__ extern __inline uint16x8_t
13038 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13039 vcleq_s16 (int16x8_t __a, int16x8_t __b)
13040 {
13041 return (uint16x8_t) (__a <= __b);
13042 }
13043
13044 __extension__ extern __inline uint32x4_t
13045 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13046 vcleq_s32 (int32x4_t __a, int32x4_t __b)
13047 {
13048 return (uint32x4_t) (__a <= __b);
13049 }
13050
13051 __extension__ extern __inline uint64x2_t
13052 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13053 vcleq_s64 (int64x2_t __a, int64x2_t __b)
13054 {
13055 return (uint64x2_t) (__a <= __b);
13056 }
13057
13058 __extension__ extern __inline uint8x16_t
13059 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13060 vcleq_u8 (uint8x16_t __a, uint8x16_t __b)
13061 {
13062 return (__a <= __b);
13063 }
13064
13065 __extension__ extern __inline uint16x8_t
13066 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13067 vcleq_u16 (uint16x8_t __a, uint16x8_t __b)
13068 {
13069 return (__a <= __b);
13070 }
13071
13072 __extension__ extern __inline uint32x4_t
13073 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13074 vcleq_u32 (uint32x4_t __a, uint32x4_t __b)
13075 {
13076 return (__a <= __b);
13077 }
13078
13079 __extension__ extern __inline uint64x2_t
13080 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13081 vcleq_u64 (uint64x2_t __a, uint64x2_t __b)
13082 {
13083 return (__a <= __b);
13084 }
13085
13086 /* vcle - scalar. */
13087
13088 __extension__ extern __inline uint32_t
13089 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13090 vcles_f32 (float32_t __a, float32_t __b)
13091 {
13092 return __a <= __b ? -1 : 0;
13093 }
13094
13095 __extension__ extern __inline uint64_t
13096 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13097 vcled_s64 (int64_t __a, int64_t __b)
13098 {
13099 return __a <= __b ? -1ll : 0ll;
13100 }
13101
13102 __extension__ extern __inline uint64_t
13103 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13104 vcled_u64 (uint64_t __a, uint64_t __b)
13105 {
13106 return __a <= __b ? -1ll : 0ll;
13107 }
13108
13109 __extension__ extern __inline uint64_t
13110 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13111 vcled_f64 (float64_t __a, float64_t __b)
13112 {
13113 return __a <= __b ? -1ll : 0ll;
13114 }
13115
13116 /* vclez - vector. */
13117
13118 __extension__ extern __inline uint32x2_t
13119 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13120 vclez_f32 (float32x2_t __a)
13121 {
13122 return (uint32x2_t) (__a <= 0.0f);
13123 }
13124
13125 __extension__ extern __inline uint64x1_t
13126 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13127 vclez_f64 (float64x1_t __a)
13128 {
13129 return (uint64x1_t) (__a <= (float64x1_t) {0.0});
13130 }
13131
13132 __extension__ extern __inline uint8x8_t
13133 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13134 vclez_s8 (int8x8_t __a)
13135 {
13136 return (uint8x8_t) (__a <= 0);
13137 }
13138
13139 __extension__ extern __inline uint16x4_t
13140 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13141 vclez_s16 (int16x4_t __a)
13142 {
13143 return (uint16x4_t) (__a <= 0);
13144 }
13145
13146 __extension__ extern __inline uint32x2_t
13147 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13148 vclez_s32 (int32x2_t __a)
13149 {
13150 return (uint32x2_t) (__a <= 0);
13151 }
13152
13153 __extension__ extern __inline uint64x1_t
13154 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13155 vclez_s64 (int64x1_t __a)
13156 {
13157 return (uint64x1_t) (__a <= __AARCH64_INT64_C (0));
13158 }
13159
13160 __extension__ extern __inline uint32x4_t
13161 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13162 vclezq_f32 (float32x4_t __a)
13163 {
13164 return (uint32x4_t) (__a <= 0.0f);
13165 }
13166
13167 __extension__ extern __inline uint64x2_t
13168 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13169 vclezq_f64 (float64x2_t __a)
13170 {
13171 return (uint64x2_t) (__a <= 0.0);
13172 }
13173
13174 __extension__ extern __inline uint8x16_t
13175 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13176 vclezq_s8 (int8x16_t __a)
13177 {
13178 return (uint8x16_t) (__a <= 0);
13179 }
13180
13181 __extension__ extern __inline uint16x8_t
13182 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13183 vclezq_s16 (int16x8_t __a)
13184 {
13185 return (uint16x8_t) (__a <= 0);
13186 }
13187
13188 __extension__ extern __inline uint32x4_t
13189 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13190 vclezq_s32 (int32x4_t __a)
13191 {
13192 return (uint32x4_t) (__a <= 0);
13193 }
13194
13195 __extension__ extern __inline uint64x2_t
13196 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13197 vclezq_s64 (int64x2_t __a)
13198 {
13199 return (uint64x2_t) (__a <= __AARCH64_INT64_C (0));
13200 }
13201
13202 /* vclez - scalar. */
13203
13204 __extension__ extern __inline uint32_t
13205 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13206 vclezs_f32 (float32_t __a)
13207 {
13208 return __a <= 0.0f ? -1 : 0;
13209 }
13210
13211 __extension__ extern __inline uint64_t
13212 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13213 vclezd_s64 (int64_t __a)
13214 {
13215 return __a <= 0 ? -1ll : 0ll;
13216 }
13217
13218 __extension__ extern __inline uint64_t
13219 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13220 vclezd_f64 (float64_t __a)
13221 {
13222 return __a <= 0.0 ? -1ll : 0ll;
13223 }
13224
13225 /* vclt - vector. */
13226
13227 __extension__ extern __inline uint32x2_t
13228 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13229 vclt_f32 (float32x2_t __a, float32x2_t __b)
13230 {
13231 return (uint32x2_t) (__a < __b);
13232 }
13233
13234 __extension__ extern __inline uint64x1_t
13235 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13236 vclt_f64 (float64x1_t __a, float64x1_t __b)
13237 {
13238 return (uint64x1_t) (__a < __b);
13239 }
13240
13241 __extension__ extern __inline uint8x8_t
13242 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13243 vclt_s8 (int8x8_t __a, int8x8_t __b)
13244 {
13245 return (uint8x8_t) (__a < __b);
13246 }
13247
13248 __extension__ extern __inline uint16x4_t
13249 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13250 vclt_s16 (int16x4_t __a, int16x4_t __b)
13251 {
13252 return (uint16x4_t) (__a < __b);
13253 }
13254
13255 __extension__ extern __inline uint32x2_t
13256 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13257 vclt_s32 (int32x2_t __a, int32x2_t __b)
13258 {
13259 return (uint32x2_t) (__a < __b);
13260 }
13261
13262 __extension__ extern __inline uint64x1_t
13263 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13264 vclt_s64 (int64x1_t __a, int64x1_t __b)
13265 {
13266 return (uint64x1_t) (__a < __b);
13267 }
13268
13269 __extension__ extern __inline uint8x8_t
13270 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13271 vclt_u8 (uint8x8_t __a, uint8x8_t __b)
13272 {
13273 return (__a < __b);
13274 }
13275
13276 __extension__ extern __inline uint16x4_t
13277 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13278 vclt_u16 (uint16x4_t __a, uint16x4_t __b)
13279 {
13280 return (__a < __b);
13281 }
13282
13283 __extension__ extern __inline uint32x2_t
13284 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13285 vclt_u32 (uint32x2_t __a, uint32x2_t __b)
13286 {
13287 return (__a < __b);
13288 }
13289
13290 __extension__ extern __inline uint64x1_t
13291 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13292 vclt_u64 (uint64x1_t __a, uint64x1_t __b)
13293 {
13294 return (__a < __b);
13295 }
13296
13297 __extension__ extern __inline uint32x4_t
13298 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13299 vcltq_f32 (float32x4_t __a, float32x4_t __b)
13300 {
13301 return (uint32x4_t) (__a < __b);
13302 }
13303
13304 __extension__ extern __inline uint64x2_t
13305 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13306 vcltq_f64 (float64x2_t __a, float64x2_t __b)
13307 {
13308 return (uint64x2_t) (__a < __b);
13309 }
13310
13311 __extension__ extern __inline uint8x16_t
13312 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13313 vcltq_s8 (int8x16_t __a, int8x16_t __b)
13314 {
13315 return (uint8x16_t) (__a < __b);
13316 }
13317
13318 __extension__ extern __inline uint16x8_t
13319 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13320 vcltq_s16 (int16x8_t __a, int16x8_t __b)
13321 {
13322 return (uint16x8_t) (__a < __b);
13323 }
13324
13325 __extension__ extern __inline uint32x4_t
13326 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13327 vcltq_s32 (int32x4_t __a, int32x4_t __b)
13328 {
13329 return (uint32x4_t) (__a < __b);
13330 }
13331
13332 __extension__ extern __inline uint64x2_t
13333 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13334 vcltq_s64 (int64x2_t __a, int64x2_t __b)
13335 {
13336 return (uint64x2_t) (__a < __b);
13337 }
13338
13339 __extension__ extern __inline uint8x16_t
13340 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13341 vcltq_u8 (uint8x16_t __a, uint8x16_t __b)
13342 {
13343 return (__a < __b);
13344 }
13345
13346 __extension__ extern __inline uint16x8_t
13347 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13348 vcltq_u16 (uint16x8_t __a, uint16x8_t __b)
13349 {
13350 return (__a < __b);
13351 }
13352
13353 __extension__ extern __inline uint32x4_t
13354 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13355 vcltq_u32 (uint32x4_t __a, uint32x4_t __b)
13356 {
13357 return (__a < __b);
13358 }
13359
13360 __extension__ extern __inline uint64x2_t
13361 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13362 vcltq_u64 (uint64x2_t __a, uint64x2_t __b)
13363 {
13364 return (__a < __b);
13365 }
13366
13367 /* vclt - scalar. */
13368
13369 __extension__ extern __inline uint32_t
13370 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13371 vclts_f32 (float32_t __a, float32_t __b)
13372 {
13373 return __a < __b ? -1 : 0;
13374 }
13375
13376 __extension__ extern __inline uint64_t
13377 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13378 vcltd_s64 (int64_t __a, int64_t __b)
13379 {
13380 return __a < __b ? -1ll : 0ll;
13381 }
13382
13383 __extension__ extern __inline uint64_t
13384 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13385 vcltd_u64 (uint64_t __a, uint64_t __b)
13386 {
13387 return __a < __b ? -1ll : 0ll;
13388 }
13389
13390 __extension__ extern __inline uint64_t
13391 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13392 vcltd_f64 (float64_t __a, float64_t __b)
13393 {
13394 return __a < __b ? -1ll : 0ll;
13395 }
13396
13397 /* vcltz - vector. */
13398
13399 __extension__ extern __inline uint32x2_t
13400 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13401 vcltz_f32 (float32x2_t __a)
13402 {
13403 return (uint32x2_t) (__a < 0.0f);
13404 }
13405
13406 __extension__ extern __inline uint64x1_t
13407 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13408 vcltz_f64 (float64x1_t __a)
13409 {
13410 return (uint64x1_t) (__a < (float64x1_t) {0.0});
13411 }
13412
13413 __extension__ extern __inline uint8x8_t
13414 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13415 vcltz_s8 (int8x8_t __a)
13416 {
13417 return (uint8x8_t) (__a < 0);
13418 }
13419
13420 __extension__ extern __inline uint16x4_t
13421 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13422 vcltz_s16 (int16x4_t __a)
13423 {
13424 return (uint16x4_t) (__a < 0);
13425 }
13426
13427 __extension__ extern __inline uint32x2_t
13428 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13429 vcltz_s32 (int32x2_t __a)
13430 {
13431 return (uint32x2_t) (__a < 0);
13432 }
13433
13434 __extension__ extern __inline uint64x1_t
13435 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13436 vcltz_s64 (int64x1_t __a)
13437 {
13438 return (uint64x1_t) (__a < __AARCH64_INT64_C (0));
13439 }
13440
13441 __extension__ extern __inline uint32x4_t
13442 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13443 vcltzq_f32 (float32x4_t __a)
13444 {
13445 return (uint32x4_t) (__a < 0.0f);
13446 }
13447
13448 __extension__ extern __inline uint64x2_t
13449 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13450 vcltzq_f64 (float64x2_t __a)
13451 {
13452 return (uint64x2_t) (__a < 0.0);
13453 }
13454
13455 __extension__ extern __inline uint8x16_t
13456 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13457 vcltzq_s8 (int8x16_t __a)
13458 {
13459 return (uint8x16_t) (__a < 0);
13460 }
13461
13462 __extension__ extern __inline uint16x8_t
13463 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13464 vcltzq_s16 (int16x8_t __a)
13465 {
13466 return (uint16x8_t) (__a < 0);
13467 }
13468
13469 __extension__ extern __inline uint32x4_t
13470 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13471 vcltzq_s32 (int32x4_t __a)
13472 {
13473 return (uint32x4_t) (__a < 0);
13474 }
13475
13476 __extension__ extern __inline uint64x2_t
13477 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13478 vcltzq_s64 (int64x2_t __a)
13479 {
13480 return (uint64x2_t) (__a < __AARCH64_INT64_C (0));
13481 }
13482
13483 /* vcltz - scalar. */
13484
13485 __extension__ extern __inline uint32_t
13486 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13487 vcltzs_f32 (float32_t __a)
13488 {
13489 return __a < 0.0f ? -1 : 0;
13490 }
13491
13492 __extension__ extern __inline uint64_t
13493 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13494 vcltzd_s64 (int64_t __a)
13495 {
13496 return __a < 0 ? -1ll : 0ll;
13497 }
13498
13499 __extension__ extern __inline uint64_t
13500 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13501 vcltzd_f64 (float64_t __a)
13502 {
13503 return __a < 0.0 ? -1ll : 0ll;
13504 }
13505
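/* Usage sketch (illustrative, compiled out): the compare-against-zero
   intrinsics above produce per-lane all-ones masks, so simple lane
   statistics fall out of a shift and a horizontal add.  The function
   name is hypothetical; written as user code including <arm_neon.h>.  */
#if 0
#include <arm_neon.h>

/* Count how many lanes of x are strictly negative.  */
static inline unsigned int
example_count_negative_s32 (int32x4_t x)
{
  uint32x4_t ones = vshrq_n_u32 (vcltzq_s32 (x), 31);  /* 1 per negative lane */
  return vaddvq_u32 (ones);                            /* add across lanes */
}
#endif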
13506 /* vcls - count leading sign bits. */
13507
13508 __extension__ extern __inline int8x8_t
13509 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13510 vcls_s8 (int8x8_t __a)
13511 {
13512 return __builtin_aarch64_clrsbv8qi (__a);
13513 }
13514
13515 __extension__ extern __inline int16x4_t
13516 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13517 vcls_s16 (int16x4_t __a)
13518 {
13519 return __builtin_aarch64_clrsbv4hi (__a);
13520 }
13521
13522 __extension__ extern __inline int32x2_t
13523 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13524 vcls_s32 (int32x2_t __a)
13525 {
13526 return __builtin_aarch64_clrsbv2si (__a);
13527 }
13528
13529 __extension__ extern __inline int8x16_t
13530 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13531 vclsq_s8 (int8x16_t __a)
13532 {
13533 return __builtin_aarch64_clrsbv16qi (__a);
13534 }
13535
13536 __extension__ extern __inline int16x8_t
13537 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13538 vclsq_s16 (int16x8_t __a)
13539 {
13540 return __builtin_aarch64_clrsbv8hi (__a);
13541 }
13542
13543 __extension__ extern __inline int32x4_t
13544 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13545 vclsq_s32 (int32x4_t __a)
13546 {
13547 return __builtin_aarch64_clrsbv4si (__a);
13548 }
13549
13550 __extension__ extern __inline int8x8_t
13551 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13552 vcls_u8 (uint8x8_t __a)
13553 {
13554 return __builtin_aarch64_clrsbv8qi ((int8x8_t) __a);
13555 }
13556
13557 __extension__ extern __inline int16x4_t
13558 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13559 vcls_u16 (uint16x4_t __a)
13560 {
13561 return __builtin_aarch64_clrsbv4hi ((int16x4_t) __a);
13562 }
13563
13564 __extension__ extern __inline int32x2_t
13565 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13566 vcls_u32 (uint32x2_t __a)
13567 {
13568 return __builtin_aarch64_clrsbv2si ((int32x2_t) __a);
13569 }
13570
13571 __extension__ extern __inline int8x16_t
13572 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13573 vclsq_u8 (uint8x16_t __a)
13574 {
13575 return __builtin_aarch64_clrsbv16qi ((int8x16_t) __a);
13576 }
13577
13578 __extension__ extern __inline int16x8_t
13579 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13580 vclsq_u16 (uint16x8_t __a)
13581 {
13582 return __builtin_aarch64_clrsbv8hi ((int16x8_t) __a);
13583 }
13584
13585 __extension__ extern __inline int32x4_t
13586 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13587 vclsq_u32 (uint32x4_t __a)
13588 {
13589 return __builtin_aarch64_clrsbv4si ((int32x4_t) __a);
13590 }
13591
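/* Usage sketch (illustrative, compiled out): vcls reports, per lane, how
   many bits below the sign bit merely repeat it, i.e. how far the value
   can be shifted left without changing its sign.  The function name is
   hypothetical; written as user code including <arm_neon.h>.  */
#if 0
#include <arm_neon.h>

/* Normalise each signed byte by shifting out its redundant sign bits
   (e.g. 1 -> 64, -1 -> -128, 0x40 stays 0x40).  */
static inline int8x8_t
example_normalise_s8 (int8x8_t x)
{
  int8x8_t redundant = vcls_s8 (x);   /* 0..7 redundant sign bits per lane */
  return vshl_s8 (x, redundant);      /* per-lane left shift */
}
#endif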
13592 /* vclz - count leading zero bits. */
13593
13594 __extension__ extern __inline int8x8_t
13595 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13596 vclz_s8 (int8x8_t __a)
13597 {
13598 return __builtin_aarch64_clzv8qi (__a);
13599 }
13600
13601 __extension__ extern __inline int16x4_t
13602 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13603 vclz_s16 (int16x4_t __a)
13604 {
13605 return __builtin_aarch64_clzv4hi (__a);
13606 }
13607
13608 __extension__ extern __inline int32x2_t
13609 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13610 vclz_s32 (int32x2_t __a)
13611 {
13612 return __builtin_aarch64_clzv2si (__a);
13613 }
13614
13615 __extension__ extern __inline uint8x8_t
13616 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13617 vclz_u8 (uint8x8_t __a)
13618 {
13619 return (uint8x8_t)__builtin_aarch64_clzv8qi ((int8x8_t)__a);
13620 }
13621
13622 __extension__ extern __inline uint16x4_t
13623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13624 vclz_u16 (uint16x4_t __a)
13625 {
13626 return (uint16x4_t)__builtin_aarch64_clzv4hi ((int16x4_t)__a);
13627 }
13628
13629 __extension__ extern __inline uint32x2_t
13630 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13631 vclz_u32 (uint32x2_t __a)
13632 {
13633 return (uint32x2_t)__builtin_aarch64_clzv2si ((int32x2_t)__a);
13634 }
13635
13636 __extension__ extern __inline int8x16_t
13637 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13638 vclzq_s8 (int8x16_t __a)
13639 {
13640 return __builtin_aarch64_clzv16qi (__a);
13641 }
13642
13643 __extension__ extern __inline int16x8_t
13644 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13645 vclzq_s16 (int16x8_t __a)
13646 {
13647 return __builtin_aarch64_clzv8hi (__a);
13648 }
13649
13650 __extension__ extern __inline int32x4_t
13651 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13652 vclzq_s32 (int32x4_t __a)
13653 {
13654 return __builtin_aarch64_clzv4si (__a);
13655 }
13656
13657 __extension__ extern __inline uint8x16_t
13658 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13659 vclzq_u8 (uint8x16_t __a)
13660 {
13661 return (uint8x16_t)__builtin_aarch64_clzv16qi ((int8x16_t)__a);
13662 }
13663
13664 __extension__ extern __inline uint16x8_t
13665 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13666 vclzq_u16 (uint16x8_t __a)
13667 {
13668 return (uint16x8_t)__builtin_aarch64_clzv8hi ((int16x8_t)__a);
13669 }
13670
13671 __extension__ extern __inline uint32x4_t
13672 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13673 vclzq_u32 (uint32x4_t __a)
13674 {
13675 return (uint32x4_t)__builtin_aarch64_clzv4si ((int32x4_t)__a);
13676 }
13677
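/* Usage sketch (illustrative, compiled out): a common use of vclz is a
   vectorised floor(log2(x)), computed as 31 - clz(x) for non-zero lanes.
   The function name is hypothetical; written as user code including
   <arm_neon.h>.  */
#if 0
#include <arm_neon.h>

/* Floor of log2 for each lane; lanes equal to zero wrap around, just as
   the scalar formula would.  */
static inline uint32x2_t
example_ilog2_u32 (uint32x2_t x)
{
  return vsub_u32 (vdup_n_u32 (31), vclz_u32 (x));
}
#endif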
13678 /* vcnt - population count per byte. */
13679
13680 __extension__ extern __inline poly8x8_t
13681 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13682 vcnt_p8 (poly8x8_t __a)
13683 {
13684 return (poly8x8_t) __builtin_aarch64_popcountv8qi ((int8x8_t) __a);
13685 }
13686
13687 __extension__ extern __inline int8x8_t
13688 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13689 vcnt_s8 (int8x8_t __a)
13690 {
13691 return __builtin_aarch64_popcountv8qi (__a);
13692 }
13693
13694 __extension__ extern __inline uint8x8_t
13695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13696 vcnt_u8 (uint8x8_t __a)
13697 {
13698 return (uint8x8_t) __builtin_aarch64_popcountv8qi ((int8x8_t) __a);
13699 }
13700
13701 __extension__ extern __inline poly8x16_t
13702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13703 vcntq_p8 (poly8x16_t __a)
13704 {
13705 return (poly8x16_t) __builtin_aarch64_popcountv16qi ((int8x16_t) __a);
13706 }
13707
13708 __extension__ extern __inline int8x16_t
13709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13710 vcntq_s8 (int8x16_t __a)
13711 {
13712 return __builtin_aarch64_popcountv16qi (__a);
13713 }
13714
13715 __extension__ extern __inline uint8x16_t
13716 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13717 vcntq_u8 (uint8x16_t __a)
13718 {
13719 return (uint8x16_t) __builtin_aarch64_popcountv16qi ((int8x16_t) __a);
13720 }
13721
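/* Usage sketch (illustrative, compiled out): vcnt counts set bits within
   each byte, so a full 64-bit population count is one vcnt plus one
   horizontal add.  The function name is hypothetical; written as user
   code including <arm_neon.h>.  */
#if 0
#include <arm_neon.h>

/* Population count of a 64-bit value via the vector unit.  */
static inline unsigned int
example_popcount_u64 (uint64_t x)
{
  uint8x8_t per_byte = vcnt_u8 (vcreate_u8 (x));  /* 0..8 set bits per byte */
  return vaddv_u8 (per_byte);                     /* sum of eight bytes, <= 64 */
}
#endif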
13722 /* vcopy_lane. */
13723
13724 __extension__ extern __inline float32x2_t
13725 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13726 vcopy_lane_f32 (float32x2_t __a, const int __lane1,
13727 float32x2_t __b, const int __lane2)
13728 {
13729 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13730 __a, __lane1);
13731 }
13732
13733 __extension__ extern __inline float64x1_t
13734 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13735 vcopy_lane_f64 (float64x1_t __a, const int __lane1,
13736 float64x1_t __b, const int __lane2)
13737 {
13738 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13739 __a, __lane1);
13740 }
13741
13742 __extension__ extern __inline poly8x8_t
13743 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13744 vcopy_lane_p8 (poly8x8_t __a, const int __lane1,
13745 poly8x8_t __b, const int __lane2)
13746 {
13747 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13748 __a, __lane1);
13749 }
13750
13751 __extension__ extern __inline poly16x4_t
13752 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13753 vcopy_lane_p16 (poly16x4_t __a, const int __lane1,
13754 poly16x4_t __b, const int __lane2)
13755 {
13756 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13757 __a, __lane1);
13758 }
13759
13760 __extension__ extern __inline poly64x1_t
13761 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13762 vcopy_lane_p64 (poly64x1_t __a, const int __lane1,
13763 poly64x1_t __b, const int __lane2)
13764 {
13765 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13766 __a, __lane1);
13767 }
13768
13769 __extension__ extern __inline int8x8_t
13770 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13771 vcopy_lane_s8 (int8x8_t __a, const int __lane1,
13772 int8x8_t __b, const int __lane2)
13773 {
13774 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13775 __a, __lane1);
13776 }
13777
13778 __extension__ extern __inline int16x4_t
13779 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13780 vcopy_lane_s16 (int16x4_t __a, const int __lane1,
13781 int16x4_t __b, const int __lane2)
13782 {
13783 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13784 __a, __lane1);
13785 }
13786
13787 __extension__ extern __inline int32x2_t
13788 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13789 vcopy_lane_s32 (int32x2_t __a, const int __lane1,
13790 int32x2_t __b, const int __lane2)
13791 {
13792 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13793 __a, __lane1);
13794 }
13795
13796 __extension__ extern __inline int64x1_t
13797 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13798 vcopy_lane_s64 (int64x1_t __a, const int __lane1,
13799 int64x1_t __b, const int __lane2)
13800 {
13801 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13802 __a, __lane1);
13803 }
13804
13805 __extension__ extern __inline uint8x8_t
13806 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13807 vcopy_lane_u8 (uint8x8_t __a, const int __lane1,
13808 uint8x8_t __b, const int __lane2)
13809 {
13810 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13811 __a, __lane1);
13812 }
13813
13814 __extension__ extern __inline uint16x4_t
13815 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13816 vcopy_lane_u16 (uint16x4_t __a, const int __lane1,
13817 uint16x4_t __b, const int __lane2)
13818 {
13819 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13820 __a, __lane1);
13821 }
13822
13823 __extension__ extern __inline uint32x2_t
13824 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13825 vcopy_lane_u32 (uint32x2_t __a, const int __lane1,
13826 uint32x2_t __b, const int __lane2)
13827 {
13828 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13829 __a, __lane1);
13830 }
13831
13832 __extension__ extern __inline uint64x1_t
13833 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13834 vcopy_lane_u64 (uint64x1_t __a, const int __lane1,
13835 uint64x1_t __b, const int __lane2)
13836 {
13837 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13838 __a, __lane1);
13839 }
13840
13841 /* vcopy_laneq. */
13842
13843 __extension__ extern __inline float32x2_t
13844 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13845 vcopy_laneq_f32 (float32x2_t __a, const int __lane1,
13846 float32x4_t __b, const int __lane2)
13847 {
13848 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13849 __a, __lane1);
13850 }
13851
13852 __extension__ extern __inline float64x1_t
13853 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13854 vcopy_laneq_f64 (float64x1_t __a, const int __lane1,
13855 float64x2_t __b, const int __lane2)
13856 {
13857 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13858 __a, __lane1);
13859 }
13860
13861 __extension__ extern __inline poly8x8_t
13862 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13863 vcopy_laneq_p8 (poly8x8_t __a, const int __lane1,
13864 poly8x16_t __b, const int __lane2)
13865 {
13866 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13867 __a, __lane1);
13868 }
13869
13870 __extension__ extern __inline poly16x4_t
13871 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13872 vcopy_laneq_p16 (poly16x4_t __a, const int __lane1,
13873 poly16x8_t __b, const int __lane2)
13874 {
13875 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13876 __a, __lane1);
13877 }
13878
13879 __extension__ extern __inline poly64x1_t
13880 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13881 vcopy_laneq_p64 (poly64x1_t __a, const int __lane1,
13882 poly64x2_t __b, const int __lane2)
13883 {
13884 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13885 __a, __lane1);
13886 }
13887
13888 __extension__ extern __inline int8x8_t
13889 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13890 vcopy_laneq_s8 (int8x8_t __a, const int __lane1,
13891 int8x16_t __b, const int __lane2)
13892 {
13893 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13894 __a, __lane1);
13895 }
13896
13897 __extension__ extern __inline int16x4_t
13898 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13899 vcopy_laneq_s16 (int16x4_t __a, const int __lane1,
13900 int16x8_t __b, const int __lane2)
13901 {
13902 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13903 __a, __lane1);
13904 }
13905
13906 __extension__ extern __inline int32x2_t
13907 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13908 vcopy_laneq_s32 (int32x2_t __a, const int __lane1,
13909 int32x4_t __b, const int __lane2)
13910 {
13911 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13912 __a, __lane1);
13913 }
13914
13915 __extension__ extern __inline int64x1_t
13916 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13917 vcopy_laneq_s64 (int64x1_t __a, const int __lane1,
13918 int64x2_t __b, const int __lane2)
13919 {
13920 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13921 __a, __lane1);
13922 }
13923
13924 __extension__ extern __inline uint8x8_t
13925 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13926 vcopy_laneq_u8 (uint8x8_t __a, const int __lane1,
13927 uint8x16_t __b, const int __lane2)
13928 {
13929 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13930 __a, __lane1);
13931 }
13932
13933 __extension__ extern __inline uint16x4_t
13934 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13935 vcopy_laneq_u16 (uint16x4_t __a, const int __lane1,
13936 uint16x8_t __b, const int __lane2)
13937 {
13938 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13939 __a, __lane1);
13940 }
13941
13942 __extension__ extern __inline uint32x2_t
13943 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13944 vcopy_laneq_u32 (uint32x2_t __a, const int __lane1,
13945 uint32x4_t __b, const int __lane2)
13946 {
13947 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13948 __a, __lane1);
13949 }
13950
13951 __extension__ extern __inline uint64x1_t
13952 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13953 vcopy_laneq_u64 (uint64x1_t __a, const int __lane1,
13954 uint64x2_t __b, const int __lane2)
13955 {
13956 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13957 __a, __lane1);
13958 }
13959
13960 /* vcopyq_lane. */
13961
13962 __extension__ extern __inline float32x4_t
13963 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13964 vcopyq_lane_f32 (float32x4_t __a, const int __lane1,
13965 float32x2_t __b, const int __lane2)
13966 {
13967 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13968 __a, __lane1);
13969 }
13970
13971 __extension__ extern __inline float64x2_t
13972 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13973 vcopyq_lane_f64 (float64x2_t __a, const int __lane1,
13974 float64x1_t __b, const int __lane2)
13975 {
13976 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13977 __a, __lane1);
13978 }
13979
13980 __extension__ extern __inline poly8x16_t
13981 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13982 vcopyq_lane_p8 (poly8x16_t __a, const int __lane1,
13983 poly8x8_t __b, const int __lane2)
13984 {
13985 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13986 __a, __lane1);
13987 }
13988
13989 __extension__ extern __inline poly16x8_t
13990 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
13991 vcopyq_lane_p16 (poly16x8_t __a, const int __lane1,
13992 poly16x4_t __b, const int __lane2)
13993 {
13994 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
13995 __a, __lane1);
13996 }
13997
13998 __extension__ extern __inline poly64x2_t
13999 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14000 vcopyq_lane_p64 (poly64x2_t __a, const int __lane1,
14001 poly64x1_t __b, const int __lane2)
14002 {
14003 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14004 __a, __lane1);
14005 }
14006
14007 __extension__ extern __inline int8x16_t
14008 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14009 vcopyq_lane_s8 (int8x16_t __a, const int __lane1,
14010 int8x8_t __b, const int __lane2)
14011 {
14012 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14013 __a, __lane1);
14014 }
14015
14016 __extension__ extern __inline int16x8_t
14017 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14018 vcopyq_lane_s16 (int16x8_t __a, const int __lane1,
14019 int16x4_t __b, const int __lane2)
14020 {
14021 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14022 __a, __lane1);
14023 }
14024
14025 __extension__ extern __inline int32x4_t
14026 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14027 vcopyq_lane_s32 (int32x4_t __a, const int __lane1,
14028 int32x2_t __b, const int __lane2)
14029 {
14030 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14031 __a, __lane1);
14032 }
14033
14034 __extension__ extern __inline int64x2_t
14035 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14036 vcopyq_lane_s64 (int64x2_t __a, const int __lane1,
14037 int64x1_t __b, const int __lane2)
14038 {
14039 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14040 __a, __lane1);
14041 }
14042
14043 __extension__ extern __inline uint8x16_t
14044 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14045 vcopyq_lane_u8 (uint8x16_t __a, const int __lane1,
14046 uint8x8_t __b, const int __lane2)
14047 {
14048 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14049 __a, __lane1);
14050 }
14051
14052 __extension__ extern __inline uint16x8_t
14053 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14054 vcopyq_lane_u16 (uint16x8_t __a, const int __lane1,
14055 uint16x4_t __b, const int __lane2)
14056 {
14057 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14058 __a, __lane1);
14059 }
14060
14061 __extension__ extern __inline uint32x4_t
14062 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14063 vcopyq_lane_u32 (uint32x4_t __a, const int __lane1,
14064 uint32x2_t __b, const int __lane2)
14065 {
14066 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14067 __a, __lane1);
14068 }
14069
14070 __extension__ extern __inline uint64x2_t
14071 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14072 vcopyq_lane_u64 (uint64x2_t __a, const int __lane1,
14073 uint64x1_t __b, const int __lane2)
14074 {
14075 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14076 __a, __lane1);
14077 }
14078
14079 /* vcopyq_laneq. */
14080
14081 __extension__ extern __inline float32x4_t
14082 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14083 vcopyq_laneq_f32 (float32x4_t __a, const int __lane1,
14084 float32x4_t __b, const int __lane2)
14085 {
14086 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14087 __a, __lane1);
14088 }
14089
14090 __extension__ extern __inline float64x2_t
14091 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14092 vcopyq_laneq_f64 (float64x2_t __a, const int __lane1,
14093 float64x2_t __b, const int __lane2)
14094 {
14095 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14096 __a, __lane1);
14097 }
14098
14099 __extension__ extern __inline poly8x16_t
14100 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14101 vcopyq_laneq_p8 (poly8x16_t __a, const int __lane1,
14102 poly8x16_t __b, const int __lane2)
14103 {
14104 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14105 __a, __lane1);
14106 }
14107
14108 __extension__ extern __inline poly16x8_t
14109 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14110 vcopyq_laneq_p16 (poly16x8_t __a, const int __lane1,
14111 poly16x8_t __b, const int __lane2)
14112 {
14113 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14114 __a, __lane1);
14115 }
14116
14117 __extension__ extern __inline poly64x2_t
14118 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14119 vcopyq_laneq_p64 (poly64x2_t __a, const int __lane1,
14120 poly64x2_t __b, const int __lane2)
14121 {
14122 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14123 __a, __lane1);
14124 }
14125
14126 __extension__ extern __inline int8x16_t
14127 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14128 vcopyq_laneq_s8 (int8x16_t __a, const int __lane1,
14129 int8x16_t __b, const int __lane2)
14130 {
14131 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14132 __a, __lane1);
14133 }
14134
14135 __extension__ extern __inline int16x8_t
14136 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14137 vcopyq_laneq_s16 (int16x8_t __a, const int __lane1,
14138 int16x8_t __b, const int __lane2)
14139 {
14140 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14141 __a, __lane1);
14142 }
14143
14144 __extension__ extern __inline int32x4_t
14145 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14146 vcopyq_laneq_s32 (int32x4_t __a, const int __lane1,
14147 int32x4_t __b, const int __lane2)
14148 {
14149 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14150 __a, __lane1);
14151 }
14152
14153 __extension__ extern __inline int64x2_t
14154 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14155 vcopyq_laneq_s64 (int64x2_t __a, const int __lane1,
14156 int64x2_t __b, const int __lane2)
14157 {
14158 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14159 __a, __lane1);
14160 }
14161
14162 __extension__ extern __inline uint8x16_t
14163 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14164 vcopyq_laneq_u8 (uint8x16_t __a, const int __lane1,
14165 uint8x16_t __b, const int __lane2)
14166 {
14167 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14168 __a, __lane1);
14169 }
14170
14171 __extension__ extern __inline uint16x8_t
14172 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14173 vcopyq_laneq_u16 (uint16x8_t __a, const int __lane1,
14174 uint16x8_t __b, const int __lane2)
14175 {
14176 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14177 __a, __lane1);
14178 }
14179
14180 __extension__ extern __inline uint32x4_t
14181 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14182 vcopyq_laneq_u32 (uint32x4_t __a, const int __lane1,
14183 uint32x4_t __b, const int __lane2)
14184 {
14185 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14186 __a, __lane1);
14187 }
14188
14189 __extension__ extern __inline uint64x2_t
14190 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14191 vcopyq_laneq_u64 (uint64x2_t __a, const int __lane1,
14192 uint64x2_t __b, const int __lane2)
14193 {
14194 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
14195 __a, __lane1);
14196 }
14197
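/* Usage sketch (illustrative, compiled out): the vcopy family inserts one
   lane of the second vector into a chosen lane of the first, leaving the
   other lanes untouched; both lane numbers must be compile-time constants.
   The function name is hypothetical; written as user code including
   <arm_neon.h>.  */
#if 0
#include <arm_neon.h>

/* Replace lane 0 of acc with lane 3 of src.  */
static inline float32x4_t
example_replace_lane0 (float32x4_t acc, float32x4_t src)
{
  return vcopyq_laneq_f32 (acc, 0, src, 3);
}
#endif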
14198 /* vcvt (floating-point narrowing: f32 -> f16, f64 -> f32). */
14199
14200 __extension__ extern __inline float16x4_t
14201 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14202 vcvt_f16_f32 (float32x4_t __a)
14203 {
14204 return __builtin_aarch64_float_truncate_lo_v4hf (__a);
14205 }
14206
14207 __extension__ extern __inline float16x8_t
14208 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14209 vcvt_high_f16_f32 (float16x4_t __a, float32x4_t __b)
14210 {
14211 return __builtin_aarch64_float_truncate_hi_v8hf (__a, __b);
14212 }
14213
14214 __extension__ extern __inline float32x2_t
14215 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14216 vcvt_f32_f64 (float64x2_t __a)
14217 {
14218 return __builtin_aarch64_float_truncate_lo_v2sf (__a);
14219 }
14220
14221 __extension__ extern __inline float32x4_t
14222 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14223 vcvt_high_f32_f64 (float32x2_t __a, float64x2_t __b)
14224 {
14225 return __builtin_aarch64_float_truncate_hi_v4sf (__a, __b);
14226 }
14227
14228 /* vcvt (floating-point widening: f16 -> f32, f32 -> f64). */
14229
14230 __extension__ extern __inline float32x4_t
14231 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14232 vcvt_f32_f16 (float16x4_t __a)
14233 {
14234 return __builtin_aarch64_float_extend_lo_v4sf (__a);
14235 }
14236
14237 __extension__ extern __inline float64x2_t
14238 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14239 vcvt_f64_f32 (float32x2_t __a)
14240 {
14242 return __builtin_aarch64_float_extend_lo_v2df (__a);
14243 }
14244
14245 __extension__ extern __inline float32x4_t
14246 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14247 vcvt_high_f32_f16 (float16x8_t __a)
14248 {
14249 return __builtin_aarch64_vec_unpacks_hi_v8hf (__a);
14250 }
14251
14252 __extension__ extern __inline float64x2_t
14253 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14254 vcvt_high_f64_f32 (float32x4_t __a)
14255 {
14256 return __builtin_aarch64_vec_unpacks_hi_v4sf (__a);
14257 }
14258
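/* Usage sketch (illustrative, compiled out): the narrowing and widening
   conversions above pair up so that two double-precision vectors can be
   packed into one single-precision vector, and unpacked again with the
   widening high-half forms.  The function name is hypothetical; written
   as user code including <arm_neon.h>.  */
#if 0
#include <arm_neon.h>

/* Pack two float64x2_t vectors into one float32x4_t: the low pair via
   vcvt_f32_f64, the high pair via vcvt_high_f32_f64.  */
static inline float32x4_t
example_pack_f64_to_f32 (float64x2_t lo, float64x2_t hi)
{
  return vcvt_high_f32_f64 (vcvt_f32_f64 (lo), hi);
}
#endif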
14259 /* vcvt (<u>fixed-point -> float). */
14260
14261 __extension__ extern __inline float64_t
14262 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14263 vcvtd_n_f64_s64 (int64_t __a, const int __b)
14264 {
14265 return __builtin_aarch64_scvtfdi (__a, __b);
14266 }
14267
14268 __extension__ extern __inline float64_t
14269 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14270 vcvtd_n_f64_u64 (uint64_t __a, const int __b)
14271 {
14272 return __builtin_aarch64_ucvtfdi_sus (__a, __b);
14273 }
14274
14275 __extension__ extern __inline float32_t
14276 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14277 vcvts_n_f32_s32 (int32_t __a, const int __b)
14278 {
14279 return __builtin_aarch64_scvtfsi (__a, __b);
14280 }
14281
14282 __extension__ extern __inline float32_t
14283 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14284 vcvts_n_f32_u32 (uint32_t __a, const int __b)
14285 {
14286 return __builtin_aarch64_ucvtfsi_sus (__a, __b);
14287 }
14288
14289 __extension__ extern __inline float32x2_t
14290 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14291 vcvt_n_f32_s32 (int32x2_t __a, const int __b)
14292 {
14293 return __builtin_aarch64_scvtfv2si (__a, __b);
14294 }
14295
14296 __extension__ extern __inline float32x2_t
14297 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14298 vcvt_n_f32_u32 (uint32x2_t __a, const int __b)
14299 {
14300 return __builtin_aarch64_ucvtfv2si_sus (__a, __b);
14301 }
14302
14303 __extension__ extern __inline float64x1_t
14304 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14305 vcvt_n_f64_s64 (int64x1_t __a, const int __b)
14306 {
14307 return (float64x1_t)
14308 { __builtin_aarch64_scvtfdi (vget_lane_s64 (__a, 0), __b) };
14309 }
14310
14311 __extension__ extern __inline float64x1_t
14312 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14313 vcvt_n_f64_u64 (uint64x1_t __a, const int __b)
14314 {
14315 return (float64x1_t)
14316 { __builtin_aarch64_ucvtfdi_sus (vget_lane_u64 (__a, 0), __b) };
14317 }
14318
14319 __extension__ extern __inline float32x4_t
14320 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14321 vcvtq_n_f32_s32 (int32x4_t __a, const int __b)
14322 {
14323 return __builtin_aarch64_scvtfv4si (__a, __b);
14324 }
14325
14326 __extension__ extern __inline float32x4_t
14327 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14328 vcvtq_n_f32_u32 (uint32x4_t __a, const int __b)
14329 {
14330 return __builtin_aarch64_ucvtfv4si_sus (__a, __b);
14331 }
14332
14333 __extension__ extern __inline float64x2_t
14334 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14335 vcvtq_n_f64_s64 (int64x2_t __a, const int __b)
14336 {
14337 return __builtin_aarch64_scvtfv2di (__a, __b);
14338 }
14339
14340 __extension__ extern __inline float64x2_t
14341 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14342 vcvtq_n_f64_u64 (uint64x2_t __a, const int __b)
14343 {
14344 return __builtin_aarch64_ucvtfv2di_sus (__a, __b);
14345 }
14346
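/* Editorial usage sketch, not part of the upstream header: the immediate
   in the vcvt*_n_* forms gives the number of fractional bits, so Q16.16
   fixed-point data converts to float with an immediate of 16.  The
   helper name __example_q16_16_to_f32 is hypothetical.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_q16_16_to_f32 (int32x4_t __fixed)
{
  /* Each lane is scaled by 2^-16 as part of the conversion.  */
  return vcvtq_n_f32_s32 (__fixed, 16);
}
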
14347 /* vcvt (float -> <u>fixed-point). */
14348
14349 __extension__ extern __inline int64_t
14350 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14351 vcvtd_n_s64_f64 (float64_t __a, const int __b)
14352 {
14353 return __builtin_aarch64_fcvtzsdf (__a, __b);
14354 }
14355
14356 __extension__ extern __inline uint64_t
14357 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14358 vcvtd_n_u64_f64 (float64_t __a, const int __b)
14359 {
14360 return __builtin_aarch64_fcvtzudf_uss (__a, __b);
14361 }
14362
14363 __extension__ extern __inline int32_t
14364 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14365 vcvts_n_s32_f32 (float32_t __a, const int __b)
14366 {
14367 return __builtin_aarch64_fcvtzssf (__a, __b);
14368 }
14369
14370 __extension__ extern __inline uint32_t
14371 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14372 vcvts_n_u32_f32 (float32_t __a, const int __b)
14373 {
14374 return __builtin_aarch64_fcvtzusf_uss (__a, __b);
14375 }
14376
14377 __extension__ extern __inline int32x2_t
14378 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14379 vcvt_n_s32_f32 (float32x2_t __a, const int __b)
14380 {
14381 return __builtin_aarch64_fcvtzsv2sf (__a, __b);
14382 }
14383
14384 __extension__ extern __inline uint32x2_t
14385 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14386 vcvt_n_u32_f32 (float32x2_t __a, const int __b)
14387 {
14388 return __builtin_aarch64_fcvtzuv2sf_uss (__a, __b);
14389 }
14390
14391 __extension__ extern __inline int64x1_t
14392 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14393 vcvt_n_s64_f64 (float64x1_t __a, const int __b)
14394 {
14395 return (int64x1_t)
14396 { __builtin_aarch64_fcvtzsdf (vget_lane_f64 (__a, 0), __b) };
14397 }
14398
14399 __extension__ extern __inline uint64x1_t
14400 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14401 vcvt_n_u64_f64 (float64x1_t __a, const int __b)
14402 {
14403 return (uint64x1_t)
14404 { __builtin_aarch64_fcvtzudf_uss (vget_lane_f64 (__a, 0), __b) };
14405 }
14406
14407 __extension__ extern __inline int32x4_t
14408 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14409 vcvtq_n_s32_f32 (float32x4_t __a, const int __b)
14410 {
14411 return __builtin_aarch64_fcvtzsv4sf (__a, __b);
14412 }
14413
14414 __extension__ extern __inline uint32x4_t
14415 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14416 vcvtq_n_u32_f32 (float32x4_t __a, const int __b)
14417 {
14418 return __builtin_aarch64_fcvtzuv4sf_uss (__a, __b);
14419 }
14420
14421 __extension__ extern __inline int64x2_t
14422 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14423 vcvtq_n_s64_f64 (float64x2_t __a, const int __b)
14424 {
14425 return __builtin_aarch64_fcvtzsv2df (__a, __b);
14426 }
14427
14428 __extension__ extern __inline uint64x2_t
14429 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14430 vcvtq_n_u64_f64 (float64x2_t __a, const int __b)
14431 {
14432 return __builtin_aarch64_fcvtzuv2df_uss (__a, __b);
14433 }
14434
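/* Editorial usage sketch, not part of the upstream header: the reverse
   direction scales by 2^immediate and truncates towards zero, turning a
   float vector back into Q16.16.  The helper name __example_f32_to_q16_16
   is hypothetical.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_f32_to_q16_16 (float32x4_t __v)
{
  return vcvtq_n_s32_f32 (__v, 16);
}
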
14435 /* vcvt (<u>int -> float). */
14436
14437 __extension__ extern __inline float64_t
14438 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14439 vcvtd_f64_s64 (int64_t __a)
14440 {
14441 return (float64_t) __a;
14442 }
14443
14444 __extension__ extern __inline float64_t
14445 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14446 vcvtd_f64_u64 (uint64_t __a)
14447 {
14448 return (float64_t) __a;
14449 }
14450
14451 __extension__ extern __inline float32_t
14452 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14453 vcvts_f32_s32 (int32_t __a)
14454 {
14455 return (float32_t) __a;
14456 }
14457
14458 __extension__ extern __inline float32_t
14459 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14460 vcvts_f32_u32 (uint32_t __a)
14461 {
14462 return (float32_t) __a;
14463 }
14464
14465 __extension__ extern __inline float32x2_t
14466 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14467 vcvt_f32_s32 (int32x2_t __a)
14468 {
14469 return __builtin_aarch64_floatv2siv2sf (__a);
14470 }
14471
14472 __extension__ extern __inline float32x2_t
14473 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14474 vcvt_f32_u32 (uint32x2_t __a)
14475 {
14476 return __builtin_aarch64_floatunsv2siv2sf ((int32x2_t) __a);
14477 }
14478
14479 __extension__ extern __inline float64x1_t
14480 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14481 vcvt_f64_s64 (int64x1_t __a)
14482 {
14483 return (float64x1_t) { vget_lane_s64 (__a, 0) };
14484 }
14485
14486 __extension__ extern __inline float64x1_t
14487 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14488 vcvt_f64_u64 (uint64x1_t __a)
14489 {
14490 return (float64x1_t) { vget_lane_u64 (__a, 0) };
14491 }
14492
14493 __extension__ extern __inline float32x4_t
14494 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14495 vcvtq_f32_s32 (int32x4_t __a)
14496 {
14497 return __builtin_aarch64_floatv4siv4sf (__a);
14498 }
14499
14500 __extension__ extern __inline float32x4_t
14501 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14502 vcvtq_f32_u32 (uint32x4_t __a)
14503 {
14504 return __builtin_aarch64_floatunsv4siv4sf ((int32x4_t) __a);
14505 }
14506
14507 __extension__ extern __inline float64x2_t
14508 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14509 vcvtq_f64_s64 (int64x2_t __a)
14510 {
14511 return __builtin_aarch64_floatv2div2df (__a);
14512 }
14513
14514 __extension__ extern __inline float64x2_t
14515 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14516 vcvtq_f64_u64 (uint64x2_t __a)
14517 {
14518 return __builtin_aarch64_floatunsv2div2df ((int64x2_t) __a);
14519 }
14520
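/* Editorial usage sketch, not part of the upstream header: a plain
   lane-wise conversion followed by a GNU C vector multiply, mapping
   signed 32-bit lanes into roughly [-1.0, 1.0).  The helper name
   __example_s32_to_unit_f32 is hypothetical.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_s32_to_unit_f32 (int32x4_t __v)
{
  /* 0x1p-31f is 2^-31, the reciprocal of the INT32_MIN magnitude.  */
  const float32x4_t __scale = { 0x1p-31f, 0x1p-31f, 0x1p-31f, 0x1p-31f };
  return vcvtq_f32_s32 (__v) * __scale;
}
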
14521 /* vcvt (float -> <u>int). */
14522
14523 __extension__ extern __inline int64_t
14524 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14525 vcvtd_s64_f64 (float64_t __a)
14526 {
14527 return (int64_t) __a;
14528 }
14529
14530 __extension__ extern __inline uint64_t
14531 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14532 vcvtd_u64_f64 (float64_t __a)
14533 {
14534 return (uint64_t) __a;
14535 }
14536
14537 __extension__ extern __inline int32_t
14538 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14539 vcvts_s32_f32 (float32_t __a)
14540 {
14541 return (int32_t) __a;
14542 }
14543
14544 __extension__ extern __inline uint32_t
14545 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14546 vcvts_u32_f32 (float32_t __a)
14547 {
14548 return (uint32_t) __a;
14549 }
14550
14551 __extension__ extern __inline int32x2_t
14552 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14553 vcvt_s32_f32 (float32x2_t __a)
14554 {
14555 return __builtin_aarch64_lbtruncv2sfv2si (__a);
14556 }
14557
14558 __extension__ extern __inline uint32x2_t
14559 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14560 vcvt_u32_f32 (float32x2_t __a)
14561 {
14562 return __builtin_aarch64_lbtruncuv2sfv2si_us (__a);
14563 }
14564
14565 __extension__ extern __inline int32x4_t
14566 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14567 vcvtq_s32_f32 (float32x4_t __a)
14568 {
14569 return __builtin_aarch64_lbtruncv4sfv4si (__a);
14570 }
14571
14572 __extension__ extern __inline uint32x4_t
14573 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14574 vcvtq_u32_f32 (float32x4_t __a)
14575 {
14576 return __builtin_aarch64_lbtruncuv4sfv4si_us (__a);
14577 }
14578
14579 __extension__ extern __inline int64x1_t
14580 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14581 vcvt_s64_f64 (float64x1_t __a)
14582 {
14583 return (int64x1_t) {vcvtd_s64_f64 (__a[0])};
14584 }
14585
14586 __extension__ extern __inline uint64x1_t
14587 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14588 vcvt_u64_f64 (float64x1_t __a)
14589 {
14590 return (uint64x1_t) {vcvtd_u64_f64 (__a[0])};
14591 }
14592
14593 __extension__ extern __inline int64x2_t
14594 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14595 vcvtq_s64_f64 (float64x2_t __a)
14596 {
14597 return __builtin_aarch64_lbtruncv2dfv2di (__a);
14598 }
14599
14600 __extension__ extern __inline uint64x2_t
14601 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14602 vcvtq_u64_f64 (float64x2_t __a)
14603 {
14604 return __builtin_aarch64_lbtruncuv2dfv2di_us (__a);
14605 }
14606
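/* Editorial usage sketch, not part of the upstream header: like the
   scalar versions above, the vector forms truncate towards zero (the
   behaviour of a C cast); the vcvta/vcvtm/vcvtn/vcvtp families below
   select other rounding modes.  The helper name __example_truncate_f32
   is hypothetical.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_truncate_f32 (float32x4_t __v)
{
  /* For example {1.9f, -1.9f, 0.5f, -0.5f} becomes {1, -1, 0, 0}.  */
  return vcvtq_s32_f32 (__v);
}
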
14607 /* vcvta */
14608
14609 __extension__ extern __inline int64_t
14610 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14611 vcvtad_s64_f64 (float64_t __a)
14612 {
14613 return __builtin_aarch64_lrounddfdi (__a);
14614 }
14615
14616 __extension__ extern __inline uint64_t
14617 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14618 vcvtad_u64_f64 (float64_t __a)
14619 {
14620 return __builtin_aarch64_lroundudfdi_us (__a);
14621 }
14622
14623 __extension__ extern __inline int32_t
14624 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14625 vcvtas_s32_f32 (float32_t __a)
14626 {
14627 return __builtin_aarch64_lroundsfsi (__a);
14628 }
14629
14630 __extension__ extern __inline uint32_t
14631 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14632 vcvtas_u32_f32 (float32_t __a)
14633 {
14634 return __builtin_aarch64_lroundusfsi_us (__a);
14635 }
14636
14637 __extension__ extern __inline int32x2_t
14638 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14639 vcvta_s32_f32 (float32x2_t __a)
14640 {
14641 return __builtin_aarch64_lroundv2sfv2si (__a);
14642 }
14643
14644 __extension__ extern __inline uint32x2_t
14645 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14646 vcvta_u32_f32 (float32x2_t __a)
14647 {
14648 return __builtin_aarch64_lrounduv2sfv2si_us (__a);
14649 }
14650
14651 __extension__ extern __inline int32x4_t
14652 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14653 vcvtaq_s32_f32 (float32x4_t __a)
14654 {
14655 return __builtin_aarch64_lroundv4sfv4si (__a);
14656 }
14657
14658 __extension__ extern __inline uint32x4_t
14659 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14660 vcvtaq_u32_f32 (float32x4_t __a)
14661 {
14662 return __builtin_aarch64_lrounduv4sfv4si_us (__a);
14663 }
14664
14665 __extension__ extern __inline int64x1_t
14666 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14667 vcvta_s64_f64 (float64x1_t __a)
14668 {
14669 return (int64x1_t) {vcvtad_s64_f64 (__a[0])};
14670 }
14671
14672 __extension__ extern __inline uint64x1_t
14673 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14674 vcvta_u64_f64 (float64x1_t __a)
14675 {
14676 return (uint64x1_t) {vcvtad_u64_f64 (__a[0])};
14677 }
14678
14679 __extension__ extern __inline int64x2_t
14680 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14681 vcvtaq_s64_f64 (float64x2_t __a)
14682 {
14683 return __builtin_aarch64_lroundv2dfv2di (__a);
14684 }
14685
14686 __extension__ extern __inline uint64x2_t
14687 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14688 vcvtaq_u64_f64 (float64x2_t __a)
14689 {
14690 return __builtin_aarch64_lrounduv2dfv2di_us (__a);
14691 }
14692
14693 /* vcvtm */
14694
14695 __extension__ extern __inline int64_t
14696 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14697 vcvtmd_s64_f64 (float64_t __a)
14698 {
14699 return __builtin_llfloor (__a);
14700 }
14701
14702 __extension__ extern __inline uint64_t
14703 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14704 vcvtmd_u64_f64 (float64_t __a)
14705 {
14706 return __builtin_aarch64_lfloorudfdi_us (__a);
14707 }
14708
14709 __extension__ extern __inline int32_t
14710 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14711 vcvtms_s32_f32 (float32_t __a)
14712 {
14713 return __builtin_ifloorf (__a);
14714 }
14715
14716 __extension__ extern __inline uint32_t
14717 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14718 vcvtms_u32_f32 (float32_t __a)
14719 {
14720 return __builtin_aarch64_lfloorusfsi_us (__a);
14721 }
14722
14723 __extension__ extern __inline int32x2_t
14724 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14725 vcvtm_s32_f32 (float32x2_t __a)
14726 {
14727 return __builtin_aarch64_lfloorv2sfv2si (__a);
14728 }
14729
14730 __extension__ extern __inline uint32x2_t
14731 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14732 vcvtm_u32_f32 (float32x2_t __a)
14733 {
14734 return __builtin_aarch64_lflooruv2sfv2si_us (__a);
14735 }
14736
14737 __extension__ extern __inline int32x4_t
14738 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14739 vcvtmq_s32_f32 (float32x4_t __a)
14740 {
14741 return __builtin_aarch64_lfloorv4sfv4si (__a);
14742 }
14743
14744 __extension__ extern __inline uint32x4_t
14745 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14746 vcvtmq_u32_f32 (float32x4_t __a)
14747 {
14748 return __builtin_aarch64_lflooruv4sfv4si_us (__a);
14749 }
14750
14751 __extension__ extern __inline int64x1_t
14752 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14753 vcvtm_s64_f64 (float64x1_t __a)
14754 {
14755 return (int64x1_t) {vcvtmd_s64_f64 (__a[0])};
14756 }
14757
14758 __extension__ extern __inline uint64x1_t
14759 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14760 vcvtm_u64_f64 (float64x1_t __a)
14761 {
14762 return (uint64x1_t) {vcvtmd_u64_f64 (__a[0])};
14763 }
14764
14765 __extension__ extern __inline int64x2_t
14766 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14767 vcvtmq_s64_f64 (float64x2_t __a)
14768 {
14769 return __builtin_aarch64_lfloorv2dfv2di (__a);
14770 }
14771
14772 __extension__ extern __inline uint64x2_t
14773 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14774 vcvtmq_u64_f64 (float64x2_t __a)
14775 {
14776 return __builtin_aarch64_lflooruv2dfv2di_us (__a);
14777 }
14778
14779 /* vcvtn */
14780
14781 __extension__ extern __inline int64_t
14782 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14783 vcvtnd_s64_f64 (float64_t __a)
14784 {
14785 return __builtin_aarch64_lfrintndfdi (__a);
14786 }
14787
14788 __extension__ extern __inline uint64_t
14789 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14790 vcvtnd_u64_f64 (float64_t __a)
14791 {
14792 return __builtin_aarch64_lfrintnudfdi_us (__a);
14793 }
14794
14795 __extension__ extern __inline int32_t
14796 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14797 vcvtns_s32_f32 (float32_t __a)
14798 {
14799 return __builtin_aarch64_lfrintnsfsi (__a);
14800 }
14801
14802 __extension__ extern __inline uint32_t
14803 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14804 vcvtns_u32_f32 (float32_t __a)
14805 {
14806 return __builtin_aarch64_lfrintnusfsi_us (__a);
14807 }
14808
14809 __extension__ extern __inline int32x2_t
14810 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14811 vcvtn_s32_f32 (float32x2_t __a)
14812 {
14813 return __builtin_aarch64_lfrintnv2sfv2si (__a);
14814 }
14815
14816 __extension__ extern __inline uint32x2_t
14817 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14818 vcvtn_u32_f32 (float32x2_t __a)
14819 {
14820 return __builtin_aarch64_lfrintnuv2sfv2si_us (__a);
14821 }
14822
14823 __extension__ extern __inline int32x4_t
14824 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14825 vcvtnq_s32_f32 (float32x4_t __a)
14826 {
14827 return __builtin_aarch64_lfrintnv4sfv4si (__a);
14828 }
14829
14830 __extension__ extern __inline uint32x4_t
14831 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14832 vcvtnq_u32_f32 (float32x4_t __a)
14833 {
14834 return __builtin_aarch64_lfrintnuv4sfv4si_us (__a);
14835 }
14836
14837 __extension__ extern __inline int64x1_t
14838 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14839 vcvtn_s64_f64 (float64x1_t __a)
14840 {
14841 return (int64x1_t) {vcvtnd_s64_f64 (__a[0])};
14842 }
14843
14844 __extension__ extern __inline uint64x1_t
14845 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14846 vcvtn_u64_f64 (float64x1_t __a)
14847 {
14848 return (uint64x1_t) {vcvtnd_u64_f64 (__a[0])};
14849 }
14850
14851 __extension__ extern __inline int64x2_t
14852 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14853 vcvtnq_s64_f64 (float64x2_t __a)
14854 {
14855 return __builtin_aarch64_lfrintnv2dfv2di (__a);
14856 }
14857
14858 __extension__ extern __inline uint64x2_t
14859 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14860 vcvtnq_u64_f64 (float64x2_t __a)
14861 {
14862 return __builtin_aarch64_lfrintnuv2dfv2di_us (__a);
14863 }
14864
14865 /* vcvtp */
14866
14867 __extension__ extern __inline int64_t
14868 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14869 vcvtpd_s64_f64 (float64_t __a)
14870 {
14871 return __builtin_llceil (__a);
14872 }
14873
14874 __extension__ extern __inline uint64_t
14875 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14876 vcvtpd_u64_f64 (float64_t __a)
14877 {
14878 return __builtin_aarch64_lceiludfdi_us (__a);
14879 }
14880
14881 __extension__ extern __inline int32_t
14882 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14883 vcvtps_s32_f32 (float32_t __a)
14884 {
14885 return __builtin_iceilf (__a);
14886 }
14887
14888 __extension__ extern __inline uint32_t
14889 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14890 vcvtps_u32_f32 (float32_t __a)
14891 {
14892 return __builtin_aarch64_lceilusfsi_us (__a);
14893 }
14894
14895 __extension__ extern __inline int32x2_t
14896 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14897 vcvtp_s32_f32 (float32x2_t __a)
14898 {
14899 return __builtin_aarch64_lceilv2sfv2si (__a);
14900 }
14901
14902 __extension__ extern __inline uint32x2_t
14903 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14904 vcvtp_u32_f32 (float32x2_t __a)
14905 {
14906 return __builtin_aarch64_lceiluv2sfv2si_us (__a);
14907 }
14908
14909 __extension__ extern __inline int32x4_t
14910 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14911 vcvtpq_s32_f32 (float32x4_t __a)
14912 {
14913 return __builtin_aarch64_lceilv4sfv4si (__a);
14914 }
14915
14916 __extension__ extern __inline uint32x4_t
14917 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14918 vcvtpq_u32_f32 (float32x4_t __a)
14919 {
14920 return __builtin_aarch64_lceiluv4sfv4si_us (__a);
14921 }
14922
14923 __extension__ extern __inline int64x1_t
14924 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14925 vcvtp_s64_f64 (float64x1_t __a)
14926 {
14927 return (int64x1_t) {vcvtpd_s64_f64 (__a[0])};
14928 }
14929
14930 __extension__ extern __inline uint64x1_t
14931 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14932 vcvtp_u64_f64 (float64x1_t __a)
14933 {
14934 return (uint64x1_t) {vcvtpd_u64_f64 (__a[0])};
14935 }
14936
14937 __extension__ extern __inline int64x2_t
14938 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14939 vcvtpq_s64_f64 (float64x2_t __a)
14940 {
14941 return __builtin_aarch64_lceilv2dfv2di (__a);
14942 }
14943
14944 __extension__ extern __inline uint64x2_t
14945 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14946 vcvtpq_u64_f64 (float64x2_t __a)
14947 {
14948 return __builtin_aarch64_lceiluv2dfv2di_us (__a);
14949 }
14950
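/* Editorial usage sketch, not part of the upstream header: the four
   rounding families above differ only in rounding mode -- vcvta* rounds
   to nearest with ties away from zero, vcvtm* towards minus infinity,
   vcvtn* to nearest even and vcvtp* towards plus infinity.  The helper
   name __example_round_nearest_away is hypothetical.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_round_nearest_away (float32x4_t __v)
{
  /* {0.5f, 1.5f, 2.5f, -0.5f} gives {1, 2, 3, -1} here, whereas
     vcvtnq_s32_f32 would give {0, 2, 2, 0} (ties to even).  */
  return vcvtaq_s32_f32 (__v);
}
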
14951 /* vdup_n */
14952
14953 __extension__ extern __inline float16x4_t
14954 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14955 vdup_n_f16 (float16_t __a)
14956 {
14957 return (float16x4_t) {__a, __a, __a, __a};
14958 }
14959
14960 __extension__ extern __inline float32x2_t
14961 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14962 vdup_n_f32 (float32_t __a)
14963 {
14964 return (float32x2_t) {__a, __a};
14965 }
14966
14967 __extension__ extern __inline float64x1_t
14968 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14969 vdup_n_f64 (float64_t __a)
14970 {
14971 return (float64x1_t) {__a};
14972 }
14973
14974 __extension__ extern __inline poly8x8_t
14975 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14976 vdup_n_p8 (poly8_t __a)
14977 {
14978 return (poly8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
14979 }
14980
14981 __extension__ extern __inline poly16x4_t
14982 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14983 vdup_n_p16 (poly16_t __a)
14984 {
14985 return (poly16x4_t) {__a, __a, __a, __a};
14986 }
14987
14988 __extension__ extern __inline poly64x1_t
14989 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14990 vdup_n_p64 (poly64_t __a)
14991 {
14992 return (poly64x1_t) {__a};
14993 }
14994
14995 __extension__ extern __inline int8x8_t
14996 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14997 vdup_n_s8 (int8_t __a)
14998 {
14999 return (int8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
15000 }
15001
15002 __extension__ extern __inline int16x4_t
15003 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15004 vdup_n_s16 (int16_t __a)
15005 {
15006 return (int16x4_t) {__a, __a, __a, __a};
15007 }
15008
15009 __extension__ extern __inline int32x2_t
15010 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15011 vdup_n_s32 (int32_t __a)
15012 {
15013 return (int32x2_t) {__a, __a};
15014 }
15015
15016 __extension__ extern __inline int64x1_t
15017 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15018 vdup_n_s64 (int64_t __a)
15019 {
15020 return (int64x1_t) {__a};
15021 }
15022
15023 __extension__ extern __inline uint8x8_t
15024 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15025 vdup_n_u8 (uint8_t __a)
15026 {
15027 return (uint8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
15028 }
15029
15030 __extension__ extern __inline uint16x4_t
15031 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15032 vdup_n_u16 (uint16_t __a)
15033 {
15034 return (uint16x4_t) {__a, __a, __a, __a};
15035 }
15036
15037 __extension__ extern __inline uint32x2_t
15038 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15039 vdup_n_u32 (uint32_t __a)
15040 {
15041 return (uint32x2_t) {__a, __a};
15042 }
15043
15044 __extension__ extern __inline uint64x1_t
15045 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15046 vdup_n_u64 (uint64_t __a)
15047 {
15048 return (uint64x1_t) {__a};
15049 }
15050
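/* Editorial usage sketch, not part of the upstream header: vdup_n_*
   broadcasts a scalar into every lane, which composes naturally with the
   GNU C vector operators.  The helper name __example_add_scalar_s16 is
   hypothetical.  */
__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_add_scalar_s16 (int16x4_t __v, int16_t __x)
{
  /* Add __x to every lane of __v.  */
  return __v + vdup_n_s16 (__x);
}
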
15051 /* vdupq_n */
15052
15053 __extension__ extern __inline float16x8_t
15054 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15055 vdupq_n_f16 (float16_t __a)
15056 {
15057 return (float16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
15058 }
15059
15060 __extension__ extern __inline float32x4_t
15061 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15062 vdupq_n_f32 (float32_t __a)
15063 {
15064 return (float32x4_t) {__a, __a, __a, __a};
15065 }
15066
15067 __extension__ extern __inline float64x2_t
15068 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15069 vdupq_n_f64 (float64_t __a)
15070 {
15071 return (float64x2_t) {__a, __a};
15072 }
15073
15074 __extension__ extern __inline poly8x16_t
15075 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15076 vdupq_n_p8 (poly8_t __a)
15077 {
15078 return (poly8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a,
15079 __a, __a, __a, __a, __a, __a, __a, __a};
15080 }
15081
15082 __extension__ extern __inline poly16x8_t
15083 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15084 vdupq_n_p16 (poly16_t __a)
15085 {
15086 return (poly16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
15087 }
15088
15089 __extension__ extern __inline poly64x2_t
15090 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15091 vdupq_n_p64 (poly64_t __a)
15092 {
15093 return (poly64x2_t) {__a, __a};
15094 }
15095
15096 __extension__ extern __inline int8x16_t
15097 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15098 vdupq_n_s8 (int8_t __a)
15099 {
15100 return (int8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a,
15101 __a, __a, __a, __a, __a, __a, __a, __a};
15102 }
15103
15104 __extension__ extern __inline int16x8_t
15105 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15106 vdupq_n_s16 (int16_t __a)
15107 {
15108 return (int16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
15109 }
15110
15111 __extension__ extern __inline int32x4_t
15112 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15113 vdupq_n_s32 (int32_t __a)
15114 {
15115 return (int32x4_t) {__a, __a, __a, __a};
15116 }
15117
15118 __extension__ extern __inline int64x2_t
15119 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15120 vdupq_n_s64 (int64_t __a)
15121 {
15122 return (int64x2_t) {__a, __a};
15123 }
15124
15125 __extension__ extern __inline uint8x16_t
15126 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15127 vdupq_n_u8 (uint8_t __a)
15128 {
15129 return (uint8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a,
15130 __a, __a, __a, __a, __a, __a, __a, __a};
15131 }
15132
15133 __extension__ extern __inline uint16x8_t
15134 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15135 vdupq_n_u16 (uint16_t __a)
15136 {
15137 return (uint16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
15138 }
15139
15140 __extension__ extern __inline uint32x4_t
15141 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15142 vdupq_n_u32 (uint32_t __a)
15143 {
15144 return (uint32x4_t) {__a, __a, __a, __a};
15145 }
15146
15147 __extension__ extern __inline uint64x2_t
15148 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15149 vdupq_n_u64 (uint64_t __a)
15150 {
15151 return (uint64x2_t) {__a, __a};
15152 }
15153
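/* Editorial usage sketch, not part of the upstream header: the q-suffixed
   forms broadcast into 128-bit vectors; here a single scale factor is
   applied to four floats at once.  The helper name __example_scale_f32
   is hypothetical.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_scale_f32 (float32x4_t __v, float32_t __scale)
{
  return __v * vdupq_n_f32 (__scale);
}
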
15154 /* vdup_lane */
15155
15156 __extension__ extern __inline float16x4_t
15157 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15158 vdup_lane_f16 (float16x4_t __a, const int __b)
15159 {
15160 return __aarch64_vdup_lane_f16 (__a, __b);
15161 }
15162
15163 __extension__ extern __inline float32x2_t
15164 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15165 vdup_lane_f32 (float32x2_t __a, const int __b)
15166 {
15167 return __aarch64_vdup_lane_f32 (__a, __b);
15168 }
15169
15170 __extension__ extern __inline float64x1_t
15171 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15172 vdup_lane_f64 (float64x1_t __a, const int __b)
15173 {
15174 return __aarch64_vdup_lane_f64 (__a, __b);
15175 }
15176
15177 __extension__ extern __inline poly8x8_t
15178 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15179 vdup_lane_p8 (poly8x8_t __a, const int __b)
15180 {
15181 return __aarch64_vdup_lane_p8 (__a, __b);
15182 }
15183
15184 __extension__ extern __inline poly16x4_t
15185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15186 vdup_lane_p16 (poly16x4_t __a, const int __b)
15187 {
15188 return __aarch64_vdup_lane_p16 (__a, __b);
15189 }
15190
15191 __extension__ extern __inline poly64x1_t
15192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15193 vdup_lane_p64 (poly64x1_t __a, const int __b)
15194 {
15195 return __aarch64_vdup_lane_p64 (__a, __b);
15196 }
15197
15198 __extension__ extern __inline int8x8_t
15199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15200 vdup_lane_s8 (int8x8_t __a, const int __b)
15201 {
15202 return __aarch64_vdup_lane_s8 (__a, __b);
15203 }
15204
15205 __extension__ extern __inline int16x4_t
15206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15207 vdup_lane_s16 (int16x4_t __a, const int __b)
15208 {
15209 return __aarch64_vdup_lane_s16 (__a, __b);
15210 }
15211
15212 __extension__ extern __inline int32x2_t
15213 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15214 vdup_lane_s32 (int32x2_t __a, const int __b)
15215 {
15216 return __aarch64_vdup_lane_s32 (__a, __b);
15217 }
15218
15219 __extension__ extern __inline int64x1_t
15220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15221 vdup_lane_s64 (int64x1_t __a, const int __b)
15222 {
15223 return __aarch64_vdup_lane_s64 (__a, __b);
15224 }
15225
15226 __extension__ extern __inline uint8x8_t
15227 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15228 vdup_lane_u8 (uint8x8_t __a, const int __b)
15229 {
15230 return __aarch64_vdup_lane_u8 (__a, __b);
15231 }
15232
15233 __extension__ extern __inline uint16x4_t
15234 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15235 vdup_lane_u16 (uint16x4_t __a, const int __b)
15236 {
15237 return __aarch64_vdup_lane_u16 (__a, __b);
15238 }
15239
15240 __extension__ extern __inline uint32x2_t
15241 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15242 vdup_lane_u32 (uint32x2_t __a, const int __b)
15243 {
15244 return __aarch64_vdup_lane_u32 (__a, __b);
15245 }
15246
15247 __extension__ extern __inline uint64x1_t
15248 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15249 vdup_lane_u64 (uint64x1_t __a, const int __b)
15250 {
15251 return __aarch64_vdup_lane_u64 (__a, __b);
15252 }
15253
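/* Editorial usage sketch, not part of the upstream header: vdup_lane_*
   takes a constant lane index and broadcasts that lane across the whole
   result, e.g. to splat one coefficient out of a packed pair.  The
   helper name __example_splat_lane1_f32 is hypothetical.  */
__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_splat_lane1_f32 (float32x2_t __coeffs)
{
  /* Result is {__coeffs[1], __coeffs[1]}.  */
  return vdup_lane_f32 (__coeffs, 1);
}
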
15254 /* vdup_laneq */
15255
15256 __extension__ extern __inline float16x4_t
15257 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15258 vdup_laneq_f16 (float16x8_t __a, const int __b)
15259 {
15260 return __aarch64_vdup_laneq_f16 (__a, __b);
15261 }
15262
15263 __extension__ extern __inline float32x2_t
15264 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15265 vdup_laneq_f32 (float32x4_t __a, const int __b)
15266 {
15267 return __aarch64_vdup_laneq_f32 (__a, __b);
15268 }
15269
15270 __extension__ extern __inline float64x1_t
15271 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15272 vdup_laneq_f64 (float64x2_t __a, const int __b)
15273 {
15274 return __aarch64_vdup_laneq_f64 (__a, __b);
15275 }
15276
15277 __extension__ extern __inline poly8x8_t
15278 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15279 vdup_laneq_p8 (poly8x16_t __a, const int __b)
15280 {
15281 return __aarch64_vdup_laneq_p8 (__a, __b);
15282 }
15283
15284 __extension__ extern __inline poly16x4_t
15285 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15286 vdup_laneq_p16 (poly16x8_t __a, const int __b)
15287 {
15288 return __aarch64_vdup_laneq_p16 (__a, __b);
15289 }
15290
15291 __extension__ extern __inline poly64x1_t
15292 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15293 vdup_laneq_p64 (poly64x2_t __a, const int __b)
15294 {
15295 return __aarch64_vdup_laneq_p64 (__a, __b);
15296 }
15297
15298 __extension__ extern __inline int8x8_t
15299 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15300 vdup_laneq_s8 (int8x16_t __a, const int __b)
15301 {
15302 return __aarch64_vdup_laneq_s8 (__a, __b);
15303 }
15304
15305 __extension__ extern __inline int16x4_t
15306 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15307 vdup_laneq_s16 (int16x8_t __a, const int __b)
15308 {
15309 return __aarch64_vdup_laneq_s16 (__a, __b);
15310 }
15311
15312 __extension__ extern __inline int32x2_t
15313 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15314 vdup_laneq_s32 (int32x4_t __a, const int __b)
15315 {
15316 return __aarch64_vdup_laneq_s32 (__a, __b);
15317 }
15318
15319 __extension__ extern __inline int64x1_t
15320 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15321 vdup_laneq_s64 (int64x2_t __a, const int __b)
15322 {
15323 return __aarch64_vdup_laneq_s64 (__a, __b);
15324 }
15325
15326 __extension__ extern __inline uint8x8_t
15327 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15328 vdup_laneq_u8 (uint8x16_t __a, const int __b)
15329 {
15330 return __aarch64_vdup_laneq_u8 (__a, __b);
15331 }
15332
15333 __extension__ extern __inline uint16x4_t
15334 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15335 vdup_laneq_u16 (uint16x8_t __a, const int __b)
15336 {
15337 return __aarch64_vdup_laneq_u16 (__a, __b);
15338 }
15339
15340 __extension__ extern __inline uint32x2_t
15341 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15342 vdup_laneq_u32 (uint32x4_t __a, const int __b)
15343 {
15344 return __aarch64_vdup_laneq_u32 (__a, __b);
15345 }
15346
15347 __extension__ extern __inline uint64x1_t
15348 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15349 vdup_laneq_u64 (uint64x2_t __a, const int __b)
15350 {
15351 return __aarch64_vdup_laneq_u64 (__a, __b);
15352 }
15353
15354 /* vdupq_lane */
15355
15356 __extension__ extern __inline float16x8_t
15357 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15358 vdupq_lane_f16 (float16x4_t __a, const int __b)
15359 {
15360 return __aarch64_vdupq_lane_f16 (__a, __b);
15361 }
15362
15363 __extension__ extern __inline float32x4_t
15364 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15365 vdupq_lane_f32 (float32x2_t __a, const int __b)
15366 {
15367 return __aarch64_vdupq_lane_f32 (__a, __b);
15368 }
15369
15370 __extension__ extern __inline float64x2_t
15371 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15372 vdupq_lane_f64 (float64x1_t __a, const int __b)
15373 {
15374 return __aarch64_vdupq_lane_f64 (__a, __b);
15375 }
15376
15377 __extension__ extern __inline poly8x16_t
15378 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15379 vdupq_lane_p8 (poly8x8_t __a, const int __b)
15380 {
15381 return __aarch64_vdupq_lane_p8 (__a, __b);
15382 }
15383
15384 __extension__ extern __inline poly16x8_t
15385 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15386 vdupq_lane_p16 (poly16x4_t __a, const int __b)
15387 {
15388 return __aarch64_vdupq_lane_p16 (__a, __b);
15389 }
15390
15391 __extension__ extern __inline poly64x2_t
15392 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15393 vdupq_lane_p64 (poly64x1_t __a, const int __b)
15394 {
15395 return __aarch64_vdupq_lane_p64 (__a, __b);
15396 }
15397
15398 __extension__ extern __inline int8x16_t
15399 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15400 vdupq_lane_s8 (int8x8_t __a, const int __b)
15401 {
15402 return __aarch64_vdupq_lane_s8 (__a, __b);
15403 }
15404
15405 __extension__ extern __inline int16x8_t
15406 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15407 vdupq_lane_s16 (int16x4_t __a, const int __b)
15408 {
15409 return __aarch64_vdupq_lane_s16 (__a, __b);
15410 }
15411
15412 __extension__ extern __inline int32x4_t
15413 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15414 vdupq_lane_s32 (int32x2_t __a, const int __b)
15415 {
15416 return __aarch64_vdupq_lane_s32 (__a, __b);
15417 }
15418
15419 __extension__ extern __inline int64x2_t
15420 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15421 vdupq_lane_s64 (int64x1_t __a, const int __b)
15422 {
15423 return __aarch64_vdupq_lane_s64 (__a, __b);
15424 }
15425
15426 __extension__ extern __inline uint8x16_t
15427 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15428 vdupq_lane_u8 (uint8x8_t __a, const int __b)
15429 {
15430 return __aarch64_vdupq_lane_u8 (__a, __b);
15431 }
15432
15433 __extension__ extern __inline uint16x8_t
15434 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15435 vdupq_lane_u16 (uint16x4_t __a, const int __b)
15436 {
15437 return __aarch64_vdupq_lane_u16 (__a, __b);
15438 }
15439
15440 __extension__ extern __inline uint32x4_t
15441 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15442 vdupq_lane_u32 (uint32x2_t __a, const int __b)
15443 {
15444 return __aarch64_vdupq_lane_u32 (__a, __b);
15445 }
15446
15447 __extension__ extern __inline uint64x2_t
15448 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15449 vdupq_lane_u64 (uint64x1_t __a, const int __b)
15450 {
15451 return __aarch64_vdupq_lane_u64 (__a, __b);
15452 }
15453
15454 /* vdupq_laneq */
15455
15456 __extension__ extern __inline float16x8_t
15457 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15458 vdupq_laneq_f16 (float16x8_t __a, const int __b)
15459 {
15460 return __aarch64_vdupq_laneq_f16 (__a, __b);
15461 }
15462
15463 __extension__ extern __inline float32x4_t
15464 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15465 vdupq_laneq_f32 (float32x4_t __a, const int __b)
15466 {
15467 return __aarch64_vdupq_laneq_f32 (__a, __b);
15468 }
15469
15470 __extension__ extern __inline float64x2_t
15471 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15472 vdupq_laneq_f64 (float64x2_t __a, const int __b)
15473 {
15474 return __aarch64_vdupq_laneq_f64 (__a, __b);
15475 }
15476
15477 __extension__ extern __inline poly8x16_t
15478 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15479 vdupq_laneq_p8 (poly8x16_t __a, const int __b)
15480 {
15481 return __aarch64_vdupq_laneq_p8 (__a, __b);
15482 }
15483
15484 __extension__ extern __inline poly16x8_t
15485 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15486 vdupq_laneq_p16 (poly16x8_t __a, const int __b)
15487 {
15488 return __aarch64_vdupq_laneq_p16 (__a, __b);
15489 }
15490
15491 __extension__ extern __inline poly64x2_t
15492 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15493 vdupq_laneq_p64 (poly64x2_t __a, const int __b)
15494 {
15495 return __aarch64_vdupq_laneq_p64 (__a, __b);
15496 }
15497
15498 __extension__ extern __inline int8x16_t
15499 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15500 vdupq_laneq_s8 (int8x16_t __a, const int __b)
15501 {
15502 return __aarch64_vdupq_laneq_s8 (__a, __b);
15503 }
15504
15505 __extension__ extern __inline int16x8_t
15506 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15507 vdupq_laneq_s16 (int16x8_t __a, const int __b)
15508 {
15509 return __aarch64_vdupq_laneq_s16 (__a, __b);
15510 }
15511
15512 __extension__ extern __inline int32x4_t
15513 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15514 vdupq_laneq_s32 (int32x4_t __a, const int __b)
15515 {
15516 return __aarch64_vdupq_laneq_s32 (__a, __b);
15517 }
15518
15519 __extension__ extern __inline int64x2_t
15520 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15521 vdupq_laneq_s64 (int64x2_t __a, const int __b)
15522 {
15523 return __aarch64_vdupq_laneq_s64 (__a, __b);
15524 }
15525
15526 __extension__ extern __inline uint8x16_t
15527 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15528 vdupq_laneq_u8 (uint8x16_t __a, const int __b)
15529 {
15530 return __aarch64_vdupq_laneq_u8 (__a, __b);
15531 }
15532
15533 __extension__ extern __inline uint16x8_t
15534 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15535 vdupq_laneq_u16 (uint16x8_t __a, const int __b)
15536 {
15537 return __aarch64_vdupq_laneq_u16 (__a, __b);
15538 }
15539
15540 __extension__ extern __inline uint32x4_t
15541 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15542 vdupq_laneq_u32 (uint32x4_t __a, const int __b)
15543 {
15544 return __aarch64_vdupq_laneq_u32 (__a, __b);
15545 }
15546
15547 __extension__ extern __inline uint64x2_t
15548 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15549 vdupq_laneq_u64 (uint64x2_t __a, const int __b)
15550 {
15551 return __aarch64_vdupq_laneq_u64 (__a, __b);
15552 }
15553
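/* Editorial usage sketch, not part of the upstream header: in this
   naming scheme an optional "q" after "vdup" selects a 128-bit result
   and "lane"/"laneq" selects a 64-bit or 128-bit source, so
   vdupq_laneq_u32 broadcasts one lane of a 128-bit vector across a
   128-bit result.  The helper name __example_splat_lane3_u32 is
   hypothetical.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_splat_lane3_u32 (uint32x4_t __v)
{
  /* Result is {__v[3], __v[3], __v[3], __v[3]}.  */
  return vdupq_laneq_u32 (__v, 3);
}
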
15554 /* vdupb_lane */
15555 __extension__ extern __inline poly8_t
15556 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15557 vdupb_lane_p8 (poly8x8_t __a, const int __b)
15558 {
15559 return __aarch64_vget_lane_any (__a, __b);
15560 }
15561
15562 __extension__ extern __inline int8_t
15563 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15564 vdupb_lane_s8 (int8x8_t __a, const int __b)
15565 {
15566 return __aarch64_vget_lane_any (__a, __b);
15567 }
15568
15569 __extension__ extern __inline uint8_t
15570 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15571 vdupb_lane_u8 (uint8x8_t __a, const int __b)
15572 {
15573 return __aarch64_vget_lane_any (__a, __b);
15574 }
15575
15576 /* vduph_lane */
15577
15578 __extension__ extern __inline float16_t
15579 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15580 vduph_lane_f16 (float16x4_t __a, const int __b)
15581 {
15582 return __aarch64_vget_lane_any (__a, __b);
15583 }
15584
15585 __extension__ extern __inline poly16_t
15586 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15587 vduph_lane_p16 (poly16x4_t __a, const int __b)
15588 {
15589 return __aarch64_vget_lane_any (__a, __b);
15590 }
15591
15592 __extension__ extern __inline int16_t
15593 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15594 vduph_lane_s16 (int16x4_t __a, const int __b)
15595 {
15596 return __aarch64_vget_lane_any (__a, __b);
15597 }
15598
15599 __extension__ extern __inline uint16_t
15600 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15601 vduph_lane_u16 (uint16x4_t __a, const int __b)
15602 {
15603 return __aarch64_vget_lane_any (__a, __b);
15604 }
15605
15606 /* vdups_lane */
15607
15608 __extension__ extern __inline float32_t
15609 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15610 vdups_lane_f32 (float32x2_t __a, const int __b)
15611 {
15612 return __aarch64_vget_lane_any (__a, __b);
15613 }
15614
15615 __extension__ extern __inline int32_t
15616 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15617 vdups_lane_s32 (int32x2_t __a, const int __b)
15618 {
15619 return __aarch64_vget_lane_any (__a, __b);
15620 }
15621
15622 __extension__ extern __inline uint32_t
15623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15624 vdups_lane_u32 (uint32x2_t __a, const int __b)
15625 {
15626 return __aarch64_vget_lane_any (__a, __b);
15627 }
15628
15629 /* vdupd_lane */
15630 __extension__ extern __inline float64_t
15631 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15632 vdupd_lane_f64 (float64x1_t __a, const int __b)
15633 {
15634 __AARCH64_LANE_CHECK (__a, __b);
15635 return __a[0];
15636 }
15637
15638 __extension__ extern __inline int64_t
15639 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15640 vdupd_lane_s64 (int64x1_t __a, const int __b)
15641 {
15642 __AARCH64_LANE_CHECK (__a, __b);
15643 return __a[0];
15644 }
15645
15646 __extension__ extern __inline uint64_t
15647 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15648 vdupd_lane_u64 (uint64x1_t __a, const int __b)
15649 {
15650 __AARCH64_LANE_CHECK (__a, __b);
15651 return __a[0];
15652 }
15653
15654 /* vdupb_laneq */
15655 __extension__ extern __inline poly8_t
15656 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15657 vdupb_laneq_p8 (poly8x16_t __a, const int __b)
15658 {
15659 return __aarch64_vget_lane_any (__a, __b);
15660 }
15661
15662 __extension__ extern __inline int8_t
15663 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15664 vdupb_laneq_s8 (int8x16_t __a, const int __b)
15665 {
15666 return __aarch64_vget_lane_any (__a, __b);
15667 }
15668
15669 __extension__ extern __inline uint8_t
15670 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15671 vdupb_laneq_u8 (uint8x16_t __a, const int __b)
15672 {
15673 return __aarch64_vget_lane_any (__a, __b);
15674 }
15675
15676 /* vduph_laneq */
15677
15678 __extension__ extern __inline float16_t
15679 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15680 vduph_laneq_f16 (float16x8_t __a, const int __b)
15681 {
15682 return __aarch64_vget_lane_any (__a, __b);
15683 }
15684
15685 __extension__ extern __inline poly16_t
15686 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15687 vduph_laneq_p16 (poly16x8_t __a, const int __b)
15688 {
15689 return __aarch64_vget_lane_any (__a, __b);
15690 }
15691
15692 __extension__ extern __inline int16_t
15693 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15694 vduph_laneq_s16 (int16x8_t __a, const int __b)
15695 {
15696 return __aarch64_vget_lane_any (__a, __b);
15697 }
15698
15699 __extension__ extern __inline uint16_t
15700 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15701 vduph_laneq_u16 (uint16x8_t __a, const int __b)
15702 {
15703 return __aarch64_vget_lane_any (__a, __b);
15704 }
15705
15706 /* vdups_laneq */
15707
15708 __extension__ extern __inline float32_t
15709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15710 vdups_laneq_f32 (float32x4_t __a, const int __b)
15711 {
15712 return __aarch64_vget_lane_any (__a, __b);
15713 }
15714
15715 __extension__ extern __inline int32_t
15716 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15717 vdups_laneq_s32 (int32x4_t __a, const int __b)
15718 {
15719 return __aarch64_vget_lane_any (__a, __b);
15720 }
15721
15722 __extension__ extern __inline uint32_t
15723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15724 vdups_laneq_u32 (uint32x4_t __a, const int __b)
15725 {
15726 return __aarch64_vget_lane_any (__a, __b);
15727 }
15728
15729 /* vdupd_laneq */
15730 __extension__ extern __inline float64_t
15731 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15732 vdupd_laneq_f64 (float64x2_t __a, const int __b)
15733 {
15734 return __aarch64_vget_lane_any (__a, __b);
15735 }
15736
15737 __extension__ extern __inline int64_t
15738 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15739 vdupd_laneq_s64 (int64x2_t __a, const int __b)
15740 {
15741 return __aarch64_vget_lane_any (__a, __b);
15742 }
15743
15744 __extension__ extern __inline uint64_t
15745 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15746 vdupd_laneq_u64 (uint64x2_t __a, const int __b)
15747 {
15748 return __aarch64_vget_lane_any (__a, __b);
15749 }
15750
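/* Editorial usage sketch, not part of the upstream header: the scalar
   vdup<b,h,s,d>_lane and _laneq forms read a single lane out into a
   scalar, which is handy for moving one element of a vector result back
   into scalar code.  The helper name __example_lane0_f32 is
   hypothetical.  */
__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_lane0_f32 (float32x4_t __v)
{
  return vdups_laneq_f32 (__v, 0);
}
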
15751 /* vext */
15752
15753 __extension__ extern __inline float16x4_t
15754 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15755 vext_f16 (float16x4_t __a, float16x4_t __b, __const int __c)
15756 {
15757 __AARCH64_LANE_CHECK (__a, __c);
15758 #ifdef __AARCH64EB__
15759 return __builtin_shuffle (__b, __a,
15760 (uint16x4_t) {4 - __c, 5 - __c, 6 - __c, 7 - __c});
15761 #else
15762 return __builtin_shuffle (__a, __b,
15763 (uint16x4_t) {__c, __c + 1, __c + 2, __c + 3});
15764 #endif
15765 }
15766
15767 __extension__ extern __inline float32x2_t
15768 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15769 vext_f32 (float32x2_t __a, float32x2_t __b, __const int __c)
15770 {
15771 __AARCH64_LANE_CHECK (__a, __c);
15772 #ifdef __AARCH64EB__
15773 return __builtin_shuffle (__b, __a, (uint32x2_t) {2-__c, 3-__c});
15774 #else
15775 return __builtin_shuffle (__a, __b, (uint32x2_t) {__c, __c+1});
15776 #endif
15777 }
15778
15779 __extension__ extern __inline float64x1_t
15780 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15781 vext_f64 (float64x1_t __a, float64x1_t __b, __const int __c)
15782 {
15783 __AARCH64_LANE_CHECK (__a, __c);
15784 /* The only valid index here is 0, so the result is simply __a. */
15785 return __a;
15786 }
15787 __extension__ extern __inline poly8x8_t
15788 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15789 vext_p8 (poly8x8_t __a, poly8x8_t __b, __const int __c)
15790 {
15791 __AARCH64_LANE_CHECK (__a, __c);
15792 #ifdef __AARCH64EB__
15793 return __builtin_shuffle (__b, __a, (uint8x8_t)
15794 {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
15795 #else
15796 return __builtin_shuffle (__a, __b,
15797 (uint8x8_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7});
15798 #endif
15799 }
15800
15801 __extension__ extern __inline poly16x4_t
15802 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15803 vext_p16 (poly16x4_t __a, poly16x4_t __b, __const int __c)
15804 {
15805 __AARCH64_LANE_CHECK (__a, __c);
15806 #ifdef __AARCH64EB__
15807 return __builtin_shuffle (__b, __a,
15808 (uint16x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
15809 #else
15810 return __builtin_shuffle (__a, __b, (uint16x4_t) {__c, __c+1, __c+2, __c+3});
15811 #endif
15812 }
15813
15814 __extension__ extern __inline poly64x1_t
15815 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15816 vext_p64 (poly64x1_t __a, poly64x1_t __b, __const int __c)
15817 {
15818 __AARCH64_LANE_CHECK (__a, __c);
15819 /* The only valid index here is 0, so the result is simply __a. */
15820 return __a;
15821 }
15822
15823 __extension__ extern __inline int8x8_t
15824 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15825 vext_s8 (int8x8_t __a, int8x8_t __b, __const int __c)
15826 {
15827 __AARCH64_LANE_CHECK (__a, __c);
15828 #ifdef __AARCH64EB__
15829 return __builtin_shuffle (__b, __a, (uint8x8_t)
15830 {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
15831 #else
15832 return __builtin_shuffle (__a, __b,
15833 (uint8x8_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7});
15834 #endif
15835 }
15836
15837 __extension__ extern __inline int16x4_t
15838 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15839 vext_s16 (int16x4_t __a, int16x4_t __b, __const int __c)
15840 {
15841 __AARCH64_LANE_CHECK (__a, __c);
15842 #ifdef __AARCH64EB__
15843 return __builtin_shuffle (__b, __a,
15844 (uint16x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
15845 #else
15846 return __builtin_shuffle (__a, __b, (uint16x4_t) {__c, __c+1, __c+2, __c+3});
15847 #endif
15848 }
15849
15850 __extension__ extern __inline int32x2_t
15851 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15852 vext_s32 (int32x2_t __a, int32x2_t __b, __const int __c)
15853 {
15854 __AARCH64_LANE_CHECK (__a, __c);
15855 #ifdef __AARCH64EB__
15856 return __builtin_shuffle (__b, __a, (uint32x2_t) {2-__c, 3-__c});
15857 #else
15858 return __builtin_shuffle (__a, __b, (uint32x2_t) {__c, __c+1});
15859 #endif
15860 }
15861
15862 __extension__ extern __inline int64x1_t
15863 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15864 vext_s64 (int64x1_t __a, int64x1_t __b, __const int __c)
15865 {
15866 __AARCH64_LANE_CHECK (__a, __c);
15867 /* The only valid index here is 0, so the result is simply __a. */
15868 return __a;
15869 }
15870
15871 __extension__ extern __inline uint8x8_t
15872 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15873 vext_u8 (uint8x8_t __a, uint8x8_t __b, __const int __c)
15874 {
15875 __AARCH64_LANE_CHECK (__a, __c);
15876 #ifdef __AARCH64EB__
15877 return __builtin_shuffle (__b, __a, (uint8x8_t)
15878 {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
15879 #else
15880 return __builtin_shuffle (__a, __b,
15881 (uint8x8_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7});
15882 #endif
15883 }
15884
15885 __extension__ extern __inline uint16x4_t
15886 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15887 vext_u16 (uint16x4_t __a, uint16x4_t __b, __const int __c)
15888 {
15889 __AARCH64_LANE_CHECK (__a, __c);
15890 #ifdef __AARCH64EB__
15891 return __builtin_shuffle (__b, __a,
15892 (uint16x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
15893 #else
15894 return __builtin_shuffle (__a, __b, (uint16x4_t) {__c, __c+1, __c+2, __c+3});
15895 #endif
15896 }
15897
15898 __extension__ extern __inline uint32x2_t
15899 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15900 vext_u32 (uint32x2_t __a, uint32x2_t __b, __const int __c)
15901 {
15902 __AARCH64_LANE_CHECK (__a, __c);
15903 #ifdef __AARCH64EB__
15904 return __builtin_shuffle (__b, __a, (uint32x2_t) {2-__c, 3-__c});
15905 #else
15906 return __builtin_shuffle (__a, __b, (uint32x2_t) {__c, __c+1});
15907 #endif
15908 }
15909
15910 __extension__ extern __inline uint64x1_t
15911 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15912 vext_u64 (uint64x1_t __a, uint64x1_t __b, __const int __c)
15913 {
15914 __AARCH64_LANE_CHECK (__a, __c);
15915 /* The only valid index here is 0, so the result is simply __a. */
15916 return __a;
15917 }
15918
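/* Editorial usage sketch, not part of the upstream header: as the
   __builtin_shuffle masks above show, vext concatenates its operands and
   extracts consecutive lanes starting at index __c, which is the usual
   way to build a sliding window over two loaded vectors.  Passing the
   same vector twice rotates it.  The helper name
   __example_rotate_bytes_by3 is hypothetical.  */
__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_rotate_bytes_by3 (uint8x8_t __v)
{
  /* Result is {__v[3], __v[4], ..., __v[7], __v[0], __v[1], __v[2]}.  */
  return vext_u8 (__v, __v, 3);
}
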
15919 __extension__ extern __inline float16x8_t
15920 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15921 vextq_f16 (float16x8_t __a, float16x8_t __b, __const int __c)
15922 {
15923 __AARCH64_LANE_CHECK (__a, __c);
15924 #ifdef __AARCH64EB__
15925 return __builtin_shuffle (__b, __a,
15926 (uint16x8_t) {8 - __c, 9 - __c, 10 - __c, 11 - __c,
15927 12 - __c, 13 - __c, 14 - __c,
15928 15 - __c});
15929 #else
15930 return __builtin_shuffle (__a, __b,
15931 (uint16x8_t) {__c, __c + 1, __c + 2, __c + 3,
15932 __c + 4, __c + 5, __c + 6, __c + 7});
15933 #endif
15934 }
15935
15936 __extension__ extern __inline float32x4_t
15937 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15938 vextq_f32 (float32x4_t __a, float32x4_t __b, __const int __c)
15939 {
15940 __AARCH64_LANE_CHECK (__a, __c);
15941 #ifdef __AARCH64EB__
15942 return __builtin_shuffle (__b, __a,
15943 (uint32x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
15944 #else
15945 return __builtin_shuffle (__a, __b, (uint32x4_t) {__c, __c+1, __c+2, __c+3});
15946 #endif
15947 }
15948
15949 __extension__ extern __inline float64x2_t
15950 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15951 vextq_f64 (float64x2_t __a, float64x2_t __b, __const int __c)
15952 {
15953 __AARCH64_LANE_CHECK (__a, __c);
15954 #ifdef __AARCH64EB__
15955 return __builtin_shuffle (__b, __a, (uint64x2_t) {2-__c, 3-__c});
15956 #else
15957 return __builtin_shuffle (__a, __b, (uint64x2_t) {__c, __c+1});
15958 #endif
15959 }
15960
15961 __extension__ extern __inline poly8x16_t
15962 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15963 vextq_p8 (poly8x16_t __a, poly8x16_t __b, __const int __c)
15964 {
15965 __AARCH64_LANE_CHECK (__a, __c);
15966 #ifdef __AARCH64EB__
15967 return __builtin_shuffle (__b, __a, (uint8x16_t)
15968 {16-__c, 17-__c, 18-__c, 19-__c, 20-__c, 21-__c, 22-__c, 23-__c,
15969 24-__c, 25-__c, 26-__c, 27-__c, 28-__c, 29-__c, 30-__c, 31-__c});
15970 #else
15971 return __builtin_shuffle (__a, __b, (uint8x16_t)
15972 {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
15973 __c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15});
15974 #endif
15975 }
15976
15977 __extension__ extern __inline poly16x8_t
15978 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15979 vextq_p16 (poly16x8_t __a, poly16x8_t __b, __const int __c)
15980 {
15981 __AARCH64_LANE_CHECK (__a, __c);
15982 #ifdef __AARCH64EB__
15983 return __builtin_shuffle (__b, __a, (uint16x8_t)
15984 {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
15985 #else
15986 return __builtin_shuffle (__a, __b,
15987 (uint16x8_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7});
15988 #endif
15989 }
15990
15991 __extension__ extern __inline poly64x2_t
15992 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
15993 vextq_p64 (poly64x2_t __a, poly64x2_t __b, __const int __c)
15994 {
15995 __AARCH64_LANE_CHECK (__a, __c);
15996 #ifdef __AARCH64EB__
15997 return __builtin_shuffle (__b, __a, (uint64x2_t) {2-__c, 3-__c});
15998 #else
15999 return __builtin_shuffle (__a, __b, (uint64x2_t) {__c, __c+1});
16000 #endif
16001 }
16002
16003 __extension__ extern __inline int8x16_t
16004 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16005 vextq_s8 (int8x16_t __a, int8x16_t __b, __const int __c)
16006 {
16007 __AARCH64_LANE_CHECK (__a, __c);
16008 #ifdef __AARCH64EB__
16009 return __builtin_shuffle (__b, __a, (uint8x16_t)
16010 {16-__c, 17-__c, 18-__c, 19-__c, 20-__c, 21-__c, 22-__c, 23-__c,
16011 24-__c, 25-__c, 26-__c, 27-__c, 28-__c, 29-__c, 30-__c, 31-__c});
16012 #else
16013 return __builtin_shuffle (__a, __b, (uint8x16_t)
16014 {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
16015 __c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15});
16016 #endif
16017 }
16018
16019 __extension__ extern __inline int16x8_t
16020 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16021 vextq_s16 (int16x8_t __a, int16x8_t __b, __const int __c)
16022 {
16023 __AARCH64_LANE_CHECK (__a, __c);
16024 #ifdef __AARCH64EB__
16025 return __builtin_shuffle (__b, __a, (uint16x8_t)
16026 {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
16027 #else
16028 return __builtin_shuffle (__a, __b,
16029 (uint16x8_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7});
16030 #endif
16031 }
16032
16033 __extension__ extern __inline int32x4_t
16034 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16035 vextq_s32 (int32x4_t __a, int32x4_t __b, __const int __c)
16036 {
16037 __AARCH64_LANE_CHECK (__a, __c);
16038 #ifdef __AARCH64EB__
16039 return __builtin_shuffle (__b, __a,
16040 (uint32x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
16041 #else
16042 return __builtin_shuffle (__a, __b, (uint32x4_t) {__c, __c+1, __c+2, __c+3});
16043 #endif
16044 }
16045
16046 __extension__ extern __inline int64x2_t
16047 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16048 vextq_s64 (int64x2_t __a, int64x2_t __b, __const int __c)
16049 {
16050 __AARCH64_LANE_CHECK (__a, __c);
16051 #ifdef __AARCH64EB__
16052 return __builtin_shuffle (__b, __a, (uint64x2_t) {2-__c, 3-__c});
16053 #else
16054 return __builtin_shuffle (__a, __b, (uint64x2_t) {__c, __c+1});
16055 #endif
16056 }
16057
16058 __extension__ extern __inline uint8x16_t
16059 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16060 vextq_u8 (uint8x16_t __a, uint8x16_t __b, __const int __c)
16061 {
16062 __AARCH64_LANE_CHECK (__a, __c);
16063 #ifdef __AARCH64EB__
16064 return __builtin_shuffle (__b, __a, (uint8x16_t)
16065 {16-__c, 17-__c, 18-__c, 19-__c, 20-__c, 21-__c, 22-__c, 23-__c,
16066 24-__c, 25-__c, 26-__c, 27-__c, 28-__c, 29-__c, 30-__c, 31-__c});
16067 #else
16068 return __builtin_shuffle (__a, __b, (uint8x16_t)
16069 {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7,
16070 __c+8, __c+9, __c+10, __c+11, __c+12, __c+13, __c+14, __c+15});
16071 #endif
16072 }
16073
16074 __extension__ extern __inline uint16x8_t
16075 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16076 vextq_u16 (uint16x8_t __a, uint16x8_t __b, __const int __c)
16077 {
16078 __AARCH64_LANE_CHECK (__a, __c);
16079 #ifdef __AARCH64EB__
16080 return __builtin_shuffle (__b, __a, (uint16x8_t)
16081 {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
16082 #else
16083 return __builtin_shuffle (__a, __b,
16084 (uint16x8_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7});
16085 #endif
16086 }
16087
16088 __extension__ extern __inline uint32x4_t
16089 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16090 vextq_u32 (uint32x4_t __a, uint32x4_t __b, __const int __c)
16091 {
16092 __AARCH64_LANE_CHECK (__a, __c);
16093 #ifdef __AARCH64EB__
16094 return __builtin_shuffle (__b, __a,
16095 (uint32x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
16096 #else
16097 return __builtin_shuffle (__a, __b, (uint32x4_t) {__c, __c+1, __c+2, __c+3});
16098 #endif
16099 }
16100
16101 __extension__ extern __inline uint64x2_t
16102 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16103 vextq_u64 (uint64x2_t __a, uint64x2_t __b, __const int __c)
16104 {
16105 __AARCH64_LANE_CHECK (__a, __c);
16106 #ifdef __AARCH64EB__
16107 return __builtin_shuffle (__b, __a, (uint64x2_t) {2-__c, 3-__c});
16108 #else
16109 return __builtin_shuffle (__a, __b, (uint64x2_t) {__c, __c+1});
16110 #endif
16111 }
16112
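/* Illustrative usage sketch: vext concatenates two vectors and extracts a
   window of consecutive lanes starting at the constant index, mirroring the
   EXT instruction.  The helper below is hypothetical and not an ACLE
   intrinsic.  */
__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__neon_example_window_u16 (uint16x4_t __lo, uint16x4_t __hi)
{
  /* Result is {__lo[1], __lo[2], __lo[3], __hi[0]}.  */
  return vext_u16 (__lo, __hi, 1);
}
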
16113 /* vfma */
16114
16115 __extension__ extern __inline float64x1_t
16116 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16117 vfma_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c)
16118 {
16119 return (float64x1_t) {__builtin_fma (__b[0], __c[0], __a[0])};
16120 }
16121
16122 __extension__ extern __inline float32x2_t
16123 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16124 vfma_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
16125 {
16126 return __builtin_aarch64_fmav2sf (__b, __c, __a);
16127 }
16128
16129 __extension__ extern __inline float32x4_t
16130 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16131 vfmaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
16132 {
16133 return __builtin_aarch64_fmav4sf (__b, __c, __a);
16134 }
16135
16136 __extension__ extern __inline float64x2_t
16137 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16138 vfmaq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c)
16139 {
16140 return __builtin_aarch64_fmav2df (__b, __c, __a);
16141 }
16142
16143 __extension__ extern __inline float32x2_t
16144 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16145 vfma_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
16146 {
16147 return __builtin_aarch64_fmav2sf (__b, vdup_n_f32 (__c), __a);
16148 }
16149
16150 __extension__ extern __inline float64x1_t
16151 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16152 vfma_n_f64 (float64x1_t __a, float64x1_t __b, float64_t __c)
16153 {
16154 return (float64x1_t) {__b[0] * __c + __a[0]};
16155 }
16156
16157 __extension__ extern __inline float32x4_t
16158 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16159 vfmaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
16160 {
16161 return __builtin_aarch64_fmav4sf (__b, vdupq_n_f32 (__c), __a);
16162 }
16163
16164 __extension__ extern __inline float64x2_t
16165 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16166 vfmaq_n_f64 (float64x2_t __a, float64x2_t __b, float64_t __c)
16167 {
16168 return __builtin_aarch64_fmav2df (__b, vdupq_n_f64 (__c), __a);
16169 }
16170
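/* Illustrative usage sketch: vfma computes a + b * c with a single rounding
   step; the _n forms broadcast a scalar multiplier across all lanes.  The
   helper below is hypothetical and not an ACLE intrinsic.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__neon_example_axpy_f32 (float32x4_t __y, float32x4_t __x, float32_t __a)
{
  /* Return __y + __a * __x in every lane, fused.  */
  return vfmaq_n_f32 (__y, __x, __a);
}
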
16171 /* vfma_lane */
16172
16173 __extension__ extern __inline float32x2_t
16174 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16175 vfma_lane_f32 (float32x2_t __a, float32x2_t __b,
16176 float32x2_t __c, const int __lane)
16177 {
16178 return __builtin_aarch64_fmav2sf (__b,
16179 __aarch64_vdup_lane_f32 (__c, __lane),
16180 __a);
16181 }
16182
16183 __extension__ extern __inline float64x1_t
16184 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16185 vfma_lane_f64 (float64x1_t __a, float64x1_t __b,
16186 float64x1_t __c, const int __lane)
16187 {
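  /* __c has only one lane, so the only valid __lane is 0.  */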
16188 return (float64x1_t) {__builtin_fma (__b[0], __c[0], __a[0])};
16189 }
16190
16191 __extension__ extern __inline float64_t
16192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16193 vfmad_lane_f64 (float64_t __a, float64_t __b,
16194 float64x1_t __c, const int __lane)
16195 {
16196 return __builtin_fma (__b, __c[0], __a);
16197 }
16198
16199 __extension__ extern __inline float32_t
16200 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16201 vfmas_lane_f32 (float32_t __a, float32_t __b,
16202 float32x2_t __c, const int __lane)
16203 {
16204 return __builtin_fmaf (__b, __aarch64_vget_lane_any (__c, __lane), __a);
16205 }
16206
16207 /* vfma_laneq */
16208
16209 __extension__ extern __inline float32x2_t
16210 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16211 vfma_laneq_f32 (float32x2_t __a, float32x2_t __b,
16212 float32x4_t __c, const int __lane)
16213 {
16214 return __builtin_aarch64_fmav2sf (__b,
16215 __aarch64_vdup_laneq_f32 (__c, __lane),
16216 __a);
16217 }
16218
16219 __extension__ extern __inline float64x1_t
16220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16221 vfma_laneq_f64 (float64x1_t __a, float64x1_t __b,
16222 float64x2_t __c, const int __lane)
16223 {
16224 float64_t __c0 = __aarch64_vget_lane_any (__c, __lane);
16225 return (float64x1_t) {__builtin_fma (__b[0], __c0, __a[0])};
16226 }
16227
16228 __extension__ extern __inline float64_t
16229 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16230 vfmad_laneq_f64 (float64_t __a, float64_t __b,
16231 float64x2_t __c, const int __lane)
16232 {
16233 return __builtin_fma (__b, __aarch64_vget_lane_any (__c, __lane), __a);
16234 }
16235
16236 __extension__ extern __inline float32_t
16237 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16238 vfmas_laneq_f32 (float32_t __a, float32_t __b,
16239 float32x4_t __c, const int __lane)
16240 {
16241 return __builtin_fmaf (__b, __aarch64_vget_lane_any (__c, __lane), __a);
16242 }
16243
16244 /* vfmaq_lane */
16245
16246 __extension__ extern __inline float32x4_t
16247 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16248 vfmaq_lane_f32 (float32x4_t __a, float32x4_t __b,
16249 float32x2_t __c, const int __lane)
16250 {
16251 return __builtin_aarch64_fmav4sf (__b,
16252 __aarch64_vdupq_lane_f32 (__c, __lane),
16253 __a);
16254 }
16255
16256 __extension__ extern __inline float64x2_t
16257 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16258 vfmaq_lane_f64 (float64x2_t __a, float64x2_t __b,
16259 float64x1_t __c, const int __lane)
16260 {
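  /* __c has only one lane, so __lane must be 0; broadcast that element.  */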
16261 return __builtin_aarch64_fmav2df (__b, vdupq_n_f64 (__c[0]), __a);
16262 }
16263
16264 /* vfmaq_laneq */
16265
16266 __extension__ extern __inline float32x4_t
16267 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16268 vfmaq_laneq_f32 (float32x4_t __a, float32x4_t __b,
16269 float32x4_t __c, const int __lane)
16270 {
16271 return __builtin_aarch64_fmav4sf (__b,
16272 __aarch64_vdupq_laneq_f32 (__c, __lane),
16273 __a);
16274 }
16275
16276 __extension__ extern __inline float64x2_t
16277 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16278 vfmaq_laneq_f64 (float64x2_t __a, float64x2_t __b,
16279 float64x2_t __c, const int __lane)
16280 {
16281 return __builtin_aarch64_fmav2df (__b,
16282 __aarch64_vdupq_laneq_f64 (__c, __lane),
16283 __a);
16284 }
16285
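/* Illustrative usage sketch: the lane forms broadcast one element of the
   coefficient vector before the fused multiply-add, so a single register
   can supply several multipliers.  The helper below is hypothetical and not
   an ACLE intrinsic.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__neon_example_fma_lane0_f32 (float32x4_t __acc, float32x4_t __x,
                              float32x4_t __coeffs)
{
  /* Return __acc + __x * __coeffs[0] in every lane.  */
  return vfmaq_laneq_f32 (__acc, __x, __coeffs, 0);
}
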
16286 /* vfms */
16287
16288 __extension__ extern __inline float64x1_t
16289 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16290 vfms_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c)
16291 {
16292 return (float64x1_t) {__builtin_fma (-__b[0], __c[0], __a[0])};
16293 }
16294
16295 __extension__ extern __inline float32x2_t
16296 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16297 vfms_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
16298 {
16299 return __builtin_aarch64_fmav2sf (-__b, __c, __a);
16300 }
16301
16302 __extension__ extern __inline float32x4_t
16303 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16304 vfmsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
16305 {
16306 return __builtin_aarch64_fmav4sf (-__b, __c, __a);
16307 }
16308
16309 __extension__ extern __inline float64x2_t
16310 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16311 vfmsq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c)
16312 {
16313 return __builtin_aarch64_fmav2df (-__b, __c, __a);
16314 }
16315
16316 __extension__ extern __inline float32x2_t
16317 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16318 vfms_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
16319 {
16320 return __builtin_aarch64_fmav2sf (-__b, vdup_n_f32 (__c), __a);
16321 }
16322
16323 __extension__ extern __inline float64x1_t
16324 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16325 vfms_n_f64 (float64x1_t __a, float64x1_t __b, float64_t __c)
16326 {
16327 return (float64x1_t) {-__b[0] * __c + __a[0]};
16328 }
16329
16330 __extension__ extern __inline float32x4_t
16331 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16332 vfmsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
16333 {
16334 return __builtin_aarch64_fmav4sf (-__b, vdupq_n_f32 (__c), __a);
16335 }
16336
16337 __extension__ extern __inline float64x2_t
16338 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16339 vfmsq_n_f64 (float64x2_t __a, float64x2_t __b, float64_t __c)
16340 {
16341 return __builtin_aarch64_fmav2df (-__b, vdupq_n_f64 (__c), __a);
16342 }
16343
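/* Illustrative usage sketch: vfms negates the first multiplicand, giving
   a - b * c with a single rounding step.  The helper below is hypothetical
   and not an ACLE intrinsic.  */
__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__neon_example_nmsub_f32 (float32x2_t __acc, float32x2_t __x, float32x2_t __y)
{
  /* Return __acc - __x * __y in every lane, fused.  */
  return vfms_f32 (__acc, __x, __y);
}
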
16344 /* vfms_lane */
16345
16346 __extension__ extern __inline float32x2_t
16347 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16348 vfms_lane_f32 (float32x2_t __a, float32x2_t __b,
16349 float32x2_t __c, const int __lane)
16350 {
16351 return __builtin_aarch64_fmav2sf (-__b,
16352 __aarch64_vdup_lane_f32 (__c, __lane),
16353 __a);
16354 }
16355
16356 __extension__ extern __inline float64x1_t
16357 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16358 vfms_lane_f64 (float64x1_t __a, float64x1_t __b,
16359 float64x1_t __c, const int __lane)
16360 {
16361 return (float64x1_t) {__builtin_fma (-__b[0], __c[0], __a[0])};
16362 }
16363
16364 __extension__ extern __inline float64_t
16365 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16366 vfmsd_lane_f64 (float64_t __a, float64_t __b,
16367 float64x1_t __c, const int __lane)
16368 {
16369 return __builtin_fma (-__b, __c[0], __a);
16370 }
16371
16372 __extension__ extern __inline float32_t
16373 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16374 vfmss_lane_f32 (float32_t __a, float32_t __b,
16375 float32x2_t __c, const int __lane)
16376 {
16377 return __builtin_fmaf (-__b, __aarch64_vget_lane_any (__c, __lane), __a);
16378 }
16379
16380 /* vfms_laneq */
16381
16382 __extension__ extern __inline float32x2_t
16383 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16384 vfms_laneq_f32 (float32x2_t __a, float32x2_t __b,
16385 float32x4_t __c, const int __lane)
16386 {
16387 return __builtin_aarch64_fmav2sf (-__b,
16388 __aarch64_vdup_laneq_f32 (__c, __lane),
16389 __a);
16390 }
16391
16392 __extension__ extern __inline float64x1_t
16393 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16394 vfms_laneq_f64 (float64x1_t __a, float64x1_t __b,
16395 float64x2_t __c, const int __lane)
16396 {
16397 float64_t __c0 = __aarch64_vget_lane_any (__c, __lane);
16398 return (float64x1_t) {__builtin_fma (-__b[0], __c0, __a[0])};
16399 }
16400
16401 __extension__ extern __inline float64_t
16402 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16403 vfmsd_laneq_f64 (float64_t __a, float64_t __b,
16404 float64x2_t __c, const int __lane)
16405 {
16406 return __builtin_fma (-__b, __aarch64_vget_lane_any (__c, __lane), __a);
16407 }
16408
16409 __extension__ extern __inline float32_t
16410 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16411 vfmss_laneq_f32 (float32_t __a, float32_t __b,
16412 float32x4_t __c, const int __lane)
16413 {
16414 return __builtin_fmaf (-__b, __aarch64_vget_lane_any (__c, __lane), __a);
16415 }
16416
16417 /* vfmsq_lane */
16418
16419 __extension__ extern __inline float32x4_t
16420 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16421 vfmsq_lane_f32 (float32x4_t __a, float32x4_t __b,
16422 float32x2_t __c, const int __lane)
16423 {
16424 return __builtin_aarch64_fmav4sf (-__b,
16425 __aarch64_vdupq_lane_f32 (__c, __lane),
16426 __a);
16427 }
16428
16429 __extension__ extern __inline float64x2_t
16430 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16431 vfmsq_lane_f64 (float64x2_t __a, float64x2_t __b,
16432 float64x1_t __c, const int __lane)
16433 {
16434 return __builtin_aarch64_fmav2df (-__b, vdupq_n_f64 (__c[0]), __a);
16435 }
16436
16437 /* vfmsq_laneq */
16438
16439 __extension__ extern __inline float32x4_t
16440 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16441 vfmsq_laneq_f32 (float32x4_t __a, float32x4_t __b,
16442 float32x4_t __c, const int __lane)
16443 {
16444 return __builtin_aarch64_fmav4sf (-__b,
16445 __aarch64_vdupq_laneq_f32 (__c, __lane),
16446 __a);
16447 }
16448
16449 __extension__ extern __inline float64x2_t
16450 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16451 vfmsq_laneq_f64 (float64x2_t __a, float64x2_t __b,
16452 float64x2_t __c, const int __lane)
16453 {
16454 return __builtin_aarch64_fmav2df (-__b,
16455 __aarch64_vdupq_laneq_f64 (__c, __lane),
16456 __a);
16457 }
16458
16459 /* vld1 */
16460
16461 __extension__ extern __inline float16x4_t
16462 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16463 vld1_f16 (const float16_t *__a)
16464 {
16465 return __builtin_aarch64_ld1v4hf (__a);
16466 }
16467
16468 __extension__ extern __inline float32x2_t
16469 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16470 vld1_f32 (const float32_t *__a)
16471 {
16472 return __builtin_aarch64_ld1v2sf ((const __builtin_aarch64_simd_sf *) __a);
16473 }
16474
16475 __extension__ extern __inline float64x1_t
16476 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16477 vld1_f64 (const float64_t *__a)
16478 {
16479 return (float64x1_t) {*__a};
16480 }
16481
16482 __extension__ extern __inline poly8x8_t
16483 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16484 vld1_p8 (const poly8_t *__a)
16485 {
16486 return (poly8x8_t)
16487 __builtin_aarch64_ld1v8qi ((const __builtin_aarch64_simd_qi *) __a);
16488 }
16489
16490 __extension__ extern __inline poly16x4_t
16491 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16492 vld1_p16 (const poly16_t *__a)
16493 {
16494 return (poly16x4_t)
16495 __builtin_aarch64_ld1v4hi ((const __builtin_aarch64_simd_hi *) __a);
16496 }
16497
16498 __extension__ extern __inline poly64x1_t
16499 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16500 vld1_p64 (const poly64_t *__a)
16501 {
16502 return (poly64x1_t) {*__a};
16503 }
16504
16505 __extension__ extern __inline int8x8_t
16506 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16507 vld1_s8 (const int8_t *__a)
16508 {
16509 return __builtin_aarch64_ld1v8qi ((const __builtin_aarch64_simd_qi *) __a);
16510 }
16511
16512 __extension__ extern __inline int16x4_t
16513 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16514 vld1_s16 (const int16_t *__a)
16515 {
16516 return __builtin_aarch64_ld1v4hi ((const __builtin_aarch64_simd_hi *) __a);
16517 }
16518
16519 __extension__ extern __inline int32x2_t
16520 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16521 vld1_s32 (const int32_t *__a)
16522 {
16523 return __builtin_aarch64_ld1v2si ((const __builtin_aarch64_simd_si *) __a);
16524 }
16525
16526 __extension__ extern __inline int64x1_t
16527 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16528 vld1_s64 (const int64_t *__a)
16529 {
16530 return (int64x1_t) {*__a};
16531 }
16532
16533 __extension__ extern __inline uint8x8_t
16534 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16535 vld1_u8 (const uint8_t *__a)
16536 {
16537 return (uint8x8_t)
16538 __builtin_aarch64_ld1v8qi ((const __builtin_aarch64_simd_qi *) __a);
16539 }
16540
16541 __extension__ extern __inline uint16x4_t
16542 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16543 vld1_u16 (const uint16_t *__a)
16544 {
16545 return (uint16x4_t)
16546 __builtin_aarch64_ld1v4hi ((const __builtin_aarch64_simd_hi *) __a);
16547 }
16548
16549 __extension__ extern __inline uint32x2_t
16550 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16551 vld1_u32 (const uint32_t *__a)
16552 {
16553 return (uint32x2_t)
16554 __builtin_aarch64_ld1v2si ((const __builtin_aarch64_simd_si *) __a);
16555 }
16556
16557 __extension__ extern __inline uint64x1_t
16558 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16559 vld1_u64 (const uint64_t *__a)
16560 {
16561 return (uint64x1_t) {*__a};
16562 }
16563
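/* Illustrative usage sketch: vld1 loads one vector of consecutive elements;
   the pointer only needs element alignment.  The helper below is
   hypothetical and not an ACLE intrinsic.  */
__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__neon_example_load_middle_u32 (const uint32_t *__p)
{
  /* Load __p[0..1] and __p[2..3], then take the middle pair:
     result = {__p[1], __p[2]}.  */
  return vext_u32 (vld1_u32 (__p), vld1_u32 (__p + 2), 1);
}
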
16564 /* vld1x3 */
16565
16566 __extension__ extern __inline uint8x8x3_t
16567 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16568 vld1_u8_x3 (const uint8_t *__a)
16569 {
16570 uint8x8x3_t __i;
16571 __builtin_aarch64_simd_ci __o;
16572   __o = __builtin_aarch64_ld1x3v8qi ((const __builtin_aarch64_simd_qi *) __a);
16573 __i.val[0] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
16574 __i.val[1] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
16575 __i.val[2] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
16576 return __i;
16577 }
16578
16579 __extension__ extern __inline int8x8x3_t
16580 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16581 vld1_s8_x3 (const int8_t *__a)
16582 {
16583 int8x8x3_t __i;
16584 __builtin_aarch64_simd_ci __o;
16585 __o = __builtin_aarch64_ld1x3v8qi ((const __builtin_aarch64_simd_qi *) __a);
16586 __i.val[0] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
16587 __i.val[1] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
16588 __i.val[2] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
16589 return __i;
16590 }
16591
16592 __extension__ extern __inline uint16x4x3_t
16593 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16594 vld1_u16_x3 (const uint16_t *__a)
16595 {
16596 uint16x4x3_t __i;
16597 __builtin_aarch64_simd_ci __o;
16598 __o = __builtin_aarch64_ld1x3v4hi ((const __builtin_aarch64_simd_hi *) __a);
16599 __i.val[0] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
16600 __i.val[1] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
16601 __i.val[2] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
16602 return __i;
16603 }
16604
16605 __extension__ extern __inline int16x4x3_t
16606 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16607 vld1_s16_x3 (const int16_t *__a)
16608 {
16609 int16x4x3_t __i;
16610 __builtin_aarch64_simd_ci __o;
16611 __o = __builtin_aarch64_ld1x3v4hi ((const __builtin_aarch64_simd_hi *) __a);
16612 __i.val[0] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
16613 __i.val[1] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
16614 __i.val[2] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
16615 return __i;
16616 }
16617
16618 __extension__ extern __inline uint32x2x3_t
16619 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16620 vld1_u32_x3 (const uint32_t *__a)
16621 {
16622 uint32x2x3_t __i;
16623 __builtin_aarch64_simd_ci __o;
16624 __o = __builtin_aarch64_ld1x3v2si ((const __builtin_aarch64_simd_si *) __a);
16625 __i.val[0] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0);
16626 __i.val[1] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1);
16627 __i.val[2] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2);
16628 return __i;
16629 }
16630
16631 __extension__ extern __inline int32x2x3_t
16632 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16633 vld1_s32_x3 (const int32_t *__a)
16634 {
16635 int32x2x3_t __i;
16636 __builtin_aarch64_simd_ci __o;
16637 __o = __builtin_aarch64_ld1x3v2si ((const __builtin_aarch64_simd_si *) __a);
16638 __i.val[0] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0);
16639 __i.val[1] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1);
16640 __i.val[2] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2);
16641 return __i;
16642 }
16643
16644 __extension__ extern __inline uint64x1x3_t
16645 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16646 vld1_u64_x3 (const uint64_t *__a)
16647 {
16648 uint64x1x3_t __i;
16649 __builtin_aarch64_simd_ci __o;
16650 __o = __builtin_aarch64_ld1x3di ((const __builtin_aarch64_simd_di *) __a);
16651 __i.val[0] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
16652 __i.val[1] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
16653 __i.val[2] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
16654 return __i;
16655 }
16656
16657 __extension__ extern __inline int64x1x3_t
16658 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16659 vld1_s64_x3 (const int64_t *__a)
16660 {
16661 int64x1x3_t __i;
16662 __builtin_aarch64_simd_ci __o;
16663 __o = __builtin_aarch64_ld1x3di ((const __builtin_aarch64_simd_di *) __a);
16664 __i.val[0] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
16665 __i.val[1] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
16666 __i.val[2] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
16667
16668 return __i;
16669 }
16670
16671 __extension__ extern __inline float16x4x3_t
16672 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16673 vld1_f16_x3 (const float16_t *__a)
16674 {
16675 float16x4x3_t __i;
16676 __builtin_aarch64_simd_ci __o;
16677 __o = __builtin_aarch64_ld1x3v4hf ((const __builtin_aarch64_simd_hf *) __a);
16678 __i.val[0] = (float16x4_t) __builtin_aarch64_get_dregciv4hf (__o, 0);
16679 __i.val[1] = (float16x4_t) __builtin_aarch64_get_dregciv4hf (__o, 1);
16680 __i.val[2] = (float16x4_t) __builtin_aarch64_get_dregciv4hf (__o, 2);
16681 return __i;
16682 }
16683
16684 __extension__ extern __inline float32x2x3_t
16685 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16686 vld1_f32_x3 (const float32_t *__a)
16687 {
16688 float32x2x3_t __i;
16689 __builtin_aarch64_simd_ci __o;
16690 __o = __builtin_aarch64_ld1x3v2sf ((const __builtin_aarch64_simd_sf *) __a);
16691 __i.val[0] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 0);
16692 __i.val[1] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 1);
16693 __i.val[2] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 2);
16694 return __i;
16695 }
16696
16697 __extension__ extern __inline float64x1x3_t
16698 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16699 vld1_f64_x3 (const float64_t *__a)
16700 {
16701 float64x1x3_t __i;
16702 __builtin_aarch64_simd_ci __o;
16703 __o = __builtin_aarch64_ld1x3df ((const __builtin_aarch64_simd_df *) __a);
16704 __i.val[0] = (float64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
16705 __i.val[1] = (float64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
16706 __i.val[2] = (float64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
16707 return __i;
16708 }
16709
16710 __extension__ extern __inline poly8x8x3_t
16711 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16712 vld1_p8_x3 (const poly8_t *__a)
16713 {
16714 poly8x8x3_t __i;
16715 __builtin_aarch64_simd_ci __o;
16716 __o = __builtin_aarch64_ld1x3v8qi ((const __builtin_aarch64_simd_qi *) __a);
16717 __i.val[0] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
16718 __i.val[1] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
16719 __i.val[2] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
16720 return __i;
16721 }
16722
16723 __extension__ extern __inline poly16x4x3_t
16724 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16725 vld1_p16_x3 (const poly16_t *__a)
16726 {
16727 poly16x4x3_t __i;
16728 __builtin_aarch64_simd_ci __o;
16729 __o = __builtin_aarch64_ld1x3v4hi ((const __builtin_aarch64_simd_hi *) __a);
16730 __i.val[0] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
16731 __i.val[1] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
16732 __i.val[2] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
16733 return __i;
16734 }
16735
16736 __extension__ extern __inline poly64x1x3_t
16737 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16738 vld1_p64_x3 (const poly64_t *__a)
16739 {
16740 poly64x1x3_t __i;
16741 __builtin_aarch64_simd_ci __o;
16742 __o = __builtin_aarch64_ld1x3di ((const __builtin_aarch64_simd_di *) __a);
16743 __i.val[0] = (poly64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
16744 __i.val[1] = (poly64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
16745 __i.val[2] = (poly64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
16746
16747 return __i;
16748 }
16749
16750 __extension__ extern __inline uint8x16x3_t
16751 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16752 vld1q_u8_x3 (const uint8_t *__a)
16753 {
16754 uint8x16x3_t __i;
16755 __builtin_aarch64_simd_ci __o;
16756 __o = __builtin_aarch64_ld1x3v16qi ((const __builtin_aarch64_simd_qi *) __a);
16757 __i.val[0] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
16758 __i.val[1] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
16759 __i.val[2] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
16760 return __i;
16761 }
16762
16763 __extension__ extern __inline int8x16x3_t
16764 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16765 vld1q_s8_x3 (const int8_t *__a)
16766 {
16767 int8x16x3_t __i;
16768 __builtin_aarch64_simd_ci __o;
16769 __o = __builtin_aarch64_ld1x3v16qi ((const __builtin_aarch64_simd_qi *) __a);
16770 __i.val[0] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
16771 __i.val[1] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
16772 __i.val[2] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
16773 return __i;
16774 }
16775
16776 __extension__ extern __inline uint16x8x3_t
16777 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16778 vld1q_u16_x3 (const uint16_t *__a)
16779 {
16780 uint16x8x3_t __i;
16781 __builtin_aarch64_simd_ci __o;
16782 __o = __builtin_aarch64_ld1x3v8hi ((const __builtin_aarch64_simd_hi *) __a);
16783 __i.val[0] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
16784 __i.val[1] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
16785 __i.val[2] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
16786 return __i;
16787 }
16788
16789 __extension__ extern __inline int16x8x3_t
16790 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16791 vld1q_s16_x3 (const int16_t *__a)
16792 {
16793 int16x8x3_t __i;
16794 __builtin_aarch64_simd_ci __o;
16795 __o = __builtin_aarch64_ld1x3v8hi ((const __builtin_aarch64_simd_hi *) __a);
16796 __i.val[0] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
16797 __i.val[1] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
16798 __i.val[2] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
16799 return __i;
16800 }
16801
16802 __extension__ extern __inline uint32x4x3_t
16803 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16804 vld1q_u32_x3 (const uint32_t *__a)
16805 {
16806 uint32x4x3_t __i;
16807 __builtin_aarch64_simd_ci __o;
16808 __o = __builtin_aarch64_ld1x3v4si ((const __builtin_aarch64_simd_si *) __a);
16809 __i.val[0] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0);
16810 __i.val[1] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1);
16811 __i.val[2] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2);
16812 return __i;
16813 }
16814
16815 __extension__ extern __inline int32x4x3_t
16816 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16817 vld1q_s32_x3 (const int32_t *__a)
16818 {
16819 int32x4x3_t __i;
16820 __builtin_aarch64_simd_ci __o;
16821 __o = __builtin_aarch64_ld1x3v4si ((const __builtin_aarch64_simd_si *) __a);
16822 __i.val[0] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0);
16823 __i.val[1] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1);
16824 __i.val[2] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2);
16825 return __i;
16826 }
16827
16828 __extension__ extern __inline uint64x2x3_t
16829 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16830 vld1q_u64_x3 (const uint64_t *__a)
16831 {
16832 uint64x2x3_t __i;
16833 __builtin_aarch64_simd_ci __o;
16834 __o = __builtin_aarch64_ld1x3v2di ((const __builtin_aarch64_simd_di *) __a);
16835 __i.val[0] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0);
16836 __i.val[1] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1);
16837 __i.val[2] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2);
16838 return __i;
16839 }
16840
16841 __extension__ extern __inline int64x2x3_t
16842 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16843 vld1q_s64_x3 (const int64_t *__a)
16844 {
16845 int64x2x3_t __i;
16846 __builtin_aarch64_simd_ci __o;
16847 __o = __builtin_aarch64_ld1x3v2di ((const __builtin_aarch64_simd_di *) __a);
16848 __i.val[0] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0);
16849 __i.val[1] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1);
16850 __i.val[2] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2);
16851 return __i;
16852 }
16853
16854 __extension__ extern __inline float16x8x3_t
16855 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16856 vld1q_f16_x3 (const float16_t *__a)
16857 {
16858 float16x8x3_t __i;
16859 __builtin_aarch64_simd_ci __o;
16860 __o = __builtin_aarch64_ld1x3v8hf ((const __builtin_aarch64_simd_hf *) __a);
16861 __i.val[0] = (float16x8_t) __builtin_aarch64_get_qregciv8hf (__o, 0);
16862 __i.val[1] = (float16x8_t) __builtin_aarch64_get_qregciv8hf (__o, 1);
16863 __i.val[2] = (float16x8_t) __builtin_aarch64_get_qregciv8hf (__o, 2);
16864 return __i;
16865 }
16866
16867 __extension__ extern __inline float32x4x3_t
16868 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16869 vld1q_f32_x3 (const float32_t *__a)
16870 {
16871 float32x4x3_t __i;
16872 __builtin_aarch64_simd_ci __o;
16873 __o = __builtin_aarch64_ld1x3v4sf ((const __builtin_aarch64_simd_sf *) __a);
16874 __i.val[0] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 0);
16875 __i.val[1] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 1);
16876 __i.val[2] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 2);
16877 return __i;
16878 }
16879
16880 __extension__ extern __inline float64x2x3_t
16881 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16882 vld1q_f64_x3 (const float64_t *__a)
16883 {
16884 float64x2x3_t __i;
16885 __builtin_aarch64_simd_ci __o;
16886 __o = __builtin_aarch64_ld1x3v2df ((const __builtin_aarch64_simd_df *) __a);
16887 __i.val[0] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 0);
16888 __i.val[1] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 1);
16889 __i.val[2] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 2);
16890 return __i;
16891 }
16892
16893 __extension__ extern __inline poly8x16x3_t
16894 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16895 vld1q_p8_x3 (const poly8_t *__a)
16896 {
16897 poly8x16x3_t __i;
16898 __builtin_aarch64_simd_ci __o;
16899 __o = __builtin_aarch64_ld1x3v16qi ((const __builtin_aarch64_simd_qi *) __a);
16900 __i.val[0] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
16901 __i.val[1] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
16902 __i.val[2] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
16903 return __i;
16904 }
16905
16906 __extension__ extern __inline poly16x8x3_t
16907 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16908 vld1q_p16_x3 (const poly16_t *__a)
16909 {
16910 poly16x8x3_t __i;
16911 __builtin_aarch64_simd_ci __o;
16912 __o = __builtin_aarch64_ld1x3v8hi ((const __builtin_aarch64_simd_hi *) __a);
16913 __i.val[0] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
16914 __i.val[1] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
16915 __i.val[2] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
16916 return __i;
16917 }
16918
16919 __extension__ extern __inline poly64x2x3_t
16920 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16921 vld1q_p64_x3 (const poly64_t *__a)
16922 {
16923 poly64x2x3_t __i;
16924 __builtin_aarch64_simd_ci __o;
16925 __o = __builtin_aarch64_ld1x3v2di ((const __builtin_aarch64_simd_di *) __a);
16926 __i.val[0] = (poly64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0);
16927 __i.val[1] = (poly64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1);
16928 __i.val[2] = (poly64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2);
16929 return __i;
16930 }
16931
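/* Illustrative usage sketch: the _x3 forms load three consecutive vectors
   (no de-interleaving) and return them in the val[] array.  The helper
   below is hypothetical and not an ACLE intrinsic; vadd_u16 is defined
   earlier in this file.  */
__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__neon_example_sum3_u16 (const uint16_t *__p)
{
  /* Load __p[0..11] as three uint16x4_t vectors and add them lane-wise.  */
  uint16x4x3_t __v = vld1_u16_x3 (__p);
  return vadd_u16 (vadd_u16 (__v.val[0], __v.val[1]), __v.val[2]);
}
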
16932 /* vld1q */
16933
16934 __extension__ extern __inline float16x8_t
16935 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16936 vld1q_f16 (const float16_t *__a)
16937 {
16938 return __builtin_aarch64_ld1v8hf (__a);
16939 }
16940
16941 __extension__ extern __inline float32x4_t
16942 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16943 vld1q_f32 (const float32_t *__a)
16944 {
16945 return __builtin_aarch64_ld1v4sf ((const __builtin_aarch64_simd_sf *) __a);
16946 }
16947
16948 __extension__ extern __inline float64x2_t
16949 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16950 vld1q_f64 (const float64_t *__a)
16951 {
16952 return __builtin_aarch64_ld1v2df ((const __builtin_aarch64_simd_df *) __a);
16953 }
16954
16955 __extension__ extern __inline poly8x16_t
16956 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16957 vld1q_p8 (const poly8_t *__a)
16958 {
16959 return (poly8x16_t)
16960 __builtin_aarch64_ld1v16qi ((const __builtin_aarch64_simd_qi *) __a);
16961 }
16962
16963 __extension__ extern __inline poly16x8_t
16964 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16965 vld1q_p16 (const poly16_t *__a)
16966 {
16967 return (poly16x8_t)
16968 __builtin_aarch64_ld1v8hi ((const __builtin_aarch64_simd_hi *) __a);
16969 }
16970
16971 __extension__ extern __inline poly64x2_t
16972 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16973 vld1q_p64 (const poly64_t *__a)
16974 {
16975 return (poly64x2_t)
16976 __builtin_aarch64_ld1v2di ((const __builtin_aarch64_simd_di *) __a);
16977 }
16978
16979 __extension__ extern __inline int8x16_t
16980 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16981 vld1q_s8 (const int8_t *__a)
16982 {
16983 return __builtin_aarch64_ld1v16qi ((const __builtin_aarch64_simd_qi *) __a);
16984 }
16985
16986 __extension__ extern __inline int16x8_t
16987 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16988 vld1q_s16 (const int16_t *__a)
16989 {
16990 return __builtin_aarch64_ld1v8hi ((const __builtin_aarch64_simd_hi *) __a);
16991 }
16992
16993 __extension__ extern __inline int32x4_t
16994 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
16995 vld1q_s32 (const int32_t *__a)
16996 {
16997 return __builtin_aarch64_ld1v4si ((const __builtin_aarch64_simd_si *) __a);
16998 }
16999
17000 __extension__ extern __inline int64x2_t
17001 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17002 vld1q_s64 (const int64_t *__a)
17003 {
17004 return __builtin_aarch64_ld1v2di ((const __builtin_aarch64_simd_di *) __a);
17005 }
17006
17007 __extension__ extern __inline uint8x16_t
17008 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17009 vld1q_u8 (const uint8_t *__a)
17010 {
17011 return (uint8x16_t)
17012 __builtin_aarch64_ld1v16qi ((const __builtin_aarch64_simd_qi *) __a);
17013 }
17014
17015 __extension__ extern __inline uint8x8x2_t
17016 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17017 vld1_u8_x2 (const uint8_t *__a)
17018 {
17019 uint8x8x2_t ret;
17020 __builtin_aarch64_simd_oi __o;
17021 __o = __builtin_aarch64_ld1x2v8qi ((const __builtin_aarch64_simd_qi *) __a);
17022 ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
17023 ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
17024 return ret;
17025 }
17026
17027 __extension__ extern __inline int8x8x2_t
17028 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17029 vld1_s8_x2 (const int8_t *__a)
17030 {
17031 int8x8x2_t ret;
17032 __builtin_aarch64_simd_oi __o;
17033 __o = __builtin_aarch64_ld1x2v8qi ((const __builtin_aarch64_simd_qi *) __a);
17034 ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
17035 ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
17036 return ret;
17037 }
17038
17039 __extension__ extern __inline uint16x4x2_t
17040 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17041 vld1_u16_x2 (const uint16_t *__a)
17042 {
17043 uint16x4x2_t ret;
17044 __builtin_aarch64_simd_oi __o;
17045 __o = __builtin_aarch64_ld1x2v4hi ((const __builtin_aarch64_simd_hi *) __a);
17046 ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
17047 ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
17048 return ret;
17049 }
17050
17051 __extension__ extern __inline int16x4x2_t
17052 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17053 vld1_s16_x2 (const int16_t *__a)
17054 {
17055 int16x4x2_t ret;
17056 __builtin_aarch64_simd_oi __o;
17057 __o = __builtin_aarch64_ld1x2v4hi ((const __builtin_aarch64_simd_hi *) __a);
17058 ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
17059 ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
17060 return ret;
17061 }
17062
17063 __extension__ extern __inline uint32x2x2_t
17064 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17065 vld1_u32_x2 (const uint32_t *__a)
17066 {
17067 uint32x2x2_t ret;
17068 __builtin_aarch64_simd_oi __o;
17069 __o = __builtin_aarch64_ld1x2v2si ((const __builtin_aarch64_simd_si *) __a);
17070 ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0);
17071 ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1);
17072 return ret;
17073 }
17074
17075 __extension__ extern __inline int32x2x2_t
17076 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17077 vld1_s32_x2 (const int32_t *__a)
17078 {
17079 int32x2x2_t ret;
17080 __builtin_aarch64_simd_oi __o;
17081 __o = __builtin_aarch64_ld1x2v2si ((const __builtin_aarch64_simd_si *) __a);
17082 ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0);
17083 ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1);
17084 return ret;
17085 }
17086
17087 __extension__ extern __inline uint64x1x2_t
17088 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17089 vld1_u64_x2 (const uint64_t *__a)
17090 {
17091 uint64x1x2_t ret;
17092 __builtin_aarch64_simd_oi __o;
17093 __o = __builtin_aarch64_ld1x2di ((const __builtin_aarch64_simd_di *) __a);
17094 ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
17095 ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
17096 return ret;
17097 }
17098
17099 __extension__ extern __inline int64x1x2_t
17100 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17101 vld1_s64_x2 (const int64_t *__a)
17102 {
17103 int64x1x2_t ret;
17104 __builtin_aarch64_simd_oi __o;
17105 __o = __builtin_aarch64_ld1x2di ((const __builtin_aarch64_simd_di *) __a);
17106 ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
17107 ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
17108 return ret;
17109 }
17110
17111 __extension__ extern __inline float16x4x2_t
17112 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17113 vld1_f16_x2 (const float16_t *__a)
17114 {
17115 float16x4x2_t ret;
17116 __builtin_aarch64_simd_oi __o;
17117 __o = __builtin_aarch64_ld1x2v4hf ((const __builtin_aarch64_simd_hf *) __a);
17118 ret.val[0] = (float16x4_t) __builtin_aarch64_get_dregoiv4hf (__o, 0);
17119 ret.val[1] = (float16x4_t) __builtin_aarch64_get_dregoiv4hf (__o, 1);
17120 return ret;
17121 }
17122
17123 __extension__ extern __inline float32x2x2_t
17124 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17125 vld1_f32_x2 (const float32_t *__a)
17126 {
17127 float32x2x2_t ret;
17128 __builtin_aarch64_simd_oi __o;
17129 __o = __builtin_aarch64_ld1x2v2sf ((const __builtin_aarch64_simd_sf *) __a);
17130 ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 0);
17131 ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 1);
17132 return ret;
17133 }
17134
17135 __extension__ extern __inline float64x1x2_t
17136 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17137 vld1_f64_x2 (const float64_t *__a)
17138 {
17139 float64x1x2_t ret;
17140 __builtin_aarch64_simd_oi __o;
17141 __o = __builtin_aarch64_ld1x2df ((const __builtin_aarch64_simd_df *) __a);
17142 ret.val[0] = (float64x1_t) {__builtin_aarch64_get_dregoidf (__o, 0)};
17143 ret.val[1] = (float64x1_t) {__builtin_aarch64_get_dregoidf (__o, 1)};
17144 return ret;
17145 }
17146
17147 __extension__ extern __inline poly8x8x2_t
17148 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17149 vld1_p8_x2 (const poly8_t *__a)
17150 {
17151 poly8x8x2_t ret;
17152 __builtin_aarch64_simd_oi __o;
17153 __o = __builtin_aarch64_ld1x2v8qi ((const __builtin_aarch64_simd_qi *) __a);
17154 ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
17155 ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
17156 return ret;
17157 }
17158
17159 __extension__ extern __inline poly16x4x2_t
17160 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17161 vld1_p16_x2 (const poly16_t *__a)
17162 {
17163 poly16x4x2_t ret;
17164 __builtin_aarch64_simd_oi __o;
17165 __o = __builtin_aarch64_ld1x2v4hi ((const __builtin_aarch64_simd_hi *) __a);
17166 ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
17167 ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
17168 return ret;
17169 }
17170
17171 __extension__ extern __inline poly64x1x2_t
17172 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17173 vld1_p64_x2 (const poly64_t *__a)
17174 {
17175 poly64x1x2_t ret;
17176 __builtin_aarch64_simd_oi __o;
17177 __o = __builtin_aarch64_ld1x2di ((const __builtin_aarch64_simd_di *) __a);
17178 ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
17179 ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
17180 return ret;
17181 }
17182
17183 __extension__ extern __inline uint8x16x2_t
17184 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17185 vld1q_u8_x2 (const uint8_t *__a)
17186 {
17187 uint8x16x2_t ret;
17188 __builtin_aarch64_simd_oi __o;
17189 __o = __builtin_aarch64_ld1x2v16qi ((const __builtin_aarch64_simd_qi *) __a);
17190 ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
17191 ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
17192 return ret;
17193 }
17194
17195 __extension__ extern __inline int8x16x2_t
17196 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17197 vld1q_s8_x2 (const int8_t *__a)
17198 {
17199 int8x16x2_t ret;
17200 __builtin_aarch64_simd_oi __o;
17201 __o = __builtin_aarch64_ld1x2v16qi ((const __builtin_aarch64_simd_qi *) __a);
17202 ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
17203 ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
17204 return ret;
17205 }
17206
17207 __extension__ extern __inline uint16x8x2_t
17208 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17209 vld1q_u16_x2 (const uint16_t *__a)
17210 {
17211 uint16x8x2_t ret;
17212 __builtin_aarch64_simd_oi __o;
17213 __o = __builtin_aarch64_ld1x2v8hi ((const __builtin_aarch64_simd_hi *) __a);
17214 ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
17215 ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
17216 return ret;
17217 }
17218
17219 __extension__ extern __inline int16x8x2_t
17220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17221 vld1q_s16_x2 (const int16_t *__a)
17222 {
17223 int16x8x2_t ret;
17224 __builtin_aarch64_simd_oi __o;
17225 __o = __builtin_aarch64_ld1x2v8hi ((const __builtin_aarch64_simd_hi *) __a);
17226 ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
17227 ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
17228 return ret;
17229 }
17230
17231 __extension__ extern __inline uint32x4x2_t
17232 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17233 vld1q_u32_x2 (const uint32_t *__a)
17234 {
17235 uint32x4x2_t ret;
17236 __builtin_aarch64_simd_oi __o;
17237 __o = __builtin_aarch64_ld1x2v4si ((const __builtin_aarch64_simd_si *) __a);
17238 ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
17239 ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
17240 return ret;
17241 }
17242
17243 __extension__ extern __inline int32x4x2_t
17244 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17245 vld1q_s32_x2 (const int32_t *__a)
17246 {
17247 int32x4x2_t ret;
17248 __builtin_aarch64_simd_oi __o;
17249 __o = __builtin_aarch64_ld1x2v4si ((const __builtin_aarch64_simd_si *) __a);
17250 ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
17251 ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
17252 return ret;
17253 }
17254
17255 __extension__ extern __inline uint64x2x2_t
17256 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17257 vld1q_u64_x2 (const uint64_t *__a)
17258 {
17259 uint64x2x2_t ret;
17260 __builtin_aarch64_simd_oi __o;
17261 __o = __builtin_aarch64_ld1x2v2di ((const __builtin_aarch64_simd_di *) __a);
17262 ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0);
17263 ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1);
17264 return ret;
17265 }
17266
17267 __extension__ extern __inline int64x2x2_t
17268 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17269 vld1q_s64_x2 (const int64_t *__a)
17270 {
17271 int64x2x2_t ret;
17272 __builtin_aarch64_simd_oi __o;
17273 __o = __builtin_aarch64_ld1x2v2di ((const __builtin_aarch64_simd_di *) __a);
17274 ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0);
17275 ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1);
17276 return ret;
17277 }
17278
17279 __extension__ extern __inline float16x8x2_t
17280 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17281 vld1q_f16_x2 (const float16_t *__a)
17282 {
17283 float16x8x2_t ret;
17284 __builtin_aarch64_simd_oi __o;
17285 __o = __builtin_aarch64_ld1x2v8hf ((const __builtin_aarch64_simd_hf *) __a);
17286 ret.val[0] = (float16x8_t) __builtin_aarch64_get_qregoiv8hf (__o, 0);
17287 ret.val[1] = (float16x8_t) __builtin_aarch64_get_qregoiv8hf (__o, 1);
17288 return ret;
17289 }
17290
17291 __extension__ extern __inline float32x4x2_t
17292 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17293 vld1q_f32_x2 (const float32_t *__a)
17294 {
17295 float32x4x2_t ret;
17296 __builtin_aarch64_simd_oi __o;
17297 __o = __builtin_aarch64_ld1x2v4sf ((const __builtin_aarch64_simd_sf *) __a);
17298 ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 0);
17299 ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 1);
17300 return ret;
17301 }
17302
17303 __extension__ extern __inline float64x2x2_t
17304 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17305 vld1q_f64_x2 (const float64_t *__a)
17306 {
17307 float64x2x2_t ret;
17308 __builtin_aarch64_simd_oi __o;
17309 __o = __builtin_aarch64_ld1x2v2df ((const __builtin_aarch64_simd_df *) __a);
17310 ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 0);
17311 ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 1);
17312 return ret;
17313 }
17314
17315 __extension__ extern __inline poly8x16x2_t
17316 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17317 vld1q_p8_x2 (const poly8_t *__a)
17318 {
17319 poly8x16x2_t ret;
17320 __builtin_aarch64_simd_oi __o;
17321 __o = __builtin_aarch64_ld1x2v16qi ((const __builtin_aarch64_simd_qi *) __a);
17322 ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
17323 ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
17324 return ret;
17325 }
17326
17327 __extension__ extern __inline poly16x8x2_t
17328 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17329 vld1q_p16_x2 (const poly16_t *__a)
17330 {
17331 poly16x8x2_t ret;
17332 __builtin_aarch64_simd_oi __o;
17333 __o = __builtin_aarch64_ld1x2v8hi ((const __builtin_aarch64_simd_hi *) __a);
17334 ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
17335 ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
17336 return ret;
17337 }
17338
17339 __extension__ extern __inline poly64x2x2_t
17340 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17341 vld1q_p64_x2 (const poly64_t *__a)
17342 {
17343 poly64x2x2_t ret;
17344 __builtin_aarch64_simd_oi __o;
17345 __o = __builtin_aarch64_ld1x2v2di ((const __builtin_aarch64_simd_di *) __a);
17346 ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0);
17347 ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1);
17348 return ret;
17349 }
17350
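/* Illustrative usage sketch: the _x2 forms load two consecutive vectors in
   one call, convenient for 32-byte blocks.  The helper below is hypothetical
   and not an ACLE intrinsic; veorq_u8 is defined earlier in this file.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__neon_example_xor_halves_u8 (const uint8_t *__p)
{
  /* Load __p[0..31] as two uint8x16_t vectors and XOR them.  */
  uint8x16x2_t __v = vld1q_u8_x2 (__p);
  return veorq_u8 (__v.val[0], __v.val[1]);
}
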
17351 __extension__ extern __inline uint16x8_t
17352 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17353 vld1q_u16 (const uint16_t *__a)
17354 {
17355 return (uint16x8_t)
17356 __builtin_aarch64_ld1v8hi ((const __builtin_aarch64_simd_hi *) __a);
17357 }
17358
17359 __extension__ extern __inline uint32x4_t
17360 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17361 vld1q_u32 (const uint32_t *__a)
17362 {
17363 return (uint32x4_t)
17364 __builtin_aarch64_ld1v4si ((const __builtin_aarch64_simd_si *) __a);
17365 }
17366
17367 __extension__ extern __inline uint64x2_t
17368 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17369 vld1q_u64 (const uint64_t *__a)
17370 {
17371 return (uint64x2_t)
17372 __builtin_aarch64_ld1v2di ((const __builtin_aarch64_simd_di *) __a);
17373 }
17374
17375 /* vld1(q)_x4 */
17376
17377 __extension__ extern __inline int8x8x4_t
17378 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17379 vld1_s8_x4 (const int8_t *__a)
17380 {
17381 union { int8x8x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17382 __au.__o
17383 = __builtin_aarch64_ld1x4v8qi ((const __builtin_aarch64_simd_qi *) __a);
17384 return __au.__i;
17385 }
17386
17387 __extension__ extern __inline int8x16x4_t
17388 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17389 vld1q_s8_x4 (const int8_t *__a)
17390 {
17391 union { int8x16x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17392 __au.__o
17393 = __builtin_aarch64_ld1x4v16qi ((const __builtin_aarch64_simd_qi *) __a);
17394 return __au.__i;
17395 }
17396
17397 __extension__ extern __inline int16x4x4_t
17398 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17399 vld1_s16_x4 (const int16_t *__a)
17400 {
17401 union { int16x4x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17402 __au.__o
17403 = __builtin_aarch64_ld1x4v4hi ((const __builtin_aarch64_simd_hi *) __a);
17404 return __au.__i;
17405 }
17406
17407 __extension__ extern __inline int16x8x4_t
17408 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17409 vld1q_s16_x4 (const int16_t *__a)
17410 {
17411 union { int16x8x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17412 __au.__o
17413 = __builtin_aarch64_ld1x4v8hi ((const __builtin_aarch64_simd_hi *) __a);
17414 return __au.__i;
17415 }
17416
17417 __extension__ extern __inline int32x2x4_t
17418 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17419 vld1_s32_x4 (const int32_t *__a)
17420 {
17421 union { int32x2x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17422 __au.__o
17423 = __builtin_aarch64_ld1x4v2si ((const __builtin_aarch64_simd_si *) __a);
17424 return __au.__i;
17425 }
17426
17427 __extension__ extern __inline int32x4x4_t
17428 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17429 vld1q_s32_x4 (const int32_t *__a)
17430 {
17431 union { int32x4x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17432 __au.__o
17433 = __builtin_aarch64_ld1x4v4si ((const __builtin_aarch64_simd_si *) __a);
17434 return __au.__i;
17435 }
17436
17437 __extension__ extern __inline uint8x8x4_t
17438 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17439 vld1_u8_x4 (const uint8_t *__a)
17440 {
17441 union { uint8x8x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17442 __au.__o
17443 = __builtin_aarch64_ld1x4v8qi ((const __builtin_aarch64_simd_qi *) __a);
17444 return __au.__i;
17445 }
17446
17447 __extension__ extern __inline uint8x16x4_t
17448 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17449 vld1q_u8_x4 (const uint8_t *__a)
17450 {
17451 union { uint8x16x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17452 __au.__o
17453 = __builtin_aarch64_ld1x4v16qi ((const __builtin_aarch64_simd_qi *) __a);
17454 return __au.__i;
17455 }
17456
17457 __extension__ extern __inline uint16x4x4_t
17458 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17459 vld1_u16_x4 (const uint16_t *__a)
17460 {
17461 union { uint16x4x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17462 __au.__o
17463 = __builtin_aarch64_ld1x4v4hi ((const __builtin_aarch64_simd_hi *) __a);
17464 return __au.__i;
17465 }
17466
17467 __extension__ extern __inline uint16x8x4_t
17468 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17469 vld1q_u16_x4 (const uint16_t *__a)
17470 {
17471 union { uint16x8x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17472 __au.__o
17473 = __builtin_aarch64_ld1x4v8hi ((const __builtin_aarch64_simd_hi *) __a);
17474 return __au.__i;
17475 }
17476
17477 __extension__ extern __inline uint32x2x4_t
17478 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17479 vld1_u32_x4 (const uint32_t *__a)
17480 {
17481 union { uint32x2x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17482 __au.__o
17483 = __builtin_aarch64_ld1x4v2si ((const __builtin_aarch64_simd_si *) __a);
17484 return __au.__i;
17485 }
17486
17487 __extension__ extern __inline uint32x4x4_t
17488 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17489 vld1q_u32_x4 (const uint32_t *__a)
17490 {
17491 union { uint32x4x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17492 __au.__o
17493 = __builtin_aarch64_ld1x4v4si ((const __builtin_aarch64_simd_si *) __a);
17494 return __au.__i;
17495 }
17496
17497 __extension__ extern __inline float16x4x4_t
17498 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17499 vld1_f16_x4 (const float16_t *__a)
17500 {
17501 union { float16x4x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17502 __au.__o
17503 = __builtin_aarch64_ld1x4v4hf ((const __builtin_aarch64_simd_hf *) __a);
17504 return __au.__i;
17505 }
17506
17507 __extension__ extern __inline float16x8x4_t
17508 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17509 vld1q_f16_x4 (const float16_t *__a)
17510 {
17511 union { float16x8x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17512 __au.__o
17513 = __builtin_aarch64_ld1x4v8hf ((const __builtin_aarch64_simd_hf *) __a);
17514 return __au.__i;
17515 }
17516
17517 __extension__ extern __inline float32x2x4_t
17518 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17519 vld1_f32_x4 (const float32_t *__a)
17520 {
17521 union { float32x2x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17522 __au.__o
17523 = __builtin_aarch64_ld1x4v2sf ((const __builtin_aarch64_simd_sf *) __a);
17524 return __au.__i;
17525 }
17526
17527 __extension__ extern __inline float32x4x4_t
17528 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17529 vld1q_f32_x4 (const float32_t *__a)
17530 {
17531 union { float32x4x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17532 __au.__o
17533 = __builtin_aarch64_ld1x4v4sf ((const __builtin_aarch64_simd_sf *) __a);
17534 return __au.__i;
17535 }
17536
17537 __extension__ extern __inline poly8x8x4_t
17538 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17539 vld1_p8_x4 (const poly8_t *__a)
17540 {
17541 union { poly8x8x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17542 __au.__o
17543 = __builtin_aarch64_ld1x4v8qi ((const __builtin_aarch64_simd_qi *) __a);
17544 return __au.__i;
17545 }
17546
17547 __extension__ extern __inline poly8x16x4_t
17548 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17549 vld1q_p8_x4 (const poly8_t *__a)
17550 {
17551 union { poly8x16x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17552 __au.__o
17553 = __builtin_aarch64_ld1x4v16qi ((const __builtin_aarch64_simd_qi *) __a);
17554 return __au.__i;
17555 }
17556
17557 __extension__ extern __inline poly16x4x4_t
17558 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17559 vld1_p16_x4 (const poly16_t *__a)
17560 {
17561 union { poly16x4x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17562 __au.__o
17563 = __builtin_aarch64_ld1x4v4hi ((const __builtin_aarch64_simd_hi *) __a);
17564 return __au.__i;
17565 }
17566
17567 __extension__ extern __inline poly16x8x4_t
17568 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17569 vld1q_p16_x4 (const poly16_t *__a)
17570 {
17571 union { poly16x8x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17572 __au.__o
17573 = __builtin_aarch64_ld1x4v8hi ((const __builtin_aarch64_simd_hi *) __a);
17574 return __au.__i;
17575 }
17576
17577 __extension__ extern __inline int64x1x4_t
17578 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17579 vld1_s64_x4 (const int64_t *__a)
17580 {
17581 union { int64x1x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17582 __au.__o
17583 = __builtin_aarch64_ld1x4di ((const __builtin_aarch64_simd_di *) __a);
17584 return __au.__i;
17585 }
17586
17587 __extension__ extern __inline uint64x1x4_t
17588 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17589 vld1_u64_x4 (const uint64_t *__a)
17590 {
17591 union { uint64x1x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17592 __au.__o
17593 = __builtin_aarch64_ld1x4di ((const __builtin_aarch64_simd_di *) __a);
17594 return __au.__i;
17595 }
17596
17597 __extension__ extern __inline poly64x1x4_t
17598 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17599 vld1_p64_x4 (const poly64_t *__a)
17600 {
17601 union { poly64x1x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17602 __au.__o
17603 = __builtin_aarch64_ld1x4di ((const __builtin_aarch64_simd_di *) __a);
17604 return __au.__i;
17605 }
17606
17607 __extension__ extern __inline int64x2x4_t
17608 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17609 vld1q_s64_x4 (const int64_t *__a)
17610 {
17611 union { int64x2x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17612 __au.__o
17613 = __builtin_aarch64_ld1x4v2di ((const __builtin_aarch64_simd_di *) __a);
17614 return __au.__i;
17615 }
17616
17617 __extension__ extern __inline uint64x2x4_t
17618 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17619 vld1q_u64_x4 (const uint64_t *__a)
17620 {
17621 union { uint64x2x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17622 __au.__o
17623 = __builtin_aarch64_ld1x4v2di ((const __builtin_aarch64_simd_di *) __a);
17624 return __au.__i;
17625 }
17626
17627 __extension__ extern __inline poly64x2x4_t
17628 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17629 vld1q_p64_x4 (const poly64_t *__a)
17630 {
17631 union { poly64x2x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17632 __au.__o
17633 = __builtin_aarch64_ld1x4v2di ((const __builtin_aarch64_simd_di *) __a);
17634 return __au.__i;
17635 }
17636
17637 __extension__ extern __inline float64x1x4_t
17638 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17639 vld1_f64_x4 (const float64_t *__a)
17640 {
17641 union { float64x1x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17642 __au.__o
17643 = __builtin_aarch64_ld1x4df ((const __builtin_aarch64_simd_df *) __a);
17644 return __au.__i;
17645 }
17646
17647 __extension__ extern __inline float64x2x4_t
17648 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17649 vld1q_f64_x4 (const float64_t *__a)
17650 {
17651 union { float64x2x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
17652 __au.__o
17653 = __builtin_aarch64_ld1x4v2df ((const __builtin_aarch64_simd_df *) __a);
17654 return __au.__i;
17655 }
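
/* Illustrative usage sketch (editor's addition, not part of the original
   header): the _x4 variants load four consecutive, non-interleaved vectors
   in a single call.  The helper name below is hypothetical.

     #include <arm_neon.h>

     float32x4_t
     example_sum_sixteen_floats (const float32_t *p)
     {
       float32x4x4_t v = vld1q_f32_x4 (p);            // p[0..15]
       float32x4_t s = vaddq_f32 (v.val[0], v.val[1]);
       s = vaddq_f32 (s, v.val[2]);
       return vaddq_f32 (s, v.val[3]);
     }
*/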
17656
17657 /* vld1_dup */
17658
17659 __extension__ extern __inline float16x4_t
17660 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17661 vld1_dup_f16 (const float16_t* __a)
17662 {
17663 return vdup_n_f16 (*__a);
17664 }
17665
17666 __extension__ extern __inline float32x2_t
17667 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17668 vld1_dup_f32 (const float32_t* __a)
17669 {
17670 return vdup_n_f32 (*__a);
17671 }
17672
17673 __extension__ extern __inline float64x1_t
17674 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17675 vld1_dup_f64 (const float64_t* __a)
17676 {
17677 return vdup_n_f64 (*__a);
17678 }
17679
17680 __extension__ extern __inline poly8x8_t
17681 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17682 vld1_dup_p8 (const poly8_t* __a)
17683 {
17684 return vdup_n_p8 (*__a);
17685 }
17686
17687 __extension__ extern __inline poly16x4_t
17688 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17689 vld1_dup_p16 (const poly16_t* __a)
17690 {
17691 return vdup_n_p16 (*__a);
17692 }
17693
17694 __extension__ extern __inline poly64x1_t
17695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17696 vld1_dup_p64 (const poly64_t* __a)
17697 {
17698 return vdup_n_p64 (*__a);
17699 }
17700
17701 __extension__ extern __inline int8x8_t
17702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17703 vld1_dup_s8 (const int8_t* __a)
17704 {
17705 return vdup_n_s8 (*__a);
17706 }
17707
17708 __extension__ extern __inline int16x4_t
17709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17710 vld1_dup_s16 (const int16_t* __a)
17711 {
17712 return vdup_n_s16 (*__a);
17713 }
17714
17715 __extension__ extern __inline int32x2_t
17716 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17717 vld1_dup_s32 (const int32_t* __a)
17718 {
17719 return vdup_n_s32 (*__a);
17720 }
17721
17722 __extension__ extern __inline int64x1_t
17723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17724 vld1_dup_s64 (const int64_t* __a)
17725 {
17726 return vdup_n_s64 (*__a);
17727 }
17728
17729 __extension__ extern __inline uint8x8_t
17730 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17731 vld1_dup_u8 (const uint8_t* __a)
17732 {
17733 return vdup_n_u8 (*__a);
17734 }
17735
17736 __extension__ extern __inline uint16x4_t
17737 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17738 vld1_dup_u16 (const uint16_t* __a)
17739 {
17740 return vdup_n_u16 (*__a);
17741 }
17742
17743 __extension__ extern __inline uint32x2_t
17744 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17745 vld1_dup_u32 (const uint32_t* __a)
17746 {
17747 return vdup_n_u32 (*__a);
17748 }
17749
17750 __extension__ extern __inline uint64x1_t
17751 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17752 vld1_dup_u64 (const uint64_t* __a)
17753 {
17754 return vdup_n_u64 (*__a);
17755 }
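
/* Illustrative usage sketch (editor's addition, not part of the original
   header): vld1_dup_* loads one element and broadcasts it to every lane,
   here used to scale a vector by a scalar held in memory.  The helper name
   below is hypothetical.

     #include <arm_neon.h>

     float32x2_t
     example_scale (float32x2_t v, const float32_t *scale)
     {
       float32x2_t s = vld1_dup_f32 (scale);  // both lanes = *scale
       return vmul_f32 (v, s);
     }
*/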
17756
17757 /* vld1q_dup */
17758
17759 __extension__ extern __inline float16x8_t
17760 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17761 vld1q_dup_f16 (const float16_t* __a)
17762 {
17763 return vdupq_n_f16 (*__a);
17764 }
17765
17766 __extension__ extern __inline float32x4_t
17767 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17768 vld1q_dup_f32 (const float32_t* __a)
17769 {
17770 return vdupq_n_f32 (*__a);
17771 }
17772
17773 __extension__ extern __inline float64x2_t
17774 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17775 vld1q_dup_f64 (const float64_t* __a)
17776 {
17777 return vdupq_n_f64 (*__a);
17778 }
17779
17780 __extension__ extern __inline poly8x16_t
17781 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17782 vld1q_dup_p8 (const poly8_t* __a)
17783 {
17784 return vdupq_n_p8 (*__a);
17785 }
17786
17787 __extension__ extern __inline poly16x8_t
17788 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17789 vld1q_dup_p16 (const poly16_t* __a)
17790 {
17791 return vdupq_n_p16 (*__a);
17792 }
17793
17794 __extension__ extern __inline poly64x2_t
17795 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17796 vld1q_dup_p64 (const poly64_t* __a)
17797 {
17798 return vdupq_n_p64 (*__a);
17799 }
17800
17801 __extension__ extern __inline int8x16_t
17802 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17803 vld1q_dup_s8 (const int8_t* __a)
17804 {
17805 return vdupq_n_s8 (*__a);
17806 }
17807
17808 __extension__ extern __inline int16x8_t
17809 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17810 vld1q_dup_s16 (const int16_t* __a)
17811 {
17812 return vdupq_n_s16 (*__a);
17813 }
17814
17815 __extension__ extern __inline int32x4_t
17816 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17817 vld1q_dup_s32 (const int32_t* __a)
17818 {
17819 return vdupq_n_s32 (*__a);
17820 }
17821
17822 __extension__ extern __inline int64x2_t
17823 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17824 vld1q_dup_s64 (const int64_t* __a)
17825 {
17826 return vdupq_n_s64 (*__a);
17827 }
17828
17829 __extension__ extern __inline uint8x16_t
17830 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17831 vld1q_dup_u8 (const uint8_t* __a)
17832 {
17833 return vdupq_n_u8 (*__a);
17834 }
17835
17836 __extension__ extern __inline uint16x8_t
17837 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17838 vld1q_dup_u16 (const uint16_t* __a)
17839 {
17840 return vdupq_n_u16 (*__a);
17841 }
17842
17843 __extension__ extern __inline uint32x4_t
17844 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17845 vld1q_dup_u32 (const uint32_t* __a)
17846 {
17847 return vdupq_n_u32 (*__a);
17848 }
17849
17850 __extension__ extern __inline uint64x2_t
17851 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17852 vld1q_dup_u64 (const uint64_t* __a)
17853 {
17854 return vdupq_n_u64 (*__a);
17855 }
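
/* Illustrative usage sketch (editor's addition, not part of the original
   header): the q-form broadcasts a single element across all eight 16-bit
   lanes of a 128-bit vector.  The helper name below is hypothetical.

     #include <arm_neon.h>

     int16x8_t
     example_add_bias (int16x8_t v, const int16_t *bias)
     {
       return vaddq_s16 (v, vld1q_dup_s16 (bias));  // v[i] + *bias
     }
*/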
17856
17857 /* vld1_lane */
17858
17859 __extension__ extern __inline float16x4_t
17860 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17861 vld1_lane_f16 (const float16_t *__src, float16x4_t __vec, const int __lane)
17862 {
17863 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17864 }
17865
17866 __extension__ extern __inline float32x2_t
17867 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17868 vld1_lane_f32 (const float32_t *__src, float32x2_t __vec, const int __lane)
17869 {
17870 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17871 }
17872
17873 __extension__ extern __inline float64x1_t
17874 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17875 vld1_lane_f64 (const float64_t *__src, float64x1_t __vec, const int __lane)
17876 {
17877 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17878 }
17879
17880 __extension__ extern __inline poly8x8_t
17881 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17882 vld1_lane_p8 (const poly8_t *__src, poly8x8_t __vec, const int __lane)
17883 {
17884 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17885 }
17886
17887 __extension__ extern __inline poly16x4_t
17888 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17889 vld1_lane_p16 (const poly16_t *__src, poly16x4_t __vec, const int __lane)
17890 {
17891 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17892 }
17893
17894 __extension__ extern __inline poly64x1_t
17895 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17896 vld1_lane_p64 (const poly64_t *__src, poly64x1_t __vec, const int __lane)
17897 {
17898 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17899 }
17900
17901 __extension__ extern __inline int8x8_t
17902 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17903 vld1_lane_s8 (const int8_t *__src, int8x8_t __vec, const int __lane)
17904 {
17905 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17906 }
17907
17908 __extension__ extern __inline int16x4_t
17909 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17910 vld1_lane_s16 (const int16_t *__src, int16x4_t __vec, const int __lane)
17911 {
17912 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17913 }
17914
17915 __extension__ extern __inline int32x2_t
17916 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17917 vld1_lane_s32 (const int32_t *__src, int32x2_t __vec, const int __lane)
17918 {
17919 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17920 }
17921
17922 __extension__ extern __inline int64x1_t
17923 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17924 vld1_lane_s64 (const int64_t *__src, int64x1_t __vec, const int __lane)
17925 {
17926 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17927 }
17928
17929 __extension__ extern __inline uint8x8_t
17930 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17931 vld1_lane_u8 (const uint8_t *__src, uint8x8_t __vec, const int __lane)
17932 {
17933 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17934 }
17935
17936 __extension__ extern __inline uint16x4_t
17937 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17938 vld1_lane_u16 (const uint16_t *__src, uint16x4_t __vec, const int __lane)
17939 {
17940 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17941 }
17942
17943 __extension__ extern __inline uint32x2_t
17944 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17945 vld1_lane_u32 (const uint32_t *__src, uint32x2_t __vec, const int __lane)
17946 {
17947 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17948 }
17949
17950 __extension__ extern __inline uint64x1_t
17951 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17952 vld1_lane_u64 (const uint64_t *__src, uint64x1_t __vec, const int __lane)
17953 {
17954 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17955 }
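
/* Illustrative usage sketch (editor's addition, not part of the original
   header): vld1_lane_* loads one element into the given lane of an existing
   vector and leaves the other lanes unchanged; the lane index must be a
   constant in range.  The helper name below is hypothetical.

     #include <arm_neon.h>

     int16x4_t
     example_replace_lane2 (int16x4_t v, const int16_t *p)
     {
       return vld1_lane_s16 (p, v, 2);  // v with lane 2 set to *p
     }
*/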
17956
17957 /* vld1q_lane */
17958
17959 __extension__ extern __inline float16x8_t
17960 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17961 vld1q_lane_f16 (const float16_t *__src, float16x8_t __vec, const int __lane)
17962 {
17963 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17964 }
17965
17966 __extension__ extern __inline float32x4_t
17967 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17968 vld1q_lane_f32 (const float32_t *__src, float32x4_t __vec, const int __lane)
17969 {
17970 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17971 }
17972
17973 __extension__ extern __inline float64x2_t
17974 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17975 vld1q_lane_f64 (const float64_t *__src, float64x2_t __vec, const int __lane)
17976 {
17977 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17978 }
17979
17980 __extension__ extern __inline poly8x16_t
17981 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17982 vld1q_lane_p8 (const poly8_t *__src, poly8x16_t __vec, const int __lane)
17983 {
17984 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17985 }
17986
17987 __extension__ extern __inline poly16x8_t
17988 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17989 vld1q_lane_p16 (const poly16_t *__src, poly16x8_t __vec, const int __lane)
17990 {
17991 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17992 }
17993
17994 __extension__ extern __inline poly64x2_t
17995 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
17996 vld1q_lane_p64 (const poly64_t *__src, poly64x2_t __vec, const int __lane)
17997 {
17998 return __aarch64_vset_lane_any (*__src, __vec, __lane);
17999 }
18000
18001 __extension__ extern __inline int8x16_t
18002 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18003 vld1q_lane_s8 (const int8_t *__src, int8x16_t __vec, const int __lane)
18004 {
18005 return __aarch64_vset_lane_any (*__src, __vec, __lane);
18006 }
18007
18008 __extension__ extern __inline int16x8_t
18009 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18010 vld1q_lane_s16 (const int16_t *__src, int16x8_t __vec, const int __lane)
18011 {
18012 return __aarch64_vset_lane_any (*__src, __vec, __lane);
18013 }
18014
18015 __extension__ extern __inline int32x4_t
18016 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18017 vld1q_lane_s32 (const int32_t *__src, int32x4_t __vec, const int __lane)
18018 {
18019 return __aarch64_vset_lane_any (*__src, __vec, __lane);
18020 }
18021
18022 __extension__ extern __inline int64x2_t
18023 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18024 vld1q_lane_s64 (const int64_t *__src, int64x2_t __vec, const int __lane)
18025 {
18026 return __aarch64_vset_lane_any (*__src, __vec, __lane);
18027 }
18028
18029 __extension__ extern __inline uint8x16_t
18030 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18031 vld1q_lane_u8 (const uint8_t *__src, uint8x16_t __vec, const int __lane)
18032 {
18033 return __aarch64_vset_lane_any (*__src, __vec, __lane);
18034 }
18035
18036 __extension__ extern __inline uint16x8_t
18037 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18038 vld1q_lane_u16 (const uint16_t *__src, uint16x8_t __vec, const int __lane)
18039 {
18040 return __aarch64_vset_lane_any (*__src, __vec, __lane);
18041 }
18042
18043 __extension__ extern __inline uint32x4_t
18044 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18045 vld1q_lane_u32 (const uint32_t *__src, uint32x4_t __vec, const int __lane)
18046 {
18047 return __aarch64_vset_lane_any (*__src, __vec, __lane);
18048 }
18049
18050 __extension__ extern __inline uint64x2_t
18051 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18052 vld1q_lane_u64 (const uint64_t *__src, uint64x2_t __vec, const int __lane)
18053 {
18054 return __aarch64_vset_lane_any (*__src, __vec, __lane);
18055 }
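
/* Illustrative usage sketch (editor's addition, not part of the original
   header): the same single-lane load on a 128-bit vector, here filling
   lane 3 of a float32x4_t.  The helper name below is hypothetical.

     #include <arm_neon.h>

     float32x4_t
     example_fill_top_lane (float32x4_t v, const float32_t *p)
     {
       return vld1q_lane_f32 (p, v, 3);  // lanes 0..2 unchanged
     }
*/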
18056
18057 /* vldn */
18058
18059 __extension__ extern __inline int64x1x2_t
18060 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18061 vld2_s64 (const int64_t * __a)
18062 {
18063 int64x1x2_t ret;
18064 __builtin_aarch64_simd_oi __o;
18065 __o = __builtin_aarch64_ld2di ((const __builtin_aarch64_simd_di *) __a);
18066 ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
18067 ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
18068 return ret;
18069 }
18070
18071 __extension__ extern __inline uint64x1x2_t
18072 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18073 vld2_u64 (const uint64_t * __a)
18074 {
18075 uint64x1x2_t ret;
18076 __builtin_aarch64_simd_oi __o;
18077 __o = __builtin_aarch64_ld2di ((const __builtin_aarch64_simd_di *) __a);
18078 ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
18079 ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
18080 return ret;
18081 }
18082
18083 __extension__ extern __inline float64x1x2_t
18084 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18085 vld2_f64 (const float64_t * __a)
18086 {
18087 float64x1x2_t ret;
18088 __builtin_aarch64_simd_oi __o;
18089 __o = __builtin_aarch64_ld2df ((const __builtin_aarch64_simd_df *) __a);
18090 ret.val[0] = (float64x1_t) {__builtin_aarch64_get_dregoidf (__o, 0)};
18091 ret.val[1] = (float64x1_t) {__builtin_aarch64_get_dregoidf (__o, 1)};
18092 return ret;
18093 }
18094
18095 __extension__ extern __inline int8x8x2_t
18096 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18097 vld2_s8 (const int8_t * __a)
18098 {
18099 int8x8x2_t ret;
18100 __builtin_aarch64_simd_oi __o;
18101 __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a);
18102 ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
18103 ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
18104 return ret;
18105 }
18106
18107 __extension__ extern __inline poly8x8x2_t
18108 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18109 vld2_p8 (const poly8_t * __a)
18110 {
18111 poly8x8x2_t ret;
18112 __builtin_aarch64_simd_oi __o;
18113 __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a);
18114 ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
18115 ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
18116 return ret;
18117 }
18118
18119 __extension__ extern __inline poly64x1x2_t
18120 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18121 vld2_p64 (const poly64_t * __a)
18122 {
18123 poly64x1x2_t ret;
18124 __builtin_aarch64_simd_oi __o;
18125 __o = __builtin_aarch64_ld2di ((const __builtin_aarch64_simd_di *) __a);
18126 ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregoidi_pss (__o, 0);
18127 ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregoidi_pss (__o, 1);
18128 return ret;
18129 }
18130
18131 __extension__ extern __inline int16x4x2_t
18132 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18133 vld2_s16 (const int16_t * __a)
18134 {
18135 int16x4x2_t ret;
18136 __builtin_aarch64_simd_oi __o;
18137 __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a);
18138 ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
18139 ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
18140 return ret;
18141 }
18142
18143 __extension__ extern __inline poly16x4x2_t
18144 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18145 vld2_p16 (const poly16_t * __a)
18146 {
18147 poly16x4x2_t ret;
18148 __builtin_aarch64_simd_oi __o;
18149 __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a);
18150 ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
18151 ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
18152 return ret;
18153 }
18154
18155 __extension__ extern __inline int32x2x2_t
18156 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18157 vld2_s32 (const int32_t * __a)
18158 {
18159 int32x2x2_t ret;
18160 __builtin_aarch64_simd_oi __o;
18161 __o = __builtin_aarch64_ld2v2si ((const __builtin_aarch64_simd_si *) __a);
18162 ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0);
18163 ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1);
18164 return ret;
18165 }
18166
18167 __extension__ extern __inline uint8x8x2_t
18168 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18169 vld2_u8 (const uint8_t * __a)
18170 {
18171 uint8x8x2_t ret;
18172 __builtin_aarch64_simd_oi __o;
18173 __o = __builtin_aarch64_ld2v8qi ((const __builtin_aarch64_simd_qi *) __a);
18174 ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
18175 ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
18176 return ret;
18177 }
18178
18179 __extension__ extern __inline uint16x4x2_t
18180 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18181 vld2_u16 (const uint16_t * __a)
18182 {
18183 uint16x4x2_t ret;
18184 __builtin_aarch64_simd_oi __o;
18185 __o = __builtin_aarch64_ld2v4hi ((const __builtin_aarch64_simd_hi *) __a);
18186 ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
18187 ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
18188 return ret;
18189 }
18190
18191 __extension__ extern __inline uint32x2x2_t
18192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18193 vld2_u32 (const uint32_t * __a)
18194 {
18195 uint32x2x2_t ret;
18196 __builtin_aarch64_simd_oi __o;
18197 __o = __builtin_aarch64_ld2v2si ((const __builtin_aarch64_simd_si *) __a);
18198 ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0);
18199 ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1);
18200 return ret;
18201 }
18202
18203 __extension__ extern __inline float16x4x2_t
18204 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18205 vld2_f16 (const float16_t * __a)
18206 {
18207 float16x4x2_t ret;
18208 __builtin_aarch64_simd_oi __o;
18209 __o = __builtin_aarch64_ld2v4hf (__a);
18210 ret.val[0] = __builtin_aarch64_get_dregoiv4hf (__o, 0);
18211 ret.val[1] = __builtin_aarch64_get_dregoiv4hf (__o, 1);
18212 return ret;
18213 }
18214
18215 __extension__ extern __inline float32x2x2_t
18216 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18217 vld2_f32 (const float32_t * __a)
18218 {
18219 float32x2x2_t ret;
18220 __builtin_aarch64_simd_oi __o;
18221 __o = __builtin_aarch64_ld2v2sf ((const __builtin_aarch64_simd_sf *) __a);
18222 ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 0);
18223 ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 1);
18224 return ret;
18225 }
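
/* Illustrative usage sketch (editor's addition, not part of the original
   header): vld2_* de-interleaves pairs, e.g. left/right samples of
   interleaved stereo audio.  The helper name below is hypothetical.

     #include <arm_neon.h>

     int16x4_t
     example_left_channel (const int16_t *interleaved)
     {
       int16x4x2_t lr = vld2_s16 (interleaved);  // val[0]=L, val[1]=R
       return lr.val[0];
     }
*/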
18226
18227 __extension__ extern __inline int8x16x2_t
18228 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18229 vld2q_s8 (const int8_t * __a)
18230 {
18231 int8x16x2_t ret;
18232 __builtin_aarch64_simd_oi __o;
18233 __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a);
18234 ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
18235 ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
18236 return ret;
18237 }
18238
18239 __extension__ extern __inline poly8x16x2_t
18240 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18241 vld2q_p8 (const poly8_t * __a)
18242 {
18243 poly8x16x2_t ret;
18244 __builtin_aarch64_simd_oi __o;
18245 __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a);
18246 ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
18247 ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
18248 return ret;
18249 }
18250
18251 __extension__ extern __inline int16x8x2_t
18252 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18253 vld2q_s16 (const int16_t * __a)
18254 {
18255 int16x8x2_t ret;
18256 __builtin_aarch64_simd_oi __o;
18257 __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a);
18258 ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
18259 ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
18260 return ret;
18261 }
18262
18263 __extension__ extern __inline poly16x8x2_t
18264 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18265 vld2q_p16 (const poly16_t * __a)
18266 {
18267 poly16x8x2_t ret;
18268 __builtin_aarch64_simd_oi __o;
18269 __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a);
18270 ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
18271 ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
18272 return ret;
18273 }
18274
18275 __extension__ extern __inline poly64x2x2_t
18276 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18277 vld2q_p64 (const poly64_t * __a)
18278 {
18279 poly64x2x2_t ret;
18280 __builtin_aarch64_simd_oi __o;
18281 __o = __builtin_aarch64_ld2v2di ((const __builtin_aarch64_simd_di *) __a);
18282 ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregoiv2di_pss (__o, 0);
18283 ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregoiv2di_pss (__o, 1);
18284 return ret;
18285 }
18286
18287 __extension__ extern __inline int32x4x2_t
18288 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18289 vld2q_s32 (const int32_t * __a)
18290 {
18291 int32x4x2_t ret;
18292 __builtin_aarch64_simd_oi __o;
18293 __o = __builtin_aarch64_ld2v4si ((const __builtin_aarch64_simd_si *) __a);
18294 ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
18295 ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
18296 return ret;
18297 }
18298
18299 __extension__ extern __inline int64x2x2_t
18300 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18301 vld2q_s64 (const int64_t * __a)
18302 {
18303 int64x2x2_t ret;
18304 __builtin_aarch64_simd_oi __o;
18305 __o = __builtin_aarch64_ld2v2di ((const __builtin_aarch64_simd_di *) __a);
18306 ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0);
18307 ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1);
18308 return ret;
18309 }
18310
18311 __extension__ extern __inline uint8x16x2_t
18312 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18313 vld2q_u8 (const uint8_t * __a)
18314 {
18315 uint8x16x2_t ret;
18316 __builtin_aarch64_simd_oi __o;
18317 __o = __builtin_aarch64_ld2v16qi ((const __builtin_aarch64_simd_qi *) __a);
18318 ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
18319 ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
18320 return ret;
18321 }
18322
18323 __extension__ extern __inline uint16x8x2_t
18324 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18325 vld2q_u16 (const uint16_t * __a)
18326 {
18327 uint16x8x2_t ret;
18328 __builtin_aarch64_simd_oi __o;
18329 __o = __builtin_aarch64_ld2v8hi ((const __builtin_aarch64_simd_hi *) __a);
18330 ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
18331 ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
18332 return ret;
18333 }
18334
18335 __extension__ extern __inline uint32x4x2_t
18336 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18337 vld2q_u32 (const uint32_t * __a)
18338 {
18339 uint32x4x2_t ret;
18340 __builtin_aarch64_simd_oi __o;
18341 __o = __builtin_aarch64_ld2v4si ((const __builtin_aarch64_simd_si *) __a);
18342 ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
18343 ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
18344 return ret;
18345 }
18346
18347 __extension__ extern __inline uint64x2x2_t
18348 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18349 vld2q_u64 (const uint64_t * __a)
18350 {
18351 uint64x2x2_t ret;
18352 __builtin_aarch64_simd_oi __o;
18353 __o = __builtin_aarch64_ld2v2di ((const __builtin_aarch64_simd_di *) __a);
18354 ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0);
18355 ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1);
18356 return ret;
18357 }
18358
18359 __extension__ extern __inline float16x8x2_t
18360 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18361 vld2q_f16 (const float16_t * __a)
18362 {
18363 float16x8x2_t ret;
18364 __builtin_aarch64_simd_oi __o;
18365 __o = __builtin_aarch64_ld2v8hf (__a);
18366 ret.val[0] = __builtin_aarch64_get_qregoiv8hf (__o, 0);
18367 ret.val[1] = __builtin_aarch64_get_qregoiv8hf (__o, 1);
18368 return ret;
18369 }
18370
18371 __extension__ extern __inline float32x4x2_t
18372 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18373 vld2q_f32 (const float32_t * __a)
18374 {
18375 float32x4x2_t ret;
18376 __builtin_aarch64_simd_oi __o;
18377 __o = __builtin_aarch64_ld2v4sf ((const __builtin_aarch64_simd_sf *) __a);
18378 ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 0);
18379 ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 1);
18380 return ret;
18381 }
18382
18383 __extension__ extern __inline float64x2x2_t
18384 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18385 vld2q_f64 (const float64_t * __a)
18386 {
18387 float64x2x2_t ret;
18388 __builtin_aarch64_simd_oi __o;
18389 __o = __builtin_aarch64_ld2v2df ((const __builtin_aarch64_simd_df *) __a);
18390 ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 0);
18391 ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 1);
18392 return ret;
18393 }
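
/* Illustrative usage sketch (editor's addition, not part of the original
   header): vld2q_f32 de-interleaves (re, im) pairs of packed complex data
   into separate real and imaginary vectors.  The helper name below is
   hypothetical.

     #include <arm_neon.h>

     float32x4_t
     example_squared_magnitude (const float32_t *complex_data)
     {
       float32x4x2_t c = vld2q_f32 (complex_data);  // val[0]=re, val[1]=im
       return vmlaq_f32 (vmulq_f32 (c.val[0], c.val[0]),
                         c.val[1], c.val[1]);       // re*re + im*im
     }
*/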
18394
18395 __extension__ extern __inline int64x1x3_t
18396 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18397 vld3_s64 (const int64_t * __a)
18398 {
18399 int64x1x3_t ret;
18400 __builtin_aarch64_simd_ci __o;
18401 __o = __builtin_aarch64_ld3di ((const __builtin_aarch64_simd_di *) __a);
18402 ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
18403 ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
18404 ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
18405 return ret;
18406 }
18407
18408 __extension__ extern __inline uint64x1x3_t
18409 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18410 vld3_u64 (const uint64_t * __a)
18411 {
18412 uint64x1x3_t ret;
18413 __builtin_aarch64_simd_ci __o;
18414 __o = __builtin_aarch64_ld3di ((const __builtin_aarch64_simd_di *) __a);
18415 ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
18416 ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
18417 ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
18418 return ret;
18419 }
18420
18421 __extension__ extern __inline float64x1x3_t
18422 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18423 vld3_f64 (const float64_t * __a)
18424 {
18425 float64x1x3_t ret;
18426 __builtin_aarch64_simd_ci __o;
18427 __o = __builtin_aarch64_ld3df ((const __builtin_aarch64_simd_df *) __a);
18428 ret.val[0] = (float64x1_t) {__builtin_aarch64_get_dregcidf (__o, 0)};
18429 ret.val[1] = (float64x1_t) {__builtin_aarch64_get_dregcidf (__o, 1)};
18430 ret.val[2] = (float64x1_t) {__builtin_aarch64_get_dregcidf (__o, 2)};
18431 return ret;
18432 }
18433
18434 __extension__ extern __inline int8x8x3_t
18435 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18436 vld3_s8 (const int8_t * __a)
18437 {
18438 int8x8x3_t ret;
18439 __builtin_aarch64_simd_ci __o;
18440 __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a);
18441 ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
18442 ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
18443 ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
18444 return ret;
18445 }
18446
18447 __extension__ extern __inline poly8x8x3_t
18448 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18449 vld3_p8 (const poly8_t * __a)
18450 {
18451 poly8x8x3_t ret;
18452 __builtin_aarch64_simd_ci __o;
18453 __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a);
18454 ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
18455 ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
18456 ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
18457 return ret;
18458 }
18459
18460 __extension__ extern __inline int16x4x3_t
18461 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18462 vld3_s16 (const int16_t * __a)
18463 {
18464 int16x4x3_t ret;
18465 __builtin_aarch64_simd_ci __o;
18466 __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a);
18467 ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
18468 ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
18469 ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
18470 return ret;
18471 }
18472
18473 __extension__ extern __inline poly16x4x3_t
18474 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18475 vld3_p16 (const poly16_t * __a)
18476 {
18477 poly16x4x3_t ret;
18478 __builtin_aarch64_simd_ci __o;
18479 __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a);
18480 ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
18481 ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
18482 ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
18483 return ret;
18484 }
18485
18486 __extension__ extern __inline int32x2x3_t
18487 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18488 vld3_s32 (const int32_t * __a)
18489 {
18490 int32x2x3_t ret;
18491 __builtin_aarch64_simd_ci __o;
18492 __o = __builtin_aarch64_ld3v2si ((const __builtin_aarch64_simd_si *) __a);
18493 ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0);
18494 ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1);
18495 ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2);
18496 return ret;
18497 }
18498
18499 __extension__ extern __inline uint8x8x3_t
18500 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18501 vld3_u8 (const uint8_t * __a)
18502 {
18503 uint8x8x3_t ret;
18504 __builtin_aarch64_simd_ci __o;
18505 __o = __builtin_aarch64_ld3v8qi ((const __builtin_aarch64_simd_qi *) __a);
18506 ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
18507 ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
18508 ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
18509 return ret;
18510 }
18511
18512 __extension__ extern __inline uint16x4x3_t
18513 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18514 vld3_u16 (const uint16_t * __a)
18515 {
18516 uint16x4x3_t ret;
18517 __builtin_aarch64_simd_ci __o;
18518 __o = __builtin_aarch64_ld3v4hi ((const __builtin_aarch64_simd_hi *) __a);
18519 ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
18520 ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
18521 ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
18522 return ret;
18523 }
18524
18525 __extension__ extern __inline uint32x2x3_t
18526 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18527 vld3_u32 (const uint32_t * __a)
18528 {
18529 uint32x2x3_t ret;
18530 __builtin_aarch64_simd_ci __o;
18531 __o = __builtin_aarch64_ld3v2si ((const __builtin_aarch64_simd_si *) __a);
18532 ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0);
18533 ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1);
18534 ret.val[2] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2);
18535 return ret;
18536 }
18537
18538 __extension__ extern __inline float16x4x3_t
18539 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18540 vld3_f16 (const float16_t * __a)
18541 {
18542 float16x4x3_t ret;
18543 __builtin_aarch64_simd_ci __o;
18544 __o = __builtin_aarch64_ld3v4hf (__a);
18545 ret.val[0] = __builtin_aarch64_get_dregciv4hf (__o, 0);
18546 ret.val[1] = __builtin_aarch64_get_dregciv4hf (__o, 1);
18547 ret.val[2] = __builtin_aarch64_get_dregciv4hf (__o, 2);
18548 return ret;
18549 }
18550
18551 __extension__ extern __inline float32x2x3_t
18552 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18553 vld3_f32 (const float32_t * __a)
18554 {
18555 float32x2x3_t ret;
18556 __builtin_aarch64_simd_ci __o;
18557 __o = __builtin_aarch64_ld3v2sf ((const __builtin_aarch64_simd_sf *) __a);
18558 ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 0);
18559 ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 1);
18560 ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 2);
18561 return ret;
18562 }
18563
18564 __extension__ extern __inline poly64x1x3_t
18565 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18566 vld3_p64 (const poly64_t * __a)
18567 {
18568 poly64x1x3_t ret;
18569 __builtin_aarch64_simd_ci __o;
18570 __o = __builtin_aarch64_ld3di ((const __builtin_aarch64_simd_di *) __a);
18571 ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 0);
18572 ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 1);
18573 ret.val[2] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 2);
18574 return ret;
18575 }
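
/* Illustrative usage sketch (editor's addition, not part of the original
   header): vld3_* de-interleaves groups of three, e.g. packed x/y/z
   coordinates.  The helper name below is hypothetical.

     #include <arm_neon.h>

     float32x2_t
     example_sum_xyz (const float32_t *xyz)
     {
       float32x2x3_t v = vld3_f32 (xyz);  // val[0]=x, val[1]=y, val[2]=z
       return vadd_f32 (vadd_f32 (v.val[0], v.val[1]), v.val[2]);
     }
*/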
18576
18577 __extension__ extern __inline int8x16x3_t
18578 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18579 vld3q_s8 (const int8_t * __a)
18580 {
18581 int8x16x3_t ret;
18582 __builtin_aarch64_simd_ci __o;
18583 __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a);
18584 ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
18585 ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
18586 ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
18587 return ret;
18588 }
18589
18590 __extension__ extern __inline poly8x16x3_t
18591 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18592 vld3q_p8 (const poly8_t * __a)
18593 {
18594 poly8x16x3_t ret;
18595 __builtin_aarch64_simd_ci __o;
18596 __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a);
18597 ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
18598 ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
18599 ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
18600 return ret;
18601 }
18602
18603 __extension__ extern __inline int16x8x3_t
18604 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18605 vld3q_s16 (const int16_t * __a)
18606 {
18607 int16x8x3_t ret;
18608 __builtin_aarch64_simd_ci __o;
18609 __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a);
18610 ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
18611 ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
18612 ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
18613 return ret;
18614 }
18615
18616 __extension__ extern __inline poly16x8x3_t
18617 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18618 vld3q_p16 (const poly16_t * __a)
18619 {
18620 poly16x8x3_t ret;
18621 __builtin_aarch64_simd_ci __o;
18622 __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a);
18623 ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
18624 ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
18625 ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
18626 return ret;
18627 }
18628
18629 __extension__ extern __inline int32x4x3_t
18630 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18631 vld3q_s32 (const int32_t * __a)
18632 {
18633 int32x4x3_t ret;
18634 __builtin_aarch64_simd_ci __o;
18635 __o = __builtin_aarch64_ld3v4si ((const __builtin_aarch64_simd_si *) __a);
18636 ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0);
18637 ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1);
18638 ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2);
18639 return ret;
18640 }
18641
18642 __extension__ extern __inline int64x2x3_t
18643 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18644 vld3q_s64 (const int64_t * __a)
18645 {
18646 int64x2x3_t ret;
18647 __builtin_aarch64_simd_ci __o;
18648 __o = __builtin_aarch64_ld3v2di ((const __builtin_aarch64_simd_di *) __a);
18649 ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0);
18650 ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1);
18651 ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2);
18652 return ret;
18653 }
18654
18655 __extension__ extern __inline uint8x16x3_t
18656 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18657 vld3q_u8 (const uint8_t * __a)
18658 {
18659 uint8x16x3_t ret;
18660 __builtin_aarch64_simd_ci __o;
18661 __o = __builtin_aarch64_ld3v16qi ((const __builtin_aarch64_simd_qi *) __a);
18662 ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
18663 ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
18664 ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
18665 return ret;
18666 }
18667
18668 __extension__ extern __inline uint16x8x3_t
18669 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18670 vld3q_u16 (const uint16_t * __a)
18671 {
18672 uint16x8x3_t ret;
18673 __builtin_aarch64_simd_ci __o;
18674 __o = __builtin_aarch64_ld3v8hi ((const __builtin_aarch64_simd_hi *) __a);
18675 ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
18676 ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
18677 ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
18678 return ret;
18679 }
18680
18681 __extension__ extern __inline uint32x4x3_t
18682 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18683 vld3q_u32 (const uint32_t * __a)
18684 {
18685 uint32x4x3_t ret;
18686 __builtin_aarch64_simd_ci __o;
18687 __o = __builtin_aarch64_ld3v4si ((const __builtin_aarch64_simd_si *) __a);
18688 ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0);
18689 ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1);
18690 ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2);
18691 return ret;
18692 }
18693
18694 __extension__ extern __inline uint64x2x3_t
18695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18696 vld3q_u64 (const uint64_t * __a)
18697 {
18698 uint64x2x3_t ret;
18699 __builtin_aarch64_simd_ci __o;
18700 __o = __builtin_aarch64_ld3v2di ((const __builtin_aarch64_simd_di *) __a);
18701 ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0);
18702 ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1);
18703 ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2);
18704 return ret;
18705 }
18706
18707 __extension__ extern __inline float16x8x3_t
18708 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18709 vld3q_f16 (const float16_t * __a)
18710 {
18711 float16x8x3_t ret;
18712 __builtin_aarch64_simd_ci __o;
18713 __o = __builtin_aarch64_ld3v8hf (__a);
18714 ret.val[0] = __builtin_aarch64_get_qregciv8hf (__o, 0);
18715 ret.val[1] = __builtin_aarch64_get_qregciv8hf (__o, 1);
18716 ret.val[2] = __builtin_aarch64_get_qregciv8hf (__o, 2);
18717 return ret;
18718 }
18719
18720 __extension__ extern __inline float32x4x3_t
18721 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18722 vld3q_f32 (const float32_t * __a)
18723 {
18724 float32x4x3_t ret;
18725 __builtin_aarch64_simd_ci __o;
18726 __o = __builtin_aarch64_ld3v4sf ((const __builtin_aarch64_simd_sf *) __a);
18727 ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 0);
18728 ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 1);
18729 ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 2);
18730 return ret;
18731 }
18732
18733 __extension__ extern __inline float64x2x3_t
18734 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18735 vld3q_f64 (const float64_t * __a)
18736 {
18737 float64x2x3_t ret;
18738 __builtin_aarch64_simd_ci __o;
18739 __o = __builtin_aarch64_ld3v2df ((const __builtin_aarch64_simd_df *) __a);
18740 ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 0);
18741 ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 1);
18742 ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 2);
18743 return ret;
18744 }
18745
18746 __extension__ extern __inline poly64x2x3_t
18747 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18748 vld3q_p64 (const poly64_t * __a)
18749 {
18750 poly64x2x3_t ret;
18751 __builtin_aarch64_simd_ci __o;
18752 __o = __builtin_aarch64_ld3v2di ((const __builtin_aarch64_simd_di *) __a);
18753 ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 0);
18754 ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 1);
18755 ret.val[2] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 2);
18756 return ret;
18757 }
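
/* Illustrative usage sketch (editor's addition, not part of the original
   header): vld3q_u8 de-interleaves packed RGB bytes into separate R, G and
   B vectors of 16 pixels each.  The helper name below is hypothetical.

     #include <arm_neon.h>

     uint8x16_t
     example_red_channel (const uint8_t *rgb)
     {
       uint8x16x3_t px = vld3q_u8 (rgb);  // val[0]=R, val[1]=G, val[2]=B
       return px.val[0];
     }
*/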
18758
18759 __extension__ extern __inline int64x1x4_t
18760 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18761 vld4_s64 (const int64_t * __a)
18762 {
18763 int64x1x4_t ret;
18764 __builtin_aarch64_simd_xi __o;
18765 __o = __builtin_aarch64_ld4di ((const __builtin_aarch64_simd_di *) __a);
18766 ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 0);
18767 ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 1);
18768 ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 2);
18769 ret.val[3] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 3);
18770 return ret;
18771 }
18772
18773 __extension__ extern __inline uint64x1x4_t
18774 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18775 vld4_u64 (const uint64_t * __a)
18776 {
18777 uint64x1x4_t ret;
18778 __builtin_aarch64_simd_xi __o;
18779 __o = __builtin_aarch64_ld4di ((const __builtin_aarch64_simd_di *) __a);
18780 ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 0);
18781 ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 1);
18782 ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 2);
18783 ret.val[3] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 3);
18784 return ret;
18785 }
18786
18787 __extension__ extern __inline float64x1x4_t
18788 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18789 vld4_f64 (const float64_t * __a)
18790 {
18791 float64x1x4_t ret;
18792 __builtin_aarch64_simd_xi __o;
18793 __o = __builtin_aarch64_ld4df ((const __builtin_aarch64_simd_df *) __a);
18794 ret.val[0] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 0)};
18795 ret.val[1] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 1)};
18796 ret.val[2] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 2)};
18797 ret.val[3] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 3)};
18798 return ret;
18799 }
18800
18801 __extension__ extern __inline int8x8x4_t
18802 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18803 vld4_s8 (const int8_t * __a)
18804 {
18805 int8x8x4_t ret;
18806 __builtin_aarch64_simd_xi __o;
18807 __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a);
18808 ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
18809 ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
18810 ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
18811 ret.val[3] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
18812 return ret;
18813 }
18814
18815 __extension__ extern __inline poly8x8x4_t
18816 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18817 vld4_p8 (const poly8_t * __a)
18818 {
18819 poly8x8x4_t ret;
18820 __builtin_aarch64_simd_xi __o;
18821 __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a);
18822 ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
18823 ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
18824 ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
18825 ret.val[3] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
18826 return ret;
18827 }
18828
18829 __extension__ extern __inline int16x4x4_t
18830 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18831 vld4_s16 (const int16_t * __a)
18832 {
18833 int16x4x4_t ret;
18834 __builtin_aarch64_simd_xi __o;
18835 __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a);
18836 ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
18837 ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
18838 ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
18839 ret.val[3] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
18840 return ret;
18841 }
18842
18843 __extension__ extern __inline poly16x4x4_t
18844 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18845 vld4_p16 (const poly16_t * __a)
18846 {
18847 poly16x4x4_t ret;
18848 __builtin_aarch64_simd_xi __o;
18849 __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a);
18850 ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
18851 ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
18852 ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
18853 ret.val[3] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
18854 return ret;
18855 }
18856
18857 __extension__ extern __inline int32x2x4_t
18858 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18859 vld4_s32 (const int32_t * __a)
18860 {
18861 int32x2x4_t ret;
18862 __builtin_aarch64_simd_xi __o;
18863 __o = __builtin_aarch64_ld4v2si ((const __builtin_aarch64_simd_si *) __a);
18864 ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0);
18865 ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1);
18866 ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 2);
18867 ret.val[3] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3);
18868 return ret;
18869 }
18870
18871 __extension__ extern __inline uint8x8x4_t
18872 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18873 vld4_u8 (const uint8_t * __a)
18874 {
18875 uint8x8x4_t ret;
18876 __builtin_aarch64_simd_xi __o;
18877 __o = __builtin_aarch64_ld4v8qi ((const __builtin_aarch64_simd_qi *) __a);
18878 ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
18879 ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
18880 ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
18881 ret.val[3] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
18882 return ret;
18883 }
18884
18885 __extension__ extern __inline uint16x4x4_t
18886 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18887 vld4_u16 (const uint16_t * __a)
18888 {
18889 uint16x4x4_t ret;
18890 __builtin_aarch64_simd_xi __o;
18891 __o = __builtin_aarch64_ld4v4hi ((const __builtin_aarch64_simd_hi *) __a);
18892 ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
18893 ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
18894 ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
18895 ret.val[3] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
18896 return ret;
18897 }
18898
18899 __extension__ extern __inline uint32x2x4_t
18900 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18901 vld4_u32 (const uint32_t * __a)
18902 {
18903 uint32x2x4_t ret;
18904 __builtin_aarch64_simd_xi __o;
18905 __o = __builtin_aarch64_ld4v2si ((const __builtin_aarch64_simd_si *) __a);
18906 ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0);
18907 ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1);
18908 ret.val[2] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 2);
18909 ret.val[3] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3);
18910 return ret;
18911 }
18912
18913 __extension__ extern __inline float16x4x4_t
18914 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18915 vld4_f16 (const float16_t * __a)
18916 {
18917 float16x4x4_t ret;
18918 __builtin_aarch64_simd_xi __o;
18919 __o = __builtin_aarch64_ld4v4hf (__a);
18920 ret.val[0] = __builtin_aarch64_get_dregxiv4hf (__o, 0);
18921 ret.val[1] = __builtin_aarch64_get_dregxiv4hf (__o, 1);
18922 ret.val[2] = __builtin_aarch64_get_dregxiv4hf (__o, 2);
18923 ret.val[3] = __builtin_aarch64_get_dregxiv4hf (__o, 3);
18924 return ret;
18925 }
18926
18927 __extension__ extern __inline float32x2x4_t
18928 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18929 vld4_f32 (const float32_t * __a)
18930 {
18931 float32x2x4_t ret;
18932 __builtin_aarch64_simd_xi __o;
18933 __o = __builtin_aarch64_ld4v2sf ((const __builtin_aarch64_simd_sf *) __a);
18934 ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 0);
18935 ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 1);
18936 ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 2);
18937 ret.val[3] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 3);
18938 return ret;
18939 }
18940
18941 __extension__ extern __inline poly64x1x4_t
18942 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18943 vld4_p64 (const poly64_t * __a)
18944 {
18945 poly64x1x4_t ret;
18946 __builtin_aarch64_simd_xi __o;
18947 __o = __builtin_aarch64_ld4di ((const __builtin_aarch64_simd_di *) __a);
18948 ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 0);
18949 ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 1);
18950 ret.val[2] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 2);
18951 ret.val[3] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 3);
18952 return ret;
18953 }
18954
18955 __extension__ extern __inline int8x16x4_t
18956 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18957 vld4q_s8 (const int8_t * __a)
18958 {
18959 int8x16x4_t ret;
18960 __builtin_aarch64_simd_xi __o;
18961 __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a);
18962 ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
18963 ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
18964 ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
18965 ret.val[3] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
18966 return ret;
18967 }
18968
18969 __extension__ extern __inline poly8x16x4_t
18970 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18971 vld4q_p8 (const poly8_t * __a)
18972 {
18973 poly8x16x4_t ret;
18974 __builtin_aarch64_simd_xi __o;
18975 __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a);
18976 ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
18977 ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
18978 ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
18979 ret.val[3] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
18980 return ret;
18981 }
18982
18983 __extension__ extern __inline int16x8x4_t
18984 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18985 vld4q_s16 (const int16_t * __a)
18986 {
18987 int16x8x4_t ret;
18988 __builtin_aarch64_simd_xi __o;
18989 __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a);
18990 ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
18991 ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
18992 ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
18993 ret.val[3] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
18994 return ret;
18995 }
18996
18997 __extension__ extern __inline poly16x8x4_t
18998 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
18999 vld4q_p16 (const poly16_t * __a)
19000 {
19001 poly16x8x4_t ret;
19002 __builtin_aarch64_simd_xi __o;
19003 __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a);
19004 ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
19005 ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
19006 ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
19007 ret.val[3] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
19008 return ret;
19009 }
19010
19011 __extension__ extern __inline int32x4x4_t
19012 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19013 vld4q_s32 (const int32_t * __a)
19014 {
19015 int32x4x4_t ret;
19016 __builtin_aarch64_simd_xi __o;
19017 __o = __builtin_aarch64_ld4v4si ((const __builtin_aarch64_simd_si *) __a);
19018 ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0);
19019 ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1);
19020 ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2);
19021 ret.val[3] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3);
19022 return ret;
19023 }
19024
19025 __extension__ extern __inline int64x2x4_t
19026 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19027 vld4q_s64 (const int64_t * __a)
19028 {
19029 int64x2x4_t ret;
19030 __builtin_aarch64_simd_xi __o;
19031 __o = __builtin_aarch64_ld4v2di ((const __builtin_aarch64_simd_di *) __a);
19032 ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0);
19033 ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1);
19034 ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2);
19035 ret.val[3] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3);
19036 return ret;
19037 }
19038
19039 __extension__ extern __inline uint8x16x4_t
19040 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19041 vld4q_u8 (const uint8_t * __a)
19042 {
19043 uint8x16x4_t ret;
19044 __builtin_aarch64_simd_xi __o;
19045 __o = __builtin_aarch64_ld4v16qi ((const __builtin_aarch64_simd_qi *) __a);
19046 ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
19047 ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
19048 ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
19049 ret.val[3] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
19050 return ret;
19051 }
19052
19053 __extension__ extern __inline uint16x8x4_t
19054 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19055 vld4q_u16 (const uint16_t * __a)
19056 {
19057 uint16x8x4_t ret;
19058 __builtin_aarch64_simd_xi __o;
19059 __o = __builtin_aarch64_ld4v8hi ((const __builtin_aarch64_simd_hi *) __a);
19060 ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
19061 ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
19062 ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
19063 ret.val[3] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
19064 return ret;
19065 }
19066
19067 __extension__ extern __inline uint32x4x4_t
19068 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19069 vld4q_u32 (const uint32_t * __a)
19070 {
19071 uint32x4x4_t ret;
19072 __builtin_aarch64_simd_xi __o;
19073 __o = __builtin_aarch64_ld4v4si ((const __builtin_aarch64_simd_si *) __a);
19074 ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0);
19075 ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1);
19076 ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2);
19077 ret.val[3] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3);
19078 return ret;
19079 }
19080
19081 __extension__ extern __inline uint64x2x4_t
19082 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19083 vld4q_u64 (const uint64_t * __a)
19084 {
19085 uint64x2x4_t ret;
19086 __builtin_aarch64_simd_xi __o;
19087 __o = __builtin_aarch64_ld4v2di ((const __builtin_aarch64_simd_di *) __a);
19088 ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0);
19089 ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1);
19090 ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2);
19091 ret.val[3] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3);
19092 return ret;
19093 }
19094
19095 __extension__ extern __inline float16x8x4_t
19096 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19097 vld4q_f16 (const float16_t * __a)
19098 {
19099 float16x8x4_t ret;
19100 __builtin_aarch64_simd_xi __o;
19101 __o = __builtin_aarch64_ld4v8hf (__a);
19102 ret.val[0] = __builtin_aarch64_get_qregxiv8hf (__o, 0);
19103 ret.val[1] = __builtin_aarch64_get_qregxiv8hf (__o, 1);
19104 ret.val[2] = __builtin_aarch64_get_qregxiv8hf (__o, 2);
19105 ret.val[3] = __builtin_aarch64_get_qregxiv8hf (__o, 3);
19106 return ret;
19107 }
19108
19109 __extension__ extern __inline float32x4x4_t
19110 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19111 vld4q_f32 (const float32_t * __a)
19112 {
19113 float32x4x4_t ret;
19114 __builtin_aarch64_simd_xi __o;
19115 __o = __builtin_aarch64_ld4v4sf ((const __builtin_aarch64_simd_sf *) __a);
19116 ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 0);
19117 ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 1);
19118 ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 2);
19119 ret.val[3] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 3);
19120 return ret;
19121 }
19122
19123 __extension__ extern __inline float64x2x4_t
19124 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19125 vld4q_f64 (const float64_t * __a)
19126 {
19127 float64x2x4_t ret;
19128 __builtin_aarch64_simd_xi __o;
19129 __o = __builtin_aarch64_ld4v2df ((const __builtin_aarch64_simd_df *) __a);
19130 ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 0);
19131 ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 1);
19132 ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 2);
19133 ret.val[3] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 3);
19134 return ret;
19135 }
19136
19137 __extension__ extern __inline poly64x2x4_t
19138 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19139 vld4q_p64 (const poly64_t * __a)
19140 {
19141 poly64x2x4_t ret;
19142 __builtin_aarch64_simd_xi __o;
19143 __o = __builtin_aarch64_ld4v2di ((const __builtin_aarch64_simd_di *) __a);
19144 ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 0);
19145 ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 1);
19146 ret.val[2] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 2);
19147 ret.val[3] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 3);
19148 return ret;
19149 }
19150
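/* Editorial usage sketch, not part of the original header: the vld4 and
   vld4q intrinsics above expand to the LD4 structure load, which reads
   4 * N consecutive elements and de-interleaves them, so element j of
   val[k] comes from memory index 4 * j + k.  Assuming a hypothetical
   caller-supplied buffer `rgba` holding at least 64 interleaved bytes:

     uint8x16x4_t px = vld4q_u8 (rgba);
     uint8x16_t r = px.val[0];   // bytes 0, 4, 8, ...  (red channel)
     uint8x16_t a = px.val[3];   // bytes 3, 7, 11, ... (alpha channel)
*/
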
19151 __extension__ extern __inline poly128_t
19152 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19153 vldrq_p128 (const poly128_t * __ptr)
19154 {
19155 return *__ptr;
19156 }
19157
19158 /* vldn_dup */
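/* Editorial note, not from the original source: the vldN_dup intrinsics
   below expand to the replicate loads LD2R/LD3R/LD4R, which read N
   adjacent elements and broadcast each one across all lanes of its own
   result vector.  A minimal sketch, assuming a hypothetical two-element
   array `coef`:

     float32x2x2_t c = vld2_dup_f32 (coef);
     // c.val[0] == { coef[0], coef[0] }
     // c.val[1] == { coef[1], coef[1] }
*/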
19159
19160 __extension__ extern __inline int8x8x2_t
19161 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19162 vld2_dup_s8 (const int8_t * __a)
19163 {
19164 int8x8x2_t ret;
19165 __builtin_aarch64_simd_oi __o;
19166 __o = __builtin_aarch64_ld2rv8qi ((const __builtin_aarch64_simd_qi *) __a);
19167 ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
19168 ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
19169 return ret;
19170 }
19171
19172 __extension__ extern __inline int16x4x2_t
19173 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19174 vld2_dup_s16 (const int16_t * __a)
19175 {
19176 int16x4x2_t ret;
19177 __builtin_aarch64_simd_oi __o;
19178 __o = __builtin_aarch64_ld2rv4hi ((const __builtin_aarch64_simd_hi *) __a);
19179 ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
19180 ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
19181 return ret;
19182 }
19183
19184 __extension__ extern __inline int32x2x2_t
19185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19186 vld2_dup_s32 (const int32_t * __a)
19187 {
19188 int32x2x2_t ret;
19189 __builtin_aarch64_simd_oi __o;
19190 __o = __builtin_aarch64_ld2rv2si ((const __builtin_aarch64_simd_si *) __a);
19191 ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0);
19192 ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1);
19193 return ret;
19194 }
19195
19196 __extension__ extern __inline float16x4x2_t
19197 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19198 vld2_dup_f16 (const float16_t * __a)
19199 {
19200 float16x4x2_t ret;
19201 __builtin_aarch64_simd_oi __o;
19202 __o = __builtin_aarch64_ld2rv4hf ((const __builtin_aarch64_simd_hf *) __a);
19203 ret.val[0] = (float16x4_t) __builtin_aarch64_get_dregoiv4hf (__o, 0);
19204 ret.val[1] = (float16x4_t) __builtin_aarch64_get_dregoiv4hf (__o, 1);
19205 return ret;
19206 }
19207
19208 __extension__ extern __inline float32x2x2_t
19209 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19210 vld2_dup_f32 (const float32_t * __a)
19211 {
19212 float32x2x2_t ret;
19213 __builtin_aarch64_simd_oi __o;
19214 __o = __builtin_aarch64_ld2rv2sf ((const __builtin_aarch64_simd_sf *) __a);
19215 ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 0);
19216 ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregoiv2sf (__o, 1);
19217 return ret;
19218 }
19219
19220 __extension__ extern __inline float64x1x2_t
19221 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19222 vld2_dup_f64 (const float64_t * __a)
19223 {
19224 float64x1x2_t ret;
19225 __builtin_aarch64_simd_oi __o;
19226 __o = __builtin_aarch64_ld2rdf ((const __builtin_aarch64_simd_df *) __a);
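  /* Editorial note: the dregoidf getter yields a DFmode scalar, and
     float64x1_t is a one-element vector type, so the result is wrapped
     in a braced initializer rather than cast.  */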
19227 ret.val[0] = (float64x1_t) {__builtin_aarch64_get_dregoidf (__o, 0)};
19228 ret.val[1] = (float64x1_t) {__builtin_aarch64_get_dregoidf (__o, 1)};
19229 return ret;
19230 }
19231
19232 __extension__ extern __inline uint8x8x2_t
19233 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19234 vld2_dup_u8 (const uint8_t * __a)
19235 {
19236 uint8x8x2_t ret;
19237 __builtin_aarch64_simd_oi __o;
19238 __o = __builtin_aarch64_ld2rv8qi ((const __builtin_aarch64_simd_qi *) __a);
19239 ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
19240 ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
19241 return ret;
19242 }
19243
19244 __extension__ extern __inline uint16x4x2_t
19245 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19246 vld2_dup_u16 (const uint16_t * __a)
19247 {
19248 uint16x4x2_t ret;
19249 __builtin_aarch64_simd_oi __o;
19250 __o = __builtin_aarch64_ld2rv4hi ((const __builtin_aarch64_simd_hi *) __a);
19251 ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
19252 ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
19253 return ret;
19254 }
19255
19256 __extension__ extern __inline uint32x2x2_t
19257 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19258 vld2_dup_u32 (const uint32_t * __a)
19259 {
19260 uint32x2x2_t ret;
19261 __builtin_aarch64_simd_oi __o;
19262 __o = __builtin_aarch64_ld2rv2si ((const __builtin_aarch64_simd_si *) __a);
19263 ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 0);
19264 ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregoiv2si (__o, 1);
19265 return ret;
19266 }
19267
19268 __extension__ extern __inline poly8x8x2_t
19269 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19270 vld2_dup_p8 (const poly8_t * __a)
19271 {
19272 poly8x8x2_t ret;
19273 __builtin_aarch64_simd_oi __o;
19274 __o = __builtin_aarch64_ld2rv8qi ((const __builtin_aarch64_simd_qi *) __a);
19275 ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 0);
19276 ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregoiv8qi (__o, 1);
19277 return ret;
19278 }
19279
19280 __extension__ extern __inline poly16x4x2_t
19281 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19282 vld2_dup_p16 (const poly16_t * __a)
19283 {
19284 poly16x4x2_t ret;
19285 __builtin_aarch64_simd_oi __o;
19286 __o = __builtin_aarch64_ld2rv4hi ((const __builtin_aarch64_simd_hi *) __a);
19287 ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 0);
19288 ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregoiv4hi (__o, 1);
19289 return ret;
19290 }
19291
19292 __extension__ extern __inline poly64x1x2_t
19293 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19294 vld2_dup_p64 (const poly64_t * __a)
19295 {
19296 poly64x1x2_t ret;
19297 __builtin_aarch64_simd_oi __o;
19298 __o = __builtin_aarch64_ld2rv2di ((const __builtin_aarch64_simd_di *) __a);
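  /* Editorial note: the one-lane poly64 _dup variants reuse the 128-bit
     replicate load (ld2rv2di here) and then keep only the low 64-bit
     d-register of each tuple element via the _pss getters.  */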
19299 ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregoidi_pss (__o, 0);
19300 ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregoidi_pss (__o, 1);
19301 return ret;
19302 }
19303
19305 __extension__ extern __inline int64x1x2_t
19306 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19307 vld2_dup_s64 (const int64_t * __a)
19308 {
19309 int64x1x2_t ret;
19310 __builtin_aarch64_simd_oi __o;
19311 __o = __builtin_aarch64_ld2rdi ((const __builtin_aarch64_simd_di *) __a);
19312 ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
19313 ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
19314 return ret;
19315 }
19316
19317 __extension__ extern __inline uint64x1x2_t
19318 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19319 vld2_dup_u64 (const uint64_t * __a)
19320 {
19321 uint64x1x2_t ret;
19322 __builtin_aarch64_simd_oi __o;
19323 __o = __builtin_aarch64_ld2rdi ((const __builtin_aarch64_simd_di *) __a);
19324 ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 0);
19325 ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregoidi (__o, 1);
19326 return ret;
19327 }
19328
19329 __extension__ extern __inline int8x16x2_t
19330 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19331 vld2q_dup_s8 (const int8_t * __a)
19332 {
19333 int8x16x2_t ret;
19334 __builtin_aarch64_simd_oi __o;
19335 __o = __builtin_aarch64_ld2rv16qi ((const __builtin_aarch64_simd_qi *) __a);
19336 ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
19337 ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
19338 return ret;
19339 }
19340
19341 __extension__ extern __inline poly8x16x2_t
19342 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19343 vld2q_dup_p8 (const poly8_t * __a)
19344 {
19345 poly8x16x2_t ret;
19346 __builtin_aarch64_simd_oi __o;
19347 __o = __builtin_aarch64_ld2rv16qi ((const __builtin_aarch64_simd_qi *) __a);
19348 ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
19349 ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
19350 return ret;
19351 }
19352
19353 __extension__ extern __inline int16x8x2_t
19354 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19355 vld2q_dup_s16 (const int16_t * __a)
19356 {
19357 int16x8x2_t ret;
19358 __builtin_aarch64_simd_oi __o;
19359 __o = __builtin_aarch64_ld2rv8hi ((const __builtin_aarch64_simd_hi *) __a);
19360 ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
19361 ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
19362 return ret;
19363 }
19364
19365 __extension__ extern __inline poly16x8x2_t
19366 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19367 vld2q_dup_p16 (const poly16_t * __a)
19368 {
19369 poly16x8x2_t ret;
19370 __builtin_aarch64_simd_oi __o;
19371 __o = __builtin_aarch64_ld2rv8hi ((const __builtin_aarch64_simd_hi *) __a);
19372 ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
19373 ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
19374 return ret;
19375 }
19376
19377 __extension__ extern __inline int32x4x2_t
19378 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19379 vld2q_dup_s32 (const int32_t * __a)
19380 {
19381 int32x4x2_t ret;
19382 __builtin_aarch64_simd_oi __o;
19383 __o = __builtin_aarch64_ld2rv4si ((const __builtin_aarch64_simd_si *) __a);
19384 ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
19385 ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
19386 return ret;
19387 }
19388
19389 __extension__ extern __inline int64x2x2_t
19390 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19391 vld2q_dup_s64 (const int64_t * __a)
19392 {
19393 int64x2x2_t ret;
19394 __builtin_aarch64_simd_oi __o;
19395 __o = __builtin_aarch64_ld2rv2di ((const __builtin_aarch64_simd_di *) __a);
19396 ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0);
19397 ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1);
19398 return ret;
19399 }
19400
19401 __extension__ extern __inline uint8x16x2_t
19402 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19403 vld2q_dup_u8 (const uint8_t * __a)
19404 {
19405 uint8x16x2_t ret;
19406 __builtin_aarch64_simd_oi __o;
19407 __o = __builtin_aarch64_ld2rv16qi ((const __builtin_aarch64_simd_qi *) __a);
19408 ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 0);
19409 ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregoiv16qi (__o, 1);
19410 return ret;
19411 }
19412
19413 __extension__ extern __inline uint16x8x2_t
19414 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19415 vld2q_dup_u16 (const uint16_t * __a)
19416 {
19417 uint16x8x2_t ret;
19418 __builtin_aarch64_simd_oi __o;
19419 __o = __builtin_aarch64_ld2rv8hi ((const __builtin_aarch64_simd_hi *) __a);
19420 ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 0);
19421 ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregoiv8hi (__o, 1);
19422 return ret;
19423 }
19424
19425 __extension__ extern __inline uint32x4x2_t
19426 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19427 vld2q_dup_u32 (const uint32_t * __a)
19428 {
19429 uint32x4x2_t ret;
19430 __builtin_aarch64_simd_oi __o;
19431 __o = __builtin_aarch64_ld2rv4si ((const __builtin_aarch64_simd_si *) __a);
19432 ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 0);
19433 ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregoiv4si (__o, 1);
19434 return ret;
19435 }
19436
19437 __extension__ extern __inline uint64x2x2_t
19438 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19439 vld2q_dup_u64 (const uint64_t * __a)
19440 {
19441 uint64x2x2_t ret;
19442 __builtin_aarch64_simd_oi __o;
19443 __o = __builtin_aarch64_ld2rv2di ((const __builtin_aarch64_simd_di *) __a);
19444 ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 0);
19445 ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregoiv2di (__o, 1);
19446 return ret;
19447 }
19448
19449 __extension__ extern __inline float16x8x2_t
19450 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19451 vld2q_dup_f16 (const float16_t * __a)
19452 {
19453 float16x8x2_t ret;
19454 __builtin_aarch64_simd_oi __o;
19455 __o = __builtin_aarch64_ld2rv8hf ((const __builtin_aarch64_simd_hf *) __a);
19456 ret.val[0] = (float16x8_t) __builtin_aarch64_get_qregoiv8hf (__o, 0);
19457 ret.val[1] = (float16x8_t) __builtin_aarch64_get_qregoiv8hf (__o, 1);
19458 return ret;
19459 }
19460
19461 __extension__ extern __inline float32x4x2_t
19462 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19463 vld2q_dup_f32 (const float32_t * __a)
19464 {
19465 float32x4x2_t ret;
19466 __builtin_aarch64_simd_oi __o;
19467 __o = __builtin_aarch64_ld2rv4sf ((const __builtin_aarch64_simd_sf *) __a);
19468 ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 0);
19469 ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregoiv4sf (__o, 1);
19470 return ret;
19471 }
19472
19473 __extension__ extern __inline float64x2x2_t
19474 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19475 vld2q_dup_f64 (const float64_t * __a)
19476 {
19477 float64x2x2_t ret;
19478 __builtin_aarch64_simd_oi __o;
19479 __o = __builtin_aarch64_ld2rv2df ((const __builtin_aarch64_simd_df *) __a);
19480 ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 0);
19481 ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregoiv2df (__o, 1);
19482 return ret;
19483 }
19484
19485 __extension__ extern __inline poly64x2x2_t
19486 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19487 vld2q_dup_p64 (const poly64_t * __a)
19488 {
19489 poly64x2x2_t ret;
19490 __builtin_aarch64_simd_oi __o;
19491 __o = __builtin_aarch64_ld2rv2di ((const __builtin_aarch64_simd_di *) __a);
19492 ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregoiv2di_pss (__o, 0);
19493 ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregoiv2di_pss (__o, 1);
19494 return ret;
19495 }
19496
19497 __extension__ extern __inline int64x1x3_t
19498 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19499 vld3_dup_s64 (const int64_t * __a)
19500 {
19501 int64x1x3_t ret;
19502 __builtin_aarch64_simd_ci __o;
19503 __o = __builtin_aarch64_ld3rdi ((const __builtin_aarch64_simd_di *) __a);
19504 ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
19505 ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
19506 ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
19507 return ret;
19508 }
19509
19510 __extension__ extern __inline uint64x1x3_t
19511 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19512 vld3_dup_u64 (const uint64_t * __a)
19513 {
19514 uint64x1x3_t ret;
19515 __builtin_aarch64_simd_ci __o;
19516 __o = __builtin_aarch64_ld3rdi ((const __builtin_aarch64_simd_di *) __a);
19517 ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 0);
19518 ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 1);
19519 ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregcidi (__o, 2);
19520 return ret;
19521 }
19522
19523 __extension__ extern __inline float64x1x3_t
19524 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19525 vld3_dup_f64 (const float64_t * __a)
19526 {
19527 float64x1x3_t ret;
19528 __builtin_aarch64_simd_ci __o;
19529 __o = __builtin_aarch64_ld3rdf ((const __builtin_aarch64_simd_df *) __a);
19530 ret.val[0] = (float64x1_t) {__builtin_aarch64_get_dregcidf (__o, 0)};
19531 ret.val[1] = (float64x1_t) {__builtin_aarch64_get_dregcidf (__o, 1)};
19532 ret.val[2] = (float64x1_t) {__builtin_aarch64_get_dregcidf (__o, 2)};
19533 return ret;
19534 }
19535
19536 __extension__ extern __inline int8x8x3_t
19537 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19538 vld3_dup_s8 (const int8_t * __a)
19539 {
19540 int8x8x3_t ret;
19541 __builtin_aarch64_simd_ci __o;
19542 __o = __builtin_aarch64_ld3rv8qi ((const __builtin_aarch64_simd_qi *) __a);
19543 ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
19544 ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
19545 ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
19546 return ret;
19547 }
19548
19549 __extension__ extern __inline poly8x8x3_t
19550 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19551 vld3_dup_p8 (const poly8_t * __a)
19552 {
19553 poly8x8x3_t ret;
19554 __builtin_aarch64_simd_ci __o;
19555 __o = __builtin_aarch64_ld3rv8qi ((const __builtin_aarch64_simd_qi *) __a);
19556 ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
19557 ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
19558 ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
19559 return ret;
19560 }
19561
19562 __extension__ extern __inline int16x4x3_t
19563 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19564 vld3_dup_s16 (const int16_t * __a)
19565 {
19566 int16x4x3_t ret;
19567 __builtin_aarch64_simd_ci __o;
19568 __o = __builtin_aarch64_ld3rv4hi ((const __builtin_aarch64_simd_hi *) __a);
19569 ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
19570 ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
19571 ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
19572 return ret;
19573 }
19574
19575 __extension__ extern __inline poly16x4x3_t
19576 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19577 vld3_dup_p16 (const poly16_t * __a)
19578 {
19579 poly16x4x3_t ret;
19580 __builtin_aarch64_simd_ci __o;
19581 __o = __builtin_aarch64_ld3rv4hi ((const __builtin_aarch64_simd_hi *) __a);
19582 ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
19583 ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
19584 ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
19585 return ret;
19586 }
19587
19588 __extension__ extern __inline int32x2x3_t
19589 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19590 vld3_dup_s32 (const int32_t * __a)
19591 {
19592 int32x2x3_t ret;
19593 __builtin_aarch64_simd_ci __o;
19594 __o = __builtin_aarch64_ld3rv2si ((const __builtin_aarch64_simd_si *) __a);
19595 ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0);
19596 ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1);
19597 ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2);
19598 return ret;
19599 }
19600
19601 __extension__ extern __inline uint8x8x3_t
19602 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19603 vld3_dup_u8 (const uint8_t * __a)
19604 {
19605 uint8x8x3_t ret;
19606 __builtin_aarch64_simd_ci __o;
19607 __o = __builtin_aarch64_ld3rv8qi ((const __builtin_aarch64_simd_qi *) __a);
19608 ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 0);
19609 ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 1);
19610 ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregciv8qi (__o, 2);
19611 return ret;
19612 }
19613
19614 __extension__ extern __inline uint16x4x3_t
19615 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19616 vld3_dup_u16 (const uint16_t * __a)
19617 {
19618 uint16x4x3_t ret;
19619 __builtin_aarch64_simd_ci __o;
19620 __o = __builtin_aarch64_ld3rv4hi ((const __builtin_aarch64_simd_hi *) __a);
19621 ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 0);
19622 ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 1);
19623 ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregciv4hi (__o, 2);
19624 return ret;
19625 }
19626
19627 __extension__ extern __inline uint32x2x3_t
19628 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19629 vld3_dup_u32 (const uint32_t * __a)
19630 {
19631 uint32x2x3_t ret;
19632 __builtin_aarch64_simd_ci __o;
19633 __o = __builtin_aarch64_ld3rv2si ((const __builtin_aarch64_simd_si *) __a);
19634 ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 0);
19635 ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 1);
19636 ret.val[2] = (uint32x2_t) __builtin_aarch64_get_dregciv2si (__o, 2);
19637 return ret;
19638 }
19639
19640 __extension__ extern __inline float16x4x3_t
19641 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19642 vld3_dup_f16 (const float16_t * __a)
19643 {
19644 float16x4x3_t ret;
19645 __builtin_aarch64_simd_ci __o;
19646 __o = __builtin_aarch64_ld3rv4hf ((const __builtin_aarch64_simd_hf *) __a);
19647 ret.val[0] = (float16x4_t) __builtin_aarch64_get_dregciv4hf (__o, 0);
19648 ret.val[1] = (float16x4_t) __builtin_aarch64_get_dregciv4hf (__o, 1);
19649 ret.val[2] = (float16x4_t) __builtin_aarch64_get_dregciv4hf (__o, 2);
19650 return ret;
19651 }
19652
19653 __extension__ extern __inline float32x2x3_t
19654 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19655 vld3_dup_f32 (const float32_t * __a)
19656 {
19657 float32x2x3_t ret;
19658 __builtin_aarch64_simd_ci __o;
19659 __o = __builtin_aarch64_ld3rv2sf ((const __builtin_aarch64_simd_sf *) __a);
19660 ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 0);
19661 ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 1);
19662 ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregciv2sf (__o, 2);
19663 return ret;
19664 }
19665
19666 __extension__ extern __inline poly64x1x3_t
19667 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19668 vld3_dup_p64 (const poly64_t * __a)
19669 {
19670 poly64x1x3_t ret;
19671 __builtin_aarch64_simd_ci __o;
19672 __o = __builtin_aarch64_ld3rv2di ((const __builtin_aarch64_simd_di *) __a);
19673 ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 0);
19674 ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 1);
19675 ret.val[2] = (poly64x1_t) __builtin_aarch64_get_dregcidi_pss (__o, 2);
19676 return ret;
19677 }
19678
19679 __extension__ extern __inline int8x16x3_t
19680 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19681 vld3q_dup_s8 (const int8_t * __a)
19682 {
19683 int8x16x3_t ret;
19684 __builtin_aarch64_simd_ci __o;
19685 __o = __builtin_aarch64_ld3rv16qi ((const __builtin_aarch64_simd_qi *) __a);
19686 ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
19687 ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
19688 ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
19689 return ret;
19690 }
19691
19692 __extension__ extern __inline poly8x16x3_t
19693 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19694 vld3q_dup_p8 (const poly8_t * __a)
19695 {
19696 poly8x16x3_t ret;
19697 __builtin_aarch64_simd_ci __o;
19698 __o = __builtin_aarch64_ld3rv16qi ((const __builtin_aarch64_simd_qi *) __a);
19699 ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
19700 ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
19701 ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
19702 return ret;
19703 }
19704
19705 __extension__ extern __inline int16x8x3_t
19706 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19707 vld3q_dup_s16 (const int16_t * __a)
19708 {
19709 int16x8x3_t ret;
19710 __builtin_aarch64_simd_ci __o;
19711 __o = __builtin_aarch64_ld3rv8hi ((const __builtin_aarch64_simd_hi *) __a);
19712 ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
19713 ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
19714 ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
19715 return ret;
19716 }
19717
19718 __extension__ extern __inline poly16x8x3_t
19719 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19720 vld3q_dup_p16 (const poly16_t * __a)
19721 {
19722 poly16x8x3_t ret;
19723 __builtin_aarch64_simd_ci __o;
19724 __o = __builtin_aarch64_ld3rv8hi ((const __builtin_aarch64_simd_hi *) __a);
19725 ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
19726 ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
19727 ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
19728 return ret;
19729 }
19730
19731 __extension__ extern __inline int32x4x3_t
19732 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19733 vld3q_dup_s32 (const int32_t * __a)
19734 {
19735 int32x4x3_t ret;
19736 __builtin_aarch64_simd_ci __o;
19737 __o = __builtin_aarch64_ld3rv4si ((const __builtin_aarch64_simd_si *) __a);
19738 ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0);
19739 ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1);
19740 ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2);
19741 return ret;
19742 }
19743
19744 __extension__ extern __inline int64x2x3_t
19745 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19746 vld3q_dup_s64 (const int64_t * __a)
19747 {
19748 int64x2x3_t ret;
19749 __builtin_aarch64_simd_ci __o;
19750 __o = __builtin_aarch64_ld3rv2di ((const __builtin_aarch64_simd_di *) __a);
19751 ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0);
19752 ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1);
19753 ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2);
19754 return ret;
19755 }
19756
19757 __extension__ extern __inline uint8x16x3_t
19758 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19759 vld3q_dup_u8 (const uint8_t * __a)
19760 {
19761 uint8x16x3_t ret;
19762 __builtin_aarch64_simd_ci __o;
19763 __o = __builtin_aarch64_ld3rv16qi ((const __builtin_aarch64_simd_qi *) __a);
19764 ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 0);
19765 ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 1);
19766 ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregciv16qi (__o, 2);
19767 return ret;
19768 }
19769
19770 __extension__ extern __inline uint16x8x3_t
19771 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19772 vld3q_dup_u16 (const uint16_t * __a)
19773 {
19774 uint16x8x3_t ret;
19775 __builtin_aarch64_simd_ci __o;
19776 __o = __builtin_aarch64_ld3rv8hi ((const __builtin_aarch64_simd_hi *) __a);
19777 ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 0);
19778 ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 1);
19779 ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregciv8hi (__o, 2);
19780 return ret;
19781 }
19782
19783 __extension__ extern __inline uint32x4x3_t
19784 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19785 vld3q_dup_u32 (const uint32_t * __a)
19786 {
19787 uint32x4x3_t ret;
19788 __builtin_aarch64_simd_ci __o;
19789 __o = __builtin_aarch64_ld3rv4si ((const __builtin_aarch64_simd_si *) __a);
19790 ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 0);
19791 ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 1);
19792 ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregciv4si (__o, 2);
19793 return ret;
19794 }
19795
19796 __extension__ extern __inline uint64x2x3_t
19797 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19798 vld3q_dup_u64 (const uint64_t * __a)
19799 {
19800 uint64x2x3_t ret;
19801 __builtin_aarch64_simd_ci __o;
19802 __o = __builtin_aarch64_ld3rv2di ((const __builtin_aarch64_simd_di *) __a);
19803 ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 0);
19804 ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 1);
19805 ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregciv2di (__o, 2);
19806 return ret;
19807 }
19808
19809 __extension__ extern __inline float16x8x3_t
19810 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19811 vld3q_dup_f16 (const float16_t * __a)
19812 {
19813 float16x8x3_t ret;
19814 __builtin_aarch64_simd_ci __o;
19815 __o = __builtin_aarch64_ld3rv8hf ((const __builtin_aarch64_simd_hf *) __a);
19816 ret.val[0] = (float16x8_t) __builtin_aarch64_get_qregciv8hf (__o, 0);
19817 ret.val[1] = (float16x8_t) __builtin_aarch64_get_qregciv8hf (__o, 1);
19818 ret.val[2] = (float16x8_t) __builtin_aarch64_get_qregciv8hf (__o, 2);
19819 return ret;
19820 }
19821
19822 __extension__ extern __inline float32x4x3_t
19823 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19824 vld3q_dup_f32 (const float32_t * __a)
19825 {
19826 float32x4x3_t ret;
19827 __builtin_aarch64_simd_ci __o;
19828 __o = __builtin_aarch64_ld3rv4sf ((const __builtin_aarch64_simd_sf *) __a);
19829 ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 0);
19830 ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 1);
19831 ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregciv4sf (__o, 2);
19832 return ret;
19833 }
19834
19835 __extension__ extern __inline float64x2x3_t
19836 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19837 vld3q_dup_f64 (const float64_t * __a)
19838 {
19839 float64x2x3_t ret;
19840 __builtin_aarch64_simd_ci __o;
19841 __o = __builtin_aarch64_ld3rv2df ((const __builtin_aarch64_simd_df *) __a);
19842 ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 0);
19843 ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 1);
19844 ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregciv2df (__o, 2);
19845 return ret;
19846 }
19847
19848 __extension__ extern __inline poly64x2x3_t
19849 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19850 vld3q_dup_p64 (const poly64_t * __a)
19851 {
19852 poly64x2x3_t ret;
19853 __builtin_aarch64_simd_ci __o;
19854 __o = __builtin_aarch64_ld3rv2di ((const __builtin_aarch64_simd_di *) __a);
19855 ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 0);
19856 ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 1);
19857 ret.val[2] = (poly64x2_t) __builtin_aarch64_get_qregciv2di_pss (__o, 2);
19858 return ret;
19859 }
19860
19861 __extension__ extern __inline int64x1x4_t
19862 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19863 vld4_dup_s64 (const int64_t * __a)
19864 {
19865 int64x1x4_t ret;
19866 __builtin_aarch64_simd_xi __o;
19867 __o = __builtin_aarch64_ld4rdi ((const __builtin_aarch64_simd_di *) __a);
19868 ret.val[0] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 0);
19869 ret.val[1] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 1);
19870 ret.val[2] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 2);
19871 ret.val[3] = (int64x1_t) __builtin_aarch64_get_dregxidi (__o, 3);
19872 return ret;
19873 }
19874
19875 __extension__ extern __inline uint64x1x4_t
19876 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19877 vld4_dup_u64 (const uint64_t * __a)
19878 {
19879 uint64x1x4_t ret;
19880 __builtin_aarch64_simd_xi __o;
19881 __o = __builtin_aarch64_ld4rdi ((const __builtin_aarch64_simd_di *) __a);
19882 ret.val[0] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 0);
19883 ret.val[1] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 1);
19884 ret.val[2] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 2);
19885 ret.val[3] = (uint64x1_t) __builtin_aarch64_get_dregxidi (__o, 3);
19886 return ret;
19887 }
19888
19889 __extension__ extern __inline float64x1x4_t
19890 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19891 vld4_dup_f64 (const float64_t * __a)
19892 {
19893 float64x1x4_t ret;
19894 __builtin_aarch64_simd_xi __o;
19895 __o = __builtin_aarch64_ld4rdf ((const __builtin_aarch64_simd_df *) __a);
19896 ret.val[0] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 0)};
19897 ret.val[1] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 1)};
19898 ret.val[2] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 2)};
19899 ret.val[3] = (float64x1_t) {__builtin_aarch64_get_dregxidf (__o, 3)};
19900 return ret;
19901 }
19902
19903 __extension__ extern __inline int8x8x4_t
19904 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19905 vld4_dup_s8 (const int8_t * __a)
19906 {
19907 int8x8x4_t ret;
19908 __builtin_aarch64_simd_xi __o;
19909 __o = __builtin_aarch64_ld4rv8qi ((const __builtin_aarch64_simd_qi *) __a);
19910 ret.val[0] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
19911 ret.val[1] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
19912 ret.val[2] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
19913 ret.val[3] = (int8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
19914 return ret;
19915 }
19916
19917 __extension__ extern __inline poly8x8x4_t
19918 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19919 vld4_dup_p8 (const poly8_t * __a)
19920 {
19921 poly8x8x4_t ret;
19922 __builtin_aarch64_simd_xi __o;
19923 __o = __builtin_aarch64_ld4rv8qi ((const __builtin_aarch64_simd_qi *) __a);
19924 ret.val[0] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
19925 ret.val[1] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
19926 ret.val[2] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
19927 ret.val[3] = (poly8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
19928 return ret;
19929 }
19930
19931 __extension__ extern __inline int16x4x4_t
19932 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19933 vld4_dup_s16 (const int16_t * __a)
19934 {
19935 int16x4x4_t ret;
19936 __builtin_aarch64_simd_xi __o;
19937 __o = __builtin_aarch64_ld4rv4hi ((const __builtin_aarch64_simd_hi *) __a);
19938 ret.val[0] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
19939 ret.val[1] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
19940 ret.val[2] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
19941 ret.val[3] = (int16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
19942 return ret;
19943 }
19944
19945 __extension__ extern __inline poly16x4x4_t
19946 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19947 vld4_dup_p16 (const poly16_t * __a)
19948 {
19949 poly16x4x4_t ret;
19950 __builtin_aarch64_simd_xi __o;
19951 __o = __builtin_aarch64_ld4rv4hi ((const __builtin_aarch64_simd_hi *) __a);
19952 ret.val[0] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
19953 ret.val[1] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
19954 ret.val[2] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
19955 ret.val[3] = (poly16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
19956 return ret;
19957 }
19958
19959 __extension__ extern __inline int32x2x4_t
19960 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19961 vld4_dup_s32 (const int32_t * __a)
19962 {
19963 int32x2x4_t ret;
19964 __builtin_aarch64_simd_xi __o;
19965 __o = __builtin_aarch64_ld4rv2si ((const __builtin_aarch64_simd_si *) __a);
19966 ret.val[0] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0);
19967 ret.val[1] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1);
19968 ret.val[2] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 2);
19969 ret.val[3] = (int32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3);
19970 return ret;
19971 }
19972
19973 __extension__ extern __inline uint8x8x4_t
19974 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19975 vld4_dup_u8 (const uint8_t * __a)
19976 {
19977 uint8x8x4_t ret;
19978 __builtin_aarch64_simd_xi __o;
19979 __o = __builtin_aarch64_ld4rv8qi ((const __builtin_aarch64_simd_qi *) __a);
19980 ret.val[0] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 0);
19981 ret.val[1] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 1);
19982 ret.val[2] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 2);
19983 ret.val[3] = (uint8x8_t) __builtin_aarch64_get_dregxiv8qi (__o, 3);
19984 return ret;
19985 }
19986
19987 __extension__ extern __inline uint16x4x4_t
19988 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
19989 vld4_dup_u16 (const uint16_t * __a)
19990 {
19991 uint16x4x4_t ret;
19992 __builtin_aarch64_simd_xi __o;
19993 __o = __builtin_aarch64_ld4rv4hi ((const __builtin_aarch64_simd_hi *) __a);
19994 ret.val[0] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 0);
19995 ret.val[1] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 1);
19996 ret.val[2] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 2);
19997 ret.val[3] = (uint16x4_t) __builtin_aarch64_get_dregxiv4hi (__o, 3);
19998 return ret;
19999 }
20000
20001 __extension__ extern __inline uint32x2x4_t
20002 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20003 vld4_dup_u32 (const uint32_t * __a)
20004 {
20005 uint32x2x4_t ret;
20006 __builtin_aarch64_simd_xi __o;
20007 __o = __builtin_aarch64_ld4rv2si ((const __builtin_aarch64_simd_si *) __a);
20008 ret.val[0] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 0);
20009 ret.val[1] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 1);
20010 ret.val[2] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 2);
20011 ret.val[3] = (uint32x2_t) __builtin_aarch64_get_dregxiv2si (__o, 3);
20012 return ret;
20013 }
20014
20015 __extension__ extern __inline float16x4x4_t
20016 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20017 vld4_dup_f16 (const float16_t * __a)
20018 {
20019 float16x4x4_t ret;
20020 __builtin_aarch64_simd_xi __o;
20021 __o = __builtin_aarch64_ld4rv4hf ((const __builtin_aarch64_simd_hf *) __a);
20022 ret.val[0] = (float16x4_t) __builtin_aarch64_get_dregxiv4hf (__o, 0);
20023 ret.val[1] = (float16x4_t) __builtin_aarch64_get_dregxiv4hf (__o, 1);
20024 ret.val[2] = (float16x4_t) __builtin_aarch64_get_dregxiv4hf (__o, 2);
20025 ret.val[3] = (float16x4_t) __builtin_aarch64_get_dregxiv4hf (__o, 3);
20026 return ret;
20027 }
20028
20029 __extension__ extern __inline float32x2x4_t
20030 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20031 vld4_dup_f32 (const float32_t * __a)
20032 {
20033 float32x2x4_t ret;
20034 __builtin_aarch64_simd_xi __o;
20035 __o = __builtin_aarch64_ld4rv2sf ((const __builtin_aarch64_simd_sf *) __a);
20036 ret.val[0] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 0);
20037 ret.val[1] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 1);
20038 ret.val[2] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 2);
20039 ret.val[3] = (float32x2_t) __builtin_aarch64_get_dregxiv2sf (__o, 3);
20040 return ret;
20041 }
20042
20043 __extension__ extern __inline poly64x1x4_t
20044 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20045 vld4_dup_p64 (const poly64_t * __a)
20046 {
20047 poly64x1x4_t ret;
20048 __builtin_aarch64_simd_xi __o;
20049 __o = __builtin_aarch64_ld4rv2di ((const __builtin_aarch64_simd_di *) __a);
20050 ret.val[0] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 0);
20051 ret.val[1] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 1);
20052 ret.val[2] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 2);
20053 ret.val[3] = (poly64x1_t) __builtin_aarch64_get_dregxidi_pss (__o, 3);
20054 return ret;
20055 }
20056
20057 __extension__ extern __inline int8x16x4_t
20058 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20059 vld4q_dup_s8 (const int8_t * __a)
20060 {
20061 int8x16x4_t ret;
20062 __builtin_aarch64_simd_xi __o;
20063 __o = __builtin_aarch64_ld4rv16qi ((const __builtin_aarch64_simd_qi *) __a);
20064 ret.val[0] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
20065 ret.val[1] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
20066 ret.val[2] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
20067 ret.val[3] = (int8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
20068 return ret;
20069 }
20070
20071 __extension__ extern __inline poly8x16x4_t
20072 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20073 vld4q_dup_p8 (const poly8_t * __a)
20074 {
20075 poly8x16x4_t ret;
20076 __builtin_aarch64_simd_xi __o;
20077 __o = __builtin_aarch64_ld4rv16qi ((const __builtin_aarch64_simd_qi *) __a);
20078 ret.val[0] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
20079 ret.val[1] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
20080 ret.val[2] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
20081 ret.val[3] = (poly8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
20082 return ret;
20083 }
20084
20085 __extension__ extern __inline int16x8x4_t
20086 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20087 vld4q_dup_s16 (const int16_t * __a)
20088 {
20089 int16x8x4_t ret;
20090 __builtin_aarch64_simd_xi __o;
20091 __o = __builtin_aarch64_ld4rv8hi ((const __builtin_aarch64_simd_hi *) __a);
20092 ret.val[0] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
20093 ret.val[1] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
20094 ret.val[2] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
20095 ret.val[3] = (int16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
20096 return ret;
20097 }
20098
20099 __extension__ extern __inline poly16x8x4_t
20100 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20101 vld4q_dup_p16 (const poly16_t * __a)
20102 {
20103 poly16x8x4_t ret;
20104 __builtin_aarch64_simd_xi __o;
20105 __o = __builtin_aarch64_ld4rv8hi ((const __builtin_aarch64_simd_hi *) __a);
20106 ret.val[0] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
20107 ret.val[1] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
20108 ret.val[2] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
20109 ret.val[3] = (poly16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
20110 return ret;
20111 }
20112
20113 __extension__ extern __inline int32x4x4_t
20114 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20115 vld4q_dup_s32 (const int32_t * __a)
20116 {
20117 int32x4x4_t ret;
20118 __builtin_aarch64_simd_xi __o;
20119 __o = __builtin_aarch64_ld4rv4si ((const __builtin_aarch64_simd_si *) __a);
20120 ret.val[0] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0);
20121 ret.val[1] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1);
20122 ret.val[2] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2);
20123 ret.val[3] = (int32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3);
20124 return ret;
20125 }
20126
20127 __extension__ extern __inline int64x2x4_t
20128 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20129 vld4q_dup_s64 (const int64_t * __a)
20130 {
20131 int64x2x4_t ret;
20132 __builtin_aarch64_simd_xi __o;
20133 __o = __builtin_aarch64_ld4rv2di ((const __builtin_aarch64_simd_di *) __a);
20134 ret.val[0] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0);
20135 ret.val[1] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1);
20136 ret.val[2] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2);
20137 ret.val[3] = (int64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3);
20138 return ret;
20139 }
20140
20141 __extension__ extern __inline uint8x16x4_t
20142 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20143 vld4q_dup_u8 (const uint8_t * __a)
20144 {
20145 uint8x16x4_t ret;
20146 __builtin_aarch64_simd_xi __o;
20147 __o = __builtin_aarch64_ld4rv16qi ((const __builtin_aarch64_simd_qi *) __a);
20148 ret.val[0] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 0);
20149 ret.val[1] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 1);
20150 ret.val[2] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 2);
20151 ret.val[3] = (uint8x16_t) __builtin_aarch64_get_qregxiv16qi (__o, 3);
20152 return ret;
20153 }
20154
20155 __extension__ extern __inline uint16x8x4_t
20156 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20157 vld4q_dup_u16 (const uint16_t * __a)
20158 {
20159 uint16x8x4_t ret;
20160 __builtin_aarch64_simd_xi __o;
20161 __o = __builtin_aarch64_ld4rv8hi ((const __builtin_aarch64_simd_hi *) __a);
20162 ret.val[0] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 0);
20163 ret.val[1] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 1);
20164 ret.val[2] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 2);
20165 ret.val[3] = (uint16x8_t) __builtin_aarch64_get_qregxiv8hi (__o, 3);
20166 return ret;
20167 }
20168
20169 __extension__ extern __inline uint32x4x4_t
20170 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20171 vld4q_dup_u32 (const uint32_t * __a)
20172 {
20173 uint32x4x4_t ret;
20174 __builtin_aarch64_simd_xi __o;
20175 __o = __builtin_aarch64_ld4rv4si ((const __builtin_aarch64_simd_si *) __a);
20176 ret.val[0] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 0);
20177 ret.val[1] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 1);
20178 ret.val[2] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 2);
20179 ret.val[3] = (uint32x4_t) __builtin_aarch64_get_qregxiv4si (__o, 3);
20180 return ret;
20181 }
20182
20183 __extension__ extern __inline uint64x2x4_t
20184 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20185 vld4q_dup_u64 (const uint64_t * __a)
20186 {
20187 uint64x2x4_t ret;
20188 __builtin_aarch64_simd_xi __o;
20189 __o = __builtin_aarch64_ld4rv2di ((const __builtin_aarch64_simd_di *) __a);
20190 ret.val[0] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 0);
20191 ret.val[1] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 1);
20192 ret.val[2] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 2);
20193 ret.val[3] = (uint64x2_t) __builtin_aarch64_get_qregxiv2di (__o, 3);
20194 return ret;
20195 }
20196
20197 __extension__ extern __inline float16x8x4_t
20198 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20199 vld4q_dup_f16 (const float16_t * __a)
20200 {
20201 float16x8x4_t ret;
20202 __builtin_aarch64_simd_xi __o;
20203 __o = __builtin_aarch64_ld4rv8hf ((const __builtin_aarch64_simd_hf *) __a);
20204 ret.val[0] = (float16x8_t) __builtin_aarch64_get_qregxiv8hf (__o, 0);
20205 ret.val[1] = (float16x8_t) __builtin_aarch64_get_qregxiv8hf (__o, 1);
20206 ret.val[2] = (float16x8_t) __builtin_aarch64_get_qregxiv8hf (__o, 2);
20207 ret.val[3] = (float16x8_t) __builtin_aarch64_get_qregxiv8hf (__o, 3);
20208 return ret;
20209 }
20210
20211 __extension__ extern __inline float32x4x4_t
20212 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20213 vld4q_dup_f32 (const float32_t * __a)
20214 {
20215 float32x4x4_t ret;
20216 __builtin_aarch64_simd_xi __o;
20217 __o = __builtin_aarch64_ld4rv4sf ((const __builtin_aarch64_simd_sf *) __a);
20218 ret.val[0] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 0);
20219 ret.val[1] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 1);
20220 ret.val[2] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 2);
20221 ret.val[3] = (float32x4_t) __builtin_aarch64_get_qregxiv4sf (__o, 3);
20222 return ret;
20223 }
20224
20225 __extension__ extern __inline float64x2x4_t
20226 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20227 vld4q_dup_f64 (const float64_t * __a)
20228 {
20229 float64x2x4_t ret;
20230 __builtin_aarch64_simd_xi __o;
20231 __o = __builtin_aarch64_ld4rv2df ((const __builtin_aarch64_simd_df *) __a);
20232 ret.val[0] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 0);
20233 ret.val[1] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 1);
20234 ret.val[2] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 2);
20235 ret.val[3] = (float64x2_t) __builtin_aarch64_get_qregxiv2df (__o, 3);
20236 return ret;
20237 }
20238
20239 __extension__ extern __inline poly64x2x4_t
20240 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20241 vld4q_dup_p64 (const poly64_t * __a)
20242 {
20243 poly64x2x4_t ret;
20244 __builtin_aarch64_simd_xi __o;
20245 __o = __builtin_aarch64_ld4rv2di ((const __builtin_aarch64_simd_di *) __a);
20246 ret.val[0] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 0);
20247 ret.val[1] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 1);
20248 ret.val[2] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 2);
20249 ret.val[3] = (poly64x2_t) __builtin_aarch64_get_qregxiv2di_pss (__o, 3);
20250 return ret;
20251 }
20252
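As a usage sketch (the wrapper and variable names below are illustrative, not part of this header), vld4q_dup_s16 reads four consecutive int16_t values and returns four Q-register vectors, each one broadcasting a different element across all eight lanes:

#include <arm_neon.h>

/* Illustrative only: broadcast-load the four coefficients at coeffs.
   ret.val[i] holds coeffs[i] replicated into every lane.  */
static int16x8x4_t
example_load_coeffs (const int16_t *coeffs)
{
  return vld4q_dup_s16 (coeffs);
}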
20253 /* vld2_lane */
20254
20255 #define __LD2_LANE_FUNC(intype, vectype, largetype, ptrtype, mode, \
20256 qmode, ptrmode, funcsuffix, signedtype) \
20257 __extension__ extern __inline intype \
20258 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
20259 vld2_lane_##funcsuffix (const ptrtype * __ptr, intype __b, const int __c) \
20260 { \
20261 __builtin_aarch64_simd_oi __o; \
20262 largetype __temp; \
20263 __temp.val[0] = \
20264 vcombine_##funcsuffix (__b.val[0], vcreate_##funcsuffix (0)); \
20265 __temp.val[1] = \
20266 vcombine_##funcsuffix (__b.val[1], vcreate_##funcsuffix (0)); \
20267 __o = __builtin_aarch64_set_qregoi##qmode (__o, \
20268 (signedtype) __temp.val[0], \
20269 0); \
20270 __o = __builtin_aarch64_set_qregoi##qmode (__o, \
20271 (signedtype) __temp.val[1], \
20272 1); \
20273 __o = __builtin_aarch64_ld2_lane##mode ( \
20274 (__builtin_aarch64_simd_##ptrmode *) __ptr, __o, __c); \
20275 __b.val[0] = (vectype) __builtin_aarch64_get_dregoidi (__o, 0); \
20276 __b.val[1] = (vectype) __builtin_aarch64_get_dregoidi (__o, 1); \
20277 return __b; \
20278 }
20279
20280 __LD2_LANE_FUNC (float16x4x2_t, float16x4_t, float16x8x2_t, float16_t, v4hf,
20281 v8hf, hf, f16, float16x8_t)
20282 __LD2_LANE_FUNC (float32x2x2_t, float32x2_t, float32x4x2_t, float32_t, v2sf, v4sf,
20283 sf, f32, float32x4_t)
20284 __LD2_LANE_FUNC (float64x1x2_t, float64x1_t, float64x2x2_t, float64_t, df, v2df,
20285 df, f64, float64x2_t)
20286 __LD2_LANE_FUNC (poly8x8x2_t, poly8x8_t, poly8x16x2_t, poly8_t, v8qi, v16qi, qi, p8,
20287 int8x16_t)
20288 __LD2_LANE_FUNC (poly16x4x2_t, poly16x4_t, poly16x8x2_t, poly16_t, v4hi, v8hi, hi,
20289 p16, int16x8_t)
20290 __LD2_LANE_FUNC (poly64x1x2_t, poly64x1_t, poly64x2x2_t, poly64_t, di,
20291 v2di_ssps, di, p64, poly64x2_t)
20292 __LD2_LANE_FUNC (int8x8x2_t, int8x8_t, int8x16x2_t, int8_t, v8qi, v16qi, qi, s8,
20293 int8x16_t)
20294 __LD2_LANE_FUNC (int16x4x2_t, int16x4_t, int16x8x2_t, int16_t, v4hi, v8hi, hi, s16,
20295 int16x8_t)
20296 __LD2_LANE_FUNC (int32x2x2_t, int32x2_t, int32x4x2_t, int32_t, v2si, v4si, si, s32,
20297 int32x4_t)
20298 __LD2_LANE_FUNC (int64x1x2_t, int64x1_t, int64x2x2_t, int64_t, di, v2di, di, s64,
20299 int64x2_t)
20300 __LD2_LANE_FUNC (uint8x8x2_t, uint8x8_t, uint8x16x2_t, uint8_t, v8qi, v16qi, qi, u8,
20301 int8x16_t)
20302 __LD2_LANE_FUNC (uint16x4x2_t, uint16x4_t, uint16x8x2_t, uint16_t, v4hi, v8hi, hi,
20303 u16, int16x8_t)
20304 __LD2_LANE_FUNC (uint32x2x2_t, uint32x2_t, uint32x4x2_t, uint32_t, v2si, v4si, si,
20305 u32, int32x4_t)
20306 __LD2_LANE_FUNC (uint64x1x2_t, uint64x1_t, uint64x2x2_t, uint64_t, di, v2di, di,
20307 u64, int64x2_t)
20308
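For example, in a hypothetical caller, vld2_lane_s16 loads two adjacent int16_t values and inserts them into the same constant lane of the two input vectors, leaving every other lane untouched:

#include <arm_neon.h>

/* Illustrative only: insert the interleaved pair at p into lane 2 of
   acc.val[0] and acc.val[1]; all other lanes are preserved.  */
static int16x4x2_t
example_vld2_lane (const int16_t *p, int16x4x2_t acc)
{
  return vld2_lane_s16 (p, acc, 2);
}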
20309 /* vld2q_lane */
20310
20311 #define __LD2Q_LANE_FUNC(intype, vtype, ptrtype, mode, ptrmode, funcsuffix) \
20312 __extension__ extern __inline intype \
20313 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
20314 vld2q_lane_##funcsuffix (const ptrtype * __ptr, intype __b, const int __c) \
20315 { \
20316 __builtin_aarch64_simd_oi __o; \
20317 intype ret; \
20318 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[0], 0); \
20319 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __b.val[1], 1); \
20320 __o = __builtin_aarch64_ld2_lane##mode ( \
20321 (__builtin_aarch64_simd_##ptrmode *) __ptr, __o, __c); \
20322 ret.val[0] = (vtype) __builtin_aarch64_get_qregoiv4si (__o, 0); \
20323 ret.val[1] = (vtype) __builtin_aarch64_get_qregoiv4si (__o, 1); \
20324 return ret; \
20325 }
20326
20327 __LD2Q_LANE_FUNC (float16x8x2_t, float16x8_t, float16_t, v8hf, hf, f16)
20328 __LD2Q_LANE_FUNC (float32x4x2_t, float32x4_t, float32_t, v4sf, sf, f32)
20329 __LD2Q_LANE_FUNC (float64x2x2_t, float64x2_t, float64_t, v2df, df, f64)
20330 __LD2Q_LANE_FUNC (poly8x16x2_t, poly8x16_t, poly8_t, v16qi, qi, p8)
20331 __LD2Q_LANE_FUNC (poly16x8x2_t, poly16x8_t, poly16_t, v8hi, hi, p16)
20332 __LD2Q_LANE_FUNC (poly64x2x2_t, poly64x2_t, poly64_t, v2di, di, p64)
20333 __LD2Q_LANE_FUNC (int8x16x2_t, int8x16_t, int8_t, v16qi, qi, s8)
20334 __LD2Q_LANE_FUNC (int16x8x2_t, int16x8_t, int16_t, v8hi, hi, s16)
20335 __LD2Q_LANE_FUNC (int32x4x2_t, int32x4_t, int32_t, v4si, si, s32)
20336 __LD2Q_LANE_FUNC (int64x2x2_t, int64x2_t, int64_t, v2di, di, s64)
20337 __LD2Q_LANE_FUNC (uint8x16x2_t, uint8x16_t, uint8_t, v16qi, qi, u8)
20338 __LD2Q_LANE_FUNC (uint16x8x2_t, uint16x8_t, uint16_t, v8hi, hi, u16)
20339 __LD2Q_LANE_FUNC (uint32x4x2_t, uint32x4_t, uint32_t, v4si, si, u32)
20340 __LD2Q_LANE_FUNC (uint64x2x2_t, uint64x2_t, uint64_t, v2di, di, u64)
20341
20342 /* vld3_lane */
20343
20344 #define __LD3_LANE_FUNC(intype, vectype, largetype, ptrtype, mode, \
20345 qmode, ptrmode, funcsuffix, signedtype) \
20346 __extension__ extern __inline intype \
20347 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
20348 vld3_lane_##funcsuffix (const ptrtype * __ptr, intype __b, const int __c) \
20349 { \
20350 __builtin_aarch64_simd_ci __o; \
20351 largetype __temp; \
20352 __temp.val[0] = \
20353 vcombine_##funcsuffix (__b.val[0], vcreate_##funcsuffix (0)); \
20354 __temp.val[1] = \
20355 vcombine_##funcsuffix (__b.val[1], vcreate_##funcsuffix (0)); \
20356 __temp.val[2] = \
20357 vcombine_##funcsuffix (__b.val[2], vcreate_##funcsuffix (0)); \
20358 __o = __builtin_aarch64_set_qregci##qmode (__o, \
20359 (signedtype) __temp.val[0], \
20360 0); \
20361 __o = __builtin_aarch64_set_qregci##qmode (__o, \
20362 (signedtype) __temp.val[1], \
20363 1); \
20364 __o = __builtin_aarch64_set_qregci##qmode (__o, \
20365 (signedtype) __temp.val[2], \
20366 2); \
20367 __o = __builtin_aarch64_ld3_lane##mode ( \
20368 (__builtin_aarch64_simd_##ptrmode *) __ptr, __o, __c); \
20369 __b.val[0] = (vectype) __builtin_aarch64_get_dregcidi (__o, 0); \
20370 __b.val[1] = (vectype) __builtin_aarch64_get_dregcidi (__o, 1); \
20371 __b.val[2] = (vectype) __builtin_aarch64_get_dregcidi (__o, 2); \
20372 return __b; \
20373 }
20374
20375 __LD3_LANE_FUNC (float16x4x3_t, float16x4_t, float16x8x3_t, float16_t, v4hf,
20376 v8hf, hf, f16, float16x8_t)
20377 __LD3_LANE_FUNC (float32x2x3_t, float32x2_t, float32x4x3_t, float32_t, v2sf, v4sf,
20378 sf, f32, float32x4_t)
20379 __LD3_LANE_FUNC (float64x1x3_t, float64x1_t, float64x2x3_t, float64_t, df, v2df,
20380 df, f64, float64x2_t)
20381 __LD3_LANE_FUNC (poly8x8x3_t, poly8x8_t, poly8x16x3_t, poly8_t, v8qi, v16qi, qi, p8,
20382 int8x16_t)
20383 __LD3_LANE_FUNC (poly16x4x3_t, poly16x4_t, poly16x8x3_t, poly16_t, v4hi, v8hi, hi,
20384 p16, int16x8_t)
20385 __LD3_LANE_FUNC (poly64x1x3_t, poly64x1_t, poly64x2x3_t, poly64_t, di,
20386 v2di_ssps, di, p64, poly64x2_t)
20387 __LD3_LANE_FUNC (int8x8x3_t, int8x8_t, int8x16x3_t, int8_t, v8qi, v16qi, qi, s8,
20388 int8x16_t)
20389 __LD3_LANE_FUNC (int16x4x3_t, int16x4_t, int16x8x3_t, int16_t, v4hi, v8hi, hi, s16,
20390 int16x8_t)
20391 __LD3_LANE_FUNC (int32x2x3_t, int32x2_t, int32x4x3_t, int32_t, v2si, v4si, si, s32,
20392 int32x4_t)
20393 __LD3_LANE_FUNC (int64x1x3_t, int64x1_t, int64x2x3_t, int64_t, di, v2di, di, s64,
20394 int64x2_t)
20395 __LD3_LANE_FUNC (uint8x8x3_t, uint8x8_t, uint8x16x3_t, uint8_t, v8qi, v16qi, qi, u8,
20396 int8x16_t)
20397 __LD3_LANE_FUNC (uint16x4x3_t, uint16x4_t, uint16x8x3_t, uint16_t, v4hi, v8hi, hi,
20398 u16, int16x8_t)
20399 __LD3_LANE_FUNC (uint32x2x3_t, uint32x2_t, uint32x4x3_t, uint32_t, v2si, v4si, si,
20400 u32, int32x4_t)
20401 __LD3_LANE_FUNC (uint64x1x3_t, uint64x1_t, uint64x2x3_t, uint64_t, di, v2di, di,
20402 u64, int64x2_t)
20403
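The three-vector form behaves the same way; a hypothetical caller passes a constant lane index in the range 0..7 for the 8-byte element type:

#include <arm_neon.h>

/* Illustrative only: load the three bytes at pixel into lane 5 of the
   three colour-plane vectors.  */
static uint8x8x3_t
example_vld3_lane (const uint8_t *pixel, uint8x8x3_t planes)
{
  return vld3_lane_u8 (pixel, planes, 5);
}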
20404 /* vld3q_lane */
20405
20406 #define __LD3Q_LANE_FUNC(intype, vtype, ptrtype, mode, ptrmode, funcsuffix) \
20407 __extension__ extern __inline intype \
20408 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
20409 vld3q_lane_##funcsuffix (const ptrtype * __ptr, intype __b, const int __c) \
20410 { \
20411 __builtin_aarch64_simd_ci __o; \
20412 intype ret; \
20413 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[0], 0); \
20414 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[1], 1); \
20415 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __b.val[2], 2); \
20416 __o = __builtin_aarch64_ld3_lane##mode ( \
20417 (__builtin_aarch64_simd_##ptrmode *) __ptr, __o, __c); \
20418 ret.val[0] = (vtype) __builtin_aarch64_get_qregciv4si (__o, 0); \
20419 ret.val[1] = (vtype) __builtin_aarch64_get_qregciv4si (__o, 1); \
20420 ret.val[2] = (vtype) __builtin_aarch64_get_qregciv4si (__o, 2); \
20421 return ret; \
20422 }
20423
20424 __LD3Q_LANE_FUNC (float16x8x3_t, float16x8_t, float16_t, v8hf, hf, f16)
20425 __LD3Q_LANE_FUNC (float32x4x3_t, float32x4_t, float32_t, v4sf, sf, f32)
20426 __LD3Q_LANE_FUNC (float64x2x3_t, float64x2_t, float64_t, v2df, df, f64)
20427 __LD3Q_LANE_FUNC (poly8x16x3_t, poly8x16_t, poly8_t, v16qi, qi, p8)
20428 __LD3Q_LANE_FUNC (poly16x8x3_t, poly16x8_t, poly16_t, v8hi, hi, p16)
20429 __LD3Q_LANE_FUNC (poly64x2x3_t, poly64x2_t, poly64_t, v2di, di, p64)
20430 __LD3Q_LANE_FUNC (int8x16x3_t, int8x16_t, int8_t, v16qi, qi, s8)
20431 __LD3Q_LANE_FUNC (int16x8x3_t, int16x8_t, int16_t, v8hi, hi, s16)
20432 __LD3Q_LANE_FUNC (int32x4x3_t, int32x4_t, int32_t, v4si, si, s32)
20433 __LD3Q_LANE_FUNC (int64x2x3_t, int64x2_t, int64_t, v2di, di, s64)
20434 __LD3Q_LANE_FUNC (uint8x16x3_t, uint8x16_t, uint8_t, v16qi, qi, u8)
20435 __LD3Q_LANE_FUNC (uint16x8x3_t, uint16x8_t, uint16_t, v8hi, hi, u16)
20436 __LD3Q_LANE_FUNC (uint32x4x3_t, uint32x4_t, uint32_t, v4si, si, u32)
20437 __LD3Q_LANE_FUNC (uint64x2x3_t, uint64x2_t, uint64_t, v2di, di, u64)
20438
20439 /* vld4_lane */
20440
20441 #define __LD4_LANE_FUNC(intype, vectype, largetype, ptrtype, mode, \
20442 qmode, ptrmode, funcsuffix, signedtype) \
20443 __extension__ extern __inline intype \
20444 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
20445 vld4_lane_##funcsuffix (const ptrtype * __ptr, intype __b, const int __c) \
20446 { \
20447 __builtin_aarch64_simd_xi __o; \
20448 largetype __temp; \
20449 __temp.val[0] = \
20450 vcombine_##funcsuffix (__b.val[0], vcreate_##funcsuffix (0)); \
20451 __temp.val[1] = \
20452 vcombine_##funcsuffix (__b.val[1], vcreate_##funcsuffix (0)); \
20453 __temp.val[2] = \
20454 vcombine_##funcsuffix (__b.val[2], vcreate_##funcsuffix (0)); \
20455 __temp.val[3] = \
20456 vcombine_##funcsuffix (__b.val[3], vcreate_##funcsuffix (0)); \
20457 __o = __builtin_aarch64_set_qregxi##qmode (__o, \
20458 (signedtype) __temp.val[0], \
20459 0); \
20460 __o = __builtin_aarch64_set_qregxi##qmode (__o, \
20461 (signedtype) __temp.val[1], \
20462 1); \
20463 __o = __builtin_aarch64_set_qregxi##qmode (__o, \
20464 (signedtype) __temp.val[2], \
20465 2); \
20466 __o = __builtin_aarch64_set_qregxi##qmode (__o, \
20467 (signedtype) __temp.val[3], \
20468 3); \
20469 __o = __builtin_aarch64_ld4_lane##mode ( \
20470 (__builtin_aarch64_simd_##ptrmode *) __ptr, __o, __c); \
20471 __b.val[0] = (vectype) __builtin_aarch64_get_dregxidi (__o, 0); \
20472 __b.val[1] = (vectype) __builtin_aarch64_get_dregxidi (__o, 1); \
20473 __b.val[2] = (vectype) __builtin_aarch64_get_dregxidi (__o, 2); \
20474 __b.val[3] = (vectype) __builtin_aarch64_get_dregxidi (__o, 3); \
20475 return __b; \
20476 }
20477
20478 /* vld4_lane */
20479
20480 __LD4_LANE_FUNC (float16x4x4_t, float16x4_t, float16x8x4_t, float16_t, v4hf,
20481 v8hf, hf, f16, float16x8_t)
20482 __LD4_LANE_FUNC (float32x2x4_t, float32x2_t, float32x4x4_t, float32_t, v2sf, v4sf,
20483 sf, f32, float32x4_t)
20484 __LD4_LANE_FUNC (float64x1x4_t, float64x1_t, float64x2x4_t, float64_t, df, v2df,
20485 df, f64, float64x2_t)
20486 __LD4_LANE_FUNC (poly8x8x4_t, poly8x8_t, poly8x16x4_t, poly8_t, v8qi, v16qi, qi, p8,
20487 int8x16_t)
20488 __LD4_LANE_FUNC (poly16x4x4_t, poly16x4_t, poly16x8x4_t, poly16_t, v4hi, v8hi, hi,
20489 p16, int16x8_t)
20490 __LD4_LANE_FUNC (poly64x1x4_t, poly64x1_t, poly64x2x4_t, poly64_t, di,
20491 v2di_ssps, di, p64, poly64x2_t)
20492 __LD4_LANE_FUNC (int8x8x4_t, int8x8_t, int8x16x4_t, int8_t, v8qi, v16qi, qi, s8,
20493 int8x16_t)
20494 __LD4_LANE_FUNC (int16x4x4_t, int16x4_t, int16x8x4_t, int16_t, v4hi, v8hi, hi, s16,
20495 int16x8_t)
20496 __LD4_LANE_FUNC (int32x2x4_t, int32x2_t, int32x4x4_t, int32_t, v2si, v4si, si, s32,
20497 int32x4_t)
20498 __LD4_LANE_FUNC (int64x1x4_t, int64x1_t, int64x2x4_t, int64_t, di, v2di, di, s64,
20499 int64x2_t)
20500 __LD4_LANE_FUNC (uint8x8x4_t, uint8x8_t, uint8x16x4_t, uint8_t, v8qi, v16qi, qi, u8,
20501 int8x16_t)
20502 __LD4_LANE_FUNC (uint16x4x4_t, uint16x4_t, uint16x8x4_t, uint16_t, v4hi, v8hi, hi,
20503 u16, int16x8_t)
20504 __LD4_LANE_FUNC (uint32x2x4_t, uint32x2_t, uint32x4x4_t, uint32_t, v2si, v4si, si,
20505 u32, int32x4_t)
20506 __LD4_LANE_FUNC (uint64x1x4_t, uint64x1_t, uint64x2x4_t, uint64_t, di, v2di, di,
20507 u64, int64x2_t)
20508
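Likewise for the 64-bit four-vector variant (hypothetical caller shown); here the four bytes at p land in lane 0 of the four result vectors:

#include <arm_neon.h>

/* Illustrative only: deposit the four bytes at p into lane 0 of each of
   the four D-register vectors.  */
static uint8x8x4_t
example_vld4_lane (const uint8_t *p, uint8x8x4_t planes)
{
  return vld4_lane_u8 (p, planes, 0);
}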
20509 /* vld4q_lane */
20510
20511 #define __LD4Q_LANE_FUNC(intype, vtype, ptrtype, mode, ptrmode, funcsuffix) \
20512 __extension__ extern __inline intype \
20513 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
20514 vld4q_lane_##funcsuffix (const ptrtype * __ptr, intype __b, const int __c) \
20515 { \
20516 __builtin_aarch64_simd_xi __o; \
20517 intype ret; \
20518 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[0], 0); \
20519 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[1], 1); \
20520 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[2], 2); \
20521 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __b.val[3], 3); \
20522 __o = __builtin_aarch64_ld4_lane##mode ( \
20523 (__builtin_aarch64_simd_##ptrmode *) __ptr, __o, __c); \
20524 ret.val[0] = (vtype) __builtin_aarch64_get_qregxiv4si (__o, 0); \
20525 ret.val[1] = (vtype) __builtin_aarch64_get_qregxiv4si (__o, 1); \
20526 ret.val[2] = (vtype) __builtin_aarch64_get_qregxiv4si (__o, 2); \
20527 ret.val[3] = (vtype) __builtin_aarch64_get_qregxiv4si (__o, 3); \
20528 return ret; \
20529 }
20530
20531 __LD4Q_LANE_FUNC (float16x8x4_t, float16x8_t, float16_t, v8hf, hf, f16)
20532 __LD4Q_LANE_FUNC (float32x4x4_t, float32x4_t, float32_t, v4sf, sf, f32)
20533 __LD4Q_LANE_FUNC (float64x2x4_t, float64x2_t, float64_t, v2df, df, f64)
20534 __LD4Q_LANE_FUNC (poly8x16x4_t, poly8x16_t, poly8_t, v16qi, qi, p8)
20535 __LD4Q_LANE_FUNC (poly16x8x4_t, poly16x8_t, poly16_t, v8hi, hi, p16)
20536 __LD4Q_LANE_FUNC (poly64x2x4_t, poly64x2_t, poly64_t, v2di, di, p64)
20537 __LD4Q_LANE_FUNC (int8x16x4_t, int8x16_t, int8_t, v16qi, qi, s8)
20538 __LD4Q_LANE_FUNC (int16x8x4_t, int16x8_t, int16_t, v8hi, hi, s16)
20539 __LD4Q_LANE_FUNC (int32x4x4_t, int32x4_t, int32_t, v4si, si, s32)
20540 __LD4Q_LANE_FUNC (int64x2x4_t, int64x2_t, int64_t, v2di, di, s64)
20541 __LD4Q_LANE_FUNC (uint8x16x4_t, uint8x16_t, uint8_t, v16qi, qi, u8)
20542 __LD4Q_LANE_FUNC (uint16x8x4_t, uint16x8_t, uint16_t, v8hi, hi, u16)
20543 __LD4Q_LANE_FUNC (uint32x4x4_t, uint32x4_t, uint32_t, v4si, si, u32)
20544 __LD4Q_LANE_FUNC (uint64x2x4_t, uint64x2_t, uint64_t, v2di, di, u64)
20545
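And its Q-register counterpart, again with illustrative names only; for float32x4x4_t the lane index is a constant in the range 0..3:

#include <arm_neon.h>

/* Illustrative only: overwrite lane 3 of each of the four Q vectors
   with the four floats stored contiguously at p.  */
static float32x4x4_t
example_vld4q_lane (const float32_t *p, float32x4x4_t acc)
{
  return vld4q_lane_f32 (p, acc, 3);
}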
20546 /* vmax */
20547
20548 __extension__ extern __inline float32x2_t
20549 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20550 vmax_f32 (float32x2_t __a, float32x2_t __b)
20551 {
20552 return __builtin_aarch64_smax_nanv2sf (__a, __b);
20553 }
20554
20555 __extension__ extern __inline float64x1_t
20556 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20557 vmax_f64 (float64x1_t __a, float64x1_t __b)
20558 {
20559 return (float64x1_t)
20560 { __builtin_aarch64_smax_nandf (vget_lane_f64 (__a, 0),
20561 vget_lane_f64 (__b, 0)) };
20562 }
20563
20564 __extension__ extern __inline int8x8_t
20565 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20566 vmax_s8 (int8x8_t __a, int8x8_t __b)
20567 {
20568 return __builtin_aarch64_smaxv8qi (__a, __b);
20569 }
20570
20571 __extension__ extern __inline int16x4_t
20572 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20573 vmax_s16 (int16x4_t __a, int16x4_t __b)
20574 {
20575 return __builtin_aarch64_smaxv4hi (__a, __b);
20576 }
20577
20578 __extension__ extern __inline int32x2_t
20579 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20580 vmax_s32 (int32x2_t __a, int32x2_t __b)
20581 {
20582 return __builtin_aarch64_smaxv2si (__a, __b);
20583 }
20584
20585 __extension__ extern __inline uint8x8_t
20586 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20587 vmax_u8 (uint8x8_t __a, uint8x8_t __b)
20588 {
20589 return (uint8x8_t) __builtin_aarch64_umaxv8qi ((int8x8_t) __a,
20590 (int8x8_t) __b);
20591 }
20592
20593 __extension__ extern __inline uint16x4_t
20594 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20595 vmax_u16 (uint16x4_t __a, uint16x4_t __b)
20596 {
20597 return (uint16x4_t) __builtin_aarch64_umaxv4hi ((int16x4_t) __a,
20598 (int16x4_t) __b);
20599 }
20600
20601 __extension__ extern __inline uint32x2_t
20602 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20603 vmax_u32 (uint32x2_t __a, uint32x2_t __b)
20604 {
20605 return (uint32x2_t) __builtin_aarch64_umaxv2si ((int32x2_t) __a,
20606 (int32x2_t) __b);
20607 }
20608
20609 __extension__ extern __inline float32x4_t
20610 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20611 vmaxq_f32 (float32x4_t __a, float32x4_t __b)
20612 {
20613 return __builtin_aarch64_smax_nanv4sf (__a, __b);
20614 }
20615
20616 __extension__ extern __inline float64x2_t
20617 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20618 vmaxq_f64 (float64x2_t __a, float64x2_t __b)
20619 {
20620 return __builtin_aarch64_smax_nanv2df (__a, __b);
20621 }
20622
20623 __extension__ extern __inline int8x16_t
20624 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20625 vmaxq_s8 (int8x16_t __a, int8x16_t __b)
20626 {
20627 return __builtin_aarch64_smaxv16qi (__a, __b);
20628 }
20629
20630 __extension__ extern __inline int16x8_t
20631 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20632 vmaxq_s16 (int16x8_t __a, int16x8_t __b)
20633 {
20634 return __builtin_aarch64_smaxv8hi (__a, __b);
20635 }
20636
20637 __extension__ extern __inline int32x4_t
20638 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20639 vmaxq_s32 (int32x4_t __a, int32x4_t __b)
20640 {
20641 return __builtin_aarch64_smaxv4si (__a, __b);
20642 }
20643
20644 __extension__ extern __inline uint8x16_t
20645 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20646 vmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
20647 {
20648 return (uint8x16_t) __builtin_aarch64_umaxv16qi ((int8x16_t) __a,
20649 (int8x16_t) __b);
20650 }
20651
20652 __extension__ extern __inline uint16x8_t
20653 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20654 vmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
20655 {
20656 return (uint16x8_t) __builtin_aarch64_umaxv8hi ((int16x8_t) __a,
20657 (int16x8_t) __b);
20658 }
20659
20660 __extension__ extern __inline uint32x4_t
20661 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20662 vmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
20663 {
20664 return (uint32x4_t) __builtin_aarch64_umaxv4si ((int32x4_t) __a,
20665 (int32x4_t) __b);
20666 }
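A short sketch of the vmax family (helper name is illustrative): the floating-point forms map to FMAX, which propagates a NaN from either operand, unlike the vmaxnm intrinsics further below:

#include <arm_neon.h>

/* Illustrative only: clamp each lane of v to be at least the matching
   lane of floor_vals.  */
static float32x4_t
example_lanewise_max (float32x4_t v, float32x4_t floor_vals)
{
  return vmaxq_f32 (v, floor_vals);
}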
20667 /* vmulx */
20668
20669 __extension__ extern __inline float32x2_t
20670 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20671 vmulx_f32 (float32x2_t __a, float32x2_t __b)
20672 {
20673 return __builtin_aarch64_fmulxv2sf (__a, __b);
20674 }
20675
20676 __extension__ extern __inline float32x4_t
20677 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20678 vmulxq_f32 (float32x4_t __a, float32x4_t __b)
20679 {
20680 return __builtin_aarch64_fmulxv4sf (__a, __b);
20681 }
20682
20683 __extension__ extern __inline float64x1_t
20684 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20685 vmulx_f64 (float64x1_t __a, float64x1_t __b)
20686 {
20687 return (float64x1_t) {__builtin_aarch64_fmulxdf (__a[0], __b[0])};
20688 }
20689
20690 __extension__ extern __inline float64x2_t
20691 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20692 vmulxq_f64 (float64x2_t __a, float64x2_t __b)
20693 {
20694 return __builtin_aarch64_fmulxv2df (__a, __b);
20695 }
20696
20697 __extension__ extern __inline float32_t
20698 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20699 vmulxs_f32 (float32_t __a, float32_t __b)
20700 {
20701 return __builtin_aarch64_fmulxsf (__a, __b);
20702 }
20703
20704 __extension__ extern __inline float64_t
20705 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20706 vmulxd_f64 (float64_t __a, float64_t __b)
20707 {
20708 return __builtin_aarch64_fmulxdf (__a, __b);
20709 }
20710
20711 __extension__ extern __inline float32x2_t
20712 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20713 vmulx_lane_f32 (float32x2_t __a, float32x2_t __v, const int __lane)
20714 {
20715 return vmulx_f32 (__a, __aarch64_vdup_lane_f32 (__v, __lane));
20716 }
20717
20718 __extension__ extern __inline float64x1_t
20719 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20720 vmulx_lane_f64 (float64x1_t __a, float64x1_t __v, const int __lane)
20721 {
20722 return vmulx_f64 (__a, __aarch64_vdup_lane_f64 (__v, __lane));
20723 }
20724
20725 __extension__ extern __inline float32x4_t
20726 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20727 vmulxq_lane_f32 (float32x4_t __a, float32x2_t __v, const int __lane)
20728 {
20729 return vmulxq_f32 (__a, __aarch64_vdupq_lane_f32 (__v, __lane));
20730 }
20731
20732 __extension__ extern __inline float64x2_t
20733 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20734 vmulxq_lane_f64 (float64x2_t __a, float64x1_t __v, const int __lane)
20735 {
20736 return vmulxq_f64 (__a, __aarch64_vdupq_lane_f64 (__v, __lane));
20737 }
20738
20739 __extension__ extern __inline float32x2_t
20740 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20741 vmulx_laneq_f32 (float32x2_t __a, float32x4_t __v, const int __lane)
20742 {
20743 return vmulx_f32 (__a, __aarch64_vdup_laneq_f32 (__v, __lane));
20744 }
20745
20746 __extension__ extern __inline float64x1_t
20747 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20748 vmulx_laneq_f64 (float64x1_t __a, float64x2_t __v, const int __lane)
20749 {
20750 return vmulx_f64 (__a, __aarch64_vdup_laneq_f64 (__v, __lane));
20751 }
20752
20753 __extension__ extern __inline float32x4_t
20754 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20755 vmulxq_laneq_f32 (float32x4_t __a, float32x4_t __v, const int __lane)
20756 {
20757 return vmulxq_f32 (__a, __aarch64_vdupq_laneq_f32 (__v, __lane));
20758 }
20759
20760 __extension__ extern __inline float64x2_t
20761 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20762 vmulxq_laneq_f64 (float64x2_t __a, float64x2_t __v, const int __lane)
20763 {
20764 return vmulxq_f64 (__a, __aarch64_vdupq_laneq_f64 (__v, __lane));
20765 }
20766
20767 __extension__ extern __inline float32_t
20768 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20769 vmulxs_lane_f32 (float32_t __a, float32x2_t __v, const int __lane)
20770 {
20771 return vmulxs_f32 (__a, __aarch64_vget_lane_any (__v, __lane));
20772 }
20773
20774 __extension__ extern __inline float32_t
20775 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20776 vmulxs_laneq_f32 (float32_t __a, float32x4_t __v, const int __lane)
20777 {
20778 return vmulxs_f32 (__a, __aarch64_vget_lane_any (__v, __lane));
20779 }
20780
20781 __extension__ extern __inline float64_t
20782 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20783 vmulxd_lane_f64 (float64_t __a, float64x1_t __v, const int __lane)
20784 {
20785 return vmulxd_f64 (__a, __aarch64_vget_lane_any (__v, __lane));
20786 }
20787
20788 __extension__ extern __inline float64_t
20789 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20790 vmulxd_laneq_f64 (float64_t __a, float64x2_t __v, const int __lane)
20791 {
20792 return vmulxd_f64 (__a, __aarch64_vget_lane_any (__v, __lane));
20793 }
20794
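A usage sketch with illustrative names: vmulx multiplies lane by lane like vmul, but follows the FMULX rule that zero times infinity yields 2.0 (with the combined sign) rather than NaN, which is useful in reciprocal and square-root estimate sequences:

#include <arm_neon.h>

/* Illustrative only: scale each lane of v by element 2 of factors,
   using the extended multiply.  */
static float32x4_t
example_mulx_by_lane (float32x4_t v, float32x4_t factors)
{
  return vmulxq_laneq_f32 (v, factors, 2);
}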
20795 /* vpmax */
20796
20797 __extension__ extern __inline int8x8_t
20798 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20799 vpmax_s8 (int8x8_t __a, int8x8_t __b)
20800 {
20801 return __builtin_aarch64_smaxpv8qi (__a, __b);
20802 }
20803
20804 __extension__ extern __inline int16x4_t
20805 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20806 vpmax_s16 (int16x4_t __a, int16x4_t __b)
20807 {
20808 return __builtin_aarch64_smaxpv4hi (__a, __b);
20809 }
20810
20811 __extension__ extern __inline int32x2_t
20812 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20813 vpmax_s32 (int32x2_t __a, int32x2_t __b)
20814 {
20815 return __builtin_aarch64_smaxpv2si (__a, __b);
20816 }
20817
20818 __extension__ extern __inline uint8x8_t
20819 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20820 vpmax_u8 (uint8x8_t __a, uint8x8_t __b)
20821 {
20822 return (uint8x8_t) __builtin_aarch64_umaxpv8qi ((int8x8_t) __a,
20823 (int8x8_t) __b);
20824 }
20825
20826 __extension__ extern __inline uint16x4_t
20827 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20828 vpmax_u16 (uint16x4_t __a, uint16x4_t __b)
20829 {
20830 return (uint16x4_t) __builtin_aarch64_umaxpv4hi ((int16x4_t) __a,
20831 (int16x4_t) __b);
20832 }
20833
20834 __extension__ extern __inline uint32x2_t
20835 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20836 vpmax_u32 (uint32x2_t __a, uint32x2_t __b)
20837 {
20838 return (uint32x2_t) __builtin_aarch64_umaxpv2si ((int32x2_t) __a,
20839 (int32x2_t) __b);
20840 }
20841
20842 __extension__ extern __inline int8x16_t
20843 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20844 vpmaxq_s8 (int8x16_t __a, int8x16_t __b)
20845 {
20846 return __builtin_aarch64_smaxpv16qi (__a, __b);
20847 }
20848
20849 __extension__ extern __inline int16x8_t
20850 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20851 vpmaxq_s16 (int16x8_t __a, int16x8_t __b)
20852 {
20853 return __builtin_aarch64_smaxpv8hi (__a, __b);
20854 }
20855
20856 __extension__ extern __inline int32x4_t
20857 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20858 vpmaxq_s32 (int32x4_t __a, int32x4_t __b)
20859 {
20860 return __builtin_aarch64_smaxpv4si (__a, __b);
20861 }
20862
20863 __extension__ extern __inline uint8x16_t
20864 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20865 vpmaxq_u8 (uint8x16_t __a, uint8x16_t __b)
20866 {
20867 return (uint8x16_t) __builtin_aarch64_umaxpv16qi ((int8x16_t) __a,
20868 (int8x16_t) __b);
20869 }
20870
20871 __extension__ extern __inline uint16x8_t
20872 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20873 vpmaxq_u16 (uint16x8_t __a, uint16x8_t __b)
20874 {
20875 return (uint16x8_t) __builtin_aarch64_umaxpv8hi ((int16x8_t) __a,
20876 (int16x8_t) __b);
20877 }
20878
20879 __extension__ extern __inline uint32x4_t
20880 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20881 vpmaxq_u32 (uint32x4_t __a, uint32x4_t __b)
20882 {
20883 return (uint32x4_t) __builtin_aarch64_umaxpv4si ((int32x4_t) __a,
20884 (int32x4_t) __b);
20885 }
20886
20887 __extension__ extern __inline float32x2_t
20888 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20889 vpmax_f32 (float32x2_t __a, float32x2_t __b)
20890 {
20891 return __builtin_aarch64_smax_nanpv2sf (__a, __b);
20892 }
20893
20894 __extension__ extern __inline float32x4_t
20895 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20896 vpmaxq_f32 (float32x4_t __a, float32x4_t __b)
20897 {
20898 return __builtin_aarch64_smax_nanpv4sf (__a, __b);
20899 }
20900
20901 __extension__ extern __inline float64x2_t
20902 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20903 vpmaxq_f64 (float64x2_t __a, float64x2_t __b)
20904 {
20905 return __builtin_aarch64_smax_nanpv2df (__a, __b);
20906 }
20907
20908 __extension__ extern __inline float64_t
20909 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20910 vpmaxqd_f64 (float64x2_t __a)
20911 {
20912 return __builtin_aarch64_reduc_smax_nan_scal_v2df (__a);
20913 }
20914
20915 __extension__ extern __inline float32_t
20916 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20917 vpmaxs_f32 (float32x2_t __a)
20918 {
20919 return __builtin_aarch64_reduc_smax_nan_scal_v2sf (__a);
20920 }
20921
20922 /* vpmaxnm */
20923
20924 __extension__ extern __inline float32x2_t
20925 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20926 vpmaxnm_f32 (float32x2_t __a, float32x2_t __b)
20927 {
20928 return __builtin_aarch64_smaxpv2sf (__a, __b);
20929 }
20930
20931 __extension__ extern __inline float32x4_t
20932 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20933 vpmaxnmq_f32 (float32x4_t __a, float32x4_t __b)
20934 {
20935 return __builtin_aarch64_smaxpv4sf (__a, __b);
20936 }
20937
20938 __extension__ extern __inline float64x2_t
20939 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20940 vpmaxnmq_f64 (float64x2_t __a, float64x2_t __b)
20941 {
20942 return __builtin_aarch64_smaxpv2df (__a, __b);
20943 }
20944
20945 __extension__ extern __inline float64_t
20946 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20947 vpmaxnmqd_f64 (float64x2_t __a)
20948 {
20949 return __builtin_aarch64_reduc_smax_scal_v2df (__a);
20950 }
20951
20952 __extension__ extern __inline float32_t
20953 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20954 vpmaxnms_f32 (float32x2_t __a)
20955 {
20956 return __builtin_aarch64_reduc_smax_scal_v2sf (__a);
20957 }
20958
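A sketch of the pairwise forms (names illustrative): vpmax reduces adjacent pairs, taking the pairs of the first operand followed by the pairs of the second:

#include <arm_neon.h>

/* Illustrative only: pairwise maximum of two int16x4 vectors.  Result
   lanes are { max(a0,a1), max(a2,a3), max(b0,b1), max(b2,b3) }.  */
static int16x4_t
example_pairwise_max (int16x4_t a, int16x4_t b)
{
  return vpmax_s16 (a, b);
}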
20959 /* vpmin */
20960
20961 __extension__ extern __inline int8x8_t
20962 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20963 vpmin_s8 (int8x8_t __a, int8x8_t __b)
20964 {
20965 return __builtin_aarch64_sminpv8qi (__a, __b);
20966 }
20967
20968 __extension__ extern __inline int16x4_t
20969 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20970 vpmin_s16 (int16x4_t __a, int16x4_t __b)
20971 {
20972 return __builtin_aarch64_sminpv4hi (__a, __b);
20973 }
20974
20975 __extension__ extern __inline int32x2_t
20976 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20977 vpmin_s32 (int32x2_t __a, int32x2_t __b)
20978 {
20979 return __builtin_aarch64_sminpv2si (__a, __b);
20980 }
20981
20982 __extension__ extern __inline uint8x8_t
20983 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20984 vpmin_u8 (uint8x8_t __a, uint8x8_t __b)
20985 {
20986 return (uint8x8_t) __builtin_aarch64_uminpv8qi ((int8x8_t) __a,
20987 (int8x8_t) __b);
20988 }
20989
20990 __extension__ extern __inline uint16x4_t
20991 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
20992 vpmin_u16 (uint16x4_t __a, uint16x4_t __b)
20993 {
20994 return (uint16x4_t) __builtin_aarch64_uminpv4hi ((int16x4_t) __a,
20995 (int16x4_t) __b);
20996 }
20997
20998 __extension__ extern __inline uint32x2_t
20999 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21000 vpmin_u32 (uint32x2_t __a, uint32x2_t __b)
21001 {
21002 return (uint32x2_t) __builtin_aarch64_uminpv2si ((int32x2_t) __a,
21003 (int32x2_t) __b);
21004 }
21005
21006 __extension__ extern __inline int8x16_t
21007 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21008 vpminq_s8 (int8x16_t __a, int8x16_t __b)
21009 {
21010 return __builtin_aarch64_sminpv16qi (__a, __b);
21011 }
21012
21013 __extension__ extern __inline int16x8_t
21014 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21015 vpminq_s16 (int16x8_t __a, int16x8_t __b)
21016 {
21017 return __builtin_aarch64_sminpv8hi (__a, __b);
21018 }
21019
21020 __extension__ extern __inline int32x4_t
21021 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21022 vpminq_s32 (int32x4_t __a, int32x4_t __b)
21023 {
21024 return __builtin_aarch64_sminpv4si (__a, __b);
21025 }
21026
21027 __extension__ extern __inline uint8x16_t
21028 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21029 vpminq_u8 (uint8x16_t __a, uint8x16_t __b)
21030 {
21031 return (uint8x16_t) __builtin_aarch64_uminpv16qi ((int8x16_t) __a,
21032 (int8x16_t) __b);
21033 }
21034
21035 __extension__ extern __inline uint16x8_t
21036 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21037 vpminq_u16 (uint16x8_t __a, uint16x8_t __b)
21038 {
21039 return (uint16x8_t) __builtin_aarch64_uminpv8hi ((int16x8_t) __a,
21040 (int16x8_t) __b);
21041 }
21042
21043 __extension__ extern __inline uint32x4_t
21044 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21045 vpminq_u32 (uint32x4_t __a, uint32x4_t __b)
21046 {
21047 return (uint32x4_t) __builtin_aarch64_uminpv4si ((int32x4_t) __a,
21048 (int32x4_t) __b);
21049 }
21050
21051 __extension__ extern __inline float32x2_t
21052 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21053 vpmin_f32 (float32x2_t __a, float32x2_t __b)
21054 {
21055 return __builtin_aarch64_smin_nanpv2sf (__a, __b);
21056 }
21057
21058 __extension__ extern __inline float32x4_t
21059 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21060 vpminq_f32 (float32x4_t __a, float32x4_t __b)
21061 {
21062 return __builtin_aarch64_smin_nanpv4sf (__a, __b);
21063 }
21064
21065 __extension__ extern __inline float64x2_t
21066 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21067 vpminq_f64 (float64x2_t __a, float64x2_t __b)
21068 {
21069 return __builtin_aarch64_smin_nanpv2df (__a, __b);
21070 }
21071
21072 __extension__ extern __inline float64_t
21073 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21074 vpminqd_f64 (float64x2_t __a)
21075 {
21076 return __builtin_aarch64_reduc_smin_nan_scal_v2df (__a);
21077 }
21078
21079 __extension__ extern __inline float32_t
21080 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21081 vpmins_f32 (float32x2_t __a)
21082 {
21083 return __builtin_aarch64_reduc_smin_nan_scal_v2sf (__a);
21084 }
21085
21086 /* vpminnm */
21087
21088 __extension__ extern __inline float32x2_t
21089 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21090 vpminnm_f32 (float32x2_t __a, float32x2_t __b)
21091 {
21092 return __builtin_aarch64_sminpv2sf (__a, __b);
21093 }
21094
21095 __extension__ extern __inline float32x4_t
21096 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21097 vpminnmq_f32 (float32x4_t __a, float32x4_t __b)
21098 {
21099 return __builtin_aarch64_sminpv4sf (__a, __b);
21100 }
21101
21102 __extension__ extern __inline float64x2_t
21103 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21104 vpminnmq_f64 (float64x2_t __a, float64x2_t __b)
21105 {
21106 return __builtin_aarch64_sminpv2df (__a, __b);
21107 }
21108
21109 __extension__ extern __inline float64_t
21110 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21111 vpminnmqd_f64 (float64x2_t __a)
21112 {
21113 return __builtin_aarch64_reduc_smin_scal_v2df (__a);
21114 }
21115
21116 __extension__ extern __inline float32_t
21117 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21118 vpminnms_f32 (float32x2_t __a)
21119 {
21120 return __builtin_aarch64_reduc_smin_scal_v2sf (__a);
21121 }
21122
21123 /* vmaxnm */
21124
21125 __extension__ extern __inline float32x2_t
21126 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21127 vmaxnm_f32 (float32x2_t __a, float32x2_t __b)
21128 {
21129 return __builtin_aarch64_fmaxv2sf (__a, __b);
21130 }
21131
21132 __extension__ extern __inline float64x1_t
21133 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21134 vmaxnm_f64 (float64x1_t __a, float64x1_t __b)
21135 {
21136 return (float64x1_t)
21137 { __builtin_aarch64_fmaxdf (vget_lane_f64 (__a, 0),
21138 vget_lane_f64 (__b, 0)) };
21139 }
21140
21141 __extension__ extern __inline float32x4_t
21142 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21143 vmaxnmq_f32 (float32x4_t __a, float32x4_t __b)
21144 {
21145 return __builtin_aarch64_fmaxv4sf (__a, __b);
21146 }
21147
21148 __extension__ extern __inline float64x2_t
21149 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21150 vmaxnmq_f64 (float64x2_t __a, float64x2_t __b)
21151 {
21152 return __builtin_aarch64_fmaxv2df (__a, __b);
21153 }
21154
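The only difference from plain vmax is NaN handling; a small sketch with illustrative names shows the IEEE maxNum behaviour, where a quiet NaN in one operand is ignored:

#include <arm_neon.h>
#include <math.h>

/* Illustrative only: with FMAXNM semantics the quiet NaN operand is
   ignored, so every result lane here is 1.0f.  */
static float32x2_t
example_maxnm (void)
{
  float32x2_t ones = vdup_n_f32 (1.0f);
  float32x2_t nans = vdup_n_f32 (NAN);
  return vmaxnm_f32 (ones, nans);
}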
21155 /* vmaxv */
21156
21157 __extension__ extern __inline float32_t
21158 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21159 vmaxv_f32 (float32x2_t __a)
21160 {
21161 return __builtin_aarch64_reduc_smax_nan_scal_v2sf (__a);
21162 }
21163
21164 __extension__ extern __inline int8_t
21165 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21166 vmaxv_s8 (int8x8_t __a)
21167 {
21168 return __builtin_aarch64_reduc_smax_scal_v8qi (__a);
21169 }
21170
21171 __extension__ extern __inline int16_t
21172 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21173 vmaxv_s16 (int16x4_t __a)
21174 {
21175 return __builtin_aarch64_reduc_smax_scal_v4hi (__a);
21176 }
21177
21178 __extension__ extern __inline int32_t
21179 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21180 vmaxv_s32 (int32x2_t __a)
21181 {
21182 return __builtin_aarch64_reduc_smax_scal_v2si (__a);
21183 }
21184
21185 __extension__ extern __inline uint8_t
21186 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21187 vmaxv_u8 (uint8x8_t __a)
21188 {
21189 return __builtin_aarch64_reduc_umax_scal_v8qi_uu (__a);
21190 }
21191
21192 __extension__ extern __inline uint16_t
21193 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21194 vmaxv_u16 (uint16x4_t __a)
21195 {
21196 return __builtin_aarch64_reduc_umax_scal_v4hi_uu (__a);
21197 }
21198
21199 __extension__ extern __inline uint32_t
21200 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21201 vmaxv_u32 (uint32x2_t __a)
21202 {
21203 return __builtin_aarch64_reduc_umax_scal_v2si_uu (__a);
21204 }
21205
21206 __extension__ extern __inline float32_t
21207 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21208 vmaxvq_f32 (float32x4_t __a)
21209 {
21210 return __builtin_aarch64_reduc_smax_nan_scal_v4sf (__a);
21211 }
21212
21213 __extension__ extern __inline float64_t
21214 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21215 vmaxvq_f64 (float64x2_t __a)
21216 {
21217 return __builtin_aarch64_reduc_smax_nan_scal_v2df (__a);
21218 }
21219
21220 __extension__ extern __inline int8_t
21221 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21222 vmaxvq_s8 (int8x16_t __a)
21223 {
21224 return __builtin_aarch64_reduc_smax_scal_v16qi (__a);
21225 }
21226
21227 __extension__ extern __inline int16_t
21228 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21229 vmaxvq_s16 (int16x8_t __a)
21230 {
21231 return __builtin_aarch64_reduc_smax_scal_v8hi (__a);
21232 }
21233
21234 __extension__ extern __inline int32_t
21235 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21236 vmaxvq_s32 (int32x4_t __a)
21237 {
21238 return __builtin_aarch64_reduc_smax_scal_v4si (__a);
21239 }
21240
21241 __extension__ extern __inline uint8_t
21242 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21243 vmaxvq_u8 (uint8x16_t __a)
21244 {
21245 return __builtin_aarch64_reduc_umax_scal_v16qi_uu (__a);
21246 }
21247
21248 __extension__ extern __inline uint16_t
21249 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21250 vmaxvq_u16 (uint16x8_t __a)
21251 {
21252 return __builtin_aarch64_reduc_umax_scal_v8hi_uu (__a);
21253 }
21254
21255 __extension__ extern __inline uint32_t
21256 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21257 vmaxvq_u32 (uint32x4_t __a)
21258 {
21259 return __builtin_aarch64_reduc_umax_scal_v4si_uu (__a);
21260 }
21261
21262 /* vmaxnmv */
21263
21264 __extension__ extern __inline float32_t
21265 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21266 vmaxnmv_f32 (float32x2_t __a)
21267 {
21268 return __builtin_aarch64_reduc_smax_scal_v2sf (__a);
21269 }
21270
21271 __extension__ extern __inline float32_t
21272 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21273 vmaxnmvq_f32 (float32x4_t __a)
21274 {
21275 return __builtin_aarch64_reduc_smax_scal_v4sf (__a);
21276 }
21277
21278 __extension__ extern __inline float64_t
21279 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21280 vmaxnmvq_f64 (float64x2_t __a)
21281 {
21282 return __builtin_aarch64_reduc_smax_scal_v2df (__a);
21283 }
21284
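A sketch of the across-lanes reductions (helper name illustrative): vmaxv collapses a whole vector to a single scalar maximum:

#include <arm_neon.h>

/* Illustrative only: the largest of the 16 unsigned bytes in v.  */
static uint8_t
example_horizontal_max (uint8x16_t v)
{
  return vmaxvq_u8 (v);
}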
21285 /* vmin */
21286
21287 __extension__ extern __inline float32x2_t
21288 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21289 vmin_f32 (float32x2_t __a, float32x2_t __b)
21290 {
21291 return __builtin_aarch64_smin_nanv2sf (__a, __b);
21292 }
21293
21294 __extension__ extern __inline float64x1_t
21295 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21296 vmin_f64 (float64x1_t __a, float64x1_t __b)
21297 {
21298 return (float64x1_t)
21299 { __builtin_aarch64_smin_nandf (vget_lane_f64 (__a, 0),
21300 vget_lane_f64 (__b, 0)) };
21301 }
21302
21303 __extension__ extern __inline int8x8_t
21304 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21305 vmin_s8 (int8x8_t __a, int8x8_t __b)
21306 {
21307 return __builtin_aarch64_sminv8qi (__a, __b);
21308 }
21309
21310 __extension__ extern __inline int16x4_t
21311 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21312 vmin_s16 (int16x4_t __a, int16x4_t __b)
21313 {
21314 return __builtin_aarch64_sminv4hi (__a, __b);
21315 }
21316
21317 __extension__ extern __inline int32x2_t
21318 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21319 vmin_s32 (int32x2_t __a, int32x2_t __b)
21320 {
21321 return __builtin_aarch64_sminv2si (__a, __b);
21322 }
21323
21324 __extension__ extern __inline uint8x8_t
21325 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21326 vmin_u8 (uint8x8_t __a, uint8x8_t __b)
21327 {
21328 return (uint8x8_t) __builtin_aarch64_uminv8qi ((int8x8_t) __a,
21329 (int8x8_t) __b);
21330 }
21331
21332 __extension__ extern __inline uint16x4_t
21333 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21334 vmin_u16 (uint16x4_t __a, uint16x4_t __b)
21335 {
21336 return (uint16x4_t) __builtin_aarch64_uminv4hi ((int16x4_t) __a,
21337 (int16x4_t) __b);
21338 }
21339
21340 __extension__ extern __inline uint32x2_t
21341 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21342 vmin_u32 (uint32x2_t __a, uint32x2_t __b)
21343 {
21344 return (uint32x2_t) __builtin_aarch64_uminv2si ((int32x2_t) __a,
21345 (int32x2_t) __b);
21346 }
21347
21348 __extension__ extern __inline float32x4_t
21349 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21350 vminq_f32 (float32x4_t __a, float32x4_t __b)
21351 {
21352 return __builtin_aarch64_smin_nanv4sf (__a, __b);
21353 }
21354
21355 __extension__ extern __inline float64x2_t
21356 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21357 vminq_f64 (float64x2_t __a, float64x2_t __b)
21358 {
21359 return __builtin_aarch64_smin_nanv2df (__a, __b);
21360 }
21361
21362 __extension__ extern __inline int8x16_t
21363 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21364 vminq_s8 (int8x16_t __a, int8x16_t __b)
21365 {
21366 return __builtin_aarch64_sminv16qi (__a, __b);
21367 }
21368
21369 __extension__ extern __inline int16x8_t
21370 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21371 vminq_s16 (int16x8_t __a, int16x8_t __b)
21372 {
21373 return __builtin_aarch64_sminv8hi (__a, __b);
21374 }
21375
21376 __extension__ extern __inline int32x4_t
21377 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21378 vminq_s32 (int32x4_t __a, int32x4_t __b)
21379 {
21380 return __builtin_aarch64_sminv4si (__a, __b);
21381 }
21382
21383 __extension__ extern __inline uint8x16_t
21384 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21385 vminq_u8 (uint8x16_t __a, uint8x16_t __b)
21386 {
21387 return (uint8x16_t) __builtin_aarch64_uminv16qi ((int8x16_t) __a,
21388 (int8x16_t) __b);
21389 }
21390
21391 __extension__ extern __inline uint16x8_t
21392 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21393 vminq_u16 (uint16x8_t __a, uint16x8_t __b)
21394 {
21395 return (uint16x8_t) __builtin_aarch64_uminv8hi ((int16x8_t) __a,
21396 (int16x8_t) __b);
21397 }
21398
21399 __extension__ extern __inline uint32x4_t
21400 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21401 vminq_u32 (uint32x4_t __a, uint32x4_t __b)
21402 {
21403 return (uint32x4_t) __builtin_aarch64_uminv4si ((int32x4_t) __a,
21404 (int32x4_t) __b);
21405 }
21406
21407 /* vminnm */
21408
21409 __extension__ extern __inline float32x2_t
21410 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21411 vminnm_f32 (float32x2_t __a, float32x2_t __b)
21412 {
21413 return __builtin_aarch64_fminv2sf (__a, __b);
21414 }
21415
21416 __extension__ extern __inline float64x1_t
21417 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21418 vminnm_f64 (float64x1_t __a, float64x1_t __b)
21419 {
21420 return (float64x1_t)
21421 { __builtin_aarch64_fmindf (vget_lane_f64 (__a, 0),
21422 vget_lane_f64 (__b, 0)) };
21423 }
21424
21425 __extension__ extern __inline float32x4_t
21426 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21427 vminnmq_f32 (float32x4_t __a, float32x4_t __b)
21428 {
21429 return __builtin_aarch64_fminv4sf (__a, __b);
21430 }
21431
21432 __extension__ extern __inline float64x2_t
21433 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21434 vminnmq_f64 (float64x2_t __a, float64x2_t __b)
21435 {
21436 return __builtin_aarch64_fminv2df (__a, __b);
21437 }
21438
21439 /* vminv */
21440
21441 __extension__ extern __inline float32_t
21442 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21443 vminv_f32 (float32x2_t __a)
21444 {
21445 return __builtin_aarch64_reduc_smin_nan_scal_v2sf (__a);
21446 }
21447
21448 __extension__ extern __inline int8_t
21449 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21450 vminv_s8 (int8x8_t __a)
21451 {
21452 return __builtin_aarch64_reduc_smin_scal_v8qi (__a);
21453 }
21454
21455 __extension__ extern __inline int16_t
21456 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21457 vminv_s16 (int16x4_t __a)
21458 {
21459 return __builtin_aarch64_reduc_smin_scal_v4hi (__a);
21460 }
21461
21462 __extension__ extern __inline int32_t
21463 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21464 vminv_s32 (int32x2_t __a)
21465 {
21466 return __builtin_aarch64_reduc_smin_scal_v2si (__a);
21467 }
21468
21469 __extension__ extern __inline uint8_t
21470 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21471 vminv_u8 (uint8x8_t __a)
21472 {
21473 return __builtin_aarch64_reduc_umin_scal_v8qi_uu (__a);
21474 }
21475
21476 __extension__ extern __inline uint16_t
21477 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21478 vminv_u16 (uint16x4_t __a)
21479 {
21480 return __builtin_aarch64_reduc_umin_scal_v4hi_uu (__a);
21481 }
21482
21483 __extension__ extern __inline uint32_t
21484 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21485 vminv_u32 (uint32x2_t __a)
21486 {
21487 return __builtin_aarch64_reduc_umin_scal_v2si_uu (__a);
21488 }
21489
21490 __extension__ extern __inline float32_t
21491 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21492 vminvq_f32 (float32x4_t __a)
21493 {
21494 return __builtin_aarch64_reduc_smin_nan_scal_v4sf (__a);
21495 }
21496
21497 __extension__ extern __inline float64_t
21498 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21499 vminvq_f64 (float64x2_t __a)
21500 {
21501 return __builtin_aarch64_reduc_smin_nan_scal_v2df (__a);
21502 }
21503
21504 __extension__ extern __inline int8_t
21505 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21506 vminvq_s8 (int8x16_t __a)
21507 {
21508 return __builtin_aarch64_reduc_smin_scal_v16qi (__a);
21509 }
21510
21511 __extension__ extern __inline int16_t
21512 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21513 vminvq_s16 (int16x8_t __a)
21514 {
21515 return __builtin_aarch64_reduc_smin_scal_v8hi (__a);
21516 }
21517
21518 __extension__ extern __inline int32_t
21519 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21520 vminvq_s32 (int32x4_t __a)
21521 {
21522 return __builtin_aarch64_reduc_smin_scal_v4si (__a);
21523 }
21524
21525 __extension__ extern __inline uint8_t
21526 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21527 vminvq_u8 (uint8x16_t __a)
21528 {
21529 return __builtin_aarch64_reduc_umin_scal_v16qi_uu (__a);
21530 }
21531
21532 __extension__ extern __inline uint16_t
21533 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21534 vminvq_u16 (uint16x8_t __a)
21535 {
21536 return __builtin_aarch64_reduc_umin_scal_v8hi_uu (__a);
21537 }
21538
21539 __extension__ extern __inline uint32_t
21540 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21541 vminvq_u32 (uint32x4_t __a)
21542 {
21543 return __builtin_aarch64_reduc_umin_scal_v4si_uu (__a);
21544 }
21545
21546 /* vminnmv */
21547
21548 __extension__ extern __inline float32_t
21549 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21550 vminnmv_f32 (float32x2_t __a)
21551 {
21552 return __builtin_aarch64_reduc_smin_scal_v2sf (__a);
21553 }
21554
21555 __extension__ extern __inline float32_t
21556 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21557 vminnmvq_f32 (float32x4_t __a)
21558 {
21559 return __builtin_aarch64_reduc_smin_scal_v4sf (__a);
21560 }
21561
21562 __extension__ extern __inline float64_t
21563 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21564 vminnmvq_f64 (float64x2_t __a)
21565 {
21566 return __builtin_aarch64_reduc_smin_scal_v2df (__a);
21567 }
21568
21569 /* vmla */
21570
21571 __extension__ extern __inline float32x2_t
21572 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21573 vmla_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
21574 {
21575 return __a + __b * __c;
21576 }
21577
21578 __extension__ extern __inline float64x1_t
21579 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21580 vmla_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c)
21581 {
21582 return __a + __b * __c;
21583 }
21584
21585 __extension__ extern __inline float32x4_t
21586 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21587 vmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
21588 {
21589 return __a + __b * __c;
21590 }
21591
21592 __extension__ extern __inline float64x2_t
21593 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21594 vmlaq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c)
21595 {
21596 return __a + __b * __c;
21597 }
21598
21599 /* vmla_lane */
21600
21601 __extension__ extern __inline float32x2_t
21602 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21603 vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
21604 float32x2_t __c, const int __lane)
21605 {
21606 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21607 }
21608
21609 __extension__ extern __inline int16x4_t
21610 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21611 vmla_lane_s16 (int16x4_t __a, int16x4_t __b,
21612 int16x4_t __c, const int __lane)
21613 {
21614 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21615 }
21616
21617 __extension__ extern __inline int32x2_t
21618 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21619 vmla_lane_s32 (int32x2_t __a, int32x2_t __b,
21620 int32x2_t __c, const int __lane)
21621 {
21622 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21623 }
21624
21625 __extension__ extern __inline uint16x4_t
21626 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21627 vmla_lane_u16 (uint16x4_t __a, uint16x4_t __b,
21628 uint16x4_t __c, const int __lane)
21629 {
21630 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21631 }
21632
21633 __extension__ extern __inline uint32x2_t
21634 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21635 vmla_lane_u32 (uint32x2_t __a, uint32x2_t __b,
21636 uint32x2_t __c, const int __lane)
21637 {
21638 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21639 }
21640
21641 /* vmla_laneq */
21642
21643 __extension__ extern __inline float32x2_t
21644 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21645 vmla_laneq_f32 (float32x2_t __a, float32x2_t __b,
21646 float32x4_t __c, const int __lane)
21647 {
21648 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21649 }
21650
21651 __extension__ extern __inline int16x4_t
21652 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21653 vmla_laneq_s16 (int16x4_t __a, int16x4_t __b,
21654 int16x8_t __c, const int __lane)
21655 {
21656 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21657 }
21658
21659 __extension__ extern __inline int32x2_t
21660 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21661 vmla_laneq_s32 (int32x2_t __a, int32x2_t __b,
21662 int32x4_t __c, const int __lane)
21663 {
21664 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21665 }
21666
21667 __extension__ extern __inline uint16x4_t
21668 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21669 vmla_laneq_u16 (uint16x4_t __a, uint16x4_t __b,
21670 uint16x8_t __c, const int __lane)
21671 {
21672 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21673 }
21674
21675 __extension__ extern __inline uint32x2_t
21676 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21677 vmla_laneq_u32 (uint32x2_t __a, uint32x2_t __b,
21678 uint32x4_t __c, const int __lane)
21679 {
21680 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21681 }
21682
21683 /* vmlaq_lane */
21684
21685 __extension__ extern __inline float32x4_t
21686 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21687 vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
21688 float32x2_t __c, const int __lane)
21689 {
21690 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21691 }
21692
21693 __extension__ extern __inline int16x8_t
21694 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21695 vmlaq_lane_s16 (int16x8_t __a, int16x8_t __b,
21696 int16x4_t __c, const int __lane)
21697 {
21698 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21699 }
21700
21701 __extension__ extern __inline int32x4_t
21702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21703 vmlaq_lane_s32 (int32x4_t __a, int32x4_t __b,
21704 int32x2_t __c, const int __lane)
21705 {
21706 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21707 }
21708
21709 __extension__ extern __inline uint16x8_t
21710 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21711 vmlaq_lane_u16 (uint16x8_t __a, uint16x8_t __b,
21712 uint16x4_t __c, const int __lane)
21713 {
21714 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21715 }
21716
21717 __extension__ extern __inline uint32x4_t
21718 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21719 vmlaq_lane_u32 (uint32x4_t __a, uint32x4_t __b,
21720 uint32x2_t __c, const int __lane)
21721 {
21722 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21723 }
21724
21725 /* vmlaq_laneq */
21726
21727 __extension__ extern __inline float32x4_t
21728 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21729 vmlaq_laneq_f32 (float32x4_t __a, float32x4_t __b,
21730 float32x4_t __c, const int __lane)
21731 {
21732 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21733 }
21734
21735 __extension__ extern __inline int16x8_t
21736 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21737 vmlaq_laneq_s16 (int16x8_t __a, int16x8_t __b,
21738 int16x8_t __c, const int __lane)
21739 {
21740 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21741 }
21742
21743 __extension__ extern __inline int32x4_t
21744 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21745 vmlaq_laneq_s32 (int32x4_t __a, int32x4_t __b,
21746 int32x4_t __c, const int __lane)
21747 {
21748 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21749 }
21750
21751 __extension__ extern __inline uint16x8_t
21752 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21753 vmlaq_laneq_u16 (uint16x8_t __a, uint16x8_t __b,
21754 uint16x8_t __c, const int __lane)
21755 {
21756 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21757 }
21758
21759 __extension__ extern __inline uint32x4_t
21760 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21761 vmlaq_laneq_u32 (uint32x4_t __a, uint32x4_t __b,
21762 uint32x4_t __c, const int __lane)
21763 {
21764 return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
21765 }
21766
21767 /* vmls */
21768
21769 __extension__ extern __inline float32x2_t
21770 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21771 vmls_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
21772 {
21773 return __a - __b * __c;
21774 }
21775
21776 __extension__ extern __inline float64x1_t
21777 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21778 vmls_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c)
21779 {
21780 return __a - __b * __c;
21781 }
21782
21783 __extension__ extern __inline float32x4_t
21784 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21785 vmlsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
21786 {
21787 return __a - __b * __c;
21788 }
21789
21790 __extension__ extern __inline float64x2_t
21791 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21792 vmlsq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c)
21793 {
21794 return __a - __b * __c;
21795 }
21796
21797 /* vmls_lane */
21798
21799 __extension__ extern __inline float32x2_t
21800 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21801 vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
21802 float32x2_t __c, const int __lane)
21803 {
21804 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21805 }
21806
21807 __extension__ extern __inline int16x4_t
21808 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21809 vmls_lane_s16 (int16x4_t __a, int16x4_t __b,
21810 int16x4_t __c, const int __lane)
21811 {
21812 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21813 }
21814
21815 __extension__ extern __inline int32x2_t
21816 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21817 vmls_lane_s32 (int32x2_t __a, int32x2_t __b,
21818 int32x2_t __c, const int __lane)
21819 {
21820 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21821 }
21822
21823 __extension__ extern __inline uint16x4_t
21824 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21825 vmls_lane_u16 (uint16x4_t __a, uint16x4_t __b,
21826 uint16x4_t __c, const int __lane)
21827 {
21828 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21829 }
21830
21831 __extension__ extern __inline uint32x2_t
21832 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21833 vmls_lane_u32 (uint32x2_t __a, uint32x2_t __b,
21834 uint32x2_t __c, const int __lane)
21835 {
21836 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21837 }
21838
21839 /* vmls_laneq */
21840
21841 __extension__ extern __inline float32x2_t
21842 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21843 vmls_laneq_f32 (float32x2_t __a, float32x2_t __b,
21844 float32x4_t __c, const int __lane)
21845 {
21846 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21847 }
21848
21849 __extension__ extern __inline int16x4_t
21850 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21851 vmls_laneq_s16 (int16x4_t __a, int16x4_t __b,
21852 int16x8_t __c, const int __lane)
21853 {
21854 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21855 }
21856
21857 __extension__ extern __inline int32x2_t
21858 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21859 vmls_laneq_s32 (int32x2_t __a, int32x2_t __b,
21860 int32x4_t __c, const int __lane)
21861 {
21862 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21863 }
21864
21865 __extension__ extern __inline uint16x4_t
21866 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21867 vmls_laneq_u16 (uint16x4_t __a, uint16x4_t __b,
21868 uint16x8_t __c, const int __lane)
21869 {
21870 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21871 }
21872
21873 __extension__ extern __inline uint32x2_t
21874 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21875 vmls_laneq_u32 (uint32x2_t __a, uint32x2_t __b,
21876 uint32x4_t __c, const int __lane)
21877 {
21878 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21879 }
21880
21881 /* vmlsq_lane */
21882
21883 __extension__ extern __inline float32x4_t
21884 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21885 vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
21886 float32x2_t __c, const int __lane)
21887 {
21888 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21889 }
21890
21891 __extension__ extern __inline int16x8_t
21892 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21893 vmlsq_lane_s16 (int16x8_t __a, int16x8_t __b,
21894 int16x4_t __c, const int __lane)
21895 {
21896 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21897 }
21898
21899 __extension__ extern __inline int32x4_t
21900 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21901 vmlsq_lane_s32 (int32x4_t __a, int32x4_t __b,
21902 int32x2_t __c, const int __lane)
21903 {
21904 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21905 }
21906
21907 __extension__ extern __inline uint16x8_t
21908 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21909 vmlsq_lane_u16 (uint16x8_t __a, uint16x8_t __b,
21910 uint16x4_t __c, const int __lane)
21911 {
21912 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21913 }
21914
21915 __extension__ extern __inline uint32x4_t
21916 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21917 vmlsq_lane_u32 (uint32x4_t __a, uint32x4_t __b,
21918 uint32x2_t __c, const int __lane)
21919 {
21920 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21921 }
21922
21923 /* vmlsq_laneq */
21924
21925 __extension__ extern __inline float32x4_t
21926 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21927 vmlsq_laneq_f32 (float32x4_t __a, float32x4_t __b,
21928 float32x4_t __c, const int __lane)
21929 {
21930 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21931 }
21932
21933 __extension__ extern __inline int16x8_t
21934 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21935 vmlsq_laneq_s16 (int16x8_t __a, int16x8_t __b,
21936 int16x8_t __c, const int __lane)
21937 {
21938 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21939 }
21940
21941 __extension__ extern __inline int32x4_t
21942 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21943 vmlsq_laneq_s32 (int32x4_t __a, int32x4_t __b,
21944 int32x4_t __c, const int __lane)
21945 {
21946 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21947 }

21948 __extension__ extern __inline uint16x8_t
21949 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21950 vmlsq_laneq_u16 (uint16x8_t __a, uint16x8_t __b,
21951 uint16x8_t __c, const int __lane)
21952 {
21953 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21954 }
21955
21956 __extension__ extern __inline uint32x4_t
21957 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21958 vmlsq_laneq_u32 (uint32x4_t __a, uint32x4_t __b,
21959 uint32x4_t __c, const int __lane)
21960 {
21961 return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
21962 }
21963
21964 /* vmov_n_ */
21965
21966 __extension__ extern __inline float16x4_t
21967 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21968 vmov_n_f16 (float16_t __a)
21969 {
21970 return vdup_n_f16 (__a);
21971 }
21972
21973 __extension__ extern __inline float32x2_t
21974 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21975 vmov_n_f32 (float32_t __a)
21976 {
21977 return vdup_n_f32 (__a);
21978 }
21979
21980 __extension__ extern __inline float64x1_t
21981 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21982 vmov_n_f64 (float64_t __a)
21983 {
21984 return (float64x1_t) {__a};
21985 }
21986
21987 __extension__ extern __inline poly8x8_t
21988 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21989 vmov_n_p8 (poly8_t __a)
21990 {
21991 return vdup_n_p8 (__a);
21992 }
21993
21994 __extension__ extern __inline poly16x4_t
21995 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
21996 vmov_n_p16 (poly16_t __a)
21997 {
21998 return vdup_n_p16 (__a);
21999 }
22000
22001 __extension__ extern __inline poly64x1_t
22002 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22003 vmov_n_p64 (poly64_t __a)
22004 {
22005 return vdup_n_p64 (__a);
22006 }
22007
22008 __extension__ extern __inline int8x8_t
22009 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22010 vmov_n_s8 (int8_t __a)
22011 {
22012 return vdup_n_s8 (__a);
22013 }
22014
22015 __extension__ extern __inline int16x4_t
22016 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22017 vmov_n_s16 (int16_t __a)
22018 {
22019 return vdup_n_s16 (__a);
22020 }
22021
22022 __extension__ extern __inline int32x2_t
22023 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22024 vmov_n_s32 (int32_t __a)
22025 {
22026 return vdup_n_s32 (__a);
22027 }
22028
22029 __extension__ extern __inline int64x1_t
22030 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22031 vmov_n_s64 (int64_t __a)
22032 {
22033 return (int64x1_t) {__a};
22034 }
22035
22036 __extension__ extern __inline uint8x8_t
22037 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22038 vmov_n_u8 (uint8_t __a)
22039 {
22040 return vdup_n_u8 (__a);
22041 }
22042
22043 __extension__ extern __inline uint16x4_t
22044 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22045 vmov_n_u16 (uint16_t __a)
22046 {
22047 return vdup_n_u16 (__a);
22048 }
22049
22050 __extension__ extern __inline uint32x2_t
22051 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22052 vmov_n_u32 (uint32_t __a)
22053 {
22054 return vdup_n_u32 (__a);
22055 }
22056
22057 __extension__ extern __inline uint64x1_t
22058 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22059 vmov_n_u64 (uint64_t __a)
22060 {
22061 return (uint64x1_t) {__a};
22062 }
22063
22064 __extension__ extern __inline float16x8_t
22065 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22066 vmovq_n_f16 (float16_t __a)
22067 {
22068 return vdupq_n_f16 (__a);
22069 }
22070
22071 __extension__ extern __inline float32x4_t
22072 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22073 vmovq_n_f32 (float32_t __a)
22074 {
22075 return vdupq_n_f32 (__a);
22076 }
22077
22078 __extension__ extern __inline float64x2_t
22079 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22080 vmovq_n_f64 (float64_t __a)
22081 {
22082 return vdupq_n_f64 (__a);
22083 }
22084
22085 __extension__ extern __inline poly8x16_t
22086 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22087 vmovq_n_p8 (poly8_t __a)
22088 {
22089 return vdupq_n_p8 (__a);
22090 }
22091
22092 __extension__ extern __inline poly16x8_t
22093 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22094 vmovq_n_p16 (poly16_t __a)
22095 {
22096 return vdupq_n_p16 (__a);
22097 }
22098
22099 __extension__ extern __inline poly64x2_t
22100 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22101 vmovq_n_p64 (poly64_t __a)
22102 {
22103 return vdupq_n_p64 (__a);
22104 }
22105
22106 __extension__ extern __inline int8x16_t
22107 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22108 vmovq_n_s8 (int8_t __a)
22109 {
22110 return vdupq_n_s8 (__a);
22111 }
22112
22113 __extension__ extern __inline int16x8_t
22114 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22115 vmovq_n_s16 (int16_t __a)
22116 {
22117 return vdupq_n_s16 (__a);
22118 }
22119
22120 __extension__ extern __inline int32x4_t
22121 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22122 vmovq_n_s32 (int32_t __a)
22123 {
22124 return vdupq_n_s32 (__a);
22125 }
22126
22127 __extension__ extern __inline int64x2_t
22128 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22129 vmovq_n_s64 (int64_t __a)
22130 {
22131 return vdupq_n_s64 (__a);
22132 }
22133
22134 __extension__ extern __inline uint8x16_t
22135 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22136 vmovq_n_u8 (uint8_t __a)
22137 {
22138 return vdupq_n_u8 (__a);
22139 }
22140
22141 __extension__ extern __inline uint16x8_t
22142 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22143 vmovq_n_u16 (uint16_t __a)
22144 {
22145 return vdupq_n_u16 (__a);
22146 }
22147
22148 __extension__ extern __inline uint32x4_t
22149 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22150 vmovq_n_u32 (uint32_t __a)
22151 {
22152 return vdupq_n_u32 (__a);
22153 }
22154
22155 __extension__ extern __inline uint64x2_t
22156 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22157 vmovq_n_u64 (uint64_t __a)
22158 {
22159 return vdupq_n_u64 (__a);
22160 }
22161
22162 /* vmul_lane */
22163
22164 __extension__ extern __inline float32x2_t
22165 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22166 vmul_lane_f32 (float32x2_t __a, float32x2_t __b, const int __lane)
22167 {
22168 return __a * __aarch64_vget_lane_any (__b, __lane);
22169 }
22170
22171 __extension__ extern __inline float64x1_t
22172 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22173 vmul_lane_f64 (float64x1_t __a, float64x1_t __b, const int __lane)
22174 {
22175 return __a * __b; /* float64x1_t has only one lane, so __lane is 0.  */
22176 }
22177
22178 __extension__ extern __inline int16x4_t
22179 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22180 vmul_lane_s16 (int16x4_t __a, int16x4_t __b, const int __lane)
22181 {
22182 return __a * __aarch64_vget_lane_any (__b, __lane);
22183 }
22184
22185 __extension__ extern __inline int32x2_t
22186 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22187 vmul_lane_s32 (int32x2_t __a, int32x2_t __b, const int __lane)
22188 {
22189 return __a * __aarch64_vget_lane_any (__b, __lane);
22190 }
22191
22192 __extension__ extern __inline uint16x4_t
22193 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22194 vmul_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __lane)
22195 {
22196 return __a * __aarch64_vget_lane_any (__b, __lane);
22197 }
22198
22199 __extension__ extern __inline uint32x2_t
22200 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22201 vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __lane)
22202 {
22203 return __a * __aarch64_vget_lane_any (__b, __lane);
22204 }
22205
22206 /* vmuld_lane */
22207
22208 __extension__ extern __inline float64_t
22209 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22210 vmuld_lane_f64 (float64_t __a, float64x1_t __b, const int __lane)
22211 {
22212 return __a * __aarch64_vget_lane_any (__b, __lane);
22213 }
22214
22215 __extension__ extern __inline float64_t
22216 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22217 vmuld_laneq_f64 (float64_t __a, float64x2_t __b, const int __lane)
22218 {
22219 return __a * __aarch64_vget_lane_any (__b, __lane);
22220 }
22221
22222 /* vmuls_lane */
22223
22224 __extension__ extern __inline float32_t
22225 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22226 vmuls_lane_f32 (float32_t __a, float32x2_t __b, const int __lane)
22227 {
22228 return __a * __aarch64_vget_lane_any (__b, __lane);
22229 }
22230
22231 __extension__ extern __inline float32_t
22232 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22233 vmuls_laneq_f32 (float32_t __a, float32x4_t __b, const int __lane)
22234 {
22235 return __a * __aarch64_vget_lane_any (__b, __lane);
22236 }
22237
22238 /* vmul_laneq */
22239
22240 __extension__ extern __inline float32x2_t
22241 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22242 vmul_laneq_f32 (float32x2_t __a, float32x4_t __b, const int __lane)
22243 {
22244 return __a * __aarch64_vget_lane_any (__b, __lane);
22245 }
22246
22247 __extension__ extern __inline float64x1_t
22248 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22249 vmul_laneq_f64 (float64x1_t __a, float64x2_t __b, const int __lane)
22250 {
22251 return __a * __aarch64_vget_lane_any (__b, __lane);
22252 }
22253
22254 __extension__ extern __inline int16x4_t
22255 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22256 vmul_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __lane)
22257 {
22258 return __a * __aarch64_vget_lane_any (__b, __lane);
22259 }
22260
22261 __extension__ extern __inline int32x2_t
22262 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22263 vmul_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __lane)
22264 {
22265 return __a * __aarch64_vget_lane_any (__b, __lane);
22266 }
22267
22268 __extension__ extern __inline uint16x4_t
22269 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22270 vmul_laneq_u16 (uint16x4_t __a, uint16x8_t __b, const int __lane)
22271 {
22272 return __a * __aarch64_vget_lane_any (__b, __lane);
22273 }
22274
22275 __extension__ extern __inline uint32x2_t
22276 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22277 vmul_laneq_u32 (uint32x2_t __a, uint32x4_t __b, const int __lane)
22278 {
22279 return __a * __aarch64_vget_lane_any (__b, __lane);
22280 }
22281
22282 /* vmul_n */
22283
22284 __extension__ extern __inline float64x1_t
22285 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22286 vmul_n_f64 (float64x1_t __a, float64_t __b)
22287 {
22288 return (float64x1_t) { vget_lane_f64 (__a, 0) * __b };
22289 }
22290
22291 /* vmulq_lane */
22292
22293 __extension__ extern __inline float32x4_t
22294 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22295 vmulq_lane_f32 (float32x4_t __a, float32x2_t __b, const int __lane)
22296 {
22297 return __a * __aarch64_vget_lane_any (__b, __lane);
22298 }
22299
22300 __extension__ extern __inline float64x2_t
22301 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22302 vmulq_lane_f64 (float64x2_t __a, float64x1_t __b, const int __lane)
22303 {
22304 __AARCH64_LANE_CHECK (__a, __lane);
22305 return __a * __b[0];
22306 }
22307
22308 __extension__ extern __inline int16x8_t
22309 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22310 vmulq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __lane)
22311 {
22312 return __a * __aarch64_vget_lane_any (__b, __lane);
22313 }
22314
22315 __extension__ extern __inline int32x4_t
22316 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22317 vmulq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __lane)
22318 {
22319 return __a * __aarch64_vget_lane_any (__b, __lane);
22320 }
22321
22322 __extension__ extern __inline uint16x8_t
22323 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22324 vmulq_lane_u16 (uint16x8_t __a, uint16x4_t __b, const int __lane)
22325 {
22326 return __a * __aarch64_vget_lane_any (__b, __lane);
22327 }
22328
22329 __extension__ extern __inline uint32x4_t
22330 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22331 vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __lane)
22332 {
22333 return __a * __aarch64_vget_lane_any (__b, __lane);
22334 }
22335
22336 /* vmulq_laneq */
22337
22338 __extension__ extern __inline float32x4_t
22339 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22340 vmulq_laneq_f32 (float32x4_t __a, float32x4_t __b, const int __lane)
22341 {
22342 return __a * __aarch64_vget_lane_any (__b, __lane);
22343 }
22344
22345 __extension__ extern __inline float64x2_t
22346 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22347 vmulq_laneq_f64 (float64x2_t __a, float64x2_t __b, const int __lane)
22348 {
22349 return __a * __aarch64_vget_lane_any (__b, __lane);
22350 }
22351
22352 __extension__ extern __inline int16x8_t
22353 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22354 vmulq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __lane)
22355 {
22356 return __a * __aarch64_vget_lane_any (__b, __lane);
22357 }
22358
22359 __extension__ extern __inline int32x4_t
22360 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22361 vmulq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __lane)
22362 {
22363 return __a * __aarch64_vget_lane_any (__b, __lane);
22364 }
22365
22366 __extension__ extern __inline uint16x8_t
22367 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22368 vmulq_laneq_u16 (uint16x8_t __a, uint16x8_t __b, const int __lane)
22369 {
22370 return __a * __aarch64_vget_lane_any (__b, __lane);
22371 }
22372
22373 __extension__ extern __inline uint32x4_t
22374 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22375 vmulq_laneq_u32 (uint32x4_t __a, uint32x4_t __b, const int __lane)
22376 {
22377 return __a * __aarch64_vget_lane_any (__b, __lane);
22378 }
22379
22380 /* vmul_n. */
22381
22382 __extension__ extern __inline float32x2_t
22383 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22384 vmul_n_f32 (float32x2_t __a, float32_t __b)
22385 {
22386 return __a * __b;
22387 }
22388
22389 __extension__ extern __inline float32x4_t
22390 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22391 vmulq_n_f32 (float32x4_t __a, float32_t __b)
22392 {
22393 return __a * __b;
22394 }
22395
22396 __extension__ extern __inline float64x2_t
22397 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22398 vmulq_n_f64 (float64x2_t __a, float64_t __b)
22399 {
22400 return __a * __b;
22401 }
22402
22403 __extension__ extern __inline int16x4_t
22404 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22405 vmul_n_s16 (int16x4_t __a, int16_t __b)
22406 {
22407 return __a * __b;
22408 }
22409
22410 __extension__ extern __inline int16x8_t
22411 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22412 vmulq_n_s16 (int16x8_t __a, int16_t __b)
22413 {
22414 return __a * __b;
22415 }
22416
22417 __extension__ extern __inline int32x2_t
22418 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22419 vmul_n_s32 (int32x2_t __a, int32_t __b)
22420 {
22421 return __a * __b;
22422 }
22423
22424 __extension__ extern __inline int32x4_t
22425 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22426 vmulq_n_s32 (int32x4_t __a, int32_t __b)
22427 {
22428 return __a * __b;
22429 }
22430
22431 __extension__ extern __inline uint16x4_t
22432 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22433 vmul_n_u16 (uint16x4_t __a, uint16_t __b)
22434 {
22435 return __a * __b;
22436 }
22437
22438 __extension__ extern __inline uint16x8_t
22439 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22440 vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
22441 {
22442 return __a * __b;
22443 }
22444
22445 __extension__ extern __inline uint32x2_t
22446 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22447 vmul_n_u32 (uint32x2_t __a, uint32_t __b)
22448 {
22449 return __a * __b;
22450 }
22451
22452 __extension__ extern __inline uint32x4_t
22453 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22454 vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
22455 {
22456 return __a * __b;
22457 }
22458
22459 /* vmvn */
22460
22461 __extension__ extern __inline poly8x8_t
22462 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22463 vmvn_p8 (poly8x8_t __a)
22464 {
22465 return (poly8x8_t) ~((int8x8_t) __a);
22466 }
22467
22468 __extension__ extern __inline int8x8_t
22469 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22470 vmvn_s8 (int8x8_t __a)
22471 {
22472 return ~__a;
22473 }
22474
22475 __extension__ extern __inline int16x4_t
22476 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22477 vmvn_s16 (int16x4_t __a)
22478 {
22479 return ~__a;
22480 }
22481
22482 __extension__ extern __inline int32x2_t
22483 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22484 vmvn_s32 (int32x2_t __a)
22485 {
22486 return ~__a;
22487 }
22488
22489 __extension__ extern __inline uint8x8_t
22490 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22491 vmvn_u8 (uint8x8_t __a)
22492 {
22493 return ~__a;
22494 }
22495
22496 __extension__ extern __inline uint16x4_t
22497 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22498 vmvn_u16 (uint16x4_t __a)
22499 {
22500 return ~__a;
22501 }
22502
22503 __extension__ extern __inline uint32x2_t
22504 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22505 vmvn_u32 (uint32x2_t __a)
22506 {
22507 return ~__a;
22508 }
22509
22510 __extension__ extern __inline poly8x16_t
22511 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22512 vmvnq_p8 (poly8x16_t __a)
22513 {
22514 return (poly8x16_t) ~((int8x16_t) __a);
22515 }
22516
22517 __extension__ extern __inline int8x16_t
22518 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22519 vmvnq_s8 (int8x16_t __a)
22520 {
22521 return ~__a;
22522 }
22523
22524 __extension__ extern __inline int16x8_t
22525 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22526 vmvnq_s16 (int16x8_t __a)
22527 {
22528 return ~__a;
22529 }
22530
22531 __extension__ extern __inline int32x4_t
22532 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22533 vmvnq_s32 (int32x4_t __a)
22534 {
22535 return ~__a;
22536 }
22537
22538 __extension__ extern __inline uint8x16_t
22539 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22540 vmvnq_u8 (uint8x16_t __a)
22541 {
22542 return ~__a;
22543 }
22544
22545 __extension__ extern __inline uint16x8_t
22546 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22547 vmvnq_u16 (uint16x8_t __a)
22548 {
22549 return ~__a;
22550 }
22551
22552 __extension__ extern __inline uint32x4_t
22553 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22554 vmvnq_u32 (uint32x4_t __a)
22555 {
22556 return ~__a;
22557 }
22558
22559 /* vneg */
22560
22561 __extension__ extern __inline float32x2_t
22562 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22563 vneg_f32 (float32x2_t __a)
22564 {
22565 return -__a;
22566 }
22567
22568 __extension__ extern __inline float64x1_t
22569 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22570 vneg_f64 (float64x1_t __a)
22571 {
22572 return -__a;
22573 }
22574
22575 __extension__ extern __inline int8x8_t
22576 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22577 vneg_s8 (int8x8_t __a)
22578 {
22579 return -__a;
22580 }
22581
22582 __extension__ extern __inline int16x4_t
22583 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22584 vneg_s16 (int16x4_t __a)
22585 {
22586 return -__a;
22587 }
22588
22589 __extension__ extern __inline int32x2_t
22590 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22591 vneg_s32 (int32x2_t __a)
22592 {
22593 return -__a;
22594 }
22595
22596 __extension__ extern __inline int64x1_t
22597 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22598 vneg_s64 (int64x1_t __a)
22599 {
22600 return -__a;
22601 }
22602
22603 /* According to the ACLE, the negative of the minimum (signed)
22604 value is itself. This leads to a semantics mismatch, as this is
22605 undefined behaviour in C. The value range predictor is not
21606 aware that the negation of a negative number can still be negative,
22607 and it may try to fold the expression. See the test in
22608 gcc.target/aarch64/vnegd_s64.c for an example.
22609
21610 The cast below tricks the value range predictor into including
21611 INT64_MIN in the range it computes. So for x in the range
21612 [INT64_MIN, y] the range prediction after vnegd_s64 (x) will
21613 be the anti-range ~[INT64_MIN + 1, y], i.e. anything outside it. */
22614
22615 __extension__ extern __inline int64_t
22616 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22617 vnegd_s64 (int64_t __a)
22618 {
22619 return - (uint64_t) __a;
22620 }
22621
22622 __extension__ extern __inline float32x4_t
22623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22624 vnegq_f32 (float32x4_t __a)
22625 {
22626 return -__a;
22627 }
22628
22629 __extension__ extern __inline float64x2_t
22630 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22631 vnegq_f64 (float64x2_t __a)
22632 {
22633 return -__a;
22634 }
22635
22636 __extension__ extern __inline int8x16_t
22637 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22638 vnegq_s8 (int8x16_t __a)
22639 {
22640 return -__a;
22641 }
22642
22643 __extension__ extern __inline int16x8_t
22644 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22645 vnegq_s16 (int16x8_t __a)
22646 {
22647 return -__a;
22648 }
22649
22650 __extension__ extern __inline int32x4_t
22651 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22652 vnegq_s32 (int32x4_t __a)
22653 {
22654 return -__a;
22655 }
22656
22657 __extension__ extern __inline int64x2_t
22658 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22659 vnegq_s64 (int64x2_t __a)
22660 {
22661 return -__a;
22662 }
22663
22664 /* vpadd */
22665
22666 __extension__ extern __inline float32x2_t
22667 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22668 vpadd_f32 (float32x2_t __a, float32x2_t __b)
22669 {
22670 return __builtin_aarch64_faddpv2sf (__a, __b);
22671 }
22672
22673 __extension__ extern __inline float32x4_t
22674 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22675 vpaddq_f32 (float32x4_t __a, float32x4_t __b)
22676 {
22677 return __builtin_aarch64_faddpv4sf (__a, __b);
22678 }
22679
22680 __extension__ extern __inline float64x2_t
22681 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22682 vpaddq_f64 (float64x2_t __a, float64x2_t __b)
22683 {
22684 return __builtin_aarch64_faddpv2df (__a, __b);
22685 }
22686
22687 __extension__ extern __inline int8x8_t
22688 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22689 vpadd_s8 (int8x8_t __a, int8x8_t __b)
22690 {
22691 return __builtin_aarch64_addpv8qi (__a, __b);
22692 }
22693
22694 __extension__ extern __inline int16x4_t
22695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22696 vpadd_s16 (int16x4_t __a, int16x4_t __b)
22697 {
22698 return __builtin_aarch64_addpv4hi (__a, __b);
22699 }
22700
22701 __extension__ extern __inline int32x2_t
22702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22703 vpadd_s32 (int32x2_t __a, int32x2_t __b)
22704 {
22705 return __builtin_aarch64_addpv2si (__a, __b);
22706 }
22707
22708 __extension__ extern __inline uint8x8_t
22709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22710 vpadd_u8 (uint8x8_t __a, uint8x8_t __b)
22711 {
22712 return (uint8x8_t) __builtin_aarch64_addpv8qi ((int8x8_t) __a,
22713 (int8x8_t) __b);
22714 }
22715
22716 __extension__ extern __inline uint16x4_t
22717 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22718 vpadd_u16 (uint16x4_t __a, uint16x4_t __b)
22719 {
22720 return (uint16x4_t) __builtin_aarch64_addpv4hi ((int16x4_t) __a,
22721 (int16x4_t) __b);
22722 }
22723
22724 __extension__ extern __inline uint32x2_t
22725 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22726 vpadd_u32 (uint32x2_t __a, uint32x2_t __b)
22727 {
22728 return (uint32x2_t) __builtin_aarch64_addpv2si ((int32x2_t) __a,
22729 (int32x2_t) __b);
22730 }
22731
22732 __extension__ extern __inline float32_t
22733 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22734 vpadds_f32 (float32x2_t __a)
22735 {
22736 return __builtin_aarch64_reduc_plus_scal_v2sf (__a);
22737 }
22738
22739 __extension__ extern __inline float64_t
22740 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22741 vpaddd_f64 (float64x2_t __a)
22742 {
22743 return __builtin_aarch64_reduc_plus_scal_v2df (__a);
22744 }
22745
22746 __extension__ extern __inline int64_t
22747 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22748 vpaddd_s64 (int64x2_t __a)
22749 {
22750 return __builtin_aarch64_addpdi (__a);
22751 }
22752
22753 __extension__ extern __inline uint64_t
22754 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22755 vpaddd_u64 (uint64x2_t __a)
22756 {
22757 return __builtin_aarch64_addpdi ((int64x2_t) __a);
22758 }
22759
22760 /* vqabs */
22761
22762 __extension__ extern __inline int64x2_t
22763 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22764 vqabsq_s64 (int64x2_t __a)
22765 {
22766 return (int64x2_t) __builtin_aarch64_sqabsv2di (__a);
22767 }
22768
22769 __extension__ extern __inline int8_t
22770 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22771 vqabsb_s8 (int8_t __a)
22772 {
22773 return (int8_t) __builtin_aarch64_sqabsqi (__a);
22774 }
22775
22776 __extension__ extern __inline int16_t
22777 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22778 vqabsh_s16 (int16_t __a)
22779 {
22780 return (int16_t) __builtin_aarch64_sqabshi (__a);
22781 }
22782
22783 __extension__ extern __inline int32_t
22784 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22785 vqabss_s32 (int32_t __a)
22786 {
22787 return (int32_t) __builtin_aarch64_sqabssi (__a);
22788 }
22789
22790 __extension__ extern __inline int64_t
22791 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22792 vqabsd_s64 (int64_t __a)
22793 {
22794 return __builtin_aarch64_sqabsdi (__a);
22795 }
22796
22797 /* vqadd */
22798
22799 __extension__ extern __inline int8_t
22800 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22801 vqaddb_s8 (int8_t __a, int8_t __b)
22802 {
22803 return (int8_t) __builtin_aarch64_sqaddqi (__a, __b);
22804 }
22805
22806 __extension__ extern __inline int16_t
22807 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22808 vqaddh_s16 (int16_t __a, int16_t __b)
22809 {
22810 return (int16_t) __builtin_aarch64_sqaddhi (__a, __b);
22811 }
22812
22813 __extension__ extern __inline int32_t
22814 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22815 vqadds_s32 (int32_t __a, int32_t __b)
22816 {
22817 return (int32_t) __builtin_aarch64_sqaddsi (__a, __b);
22818 }
22819
22820 __extension__ extern __inline int64_t
22821 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22822 vqaddd_s64 (int64_t __a, int64_t __b)
22823 {
22824 return __builtin_aarch64_sqadddi (__a, __b);
22825 }
22826
22827 __extension__ extern __inline uint8_t
22828 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22829 vqaddb_u8 (uint8_t __a, uint8_t __b)
22830 {
22831 return (uint8_t) __builtin_aarch64_uqaddqi_uuu (__a, __b);
22832 }
22833
22834 __extension__ extern __inline uint16_t
22835 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22836 vqaddh_u16 (uint16_t __a, uint16_t __b)
22837 {
22838 return (uint16_t) __builtin_aarch64_uqaddhi_uuu (__a, __b);
22839 }
22840
22841 __extension__ extern __inline uint32_t
22842 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22843 vqadds_u32 (uint32_t __a, uint32_t __b)
22844 {
22845 return (uint32_t) __builtin_aarch64_uqaddsi_uuu (__a, __b);
22846 }
22847
22848 __extension__ extern __inline uint64_t
22849 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22850 vqaddd_u64 (uint64_t __a, uint64_t __b)
22851 {
22852 return __builtin_aarch64_uqadddi_uuu (__a, __b);
22853 }
22854
22855 /* vqdmlal */
22856
22857 __extension__ extern __inline int32x4_t
22858 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22859 vqdmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
22860 {
22861 return __builtin_aarch64_sqdmlalv4hi (__a, __b, __c);
22862 }
22863
22864 __extension__ extern __inline int32x4_t
22865 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22866 vqdmlal_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
22867 {
22868 return __builtin_aarch64_sqdmlal2v8hi (__a, __b, __c);
22869 }
22870
22871 __extension__ extern __inline int32x4_t
22872 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22873 vqdmlal_high_lane_s16 (int32x4_t __a, int16x8_t __b, int16x4_t __c,
22874 int const __d)
22875 {
22876 return __builtin_aarch64_sqdmlal2_lanev8hi (__a, __b, __c, __d);
22877 }
22878
22879 __extension__ extern __inline int32x4_t
22880 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22881 vqdmlal_high_laneq_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
22882 int const __d)
22883 {
22884 return __builtin_aarch64_sqdmlal2_laneqv8hi (__a, __b, __c, __d);
22885 }
22886
22887 __extension__ extern __inline int32x4_t
22888 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22889 vqdmlal_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
22890 {
22891 return __builtin_aarch64_sqdmlal2_nv8hi (__a, __b, __c);
22892 }
22893
22894 __extension__ extern __inline int32x4_t
22895 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22896 vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
22897 {
22898 return __builtin_aarch64_sqdmlal_lanev4hi (__a, __b, __c, __d);
22899 }
22900
22901 __extension__ extern __inline int32x4_t
22902 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22903 vqdmlal_laneq_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
22904 {
22905 return __builtin_aarch64_sqdmlal_laneqv4hi (__a, __b, __c, __d);
22906 }
22907
22908 __extension__ extern __inline int32x4_t
22909 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22910 vqdmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
22911 {
22912 return __builtin_aarch64_sqdmlal_nv4hi (__a, __b, __c);
22913 }
22914
22915 __extension__ extern __inline int64x2_t
22916 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22917 vqdmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
22918 {
22919 return __builtin_aarch64_sqdmlalv2si (__a, __b, __c);
22920 }
22921
22922 __extension__ extern __inline int64x2_t
22923 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22924 vqdmlal_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
22925 {
22926 return __builtin_aarch64_sqdmlal2v4si (__a, __b, __c);
22927 }
22928
22929 __extension__ extern __inline int64x2_t
22930 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22931 vqdmlal_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x2_t __c,
22932 int const __d)
22933 {
22934 return __builtin_aarch64_sqdmlal2_lanev4si (__a, __b, __c, __d);
22935 }
22936
22937 __extension__ extern __inline int64x2_t
22938 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22939 vqdmlal_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
22940 int const __d)
22941 {
22942 return __builtin_aarch64_sqdmlal2_laneqv4si (__a, __b, __c, __d);
22943 }
22944
22945 __extension__ extern __inline int64x2_t
22946 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22947 vqdmlal_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
22948 {
22949 return __builtin_aarch64_sqdmlal2_nv4si (__a, __b, __c);
22950 }
22951
22952 __extension__ extern __inline int64x2_t
22953 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22954 vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
22955 {
22956 return __builtin_aarch64_sqdmlal_lanev2si (__a, __b, __c, __d);
22957 }
22958
22959 __extension__ extern __inline int64x2_t
22960 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22961 vqdmlal_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
22962 {
22963 return __builtin_aarch64_sqdmlal_laneqv2si (__a, __b, __c, __d);
22964 }
22965
22966 __extension__ extern __inline int64x2_t
22967 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22968 vqdmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
22969 {
22970 return __builtin_aarch64_sqdmlal_nv2si (__a, __b, __c);
22971 }
22972
22973 __extension__ extern __inline int32_t
22974 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22975 vqdmlalh_s16 (int32_t __a, int16_t __b, int16_t __c)
22976 {
22977 return __builtin_aarch64_sqdmlalhi (__a, __b, __c);
22978 }
22979
22980 __extension__ extern __inline int32_t
22981 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22982 vqdmlalh_lane_s16 (int32_t __a, int16_t __b, int16x4_t __c, const int __d)
22983 {
22984 return __builtin_aarch64_sqdmlal_lanehi (__a, __b, __c, __d);
22985 }
22986
22987 __extension__ extern __inline int32_t
22988 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22989 vqdmlalh_laneq_s16 (int32_t __a, int16_t __b, int16x8_t __c, const int __d)
22990 {
22991 return __builtin_aarch64_sqdmlal_laneqhi (__a, __b, __c, __d);
22992 }
22993
22994 __extension__ extern __inline int64_t
22995 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
22996 vqdmlals_s32 (int64_t __a, int32_t __b, int32_t __c)
22997 {
22998 return __builtin_aarch64_sqdmlalsi (__a, __b, __c);
22999 }
23000
23001 __extension__ extern __inline int64_t
23002 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23003 vqdmlals_lane_s32 (int64_t __a, int32_t __b, int32x2_t __c, const int __d)
23004 {
23005 return __builtin_aarch64_sqdmlal_lanesi (__a, __b, __c, __d);
23006 }
23007
23008 __extension__ extern __inline int64_t
23009 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23010 vqdmlals_laneq_s32 (int64_t __a, int32_t __b, int32x4_t __c, const int __d)
23011 {
23012 return __builtin_aarch64_sqdmlal_laneqsi (__a, __b, __c, __d);
23013 }
23014
23015 /* vqdmlsl */
23016
23017 __extension__ extern __inline int32x4_t
23018 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23019 vqdmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
23020 {
23021 return __builtin_aarch64_sqdmlslv4hi (__a, __b, __c);
23022 }
23023
23024 __extension__ extern __inline int32x4_t
23025 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23026 vqdmlsl_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
23027 {
23028 return __builtin_aarch64_sqdmlsl2v8hi (__a, __b, __c);
23029 }
23030
23031 __extension__ extern __inline int32x4_t
23032 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23033 vqdmlsl_high_lane_s16 (int32x4_t __a, int16x8_t __b, int16x4_t __c,
23034 int const __d)
23035 {
23036 return __builtin_aarch64_sqdmlsl2_lanev8hi (__a, __b, __c, __d);
23037 }
23038
23039 __extension__ extern __inline int32x4_t
23040 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23041 vqdmlsl_high_laneq_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c,
23042 int const __d)
23043 {
23044 return __builtin_aarch64_sqdmlsl2_laneqv8hi (__a, __b, __c, __d);
23045 }
23046
23047 __extension__ extern __inline int32x4_t
23048 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23049 vqdmlsl_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
23050 {
23051 return __builtin_aarch64_sqdmlsl2_nv8hi (__a, __b, __c);
23052 }
23053
23054 __extension__ extern __inline int32x4_t
23055 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23056 vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
23057 {
23058 return __builtin_aarch64_sqdmlsl_lanev4hi (__a, __b, __c, __d);
23059 }
23060
23061 __extension__ extern __inline int32x4_t
23062 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23063 vqdmlsl_laneq_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __c, int const __d)
23064 {
23065 return __builtin_aarch64_sqdmlsl_laneqv4hi (__a, __b, __c, __d);
23066 }
23067
23068 __extension__ extern __inline int32x4_t
23069 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23070 vqdmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
23071 {
23072 return __builtin_aarch64_sqdmlsl_nv4hi (__a, __b, __c);
23073 }
23074
23075 __extension__ extern __inline int64x2_t
23076 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23077 vqdmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
23078 {
23079 return __builtin_aarch64_sqdmlslv2si (__a, __b, __c);
23080 }
23081
23082 __extension__ extern __inline int64x2_t
23083 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23084 vqdmlsl_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
23085 {
23086 return __builtin_aarch64_sqdmlsl2v4si (__a, __b, __c);
23087 }
23088
23089 __extension__ extern __inline int64x2_t
23090 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23091 vqdmlsl_high_lane_s32 (int64x2_t __a, int32x4_t __b, int32x2_t __c,
23092 int const __d)
23093 {
23094 return __builtin_aarch64_sqdmlsl2_lanev4si (__a, __b, __c, __d);
23095 }
23096
23097 __extension__ extern __inline int64x2_t
23098 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23099 vqdmlsl_high_laneq_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c,
23100 int const __d)
23101 {
23102 return __builtin_aarch64_sqdmlsl2_laneqv4si (__a, __b, __c, __d);
23103 }
23104
23105 __extension__ extern __inline int64x2_t
23106 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23107 vqdmlsl_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
23108 {
23109 return __builtin_aarch64_sqdmlsl2_nv4si (__a, __b, __c);
23110 }
23111
23112 __extension__ extern __inline int64x2_t
23113 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23114 vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
23115 {
23116 return __builtin_aarch64_sqdmlsl_lanev2si (__a, __b, __c, __d);
23117 }
23118
23119 __extension__ extern __inline int64x2_t
23120 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23121 vqdmlsl_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __c, int const __d)
23122 {
23123 return __builtin_aarch64_sqdmlsl_laneqv2si (__a, __b, __c, __d);
23124 }
23125
23126 __extension__ extern __inline int64x2_t
23127 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23128 vqdmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
23129 {
23130 return __builtin_aarch64_sqdmlsl_nv2si (__a, __b, __c);
23131 }
23132
23133 __extension__ extern __inline int32_t
23134 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23135 vqdmlslh_s16 (int32_t __a, int16_t __b, int16_t __c)
23136 {
23137 return __builtin_aarch64_sqdmlslhi (__a, __b, __c);
23138 }
23139
23140 __extension__ extern __inline int32_t
23141 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23142 vqdmlslh_lane_s16 (int32_t __a, int16_t __b, int16x4_t __c, const int __d)
23143 {
23144 return __builtin_aarch64_sqdmlsl_lanehi (__a, __b, __c, __d);
23145 }
23146
23147 __extension__ extern __inline int32_t
23148 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23149 vqdmlslh_laneq_s16 (int32_t __a, int16_t __b, int16x8_t __c, const int __d)
23150 {
23151 return __builtin_aarch64_sqdmlsl_laneqhi (__a, __b, __c, __d);
23152 }
23153
23154 __extension__ extern __inline int64_t
23155 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23156 vqdmlsls_s32 (int64_t __a, int32_t __b, int32_t __c)
23157 {
23158 return __builtin_aarch64_sqdmlslsi (__a, __b, __c);
23159 }
23160
23161 __extension__ extern __inline int64_t
23162 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23163 vqdmlsls_lane_s32 (int64_t __a, int32_t __b, int32x2_t __c, const int __d)
23164 {
23165 return __builtin_aarch64_sqdmlsl_lanesi (__a, __b, __c, __d);
23166 }
23167
23168 __extension__ extern __inline int64_t
23169 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23170 vqdmlsls_laneq_s32 (int64_t __a, int32_t __b, int32x4_t __c, const int __d)
23171 {
23172 return __builtin_aarch64_sqdmlsl_laneqsi (__a, __b, __c, __d);
23173 }
23174
23175 /* vqdmulh */
23176
23177 __extension__ extern __inline int16x4_t
23178 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23179 vqdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
23180 {
23181 return __builtin_aarch64_sqdmulh_lanev4hi (__a, __b, __c);
23182 }
23183
23184 __extension__ extern __inline int32x2_t
23185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23186 vqdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
23187 {
23188 return __builtin_aarch64_sqdmulh_lanev2si (__a, __b, __c);
23189 }
23190
23191 __extension__ extern __inline int16x8_t
23192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23193 vqdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
23194 {
23195 return __builtin_aarch64_sqdmulh_lanev8hi (__a, __b, __c);
23196 }
23197
23198 __extension__ extern __inline int32x4_t
23199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23200 vqdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
23201 {
23202 return __builtin_aarch64_sqdmulh_lanev4si (__a, __b, __c);
23203 }
23204
23205 __extension__ extern __inline int16_t
23206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23207 vqdmulhh_s16 (int16_t __a, int16_t __b)
23208 {
23209 return (int16_t) __builtin_aarch64_sqdmulhhi (__a, __b);
23210 }
23211
23212 __extension__ extern __inline int16_t
23213 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23214 vqdmulhh_lane_s16 (int16_t __a, int16x4_t __b, const int __c)
23215 {
23216 return __builtin_aarch64_sqdmulh_lanehi (__a, __b, __c);
23217 }
23218
23219 __extension__ extern __inline int16_t
23220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23221 vqdmulhh_laneq_s16 (int16_t __a, int16x8_t __b, const int __c)
23222 {
23223 return __builtin_aarch64_sqdmulh_laneqhi (__a, __b, __c);
23224 }
23225
23226 __extension__ extern __inline int32_t
23227 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23228 vqdmulhs_s32 (int32_t __a, int32_t __b)
23229 {
23230 return (int32_t) __builtin_aarch64_sqdmulhsi (__a, __b);
23231 }
23232
23233 __extension__ extern __inline int32_t
23234 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23235 vqdmulhs_lane_s32 (int32_t __a, int32x2_t __b, const int __c)
23236 {
23237 return __builtin_aarch64_sqdmulh_lanesi (__a, __b, __c);
23238 }
23239
23240 __extension__ extern __inline int32_t
23241 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23242 vqdmulhs_laneq_s32 (int32_t __a, int32x4_t __b, const int __c)
23243 {
23244 return __builtin_aarch64_sqdmulh_laneqsi (__a, __b, __c);
23245 }
23246
23247 /* vqdmull */
23248
23249 __extension__ extern __inline int32x4_t
23250 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23251 vqdmull_s16 (int16x4_t __a, int16x4_t __b)
23252 {
23253 return __builtin_aarch64_sqdmullv4hi (__a, __b);
23254 }
23255
23256 __extension__ extern __inline int32x4_t
23257 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23258 vqdmull_high_s16 (int16x8_t __a, int16x8_t __b)
23259 {
23260 return __builtin_aarch64_sqdmull2v8hi (__a, __b);
23261 }
23262
23263 __extension__ extern __inline int32x4_t
23264 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23265 vqdmull_high_lane_s16 (int16x8_t __a, int16x4_t __b, int const __c)
23266 {
23267 return __builtin_aarch64_sqdmull2_lanev8hi (__a, __b, __c);
23268 }
23269
23270 __extension__ extern __inline int32x4_t
23271 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23272 vqdmull_high_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
23273 {
23274 return __builtin_aarch64_sqdmull2_laneqv8hi (__a, __b, __c);
23275 }
23276
23277 __extension__ extern __inline int32x4_t
23278 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23279 vqdmull_high_n_s16 (int16x8_t __a, int16_t __b)
23280 {
23281 return __builtin_aarch64_sqdmull2_nv8hi (__a, __b);
23282 }
23283
23284 __extension__ extern __inline int32x4_t
23285 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23286 vqdmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
23287 {
23288 return __builtin_aarch64_sqdmull_lanev4hi (__a, __b, __c);
23289 }
23290
23291 __extension__ extern __inline int32x4_t
23292 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23293 vqdmull_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
23294 {
23295 return __builtin_aarch64_sqdmull_laneqv4hi (__a, __b, __c);
23296 }
23297
23298 __extension__ extern __inline int32x4_t
23299 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23300 vqdmull_n_s16 (int16x4_t __a, int16_t __b)
23301 {
23302 return __builtin_aarch64_sqdmull_nv4hi (__a, __b);
23303 }
23304
23305 __extension__ extern __inline int64x2_t
23306 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23307 vqdmull_s32 (int32x2_t __a, int32x2_t __b)
23308 {
23309 return __builtin_aarch64_sqdmullv2si (__a, __b);
23310 }
23311
23312 __extension__ extern __inline int64x2_t
23313 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23314 vqdmull_high_s32 (int32x4_t __a, int32x4_t __b)
23315 {
23316 return __builtin_aarch64_sqdmull2v4si (__a, __b);
23317 }
23318
23319 __extension__ extern __inline int64x2_t
23320 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23321 vqdmull_high_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
23322 {
23323 return __builtin_aarch64_sqdmull2_lanev4si (__a, __b, __c);
23324 }
23325
23326 __extension__ extern __inline int64x2_t
23327 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23328 vqdmull_high_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
23329 {
23330 return __builtin_aarch64_sqdmull2_laneqv4si (__a, __b, __c);
23331 }
23332
23333 __extension__ extern __inline int64x2_t
23334 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23335 vqdmull_high_n_s32 (int32x4_t __a, int32_t __b)
23336 {
23337 return __builtin_aarch64_sqdmull2_nv4si (__a, __b);
23338 }
23339
23340 __extension__ extern __inline int64x2_t
23341 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23342 vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
23343 {
23344 return __builtin_aarch64_sqdmull_lanev2si (__a, __b, __c);
23345 }
23346
23347 __extension__ extern __inline int64x2_t
23348 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23349 vqdmull_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
23350 {
23351 return __builtin_aarch64_sqdmull_laneqv2si (__a, __b, __c);
23352 }
23353
23354 __extension__ extern __inline int64x2_t
23355 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23356 vqdmull_n_s32 (int32x2_t __a, int32_t __b)
23357 {
23358 return __builtin_aarch64_sqdmull_nv2si (__a, __b);
23359 }
23360
23361 __extension__ extern __inline int32_t
23362 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23363 vqdmullh_s16 (int16_t __a, int16_t __b)
23364 {
23365 return (int32_t) __builtin_aarch64_sqdmullhi (__a, __b);
23366 }
23367
23368 __extension__ extern __inline int32_t
23369 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23370 vqdmullh_lane_s16 (int16_t __a, int16x4_t __b, const int __c)
23371 {
23372 return __builtin_aarch64_sqdmull_lanehi (__a, __b, __c);
23373 }
23374
23375 __extension__ extern __inline int32_t
23376 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23377 vqdmullh_laneq_s16 (int16_t __a, int16x8_t __b, const int __c)
23378 {
23379 return __builtin_aarch64_sqdmull_laneqhi (__a, __b, __c);
23380 }
23381
23382 __extension__ extern __inline int64_t
23383 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23384 vqdmulls_s32 (int32_t __a, int32_t __b)
23385 {
23386 return __builtin_aarch64_sqdmullsi (__a, __b);
23387 }
23388
23389 __extension__ extern __inline int64_t
23390 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23391 vqdmulls_lane_s32 (int32_t __a, int32x2_t __b, const int __c)
23392 {
23393 return __builtin_aarch64_sqdmull_lanesi (__a, __b, __c);
23394 }
23395
23396 __extension__ extern __inline int64_t
23397 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23398 vqdmulls_laneq_s32 (int32_t __a, int32x4_t __b, const int __c)
23399 {
23400 return __builtin_aarch64_sqdmull_laneqsi (__a, __b, __c);
23401 }
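
/* Usage sketch (illustrative only, not part of this header): vqdmull widens,
   doubling and saturating each product, so a 16x16 multiply yields 32-bit
   lanes; the _high_ forms consume the upper half of 128-bit inputs.  The
   names `a', `b', `aq' and `bq' below are hypothetical.

     int32x4_t lo = vqdmull_s16 (a, b);         // lanes 0-3 of a and b
     int32x4_t hi = vqdmull_high_s16 (aq, bq);  // lanes 4-7 of aq and bq  */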
23402
23403 /* vqmovn */
23404
23405 __extension__ extern __inline int8x8_t
23406 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23407 vqmovn_s16 (int16x8_t __a)
23408 {
23409 return (int8x8_t) __builtin_aarch64_sqmovnv8hi (__a);
23410 }
23411
23412 __extension__ extern __inline int16x4_t
23413 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23414 vqmovn_s32 (int32x4_t __a)
23415 {
23416 return (int16x4_t) __builtin_aarch64_sqmovnv4si (__a);
23417 }
23418
23419 __extension__ extern __inline int32x2_t
23420 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23421 vqmovn_s64 (int64x2_t __a)
23422 {
23423 return (int32x2_t) __builtin_aarch64_sqmovnv2di (__a);
23424 }
23425
23426 __extension__ extern __inline uint8x8_t
23427 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23428 vqmovn_u16 (uint16x8_t __a)
23429 {
23430 return (uint8x8_t) __builtin_aarch64_uqmovnv8hi ((int16x8_t) __a);
23431 }
23432
23433 __extension__ extern __inline uint16x4_t
23434 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23435 vqmovn_u32 (uint32x4_t __a)
23436 {
23437 return (uint16x4_t) __builtin_aarch64_uqmovnv4si ((int32x4_t) __a);
23438 }
23439
23440 __extension__ extern __inline uint32x2_t
23441 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23442 vqmovn_u64 (uint64x2_t __a)
23443 {
23444 return (uint32x2_t) __builtin_aarch64_uqmovnv2di ((int64x2_t) __a);
23445 }
23446
23447 __extension__ extern __inline int8_t
23448 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23449 vqmovnh_s16 (int16_t __a)
23450 {
23451 return (int8_t) __builtin_aarch64_sqmovnhi (__a);
23452 }
23453
23454 __extension__ extern __inline int16_t
23455 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23456 vqmovns_s32 (int32_t __a)
23457 {
23458 return (int16_t) __builtin_aarch64_sqmovnsi (__a);
23459 }
23460
23461 __extension__ extern __inline int32_t
23462 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23463 vqmovnd_s64 (int64_t __a)
23464 {
23465 return (int32_t) __builtin_aarch64_sqmovndi (__a);
23466 }
23467
23468 __extension__ extern __inline uint8_t
23469 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23470 vqmovnh_u16 (uint16_t __a)
23471 {
23472 return (uint8_t) __builtin_aarch64_uqmovnhi (__a);
23473 }
23474
23475 __extension__ extern __inline uint16_t
23476 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23477 vqmovns_u32 (uint32_t __a)
23478 {
23479 return (uint16_t) __builtin_aarch64_uqmovnsi (__a);
23480 }
23481
23482 __extension__ extern __inline uint32_t
23483 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23484 vqmovnd_u64 (uint64_t __a)
23485 {
23486 return (uint32_t) __builtin_aarch64_uqmovndi (__a);
23487 }
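
/* Usage sketch (illustrative only, not part of this header): vqmovn narrows
   each lane to half the width with saturation.  `v' below is a hypothetical
   int16x8_t.

     int8x8_t n = vqmovn_s16 (v);   // each lane clamped to [-128, 127]  */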
23488
23489 /* vqmovun */
23490
23491 __extension__ extern __inline uint8x8_t
23492 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23493 vqmovun_s16 (int16x8_t __a)
23494 {
23495 return __builtin_aarch64_sqmovunv8hi_us (__a);
23496 }
23497
23498 __extension__ extern __inline uint16x4_t
23499 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23500 vqmovun_s32 (int32x4_t __a)
23501 {
23502 return __builtin_aarch64_sqmovunv4si_us (__a);
23503 }
23504
23505 __extension__ extern __inline uint32x2_t
23506 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23507 vqmovun_s64 (int64x2_t __a)
23508 {
23509 return __builtin_aarch64_sqmovunv2di_us (__a);
23510 }
23511
23512 __extension__ extern __inline uint8_t
23513 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23514 vqmovunh_s16 (int16_t __a)
23515 {
23516 return __builtin_aarch64_sqmovunhi_us (__a);
23517 }
23518
23519 __extension__ extern __inline uint16_t
23520 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23521 vqmovuns_s32 (int32_t __a)
23522 {
23523 return __builtin_aarch64_sqmovunsi_us (__a);
23524 }
23525
23526 __extension__ extern __inline uint32_t
23527 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23528 vqmovund_s64 (int64_t __a)
23529 {
23530 return __builtin_aarch64_sqmovundi_us (__a);
23531 }
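
/* Usage sketch (illustrative only, not part of this header): vqmovun narrows
   a signed input to an unsigned result, so negative lanes clamp to zero.
   `v' below is a hypothetical int16x8_t.

     uint8x8_t u = vqmovun_s16 (v);   // lanes clamped to [0, 255]  */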
23532
23533 /* vqneg */
23534
23535 __extension__ extern __inline int64x2_t
23536 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23537 vqnegq_s64 (int64x2_t __a)
23538 {
23539 return (int64x2_t) __builtin_aarch64_sqnegv2di (__a);
23540 }
23541
23542 __extension__ extern __inline int8_t
23543 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23544 vqnegb_s8 (int8_t __a)
23545 {
23546 return (int8_t) __builtin_aarch64_sqnegqi (__a);
23547 }
23548
23549 __extension__ extern __inline int16_t
23550 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23551 vqnegh_s16 (int16_t __a)
23552 {
23553 return (int16_t) __builtin_aarch64_sqneghi (__a);
23554 }
23555
23556 __extension__ extern __inline int32_t
23557 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23558 vqnegs_s32 (int32_t __a)
23559 {
23560 return (int32_t) __builtin_aarch64_sqnegsi (__a);
23561 }
23562
23563 __extension__ extern __inline int64_t
23564 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23565 vqnegd_s64 (int64_t __a)
23566 {
23567 return __builtin_aarch64_sqnegdi (__a);
23568 }
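
/* Usage sketch (illustrative only, not part of this header): the saturating
   negate avoids the wrap-around of plain negation at the minimum value.

     int32_t r = vqnegs_s32 (INT32_MIN);   // INT32_MAX, not INT32_MIN  */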
23569
23570 /* vqrdmulh */
23571
23572 __extension__ extern __inline int16x4_t
23573 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23574 vqrdmulh_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
23575 {
23576 return __builtin_aarch64_sqrdmulh_lanev4hi (__a, __b, __c);
23577 }
23578
23579 __extension__ extern __inline int32x2_t
23580 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23581 vqrdmulh_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
23582 {
23583 return __builtin_aarch64_sqrdmulh_lanev2si (__a, __b, __c);
23584 }
23585
23586 __extension__ extern __inline int16x8_t
23587 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23588 vqrdmulhq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __c)
23589 {
23590 return __builtin_aarch64_sqrdmulh_lanev8hi (__a, __b, __c);
23591 }
23592
23593 __extension__ extern __inline int32x4_t
23594 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23595 vqrdmulhq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __c)
23596 {
23597 return __builtin_aarch64_sqrdmulh_lanev4si (__a, __b, __c);
23598 }
23599
23600 __extension__ extern __inline int16_t
23601 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23602 vqrdmulhh_s16 (int16_t __a, int16_t __b)
23603 {
23604 return (int16_t) __builtin_aarch64_sqrdmulhhi (__a, __b);
23605 }
23606
23607 __extension__ extern __inline int16_t
23608 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23609 vqrdmulhh_lane_s16 (int16_t __a, int16x4_t __b, const int __c)
23610 {
23611 return __builtin_aarch64_sqrdmulh_lanehi (__a, __b, __c);
23612 }
23613
23614 __extension__ extern __inline int16_t
23615 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23616 vqrdmulhh_laneq_s16 (int16_t __a, int16x8_t __b, const int __c)
23617 {
23618 return __builtin_aarch64_sqrdmulh_laneqhi (__a, __b, __c);
23619 }
23620
23621 __extension__ extern __inline int32_t
23622 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23623 vqrdmulhs_s32 (int32_t __a, int32_t __b)
23624 {
23625 return (int32_t) __builtin_aarch64_sqrdmulhsi (__a, __b);
23626 }
23627
23628 __extension__ extern __inline int32_t
23629 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23630 vqrdmulhs_lane_s32 (int32_t __a, int32x2_t __b, const int __c)
23631 {
23632 return __builtin_aarch64_sqrdmulh_lanesi (__a, __b, __c);
23633 }
23634
23635 __extension__ extern __inline int32_t
23636 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23637 vqrdmulhs_laneq_s32 (int32_t __a, int32x4_t __b, const int __c)
23638 {
23639 return __builtin_aarch64_sqrdmulh_laneqsi (__a, __b, __c);
23640 }
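
/* Usage sketch (illustrative only, not part of this header): vqrdmulh is the
   rounding variant of vqdmulh, adding 1 << (width - 1) to the doubled product
   before taking the high half.  `x' and `coef' below are hypothetical.

     int16x4_t y = vqrdmulh_lane_s16 (x, coef, 0);   // rounded Q15 multiply  */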
23641
23642 /* vqrshl */
23643
23644 __extension__ extern __inline int8x8_t
23645 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23646 vqrshl_s8 (int8x8_t __a, int8x8_t __b)
23647 {
23648 return __builtin_aarch64_sqrshlv8qi (__a, __b);
23649 }
23650
23651 __extension__ extern __inline int16x4_t
23652 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23653 vqrshl_s16 (int16x4_t __a, int16x4_t __b)
23654 {
23655 return __builtin_aarch64_sqrshlv4hi (__a, __b);
23656 }
23657
23658 __extension__ extern __inline int32x2_t
23659 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23660 vqrshl_s32 (int32x2_t __a, int32x2_t __b)
23661 {
23662 return __builtin_aarch64_sqrshlv2si (__a, __b);
23663 }
23664
23665 __extension__ extern __inline int64x1_t
23666 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23667 vqrshl_s64 (int64x1_t __a, int64x1_t __b)
23668 {
23669 return (int64x1_t) {__builtin_aarch64_sqrshldi (__a[0], __b[0])};
23670 }
23671
23672 __extension__ extern __inline uint8x8_t
23673 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23674 vqrshl_u8 (uint8x8_t __a, int8x8_t __b)
23675 {
23676 return __builtin_aarch64_uqrshlv8qi_uus (__a, __b);
23677 }
23678
23679 __extension__ extern __inline uint16x4_t
23680 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23681 vqrshl_u16 (uint16x4_t __a, int16x4_t __b)
23682 {
23683 return __builtin_aarch64_uqrshlv4hi_uus (__a, __b);
23684 }
23685
23686 __extension__ extern __inline uint32x2_t
23687 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23688 vqrshl_u32 (uint32x2_t __a, int32x2_t __b)
23689 {
23690 return __builtin_aarch64_uqrshlv2si_uus (__a, __b);
23691 }
23692
23693 __extension__ extern __inline uint64x1_t
23694 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23695 vqrshl_u64 (uint64x1_t __a, int64x1_t __b)
23696 {
23697 return (uint64x1_t) {__builtin_aarch64_uqrshldi_uus (__a[0], __b[0])};
23698 }
23699
23700 __extension__ extern __inline int8x16_t
23701 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23702 vqrshlq_s8 (int8x16_t __a, int8x16_t __b)
23703 {
23704 return __builtin_aarch64_sqrshlv16qi (__a, __b);
23705 }
23706
23707 __extension__ extern __inline int16x8_t
23708 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23709 vqrshlq_s16 (int16x8_t __a, int16x8_t __b)
23710 {
23711 return __builtin_aarch64_sqrshlv8hi (__a, __b);
23712 }
23713
23714 __extension__ extern __inline int32x4_t
23715 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23716 vqrshlq_s32 (int32x4_t __a, int32x4_t __b)
23717 {
23718 return __builtin_aarch64_sqrshlv4si (__a, __b);
23719 }
23720
23721 __extension__ extern __inline int64x2_t
23722 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23723 vqrshlq_s64 (int64x2_t __a, int64x2_t __b)
23724 {
23725 return __builtin_aarch64_sqrshlv2di (__a, __b);
23726 }
23727
23728 __extension__ extern __inline uint8x16_t
23729 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23730 vqrshlq_u8 (uint8x16_t __a, int8x16_t __b)
23731 {
23732 return __builtin_aarch64_uqrshlv16qi_uus (__a, __b);
23733 }
23734
23735 __extension__ extern __inline uint16x8_t
23736 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23737 vqrshlq_u16 (uint16x8_t __a, int16x8_t __b)
23738 {
23739 return __builtin_aarch64_uqrshlv8hi_uus (__a, __b);
23740 }
23741
23742 __extension__ extern __inline uint32x4_t
23743 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23744 vqrshlq_u32 (uint32x4_t __a, int32x4_t __b)
23745 {
23746 return __builtin_aarch64_uqrshlv4si_uus (__a, __b);
23747 }
23748
23749 __extension__ extern __inline uint64x2_t
23750 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23751 vqrshlq_u64 (uint64x2_t __a, int64x2_t __b)
23752 {
23753 return __builtin_aarch64_uqrshlv2di_uus (__a, __b);
23754 }
23755
23756 __extension__ extern __inline int8_t
23757 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23758 vqrshlb_s8 (int8_t __a, int8_t __b)
23759 {
23760 return __builtin_aarch64_sqrshlqi (__a, __b);
23761 }
23762
23763 __extension__ extern __inline int16_t
23764 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23765 vqrshlh_s16 (int16_t __a, int16_t __b)
23766 {
23767 return __builtin_aarch64_sqrshlhi (__a, __b);
23768 }
23769
23770 __extension__ extern __inline int32_t
23771 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23772 vqrshls_s32 (int32_t __a, int32_t __b)
23773 {
23774 return __builtin_aarch64_sqrshlsi (__a, __b);
23775 }
23776
23777 __extension__ extern __inline int64_t
23778 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23779 vqrshld_s64 (int64_t __a, int64_t __b)
23780 {
23781 return __builtin_aarch64_sqrshldi (__a, __b);
23782 }
23783
23784 __extension__ extern __inline uint8_t
23785 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23786 vqrshlb_u8 (uint8_t __a, int8_t __b)
23787 {
23788 return __builtin_aarch64_uqrshlqi_uus (__a, __b);
23789 }
23790
23791 __extension__ extern __inline uint16_t
23792 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23793 vqrshlh_u16 (uint16_t __a, int16_t __b)
23794 {
23795 return __builtin_aarch64_uqrshlhi_uus (__a, __b);
23796 }
23797
23798 __extension__ extern __inline uint32_t
23799 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23800 vqrshls_u32 (uint32_t __a, int32_t __b)
23801 {
23802 return __builtin_aarch64_uqrshlsi_uus (__a, __b);
23803 }
23804
23805 __extension__ extern __inline uint64_t
23806 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23807 vqrshld_u64 (uint64_t __a, int64_t __b)
23808 {
23809 return __builtin_aarch64_uqrshldi_uus (__a, __b);
23810 }
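
/* Usage sketch (illustrative only, not part of this header): vqrshl shifts
   by a signed, per-lane count with rounding and saturation; a negative count
   is a rounding shift right.  `x' below is a hypothetical int16x4_t.

     int16x4_t cnt = vdup_n_s16 (-2);
     int16x4_t y = vqrshl_s16 (x, cnt);   // rounding shift right by two  */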
23811
23812 /* vqrshrn */
23813
23814 __extension__ extern __inline int8x8_t
23815 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23816 vqrshrn_n_s16 (int16x8_t __a, const int __b)
23817 {
23818 return (int8x8_t) __builtin_aarch64_sqrshrn_nv8hi (__a, __b);
23819 }
23820
23821 __extension__ extern __inline int16x4_t
23822 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23823 vqrshrn_n_s32 (int32x4_t __a, const int __b)
23824 {
23825 return (int16x4_t) __builtin_aarch64_sqrshrn_nv4si (__a, __b);
23826 }
23827
23828 __extension__ extern __inline int32x2_t
23829 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23830 vqrshrn_n_s64 (int64x2_t __a, const int __b)
23831 {
23832 return (int32x2_t) __builtin_aarch64_sqrshrn_nv2di (__a, __b);
23833 }
23834
23835 __extension__ extern __inline uint8x8_t
23836 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23837 vqrshrn_n_u16 (uint16x8_t __a, const int __b)
23838 {
23839 return __builtin_aarch64_uqrshrn_nv8hi_uus (__a, __b);
23840 }
23841
23842 __extension__ extern __inline uint16x4_t
23843 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23844 vqrshrn_n_u32 (uint32x4_t __a, const int __b)
23845 {
23846 return __builtin_aarch64_uqrshrn_nv4si_uus (__a, __b);
23847 }
23848
23849 __extension__ extern __inline uint32x2_t
23850 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23851 vqrshrn_n_u64 (uint64x2_t __a, const int __b)
23852 {
23853 return __builtin_aarch64_uqrshrn_nv2di_uus (__a, __b);
23854 }
23855
23856 __extension__ extern __inline int8_t
23857 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23858 vqrshrnh_n_s16 (int16_t __a, const int __b)
23859 {
23860 return (int8_t) __builtin_aarch64_sqrshrn_nhi (__a, __b);
23861 }
23862
23863 __extension__ extern __inline int16_t
23864 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23865 vqrshrns_n_s32 (int32_t __a, const int __b)
23866 {
23867 return (int16_t) __builtin_aarch64_sqrshrn_nsi (__a, __b);
23868 }
23869
23870 __extension__ extern __inline int32_t
23871 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23872 vqrshrnd_n_s64 (int64_t __a, const int __b)
23873 {
23874 return (int32_t) __builtin_aarch64_sqrshrn_ndi (__a, __b);
23875 }
23876
23877 __extension__ extern __inline uint8_t
23878 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23879 vqrshrnh_n_u16 (uint16_t __a, const int __b)
23880 {
23881 return __builtin_aarch64_uqrshrn_nhi_uus (__a, __b);
23882 }
23883
23884 __extension__ extern __inline uint16_t
23885 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23886 vqrshrns_n_u32 (uint32_t __a, const int __b)
23887 {
23888 return __builtin_aarch64_uqrshrn_nsi_uus (__a, __b);
23889 }
23890
23891 __extension__ extern __inline uint32_t
23892 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23893 vqrshrnd_n_u64 (uint64_t __a, const int __b)
23894 {
23895 return __builtin_aarch64_uqrshrn_ndi_uus (__a, __b);
23896 }
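
/* Usage sketch (illustrative only, not part of this header): vqrshrn_n
   shifts right by an immediate with rounding, then narrows with saturation.
   `v' below is a hypothetical int16x8_t.

     int8x8_t n = vqrshrn_n_s16 (v, 4);   // (v + 8) >> 4, clamped to int8  */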
23897
23898 /* vqrshrun */
23899
23900 __extension__ extern __inline uint8x8_t
23901 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23902 vqrshrun_n_s16 (int16x8_t __a, const int __b)
23903 {
23904 return (uint8x8_t) __builtin_aarch64_sqrshrun_nv8hi (__a, __b);
23905 }
23906
23907 __extension__ extern __inline uint16x4_t
23908 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23909 vqrshrun_n_s32 (int32x4_t __a, const int __b)
23910 {
23911 return (uint16x4_t) __builtin_aarch64_sqrshrun_nv4si (__a, __b);
23912 }
23913
23914 __extension__ extern __inline uint32x2_t
23915 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23916 vqrshrun_n_s64 (int64x2_t __a, const int __b)
23917 {
23918 return (uint32x2_t) __builtin_aarch64_sqrshrun_nv2di (__a, __b);
23919 }
23920
23921 __extension__ extern __inline int8_t
23922 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23923 vqrshrunh_n_s16 (int16_t __a, const int __b)
23924 {
23925 return (int8_t) __builtin_aarch64_sqrshrun_nhi (__a, __b);
23926 }
23927
23928 __extension__ extern __inline int16_t
23929 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23930 vqrshruns_n_s32 (int32_t __a, const int __b)
23931 {
23932 return (int16_t) __builtin_aarch64_sqrshrun_nsi (__a, __b);
23933 }
23934
23935 __extension__ extern __inline int32_t
23936 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23937 vqrshrund_n_s64 (int64_t __a, const int __b)
23938 {
23939 return (int32_t) __builtin_aarch64_sqrshrun_ndi (__a, __b);
23940 }
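
/* Usage sketch (illustrative only, not part of this header): vqrshrun_n is
   the unsigned-result form of vqrshrn_n, so negative inputs clamp to zero.
   `v' below is a hypothetical int16x8_t.

     uint8x8_t n = vqrshrun_n_s16 (v, 4);   // rounded, clamped to [0, 255]  */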
23941
23942 /* vqshl */
23943
23944 __extension__ extern __inline int8x8_t
23945 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23946 vqshl_s8 (int8x8_t __a, int8x8_t __b)
23947 {
23948 return __builtin_aarch64_sqshlv8qi (__a, __b);
23949 }
23950
23951 __extension__ extern __inline int16x4_t
23952 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23953 vqshl_s16 (int16x4_t __a, int16x4_t __b)
23954 {
23955 return __builtin_aarch64_sqshlv4hi (__a, __b);
23956 }
23957
23958 __extension__ extern __inline int32x2_t
23959 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23960 vqshl_s32 (int32x2_t __a, int32x2_t __b)
23961 {
23962 return __builtin_aarch64_sqshlv2si (__a, __b);
23963 }
23964
23965 __extension__ extern __inline int64x1_t
23966 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23967 vqshl_s64 (int64x1_t __a, int64x1_t __b)
23968 {
23969 return (int64x1_t) {__builtin_aarch64_sqshldi (__a[0], __b[0])};
23970 }
23971
23972 __extension__ extern __inline uint8x8_t
23973 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23974 vqshl_u8 (uint8x8_t __a, int8x8_t __b)
23975 {
23976 return __builtin_aarch64_uqshlv8qi_uus (__a, __b);
23977 }
23978
23979 __extension__ extern __inline uint16x4_t
23980 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23981 vqshl_u16 (uint16x4_t __a, int16x4_t __b)
23982 {
23983 return __builtin_aarch64_uqshlv4hi_uus (__a, __b);
23984 }
23985
23986 __extension__ extern __inline uint32x2_t
23987 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23988 vqshl_u32 (uint32x2_t __a, int32x2_t __b)
23989 {
23990 return __builtin_aarch64_uqshlv2si_uus (__a, __b);
23991 }
23992
23993 __extension__ extern __inline uint64x1_t
23994 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23995 vqshl_u64 (uint64x1_t __a, int64x1_t __b)
23996 {
23997 return (uint64x1_t) {__builtin_aarch64_uqshldi_uus (__a[0], __b[0])};
23998 }
23999
24000 __extension__ extern __inline int8x16_t
24001 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24002 vqshlq_s8 (int8x16_t __a, int8x16_t __b)
24003 {
24004 return __builtin_aarch64_sqshlv16qi (__a, __b);
24005 }
24006
24007 __extension__ extern __inline int16x8_t
24008 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24009 vqshlq_s16 (int16x8_t __a, int16x8_t __b)
24010 {
24011 return __builtin_aarch64_sqshlv8hi (__a, __b);
24012 }
24013
24014 __extension__ extern __inline int32x4_t
24015 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24016 vqshlq_s32 (int32x4_t __a, int32x4_t __b)
24017 {
24018 return __builtin_aarch64_sqshlv4si (__a, __b);
24019 }
24020
24021 __extension__ extern __inline int64x2_t
24022 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24023 vqshlq_s64 (int64x2_t __a, int64x2_t __b)
24024 {
24025 return __builtin_aarch64_sqshlv2di (__a, __b);
24026 }
24027
24028 __extension__ extern __inline uint8x16_t
24029 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24030 vqshlq_u8 (uint8x16_t __a, int8x16_t __b)
24031 {
24032 return __builtin_aarch64_uqshlv16qi_uus (__a, __b);
24033 }
24034
24035 __extension__ extern __inline uint16x8_t
24036 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24037 vqshlq_u16 (uint16x8_t __a, int16x8_t __b)
24038 {
24039 return __builtin_aarch64_uqshlv8hi_uus (__a, __b);
24040 }
24041
24042 __extension__ extern __inline uint32x4_t
24043 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24044 vqshlq_u32 (uint32x4_t __a, int32x4_t __b)
24045 {
24046 return __builtin_aarch64_uqshlv4si_uus (__a, __b);
24047 }
24048
24049 __extension__ extern __inline uint64x2_t
24050 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24051 vqshlq_u64 (uint64x2_t __a, int64x2_t __b)
24052 {
24053 return __builtin_aarch64_uqshlv2di_uus (__a, __b);
24054 }
24055
24056 __extension__ extern __inline int8_t
24057 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24058 vqshlb_s8 (int8_t __a, int8_t __b)
24059 {
24060 return __builtin_aarch64_sqshlqi (__a, __b);
24061 }
24062
24063 __extension__ extern __inline int16_t
24064 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24065 vqshlh_s16 (int16_t __a, int16_t __b)
24066 {
24067 return __builtin_aarch64_sqshlhi (__a, __b);
24068 }
24069
24070 __extension__ extern __inline int32_t
24071 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24072 vqshls_s32 (int32_t __a, int32_t __b)
24073 {
24074 return __builtin_aarch64_sqshlsi (__a, __b);
24075 }
24076
24077 __extension__ extern __inline int64_t
24078 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24079 vqshld_s64 (int64_t __a, int64_t __b)
24080 {
24081 return __builtin_aarch64_sqshldi (__a, __b);
24082 }
24083
24084 __extension__ extern __inline uint8_t
24085 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24086 vqshlb_u8 (uint8_t __a, int8_t __b)
24087 {
24088 return __builtin_aarch64_uqshlqi_uus (__a, __b);
24089 }
24090
24091 __extension__ extern __inline uint16_t
24092 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24093 vqshlh_u16 (uint16_t __a, int16_t __b)
24094 {
24095 return __builtin_aarch64_uqshlhi_uus (__a, __b);
24096 }
24097
24098 __extension__ extern __inline uint32_t
24099 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24100 vqshls_u32 (uint32_t __a, int32_t __b)
24101 {
24102 return __builtin_aarch64_uqshlsi_uus (__a, __b);
24103 }
24104
24105 __extension__ extern __inline uint64_t
24106 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24107 vqshld_u64 (uint64_t __a, int64_t __b)
24108 {
24109 return __builtin_aarch64_uqshldi_uus (__a, __b);
24110 }
24111
24112 __extension__ extern __inline int8x8_t
24113 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24114 vqshl_n_s8 (int8x8_t __a, const int __b)
24115 {
24116 return (int8x8_t) __builtin_aarch64_sqshl_nv8qi (__a, __b);
24117 }
24118
24119 __extension__ extern __inline int16x4_t
24120 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24121 vqshl_n_s16 (int16x4_t __a, const int __b)
24122 {
24123 return (int16x4_t) __builtin_aarch64_sqshl_nv4hi (__a, __b);
24124 }
24125
24126 __extension__ extern __inline int32x2_t
24127 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24128 vqshl_n_s32 (int32x2_t __a, const int __b)
24129 {
24130 return (int32x2_t) __builtin_aarch64_sqshl_nv2si (__a, __b);
24131 }
24132
24133 __extension__ extern __inline int64x1_t
24134 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24135 vqshl_n_s64 (int64x1_t __a, const int __b)
24136 {
24137 return (int64x1_t) {__builtin_aarch64_sqshl_ndi (__a[0], __b)};
24138 }
24139
24140 __extension__ extern __inline uint8x8_t
24141 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24142 vqshl_n_u8 (uint8x8_t __a, const int __b)
24143 {
24144 return __builtin_aarch64_uqshl_nv8qi_uus (__a, __b);
24145 }
24146
24147 __extension__ extern __inline uint16x4_t
24148 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24149 vqshl_n_u16 (uint16x4_t __a, const int __b)
24150 {
24151 return __builtin_aarch64_uqshl_nv4hi_uus (__a, __b);
24152 }
24153
24154 __extension__ extern __inline uint32x2_t
24155 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24156 vqshl_n_u32 (uint32x2_t __a, const int __b)
24157 {
24158 return __builtin_aarch64_uqshl_nv2si_uus (__a, __b);
24159 }
24160
24161 __extension__ extern __inline uint64x1_t
24162 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24163 vqshl_n_u64 (uint64x1_t __a, const int __b)
24164 {
24165 return (uint64x1_t) {__builtin_aarch64_uqshl_ndi_uus (__a[0], __b)};
24166 }
24167
24168 __extension__ extern __inline int8x16_t
24169 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24170 vqshlq_n_s8 (int8x16_t __a, const int __b)
24171 {
24172 return (int8x16_t) __builtin_aarch64_sqshl_nv16qi (__a, __b);
24173 }
24174
24175 __extension__ extern __inline int16x8_t
24176 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24177 vqshlq_n_s16 (int16x8_t __a, const int __b)
24178 {
24179 return (int16x8_t) __builtin_aarch64_sqshl_nv8hi (__a, __b);
24180 }
24181
24182 __extension__ extern __inline int32x4_t
24183 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24184 vqshlq_n_s32 (int32x4_t __a, const int __b)
24185 {
24186 return (int32x4_t) __builtin_aarch64_sqshl_nv4si (__a, __b);
24187 }
24188
24189 __extension__ extern __inline int64x2_t
24190 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24191 vqshlq_n_s64 (int64x2_t __a, const int __b)
24192 {
24193 return (int64x2_t) __builtin_aarch64_sqshl_nv2di (__a, __b);
24194 }
24195
24196 __extension__ extern __inline uint8x16_t
24197 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24198 vqshlq_n_u8 (uint8x16_t __a, const int __b)
24199 {
24200 return __builtin_aarch64_uqshl_nv16qi_uus (__a, __b);
24201 }
24202
24203 __extension__ extern __inline uint16x8_t
24204 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24205 vqshlq_n_u16 (uint16x8_t __a, const int __b)
24206 {
24207 return __builtin_aarch64_uqshl_nv8hi_uus (__a, __b);
24208 }
24209
24210 __extension__ extern __inline uint32x4_t
24211 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24212 vqshlq_n_u32 (uint32x4_t __a, const int __b)
24213 {
24214 return __builtin_aarch64_uqshl_nv4si_uus (__a, __b);
24215 }
24216
24217 __extension__ extern __inline uint64x2_t
24218 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24219 vqshlq_n_u64 (uint64x2_t __a, const int __b)
24220 {
24221 return __builtin_aarch64_uqshl_nv2di_uus (__a, __b);
24222 }
24223
24224 __extension__ extern __inline int8_t
24225 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24226 vqshlb_n_s8 (int8_t __a, const int __b)
24227 {
24228 return (int8_t) __builtin_aarch64_sqshl_nqi (__a, __b);
24229 }
24230
24231 __extension__ extern __inline int16_t
24232 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24233 vqshlh_n_s16 (int16_t __a, const int __b)
24234 {
24235 return (int16_t) __builtin_aarch64_sqshl_nhi (__a, __b);
24236 }
24237
24238 __extension__ extern __inline int32_t
24239 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24240 vqshls_n_s32 (int32_t __a, const int __b)
24241 {
24242 return (int32_t) __builtin_aarch64_sqshl_nsi (__a, __b);
24243 }
24244
24245 __extension__ extern __inline int64_t
24246 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24247 vqshld_n_s64 (int64_t __a, const int __b)
24248 {
24249 return __builtin_aarch64_sqshl_ndi (__a, __b);
24250 }
24251
24252 __extension__ extern __inline uint8_t
24253 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24254 vqshlb_n_u8 (uint8_t __a, const int __b)
24255 {
24256 return __builtin_aarch64_uqshl_nqi_uus (__a, __b);
24257 }
24258
24259 __extension__ extern __inline uint16_t
24260 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24261 vqshlh_n_u16 (uint16_t __a, const int __b)
24262 {
24263 return __builtin_aarch64_uqshl_nhi_uus (__a, __b);
24264 }
24265
24266 __extension__ extern __inline uint32_t
24267 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24268 vqshls_n_u32 (uint32_t __a, const int __b)
24269 {
24270 return __builtin_aarch64_uqshl_nsi_uus (__a, __b);
24271 }
24272
24273 __extension__ extern __inline uint64_t
24274 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24275 vqshld_n_u64 (uint64_t __a, const int __b)
24276 {
24277 return __builtin_aarch64_uqshl_ndi_uus (__a, __b);
24278 }
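
/* Usage sketch (illustrative only, not part of this header): the register
   forms of vqshl take a signed per-lane count (negative counts shift right),
   while the _n_ forms take an immediate.  `x' below is a hypothetical
   int16x4_t.

     int16x4_t y = vqshl_n_s16 (x, 3);   // x << 3, saturated to int16  */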
24279
24280 /* vqshlu */
24281
24282 __extension__ extern __inline uint8x8_t
24283 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24284 vqshlu_n_s8 (int8x8_t __a, const int __b)
24285 {
24286 return __builtin_aarch64_sqshlu_nv8qi_uss (__a, __b);
24287 }
24288
24289 __extension__ extern __inline uint16x4_t
24290 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24291 vqshlu_n_s16 (int16x4_t __a, const int __b)
24292 {
24293 return __builtin_aarch64_sqshlu_nv4hi_uss (__a, __b);
24294 }
24295
24296 __extension__ extern __inline uint32x2_t
24297 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24298 vqshlu_n_s32 (int32x2_t __a, const int __b)
24299 {
24300 return __builtin_aarch64_sqshlu_nv2si_uss (__a, __b);
24301 }
24302
24303 __extension__ extern __inline uint64x1_t
24304 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24305 vqshlu_n_s64 (int64x1_t __a, const int __b)
24306 {
24307 return (uint64x1_t) {__builtin_aarch64_sqshlu_ndi_uss (__a[0], __b)};
24308 }
24309
24310 __extension__ extern __inline uint8x16_t
24311 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24312 vqshluq_n_s8 (int8x16_t __a, const int __b)
24313 {
24314 return __builtin_aarch64_sqshlu_nv16qi_uss (__a, __b);
24315 }
24316
24317 __extension__ extern __inline uint16x8_t
24318 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24319 vqshluq_n_s16 (int16x8_t __a, const int __b)
24320 {
24321 return __builtin_aarch64_sqshlu_nv8hi_uss (__a, __b);
24322 }
24323
24324 __extension__ extern __inline uint32x4_t
24325 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24326 vqshluq_n_s32 (int32x4_t __a, const int __b)
24327 {
24328 return __builtin_aarch64_sqshlu_nv4si_uss (__a, __b);
24329 }
24330
24331 __extension__ extern __inline uint64x2_t
24332 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24333 vqshluq_n_s64 (int64x2_t __a, const int __b)
24334 {
24335 return __builtin_aarch64_sqshlu_nv2di_uss (__a, __b);
24336 }
24337
24338 __extension__ extern __inline int8_t
24339 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24340 vqshlub_n_s8 (int8_t __a, const int __b)
24341 {
24342 return (int8_t) __builtin_aarch64_sqshlu_nqi_uss (__a, __b);
24343 }
24344
24345 __extension__ extern __inline int16_t
24346 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24347 vqshluh_n_s16 (int16_t __a, const int __b)
24348 {
24349 return (int16_t) __builtin_aarch64_sqshlu_nhi_uss (__a, __b);
24350 }
24351
24352 __extension__ extern __inline int32_t
24353 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24354 vqshlus_n_s32 (int32_t __a, const int __b)
24355 {
24356 return (int32_t) __builtin_aarch64_sqshlu_nsi_uss (__a, __b);
24357 }
24358
24359 __extension__ extern __inline uint64_t
24360 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24361 vqshlud_n_s64 (int64_t __a, const int __b)
24362 {
24363 return __builtin_aarch64_sqshlu_ndi_uss (__a, __b);
24364 }
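
/* Usage sketch (illustrative only, not part of this header): vqshlu_n shifts
   a signed input left and saturates to the unsigned range, so negative lanes
   become zero.  `x' below is a hypothetical int16x4_t.

     uint16x4_t y = vqshlu_n_s16 (x, 3);   // x << 3, clamped to [0, 65535]  */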
24365
24366 /* vqshrn */
24367
24368 __extension__ extern __inline int8x8_t
24369 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24370 vqshrn_n_s16 (int16x8_t __a, const int __b)
24371 {
24372 return (int8x8_t) __builtin_aarch64_sqshrn_nv8hi (__a, __b);
24373 }
24374
24375 __extension__ extern __inline int16x4_t
24376 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24377 vqshrn_n_s32 (int32x4_t __a, const int __b)
24378 {
24379 return (int16x4_t) __builtin_aarch64_sqshrn_nv4si (__a, __b);
24380 }
24381
24382 __extension__ extern __inline int32x2_t
24383 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24384 vqshrn_n_s64 (int64x2_t __a, const int __b)
24385 {
24386 return (int32x2_t) __builtin_aarch64_sqshrn_nv2di (__a, __b);
24387 }
24388
24389 __extension__ extern __inline uint8x8_t
24390 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24391 vqshrn_n_u16 (uint16x8_t __a, const int __b)
24392 {
24393 return __builtin_aarch64_uqshrn_nv8hi_uus (__a, __b);
24394 }
24395
24396 __extension__ extern __inline uint16x4_t
24397 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24398 vqshrn_n_u32 (uint32x4_t __a, const int __b)
24399 {
24400 return __builtin_aarch64_uqshrn_nv4si_uus (__a, __b);
24401 }
24402
24403 __extension__ extern __inline uint32x2_t
24404 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24405 vqshrn_n_u64 (uint64x2_t __a, const int __b)
24406 {
24407 return __builtin_aarch64_uqshrn_nv2di_uus (__a, __b);
24408 }
24409
24410 __extension__ extern __inline int8_t
24411 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24412 vqshrnh_n_s16 (int16_t __a, const int __b)
24413 {
24414 return (int8_t) __builtin_aarch64_sqshrn_nhi (__a, __b);
24415 }
24416
24417 __extension__ extern __inline int16_t
24418 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24419 vqshrns_n_s32 (int32_t __a, const int __b)
24420 {
24421 return (int16_t) __builtin_aarch64_sqshrn_nsi (__a, __b);
24422 }
24423
24424 __extension__ extern __inline int32_t
24425 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24426 vqshrnd_n_s64 (int64_t __a, const int __b)
24427 {
24428 return (int32_t) __builtin_aarch64_sqshrn_ndi (__a, __b);
24429 }
24430
24431 __extension__ extern __inline uint8_t
24432 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24433 vqshrnh_n_u16 (uint16_t __a, const int __b)
24434 {
24435 return __builtin_aarch64_uqshrn_nhi_uus (__a, __b);
24436 }
24437
24438 __extension__ extern __inline uint16_t
24439 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24440 vqshrns_n_u32 (uint32_t __a, const int __b)
24441 {
24442 return __builtin_aarch64_uqshrn_nsi_uus (__a, __b);
24443 }
24444
24445 __extension__ extern __inline uint32_t
24446 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24447 vqshrnd_n_u64 (uint64_t __a, const int __b)
24448 {
24449 return __builtin_aarch64_uqshrn_ndi_uus (__a, __b);
24450 }
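
/* Usage sketch (illustrative only, not part of this header): vqshrn_n is the
   truncating counterpart of vqrshrn_n: shift right by an immediate, then
   narrow with saturation.  `v' below is a hypothetical int16x8_t.

     int8x8_t n = vqshrn_n_s16 (v, 4);   // v >> 4, clamped to [-128, 127]  */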
24451
24452 /* vqshrun */
24453
24454 __extension__ extern __inline uint8x8_t
24455 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24456 vqshrun_n_s16 (int16x8_t __a, const int __b)
24457 {
24458 return (uint8x8_t) __builtin_aarch64_sqshrun_nv8hi (__a, __b);
24459 }
24460
24461 __extension__ extern __inline uint16x4_t
24462 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24463 vqshrun_n_s32 (int32x4_t __a, const int __b)
24464 {
24465 return (uint16x4_t) __builtin_aarch64_sqshrun_nv4si (__a, __b);
24466 }
24467
24468 __extension__ extern __inline uint32x2_t
24469 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24470 vqshrun_n_s64 (int64x2_t __a, const int __b)
24471 {
24472 return (uint32x2_t) __builtin_aarch64_sqshrun_nv2di (__a, __b);
24473 }
24474
24475 __extension__ extern __inline int8_t
24476 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24477 vqshrunh_n_s16 (int16_t __a, const int __b)
24478 {
24479 return (int8_t) __builtin_aarch64_sqshrun_nhi (__a, __b);
24480 }
24481
24482 __extension__ extern __inline int16_t
24483 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24484 vqshruns_n_s32 (int32_t __a, const int __b)
24485 {
24486 return (int16_t) __builtin_aarch64_sqshrun_nsi (__a, __b);
24487 }
24488
24489 __extension__ extern __inline int32_t
24490 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24491 vqshrund_n_s64 (int64_t __a, const int __b)
24492 {
24493 return (int32_t) __builtin_aarch64_sqshrun_ndi (__a, __b);
24494 }
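
/* Usage sketch (illustrative only, not part of this header): vqshrun_n
   shifts a signed input right and narrows to an unsigned result, so negative
   lanes clamp to zero.  `v' below is a hypothetical int16x8_t.

     uint8x8_t n = vqshrun_n_s16 (v, 4);   // v >> 4, clamped to [0, 255]  */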
24495
24496 /* vqsub */
24497
24498 __extension__ extern __inline int8_t
24499 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24500 vqsubb_s8 (int8_t __a, int8_t __b)
24501 {
24502 return (int8_t) __builtin_aarch64_sqsubqi (__a, __b);
24503 }
24504
24505 __extension__ extern __inline int16_t
24506 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24507 vqsubh_s16 (int16_t __a, int16_t __b)
24508 {
24509 return (int16_t) __builtin_aarch64_sqsubhi (__a, __b);
24510 }
24511
24512 __extension__ extern __inline int32_t
24513 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24514 vqsubs_s32 (int32_t __a, int32_t __b)
24515 {
24516 return (int32_t) __builtin_aarch64_sqsubsi (__a, __b);
24517 }
24518
24519 __extension__ extern __inline int64_t
24520 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24521 vqsubd_s64 (int64_t __a, int64_t __b)
24522 {
24523 return __builtin_aarch64_sqsubdi (__a, __b);
24524 }
24525
24526 __extension__ extern __inline uint8_t
24527 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24528 vqsubb_u8 (uint8_t __a, uint8_t __b)
24529 {
24530 return (uint8_t) __builtin_aarch64_uqsubqi_uuu (__a, __b);
24531 }
24532
24533 __extension__ extern __inline uint16_t
24534 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24535 vqsubh_u16 (uint16_t __a, uint16_t __b)
24536 {
24537 return (uint16_t) __builtin_aarch64_uqsubhi_uuu (__a, __b);
24538 }
24539
24540 __extension__ extern __inline uint32_t
24541 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24542 vqsubs_u32 (uint32_t __a, uint32_t __b)
24543 {
24544 return (uint32_t) __builtin_aarch64_uqsubsi_uuu (__a, __b);
24545 }
24546
24547 __extension__ extern __inline uint64_t
24548 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24549 vqsubd_u64 (uint64_t __a, uint64_t __b)
24550 {
24551 return __builtin_aarch64_uqsubdi_uuu (__a, __b);
24552 }
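
/* Usage sketch (illustrative only, not part of this header): the scalar
   saturating subtracts clamp instead of wrapping.

     uint8_t d = vqsubb_u8 (10, 20);      // 0, not 246
     int8_t e = vqsubb_s8 (-100, 100);    // -128, not 56  */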
24553
24554 /* vqtbl2 */
24555
24556 __extension__ extern __inline int8x8_t
24557 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24558 vqtbl2_s8 (int8x16x2_t __tab, uint8x8_t __idx)
24559 {
24560 __builtin_aarch64_simd_oi __o;
24561 __o = __builtin_aarch64_set_qregoiv16qi (__o, __tab.val[0], 0);
24562 __o = __builtin_aarch64_set_qregoiv16qi (__o, __tab.val[1], 1);
24563 return __builtin_aarch64_tbl3v8qi (__o, (int8x8_t)__idx);
24564 }
24565
24566 __extension__ extern __inline uint8x8_t
24567 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24568 vqtbl2_u8 (uint8x16x2_t __tab, uint8x8_t __idx)
24569 {
24570 __builtin_aarch64_simd_oi __o;
24571 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24572 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24573 return (uint8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)__idx);
24574 }
24575
24576 __extension__ extern __inline poly8x8_t
24577 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24578 vqtbl2_p8 (poly8x16x2_t __tab, uint8x8_t __idx)
24579 {
24580 __builtin_aarch64_simd_oi __o;
24581 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24582 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24583 return (poly8x8_t)__builtin_aarch64_tbl3v8qi (__o, (int8x8_t)__idx);
24584 }
24585
24586 __extension__ extern __inline int8x16_t
24587 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24588 vqtbl2q_s8 (int8x16x2_t __tab, uint8x16_t __idx)
24589 {
24590 __builtin_aarch64_simd_oi __o;
24591 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24592 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24593 return __builtin_aarch64_tbl3v16qi (__o, (int8x16_t)__idx);
24594 }
24595
24596 __extension__ extern __inline uint8x16_t
24597 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24598 vqtbl2q_u8 (uint8x16x2_t __tab, uint8x16_t __idx)
24599 {
24600 __builtin_aarch64_simd_oi __o;
24601 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24602 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24603 return (uint8x16_t)__builtin_aarch64_tbl3v16qi (__o, (int8x16_t)__idx);
24604 }
24605
24606 __extension__ extern __inline poly8x16_t
24607 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24608 vqtbl2q_p8 (poly8x16x2_t __tab, uint8x16_t __idx)
24609 {
24610 __builtin_aarch64_simd_oi __o;
24611 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24612 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24613 return (poly8x16_t)__builtin_aarch64_tbl3v16qi (__o, (int8x16_t)__idx);
24614 }
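
/* Usage sketch (illustrative only, not part of this header): vqtbl2 looks up
   bytes in a 32-byte table held in two 128-bit registers; indices of 32 or
   more produce zero.  `tab' and `idx' below are hypothetical.

     uint8x8_t r = vqtbl2_u8 (tab, idx);   // tab: uint8x16x2_t, idx: uint8x8_t  */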
24615
24616 /* vqtbl3 */
24617
24618 __extension__ extern __inline int8x8_t
24619 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24620 vqtbl3_s8 (int8x16x3_t __tab, uint8x8_t __idx)
24621 {
24622 __builtin_aarch64_simd_ci __o;
24623 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[0], 0);
24624 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[1], 1);
24625 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[2], 2);
24626 return __builtin_aarch64_qtbl3v8qi (__o, (int8x8_t)__idx);
24627 }
24628
24629 __extension__ extern __inline uint8x8_t
24630 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24631 vqtbl3_u8 (uint8x16x3_t __tab, uint8x8_t __idx)
24632 {
24633 __builtin_aarch64_simd_ci __o;
24634 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[0], 0);
24635 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[1], 1);
24636 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[2], 2);
24637 return (uint8x8_t)__builtin_aarch64_qtbl3v8qi (__o, (int8x8_t)__idx);
24638 }
24639
24640 __extension__ extern __inline poly8x8_t
24641 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24642 vqtbl3_p8 (poly8x16x3_t __tab, uint8x8_t __idx)
24643 {
24644 __builtin_aarch64_simd_ci __o;
24645 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[0], 0);
24646 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[1], 1);
24647 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[2], 2);
24648 return (poly8x8_t)__builtin_aarch64_qtbl3v8qi (__o, (int8x8_t)__idx);
24649 }
24650
24651 __extension__ extern __inline int8x16_t
24652 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24653 vqtbl3q_s8 (int8x16x3_t __tab, uint8x16_t __idx)
24654 {
24655 __builtin_aarch64_simd_ci __o;
24656 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[0], 0);
24657 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[1], 1);
24658 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[2], 2);
24659 return __builtin_aarch64_qtbl3v16qi (__o, (int8x16_t)__idx);
24660 }
24661
24662 __extension__ extern __inline uint8x16_t
24663 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24664 vqtbl3q_u8 (uint8x16x3_t __tab, uint8x16_t __idx)
24665 {
24666 __builtin_aarch64_simd_ci __o;
24667 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[0], 0);
24668 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[1], 1);
24669 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[2], 2);
24670 return (uint8x16_t)__builtin_aarch64_qtbl3v16qi (__o, (int8x16_t)__idx);
24671 }
24672
24673 __extension__ extern __inline poly8x16_t
24674 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24675 vqtbl3q_p8 (poly8x16x3_t __tab, uint8x16_t __idx)
24676 {
24677 __builtin_aarch64_simd_ci __o;
24678 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[0], 0);
24679 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[1], 1);
24680 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[2], 2);
24681 return (poly8x16_t)__builtin_aarch64_qtbl3v16qi (__o, (int8x16_t)__idx);
24682 }
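
/* Usage sketch (illustrative only, not part of this header): vqtbl3 extends
   the lookup to a 48-byte table in three 128-bit registers; indices of 48 or
   more produce zero.  `tab3' and `idx' below are hypothetical.

     uint8x8_t r = vqtbl3_u8 (tab3, idx);   // tab3: uint8x16x3_t  */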
24683
24684 /* vqtbl4 */
24685
24686 __extension__ extern __inline int8x8_t
24687 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24688 vqtbl4_s8 (int8x16x4_t __tab, uint8x8_t __idx)
24689 {
24690 __builtin_aarch64_simd_xi __o;
24691 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24692 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24693 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[2], 2);
24694 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[3], 3);
24695 return __builtin_aarch64_qtbl4v8qi (__o, (int8x8_t)__idx);
24696 }
24697
24698 __extension__ extern __inline uint8x8_t
24699 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24700 vqtbl4_u8 (uint8x16x4_t __tab, uint8x8_t __idx)
24701 {
24702 __builtin_aarch64_simd_xi __o;
24703 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24704 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24705 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[2], 2);
24706 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[3], 3);
24707 return (uint8x8_t)__builtin_aarch64_qtbl4v8qi (__o, (int8x8_t)__idx);
24708 }
24709
24710 __extension__ extern __inline poly8x8_t
24711 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24712 vqtbl4_p8 (poly8x16x4_t __tab, uint8x8_t __idx)
24713 {
24714 __builtin_aarch64_simd_xi __o;
24715 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24716 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24717 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[2], 2);
24718 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[3], 3);
24719 return (poly8x8_t)__builtin_aarch64_qtbl4v8qi (__o, (int8x8_t)__idx);
24720 }
24721
24722 __extension__ extern __inline int8x16_t
24723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24724 vqtbl4q_s8 (int8x16x4_t __tab, uint8x16_t __idx)
24725 {
24726 __builtin_aarch64_simd_xi __o;
24727 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24728 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24729 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[2], 2);
24730 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[3], 3);
24731 return __builtin_aarch64_qtbl4v16qi (__o, (int8x16_t)__idx);
24732 }
24733
24734 __extension__ extern __inline uint8x16_t
24735 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24736 vqtbl4q_u8 (uint8x16x4_t __tab, uint8x16_t __idx)
24737 {
24738 __builtin_aarch64_simd_xi __o;
24739 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24740 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24741 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[2], 2);
24742 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[3], 3);
24743 return (uint8x16_t)__builtin_aarch64_qtbl4v16qi (__o, (int8x16_t)__idx);
24744 }
24745
24746 __extension__ extern __inline poly8x16_t
24747 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24748 vqtbl4q_p8 (poly8x16x4_t __tab, uint8x16_t __idx)
24749 {
24750 __builtin_aarch64_simd_xi __o;
24751 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24752 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24753 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[2], 2);
24754 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[3], 3);
24755 return (poly8x16_t)__builtin_aarch64_qtbl4v16qi (__o, (int8x16_t)__idx);
24756 }
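
/* Usage sketch (illustrative only, not part of this header): vqtbl4 uses a
   64-byte table in four 128-bit registers; indices of 64 or more produce
   zero.  `tab4' and `idx' below are hypothetical.

     uint8x16_t r = vqtbl4q_u8 (tab4, idx);   // tab4: uint8x16x4_t  */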
24757
24758
24759 /* vqtbx2 */
24760 __extension__ extern __inline int8x8_t
24761 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24762 vqtbx2_s8 (int8x8_t __r, int8x16x2_t __tab, uint8x8_t __idx)
24763 {
24764 __builtin_aarch64_simd_oi __o;
24765 __o = __builtin_aarch64_set_qregoiv16qi (__o, __tab.val[0], 0);
24766 __o = __builtin_aarch64_set_qregoiv16qi (__o, __tab.val[1], 1);
24767 return __builtin_aarch64_tbx4v8qi (__r, __o, (int8x8_t)__idx);
24768 }
24769
24770 __extension__ extern __inline uint8x8_t
24771 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24772 vqtbx2_u8 (uint8x8_t __r, uint8x16x2_t __tab, uint8x8_t __idx)
24773 {
24774 __builtin_aarch64_simd_oi __o;
24775 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24776 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24777 return (uint8x8_t)__builtin_aarch64_tbx4v8qi ((int8x8_t)__r, __o,
24778 (int8x8_t)__idx);
24779 }
24780
24781 __extension__ extern __inline poly8x8_t
24782 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24783 vqtbx2_p8 (poly8x8_t __r, poly8x16x2_t __tab, uint8x8_t __idx)
24784 {
24785 __builtin_aarch64_simd_oi __o;
24786 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24787 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24788 return (poly8x8_t)__builtin_aarch64_tbx4v8qi ((int8x8_t)__r, __o,
24789 (int8x8_t)__idx);
24790 }
24791
24792 __extension__ extern __inline int8x16_t
24793 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24794 vqtbx2q_s8 (int8x16_t __r, int8x16x2_t __tab, uint8x16_t __idx)
24795 {
24796 __builtin_aarch64_simd_oi __o;
24797 __o = __builtin_aarch64_set_qregoiv16qi (__o, __tab.val[0], 0);
24798 __o = __builtin_aarch64_set_qregoiv16qi (__o, __tab.val[1], 1);
24799 return __builtin_aarch64_tbx4v16qi (__r, __o, (int8x16_t)__idx);
24800 }
24801
24802 __extension__ extern __inline uint8x16_t
24803 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24804 vqtbx2q_u8 (uint8x16_t __r, uint8x16x2_t __tab, uint8x16_t __idx)
24805 {
24806 __builtin_aarch64_simd_oi __o;
24807 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24808 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24809 return (uint8x16_t)__builtin_aarch64_tbx4v16qi ((int8x16_t)__r, __o,
24810 (int8x16_t)__idx);
24811 }
24812
24813 __extension__ extern __inline poly8x16_t
24814 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24815 vqtbx2q_p8 (poly8x16_t __r, poly8x16x2_t __tab, uint8x16_t __idx)
24816 {
24817 __builtin_aarch64_simd_oi __o;
24818 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24819 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24820 return (poly8x16_t)__builtin_aarch64_tbx4v16qi ((int8x16_t)__r, __o,
24821 (int8x16_t)__idx);
24822 }
24823
24824 /* vqtbx3 */
24825 __extension__ extern __inline int8x8_t
24826 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24827 vqtbx3_s8 (int8x8_t __r, int8x16x3_t __tab, uint8x8_t __idx)
24828 {
24829 __builtin_aarch64_simd_ci __o;
24830 __o = __builtin_aarch64_set_qregciv16qi (__o, __tab.val[0], 0);
24831 __o = __builtin_aarch64_set_qregciv16qi (__o, __tab.val[1], 1);
24832 __o = __builtin_aarch64_set_qregciv16qi (__o, __tab.val[2], 2);
24833 return __builtin_aarch64_qtbx3v8qi (__r, __o, (int8x8_t)__idx);
24834 }
24835
24836 __extension__ extern __inline uint8x8_t
24837 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24838 vqtbx3_u8 (uint8x8_t __r, uint8x16x3_t __tab, uint8x8_t __idx)
24839 {
24840 __builtin_aarch64_simd_ci __o;
24841 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[0], 0);
24842 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[1], 1);
24843 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[2], 2);
24844 return (uint8x8_t)__builtin_aarch64_qtbx3v8qi ((int8x8_t)__r, __o,
24845 (int8x8_t)__idx);
24846 }
24847
24848 __extension__ extern __inline poly8x8_t
24849 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24850 vqtbx3_p8 (poly8x8_t __r, poly8x16x3_t __tab, uint8x8_t __idx)
24851 {
24852 __builtin_aarch64_simd_ci __o;
24853 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[0], 0);
24854 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[1], 1);
24855 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[2], 2);
24856 return (poly8x8_t)__builtin_aarch64_qtbx3v8qi ((int8x8_t)__r, __o,
24857 (int8x8_t)__idx);
24858 }
24859
24860 __extension__ extern __inline int8x16_t
24861 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24862 vqtbx3q_s8 (int8x16_t __r, int8x16x3_t __tab, uint8x16_t __idx)
24863 {
24864 __builtin_aarch64_simd_ci __o;
24865 __o = __builtin_aarch64_set_qregciv16qi (__o, __tab.val[0], 0);
24866 __o = __builtin_aarch64_set_qregciv16qi (__o, __tab.val[1], 1);
24867 __o = __builtin_aarch64_set_qregciv16qi (__o, __tab.val[2], 2);
24868 return __builtin_aarch64_qtbx3v16qi (__r, __o, (int8x16_t)__idx);
24869 }
24870
24871 __extension__ extern __inline uint8x16_t
24872 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24873 vqtbx3q_u8 (uint8x16_t __r, uint8x16x3_t __tab, uint8x16_t __idx)
24874 {
24875 __builtin_aarch64_simd_ci __o;
24876 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[0], 0);
24877 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[1], 1);
24878 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[2], 2);
24879 return (uint8x16_t)__builtin_aarch64_qtbx3v16qi ((int8x16_t)__r, __o,
24880 (int8x16_t)__idx);
24881 }
24882
24883 __extension__ extern __inline poly8x16_t
24884 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24885 vqtbx3q_p8 (poly8x16_t __r, poly8x16x3_t __tab, uint8x16_t __idx)
24886 {
24887 __builtin_aarch64_simd_ci __o;
24888 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[0], 0);
24889 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[1], 1);
24890 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t)__tab.val[2], 2);
24891 return (poly8x16_t)__builtin_aarch64_qtbx3v16qi ((int8x16_t)__r, __o,
24892 (int8x16_t)__idx);
24893 }
24894
24895 /* vqtbx4 */
24896
24897 __extension__ extern __inline int8x8_t
24898 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24899 vqtbx4_s8 (int8x8_t __r, int8x16x4_t __tab, uint8x8_t __idx)
24900 {
24901 __builtin_aarch64_simd_xi __o;
24902 __o = __builtin_aarch64_set_qregxiv16qi (__o, __tab.val[0], 0);
24903 __o = __builtin_aarch64_set_qregxiv16qi (__o, __tab.val[1], 1);
24904 __o = __builtin_aarch64_set_qregxiv16qi (__o, __tab.val[2], 2);
24905 __o = __builtin_aarch64_set_qregxiv16qi (__o, __tab.val[3], 3);
24906 return __builtin_aarch64_qtbx4v8qi (__r, __o, (int8x8_t)__idx);
24907 }
24908
24909 __extension__ extern __inline uint8x8_t
24910 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24911 vqtbx4_u8 (uint8x8_t __r, uint8x16x4_t __tab, uint8x8_t __idx)
24912 {
24913 __builtin_aarch64_simd_xi __o;
24914 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24915 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24916 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[2], 2);
24917 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[3], 3);
24918 return (uint8x8_t)__builtin_aarch64_qtbx4v8qi ((int8x8_t)__r, __o,
24919 (int8x8_t)__idx);
24920 }
24921
24922 __extension__ extern __inline poly8x8_t
24923 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24924 vqtbx4_p8 (poly8x8_t __r, poly8x16x4_t __tab, uint8x8_t __idx)
24925 {
24926 __builtin_aarch64_simd_xi __o;
24927 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24928 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24929 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[2], 2);
24930 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[3], 3);
24931 return (poly8x8_t)__builtin_aarch64_qtbx4v8qi ((int8x8_t)__r, __o,
24932 (int8x8_t)__idx);
24933 }
24934
24935 __extension__ extern __inline int8x16_t
24936 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24937 vqtbx4q_s8 (int8x16_t __r, int8x16x4_t __tab, uint8x16_t __idx)
24938 {
24939 __builtin_aarch64_simd_xi __o;
24940 __o = __builtin_aarch64_set_qregxiv16qi (__o, __tab.val[0], 0);
24941 __o = __builtin_aarch64_set_qregxiv16qi (__o, __tab.val[1], 1);
24942 __o = __builtin_aarch64_set_qregxiv16qi (__o, __tab.val[2], 2);
24943 __o = __builtin_aarch64_set_qregxiv16qi (__o, __tab.val[3], 3);
24944 return __builtin_aarch64_qtbx4v16qi (__r, __o, (int8x16_t)__idx);
24945 }
24946
24947 __extension__ extern __inline uint8x16_t
24948 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24949 vqtbx4q_u8 (uint8x16_t __r, uint8x16x4_t __tab, uint8x16_t __idx)
24950 {
24951 __builtin_aarch64_simd_xi __o;
24952 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24953 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24954 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[2], 2);
24955 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[3], 3);
24956 return (uint8x16_t)__builtin_aarch64_qtbx4v16qi ((int8x16_t)__r, __o,
24957 (int8x16_t)__idx);
24958 }
24959
24960 __extension__ extern __inline poly8x16_t
24961 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24962 vqtbx4q_p8 (poly8x16_t __r, poly8x16x4_t __tab, uint8x16_t __idx)
24963 {
24964 __builtin_aarch64_simd_xi __o;
24965 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[0], 0);
24966 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[1], 1);
24967 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[2], 2);
24968 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t)__tab.val[3], 3);
24969 return (poly8x16_t)__builtin_aarch64_qtbx4v16qi ((int8x16_t)__r, __o,
24970 (int8x16_t)__idx);
24971 }
24972
24973 /* vrbit */
24974
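/* Reverse the order of the bits within every byte, e.g. a lane holding
   0x01 becomes 0x80 and a lane holding 0xb1 becomes 0x8d.  */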
24975 __extension__ extern __inline poly8x8_t
24976 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24977 vrbit_p8 (poly8x8_t __a)
24978 {
24979 return (poly8x8_t) __builtin_aarch64_rbitv8qi ((int8x8_t) __a);
24980 }
24981
24982 __extension__ extern __inline int8x8_t
24983 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24984 vrbit_s8 (int8x8_t __a)
24985 {
24986 return __builtin_aarch64_rbitv8qi (__a);
24987 }
24988
24989 __extension__ extern __inline uint8x8_t
24990 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24991 vrbit_u8 (uint8x8_t __a)
24992 {
24993 return (uint8x8_t) __builtin_aarch64_rbitv8qi ((int8x8_t) __a);
24994 }
24995
24996 __extension__ extern __inline poly8x16_t
24997 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
24998 vrbitq_p8 (poly8x16_t __a)
24999 {
25000 return (poly8x16_t) __builtin_aarch64_rbitv16qi ((int8x16_t)__a);
25001 }
25002
25003 __extension__ extern __inline int8x16_t
25004 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25005 vrbitq_s8 (int8x16_t __a)
25006 {
25007 return __builtin_aarch64_rbitv16qi (__a);
25008 }
25009
25010 __extension__ extern __inline uint8x16_t
25011 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25012 vrbitq_u8 (uint8x16_t __a)
25013 {
25014 return (uint8x16_t) __builtin_aarch64_rbitv16qi ((int8x16_t) __a);
25015 }
25016
25017 /* vrecpe */
25018
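/* Reciprocal estimates: the u32 forms use URECPE and the floating-point
   forms use FRECPE, giving a low-precision approximation of 1/x that is
   normally refined with the vrecps* step intrinsics below.  */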
25019 __extension__ extern __inline uint32x2_t
25020 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25021 vrecpe_u32 (uint32x2_t __a)
25022 {
25023 return (uint32x2_t) __builtin_aarch64_urecpev2si ((int32x2_t) __a);
25024 }
25025
25026 __extension__ extern __inline uint32x4_t
25027 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25028 vrecpeq_u32 (uint32x4_t __a)
25029 {
25030 return (uint32x4_t) __builtin_aarch64_urecpev4si ((int32x4_t) __a);
25031 }
25032
25033 __extension__ extern __inline float32_t
25034 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25035 vrecpes_f32 (float32_t __a)
25036 {
25037 return __builtin_aarch64_frecpesf (__a);
25038 }
25039
25040 __extension__ extern __inline float64_t
25041 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25042 vrecped_f64 (float64_t __a)
25043 {
25044 return __builtin_aarch64_frecpedf (__a);
25045 }
25046
25047 __extension__ extern __inline float32x2_t
25048 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25049 vrecpe_f32 (float32x2_t __a)
25050 {
25051 return __builtin_aarch64_frecpev2sf (__a);
25052 }
25053
25054 __extension__ extern __inline float64x1_t
25055 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25056 vrecpe_f64 (float64x1_t __a)
25057 {
25058 return (float64x1_t) { vrecped_f64 (vget_lane_f64 (__a, 0)) };
25059 }
25060
25061 __extension__ extern __inline float32x4_t
25062 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25063 vrecpeq_f32 (float32x4_t __a)
25064 {
25065 return __builtin_aarch64_frecpev4sf (__a);
25066 }
25067
25068 __extension__ extern __inline float64x2_t
25069 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25070 vrecpeq_f64 (float64x2_t __a)
25071 {
25072 return __builtin_aarch64_frecpev2df (__a);
25073 }
25074
25075 /* vrecps */
25076
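/* Reciprocal step: FRECPS computes 2.0 - __a * __b, the correction factor
   for one Newton-Raphson refinement of a reciprocal estimate.  A minimal
   sketch of a refined 1/x, assuming two refinement steps give the required
   accuracy (illustrative only, not part of this header):

     float32x2_t
     recip_f32 (float32x2_t __x)
     {
       float32x2_t __e = vrecpe_f32 (__x);
       __e = vmul_f32 (__e, vrecps_f32 (__x, __e));
       __e = vmul_f32 (__e, vrecps_f32 (__x, __e));
       return __e;
     }  */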
25077 __extension__ extern __inline float32_t
25078 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25079 vrecpss_f32 (float32_t __a, float32_t __b)
25080 {
25081 return __builtin_aarch64_frecpssf (__a, __b);
25082 }
25083
25084 __extension__ extern __inline float64_t
25085 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25086 vrecpsd_f64 (float64_t __a, float64_t __b)
25087 {
25088 return __builtin_aarch64_frecpsdf (__a, __b);
25089 }
25090
25091 __extension__ extern __inline float32x2_t
25092 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25093 vrecps_f32 (float32x2_t __a, float32x2_t __b)
25094 {
25095 return __builtin_aarch64_frecpsv2sf (__a, __b);
25096 }
25097
25098 __extension__ extern __inline float64x1_t
25099 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25100 vrecps_f64 (float64x1_t __a, float64x1_t __b)
25101 {
25102 return (float64x1_t) { vrecpsd_f64 (vget_lane_f64 (__a, 0),
25103 vget_lane_f64 (__b, 0)) };
25104 }
25105
25106 __extension__ extern __inline float32x4_t
25107 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25108 vrecpsq_f32 (float32x4_t __a, float32x4_t __b)
25109 {
25110 return __builtin_aarch64_frecpsv4sf (__a, __b);
25111 }
25112
25113 __extension__ extern __inline float64x2_t
25114 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25115 vrecpsq_f64 (float64x2_t __a, float64x2_t __b)
25116 {
25117 return __builtin_aarch64_frecpsv2df (__a, __b);
25118 }
25119
25120 /* vrecpx */
25121
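/* Reciprocal exponent (FRECPX), used to scale the operand into range when
   computing reciprocals and reciprocal square roots.  */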
25122 __extension__ extern __inline float32_t
25123 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25124 vrecpxs_f32 (float32_t __a)
25125 {
25126 return __builtin_aarch64_frecpxsf (__a);
25127 }
25128
25129 __extension__ extern __inline float64_t
25130 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25131 vrecpxd_f64 (float64_t __a)
25132 {
25133 return __builtin_aarch64_frecpxdf (__a);
25134 }
25135
25136
25137 /* vrev */
25138
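/* Element reversal: vrev16 reverses the bytes inside each 16-bit half,
   vrev32 reverses the elements inside each 32-bit word and vrev64 reverses
   the elements inside each 64-bit doubleword, implemented here as constant
   __builtin_shuffle permutations.  For example vrev64_u32 swaps the two
   lanes of a uint32x2_t, turning { 1, 2 } into { 2, 1 }.  */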
25139 __extension__ extern __inline poly8x8_t
25140 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25141 vrev16_p8 (poly8x8_t __a)
25142 {
25143 return __builtin_shuffle (__a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
25144 }
25145
25146 __extension__ extern __inline int8x8_t
25147 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25148 vrev16_s8 (int8x8_t __a)
25149 {
25150 return __builtin_shuffle (__a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
25151 }
25152
25153 __extension__ extern __inline uint8x8_t
25154 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25155 vrev16_u8 (uint8x8_t __a)
25156 {
25157 return __builtin_shuffle (__a, (uint8x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
25158 }
25159
25160 __extension__ extern __inline poly8x16_t
25161 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25162 vrev16q_p8 (poly8x16_t __a)
25163 {
25164 return __builtin_shuffle (__a,
25165 (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
25166 }
25167
25168 __extension__ extern __inline int8x16_t
25169 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25170 vrev16q_s8 (int8x16_t __a)
25171 {
25172 return __builtin_shuffle (__a,
25173 (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
25174 }
25175
25176 __extension__ extern __inline uint8x16_t
25177 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25178 vrev16q_u8 (uint8x16_t __a)
25179 {
25180 return __builtin_shuffle (__a,
25181 (uint8x16_t) { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 });
25182 }
25183
25184 __extension__ extern __inline poly8x8_t
25185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25186 vrev32_p8 (poly8x8_t __a)
25187 {
25188 return __builtin_shuffle (__a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
25189 }
25190
25191 __extension__ extern __inline poly16x4_t
25192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25193 vrev32_p16 (poly16x4_t __a)
25194 {
25195 return __builtin_shuffle (__a, (uint16x4_t) { 1, 0, 3, 2 });
25196 }
25197
25198 __extension__ extern __inline int8x8_t
25199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25200 vrev32_s8 (int8x8_t __a)
25201 {
25202 return __builtin_shuffle (__a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
25203 }
25204
25205 __extension__ extern __inline int16x4_t
25206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25207 vrev32_s16 (int16x4_t __a)
25208 {
25209 return __builtin_shuffle (__a, (uint16x4_t) { 1, 0, 3, 2 });
25210 }
25211
25212 __extension__ extern __inline uint8x8_t
25213 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25214 vrev32_u8 (uint8x8_t __a)
25215 {
25216 return __builtin_shuffle (__a, (uint8x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
25217 }
25218
25219 __extension__ extern __inline uint16x4_t
25220 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25221 vrev32_u16 (uint16x4_t __a)
25222 {
25223 return __builtin_shuffle (__a, (uint16x4_t) { 1, 0, 3, 2 });
25224 }
25225
25226 __extension__ extern __inline poly8x16_t
25227 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25228 vrev32q_p8 (poly8x16_t __a)
25229 {
25230 return __builtin_shuffle (__a,
25231 (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
25232 }
25233
25234 __extension__ extern __inline poly16x8_t
25235 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25236 vrev32q_p16 (poly16x8_t __a)
25237 {
25238 return __builtin_shuffle (__a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
25239 }
25240
25241 __extension__ extern __inline int8x16_t
25242 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25243 vrev32q_s8 (int8x16_t __a)
25244 {
25245 return __builtin_shuffle (__a,
25246 (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
25247 }
25248
25249 __extension__ extern __inline int16x8_t
25250 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25251 vrev32q_s16 (int16x8_t __a)
25252 {
25253 return __builtin_shuffle (__a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
25254 }
25255
25256 __extension__ extern __inline uint8x16_t
25257 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25258 vrev32q_u8 (uint8x16_t __a)
25259 {
25260 return __builtin_shuffle (__a,
25261 (uint8x16_t) { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 });
25262 }
25263
25264 __extension__ extern __inline uint16x8_t
25265 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25266 vrev32q_u16 (uint16x8_t __a)
25267 {
25268 return __builtin_shuffle (__a, (uint16x8_t) { 1, 0, 3, 2, 5, 4, 7, 6 });
25269 }
25270
25271 __extension__ extern __inline float16x4_t
25272 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25273 vrev64_f16 (float16x4_t __a)
25274 {
25275 return __builtin_shuffle (__a, (uint16x4_t) { 3, 2, 1, 0 });
25276 }
25277
25278 __extension__ extern __inline float32x2_t
25279 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25280 vrev64_f32 (float32x2_t __a)
25281 {
25282 return __builtin_shuffle (__a, (uint32x2_t) { 1, 0 });
25283 }
25284
25285 __extension__ extern __inline poly8x8_t
25286 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25287 vrev64_p8 (poly8x8_t __a)
25288 {
25289 return __builtin_shuffle (__a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
25290 }
25291
25292 __extension__ extern __inline poly16x4_t
25293 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25294 vrev64_p16 (poly16x4_t __a)
25295 {
25296 return __builtin_shuffle (__a, (uint16x4_t) { 3, 2, 1, 0 });
25297 }
25298
25299 __extension__ extern __inline int8x8_t
25300 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25301 vrev64_s8 (int8x8_t __a)
25302 {
25303 return __builtin_shuffle (__a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
25304 }
25305
25306 __extension__ extern __inline int16x4_t
25307 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25308 vrev64_s16 (int16x4_t __a)
25309 {
25310 return __builtin_shuffle (__a, (uint16x4_t) { 3, 2, 1, 0 });
25311 }
25312
25313 __extension__ extern __inline int32x2_t
25314 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25315 vrev64_s32 (int32x2_t __a)
25316 {
25317 return __builtin_shuffle (__a, (uint32x2_t) { 1, 0 });
25318 }
25319
25320 __extension__ extern __inline uint8x8_t
25321 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25322 vrev64_u8 (uint8x8_t __a)
25323 {
25324 return __builtin_shuffle (__a, (uint8x8_t) { 7, 6, 5, 4, 3, 2, 1, 0 });
25325 }
25326
25327 __extension__ extern __inline uint16x4_t
25328 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25329 vrev64_u16 (uint16x4_t __a)
25330 {
25331 return __builtin_shuffle (__a, (uint16x4_t) { 3, 2, 1, 0 });
25332 }
25333
25334 __extension__ extern __inline uint32x2_t
25335 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25336 vrev64_u32 (uint32x2_t __a)
25337 {
25338 return __builtin_shuffle (__a, (uint32x2_t) { 1, 0 });
25339 }
25340
25341 __extension__ extern __inline float16x8_t
25342 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25343 vrev64q_f16 (float16x8_t __a)
25344 {
25345 return __builtin_shuffle (__a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
25346 }
25347
25348 __extension__ extern __inline float32x4_t
25349 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25350 vrev64q_f32 (float32x4_t __a)
25351 {
25352 return __builtin_shuffle (__a, (uint32x4_t) { 1, 0, 3, 2 });
25353 }
25354
25355 __extension__ extern __inline poly8x16_t
25356 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25357 vrev64q_p8 (poly8x16_t __a)
25358 {
25359 return __builtin_shuffle (__a,
25360 (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
25361 }
25362
25363 __extension__ extern __inline poly16x8_t
25364 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25365 vrev64q_p16 (poly16x8_t __a)
25366 {
25367 return __builtin_shuffle (__a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
25368 }
25369
25370 __extension__ extern __inline int8x16_t
25371 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25372 vrev64q_s8 (int8x16_t __a)
25373 {
25374 return __builtin_shuffle (__a,
25375 (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
25376 }
25377
25378 __extension__ extern __inline int16x8_t
25379 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25380 vrev64q_s16 (int16x8_t __a)
25381 {
25382 return __builtin_shuffle (__a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
25383 }
25384
25385 __extension__ extern __inline int32x4_t
25386 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25387 vrev64q_s32 (int32x4_t __a)
25388 {
25389 return __builtin_shuffle (__a, (uint32x4_t) { 1, 0, 3, 2 });
25390 }
25391
25392 __extension__ extern __inline uint8x16_t
25393 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25394 vrev64q_u8 (uint8x16_t __a)
25395 {
25396 return __builtin_shuffle (__a,
25397 (uint8x16_t) { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 });
25398 }
25399
25400 __extension__ extern __inline uint16x8_t
25401 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25402 vrev64q_u16 (uint16x8_t __a)
25403 {
25404 return __builtin_shuffle (__a, (uint16x8_t) { 3, 2, 1, 0, 7, 6, 5, 4 });
25405 }
25406
25407 __extension__ extern __inline uint32x4_t
25408 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25409 vrev64q_u32 (uint32x4_t __a)
25410 {
25411 return __builtin_shuffle (__a, (uint32x4_t) { 1, 0, 3, 2 });
25412 }
25413
25414 /* vrnd */
25415
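/* Floating-point round to integral: vrnd truncates towards zero, vrnda
   rounds to nearest with ties away from zero, vrndi and vrndx use the
   current rounding mode (vrndx may raise the inexact exception, vrndi does
   not), vrndm rounds towards minus infinity, vrndn rounds to nearest with
   ties to even and vrndp rounds towards plus infinity.  */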
25416 __extension__ extern __inline float32x2_t
25417 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25418 vrnd_f32 (float32x2_t __a)
25419 {
25420 return __builtin_aarch64_btruncv2sf (__a);
25421 }
25422
25423 __extension__ extern __inline float64x1_t
25424 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25425 vrnd_f64 (float64x1_t __a)
25426 {
25427 return vset_lane_f64 (__builtin_trunc (vget_lane_f64 (__a, 0)), __a, 0);
25428 }
25429
25430 __extension__ extern __inline float32x4_t
25431 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25432 vrndq_f32 (float32x4_t __a)
25433 {
25434 return __builtin_aarch64_btruncv4sf (__a);
25435 }
25436
25437 __extension__ extern __inline float64x2_t
25438 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25439 vrndq_f64 (float64x2_t __a)
25440 {
25441 return __builtin_aarch64_btruncv2df (__a);
25442 }
25443
25444 /* vrnda */
25445
25446 __extension__ extern __inline float32x2_t
25447 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25448 vrnda_f32 (float32x2_t __a)
25449 {
25450 return __builtin_aarch64_roundv2sf (__a);
25451 }
25452
25453 __extension__ extern __inline float64x1_t
25454 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25455 vrnda_f64 (float64x1_t __a)
25456 {
25457 return vset_lane_f64 (__builtin_round (vget_lane_f64 (__a, 0)), __a, 0);
25458 }
25459
25460 __extension__ extern __inline float32x4_t
25461 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25462 vrndaq_f32 (float32x4_t __a)
25463 {
25464 return __builtin_aarch64_roundv4sf (__a);
25465 }
25466
25467 __extension__ extern __inline float64x2_t
25468 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25469 vrndaq_f64 (float64x2_t __a)
25470 {
25471 return __builtin_aarch64_roundv2df (__a);
25472 }
25473
25474 /* vrndi */
25475
25476 __extension__ extern __inline float32x2_t
25477 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25478 vrndi_f32 (float32x2_t __a)
25479 {
25480 return __builtin_aarch64_nearbyintv2sf (__a);
25481 }
25482
25483 __extension__ extern __inline float64x1_t
25484 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25485 vrndi_f64 (float64x1_t __a)
25486 {
25487 return vset_lane_f64 (__builtin_nearbyint (vget_lane_f64 (__a, 0)), __a, 0);
25488 }
25489
25490 __extension__ extern __inline float32x4_t
25491 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25492 vrndiq_f32 (float32x4_t __a)
25493 {
25494 return __builtin_aarch64_nearbyintv4sf (__a);
25495 }
25496
25497 __extension__ extern __inline float64x2_t
25498 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25499 vrndiq_f64 (float64x2_t __a)
25500 {
25501 return __builtin_aarch64_nearbyintv2df (__a);
25502 }
25503
25504 /* vrndm */
25505
25506 __extension__ extern __inline float32x2_t
25507 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25508 vrndm_f32 (float32x2_t __a)
25509 {
25510 return __builtin_aarch64_floorv2sf (__a);
25511 }
25512
25513 __extension__ extern __inline float64x1_t
25514 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25515 vrndm_f64 (float64x1_t __a)
25516 {
25517 return vset_lane_f64 (__builtin_floor (vget_lane_f64 (__a, 0)), __a, 0);
25518 }
25519
25520 __extension__ extern __inline float32x4_t
25521 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25522 vrndmq_f32 (float32x4_t __a)
25523 {
25524 return __builtin_aarch64_floorv4sf (__a);
25525 }
25526
25527 __extension__ extern __inline float64x2_t
25528 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25529 vrndmq_f64 (float64x2_t __a)
25530 {
25531 return __builtin_aarch64_floorv2df (__a);
25532 }
25533
25534 /* vrndn */
25535
25536 __extension__ extern __inline float32_t
25537 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25538 vrndns_f32 (float32_t __a)
25539 {
25540 return __builtin_aarch64_frintnsf (__a);
25541 }
25542
25543 __extension__ extern __inline float32x2_t
25544 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25545 vrndn_f32 (float32x2_t __a)
25546 {
25547 return __builtin_aarch64_frintnv2sf (__a);
25548 }
25549
25550 __extension__ extern __inline float64x1_t
25551 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25552 vrndn_f64 (float64x1_t __a)
25553 {
25554 return (float64x1_t) {__builtin_aarch64_frintndf (__a[0])};
25555 }
25556
25557 __extension__ extern __inline float32x4_t
25558 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25559 vrndnq_f32 (float32x4_t __a)
25560 {
25561 return __builtin_aarch64_frintnv4sf (__a);
25562 }
25563
25564 __extension__ extern __inline float64x2_t
25565 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25566 vrndnq_f64 (float64x2_t __a)
25567 {
25568 return __builtin_aarch64_frintnv2df (__a);
25569 }
25570
25571 /* vrndp */
25572
25573 __extension__ extern __inline float32x2_t
25574 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25575 vrndp_f32 (float32x2_t __a)
25576 {
25577 return __builtin_aarch64_ceilv2sf (__a);
25578 }
25579
25580 __extension__ extern __inline float64x1_t
25581 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25582 vrndp_f64 (float64x1_t __a)
25583 {
25584 return vset_lane_f64 (__builtin_ceil (vget_lane_f64 (__a, 0)), __a, 0);
25585 }
25586
25587 __extension__ extern __inline float32x4_t
25588 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25589 vrndpq_f32 (float32x4_t __a)
25590 {
25591 return __builtin_aarch64_ceilv4sf (__a);
25592 }
25593
25594 __extension__ extern __inline float64x2_t
25595 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25596 vrndpq_f64 (float64x2_t __a)
25597 {
25598 return __builtin_aarch64_ceilv2df (__a);
25599 }
25600
25601 /* vrndx */
25602
25603 __extension__ extern __inline float32x2_t
25604 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25605 vrndx_f32 (float32x2_t __a)
25606 {
25607 return __builtin_aarch64_rintv2sf (__a);
25608 }
25609
25610 __extension__ extern __inline float64x1_t
25611 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25612 vrndx_f64 (float64x1_t __a)
25613 {
25614 return vset_lane_f64 (__builtin_rint (vget_lane_f64 (__a, 0)), __a, 0);
25615 }
25616
25617 __extension__ extern __inline float32x4_t
25618 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25619 vrndxq_f32 (float32x4_t __a)
25620 {
25621 return __builtin_aarch64_rintv4sf (__a);
25622 }
25623
25624 __extension__ extern __inline float64x2_t
25625 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25626 vrndxq_f64 (float64x2_t __a)
25627 {
25628 return __builtin_aarch64_rintv2df (__a);
25629 }
25630
25631 /* vrshl */
25632
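/* Rounding shifts by a signed, per-lane variable count (SRSHL/URSHL): a
   positive count shifts left, a negative count shifts right with rounding.
   For example vrshl_s32 on a lane of 7 with a count of -2 yields
   (7 + 2) >> 2 = 2, whereas a plain arithmetic shift would give 1.  */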
25633 __extension__ extern __inline int8x8_t
25634 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25635 vrshl_s8 (int8x8_t __a, int8x8_t __b)
25636 {
25637 return (int8x8_t) __builtin_aarch64_srshlv8qi (__a, __b);
25638 }
25639
25640 __extension__ extern __inline int16x4_t
25641 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25642 vrshl_s16 (int16x4_t __a, int16x4_t __b)
25643 {
25644 return (int16x4_t) __builtin_aarch64_srshlv4hi (__a, __b);
25645 }
25646
25647 __extension__ extern __inline int32x2_t
25648 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25649 vrshl_s32 (int32x2_t __a, int32x2_t __b)
25650 {
25651 return (int32x2_t) __builtin_aarch64_srshlv2si (__a, __b);
25652 }
25653
25654 __extension__ extern __inline int64x1_t
25655 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25656 vrshl_s64 (int64x1_t __a, int64x1_t __b)
25657 {
25658 return (int64x1_t) {__builtin_aarch64_srshldi (__a[0], __b[0])};
25659 }
25660
25661 __extension__ extern __inline uint8x8_t
25662 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25663 vrshl_u8 (uint8x8_t __a, int8x8_t __b)
25664 {
25665 return __builtin_aarch64_urshlv8qi_uus (__a, __b);
25666 }
25667
25668 __extension__ extern __inline uint16x4_t
25669 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25670 vrshl_u16 (uint16x4_t __a, int16x4_t __b)
25671 {
25672 return __builtin_aarch64_urshlv4hi_uus (__a, __b);
25673 }
25674
25675 __extension__ extern __inline uint32x2_t
25676 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25677 vrshl_u32 (uint32x2_t __a, int32x2_t __b)
25678 {
25679 return __builtin_aarch64_urshlv2si_uus (__a, __b);
25680 }
25681
25682 __extension__ extern __inline uint64x1_t
25683 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25684 vrshl_u64 (uint64x1_t __a, int64x1_t __b)
25685 {
25686 return (uint64x1_t) {__builtin_aarch64_urshldi_uus (__a[0], __b[0])};
25687 }
25688
25689 __extension__ extern __inline int8x16_t
25690 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25691 vrshlq_s8 (int8x16_t __a, int8x16_t __b)
25692 {
25693 return (int8x16_t) __builtin_aarch64_srshlv16qi (__a, __b);
25694 }
25695
25696 __extension__ extern __inline int16x8_t
25697 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25698 vrshlq_s16 (int16x8_t __a, int16x8_t __b)
25699 {
25700 return (int16x8_t) __builtin_aarch64_srshlv8hi (__a, __b);
25701 }
25702
25703 __extension__ extern __inline int32x4_t
25704 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25705 vrshlq_s32 (int32x4_t __a, int32x4_t __b)
25706 {
25707 return (int32x4_t) __builtin_aarch64_srshlv4si (__a, __b);
25708 }
25709
25710 __extension__ extern __inline int64x2_t
25711 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25712 vrshlq_s64 (int64x2_t __a, int64x2_t __b)
25713 {
25714 return (int64x2_t) __builtin_aarch64_srshlv2di (__a, __b);
25715 }
25716
25717 __extension__ extern __inline uint8x16_t
25718 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25719 vrshlq_u8 (uint8x16_t __a, int8x16_t __b)
25720 {
25721 return __builtin_aarch64_urshlv16qi_uus (__a, __b);
25722 }
25723
25724 __extension__ extern __inline uint16x8_t
25725 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25726 vrshlq_u16 (uint16x8_t __a, int16x8_t __b)
25727 {
25728 return __builtin_aarch64_urshlv8hi_uus (__a, __b);
25729 }
25730
25731 __extension__ extern __inline uint32x4_t
25732 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25733 vrshlq_u32 (uint32x4_t __a, int32x4_t __b)
25734 {
25735 return __builtin_aarch64_urshlv4si_uus (__a, __b);
25736 }
25737
25738 __extension__ extern __inline uint64x2_t
25739 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25740 vrshlq_u64 (uint64x2_t __a, int64x2_t __b)
25741 {
25742 return __builtin_aarch64_urshlv2di_uus (__a, __b);
25743 }
25744
25745 __extension__ extern __inline int64_t
25746 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25747 vrshld_s64 (int64_t __a, int64_t __b)
25748 {
25749 return __builtin_aarch64_srshldi (__a, __b);
25750 }
25751
25752 __extension__ extern __inline uint64_t
25753 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25754 vrshld_u64 (uint64_t __a, int64_t __b)
25755 {
25756 return __builtin_aarch64_urshldi_uus (__a, __b);
25757 }
25758
25759 /* vrshr */
25760
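/* Rounding shift right by an immediate: 1 << (shift - 1) is added to the
   operand before shifting, rounding the result to nearest.  For example
   vrshr_n_s16 on a lane holding -5 with a shift of 1 gives
   (-5 + 1) >> 1 = -2, where the plain vshr_n_s16 would give -3.  */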
25761 __extension__ extern __inline int8x8_t
25762 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25763 vrshr_n_s8 (int8x8_t __a, const int __b)
25764 {
25765 return (int8x8_t) __builtin_aarch64_srshr_nv8qi (__a, __b);
25766 }
25767
25768 __extension__ extern __inline int16x4_t
25769 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25770 vrshr_n_s16 (int16x4_t __a, const int __b)
25771 {
25772 return (int16x4_t) __builtin_aarch64_srshr_nv4hi (__a, __b);
25773 }
25774
25775 __extension__ extern __inline int32x2_t
25776 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25777 vrshr_n_s32 (int32x2_t __a, const int __b)
25778 {
25779 return (int32x2_t) __builtin_aarch64_srshr_nv2si (__a, __b);
25780 }
25781
25782 __extension__ extern __inline int64x1_t
25783 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25784 vrshr_n_s64 (int64x1_t __a, const int __b)
25785 {
25786 return (int64x1_t) {__builtin_aarch64_srshr_ndi (__a[0], __b)};
25787 }
25788
25789 __extension__ extern __inline uint8x8_t
25790 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25791 vrshr_n_u8 (uint8x8_t __a, const int __b)
25792 {
25793 return __builtin_aarch64_urshr_nv8qi_uus (__a, __b);
25794 }
25795
25796 __extension__ extern __inline uint16x4_t
25797 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25798 vrshr_n_u16 (uint16x4_t __a, const int __b)
25799 {
25800 return __builtin_aarch64_urshr_nv4hi_uus (__a, __b);
25801 }
25802
25803 __extension__ extern __inline uint32x2_t
25804 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25805 vrshr_n_u32 (uint32x2_t __a, const int __b)
25806 {
25807 return __builtin_aarch64_urshr_nv2si_uus (__a, __b);
25808 }
25809
25810 __extension__ extern __inline uint64x1_t
25811 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25812 vrshr_n_u64 (uint64x1_t __a, const int __b)
25813 {
25814 return (uint64x1_t) {__builtin_aarch64_urshr_ndi_uus (__a[0], __b)};
25815 }
25816
25817 __extension__ extern __inline int8x16_t
25818 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25819 vrshrq_n_s8 (int8x16_t __a, const int __b)
25820 {
25821 return (int8x16_t) __builtin_aarch64_srshr_nv16qi (__a, __b);
25822 }
25823
25824 __extension__ extern __inline int16x8_t
25825 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25826 vrshrq_n_s16 (int16x8_t __a, const int __b)
25827 {
25828 return (int16x8_t) __builtin_aarch64_srshr_nv8hi (__a, __b);
25829 }
25830
25831 __extension__ extern __inline int32x4_t
25832 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25833 vrshrq_n_s32 (int32x4_t __a, const int __b)
25834 {
25835 return (int32x4_t) __builtin_aarch64_srshr_nv4si (__a, __b);
25836 }
25837
25838 __extension__ extern __inline int64x2_t
25839 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25840 vrshrq_n_s64 (int64x2_t __a, const int __b)
25841 {
25842 return (int64x2_t) __builtin_aarch64_srshr_nv2di (__a, __b);
25843 }
25844
25845 __extension__ extern __inline uint8x16_t
25846 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25847 vrshrq_n_u8 (uint8x16_t __a, const int __b)
25848 {
25849 return __builtin_aarch64_urshr_nv16qi_uus (__a, __b);
25850 }
25851
25852 __extension__ extern __inline uint16x8_t
25853 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25854 vrshrq_n_u16 (uint16x8_t __a, const int __b)
25855 {
25856 return __builtin_aarch64_urshr_nv8hi_uus (__a, __b);
25857 }
25858
25859 __extension__ extern __inline uint32x4_t
25860 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25861 vrshrq_n_u32 (uint32x4_t __a, const int __b)
25862 {
25863 return __builtin_aarch64_urshr_nv4si_uus (__a, __b);
25864 }
25865
25866 __extension__ extern __inline uint64x2_t
25867 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25868 vrshrq_n_u64 (uint64x2_t __a, const int __b)
25869 {
25870 return __builtin_aarch64_urshr_nv2di_uus (__a, __b);
25871 }
25872
25873 __extension__ extern __inline int64_t
25874 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25875 vrshrd_n_s64 (int64_t __a, const int __b)
25876 {
25877 return __builtin_aarch64_srshr_ndi (__a, __b);
25878 }
25879
25880 __extension__ extern __inline uint64_t
25881 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25882 vrshrd_n_u64 (uint64_t __a, const int __b)
25883 {
25884 return __builtin_aarch64_urshr_ndi_uus (__a, __b);
25885 }
25886
25887 /* vrsqrte */
25888
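/* Reciprocal square root estimate (FRSQRTE), a low-precision approximation
   of 1/sqrt(x) that is normally refined with the vrsqrts* step intrinsics
   below.  */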
25889 __extension__ extern __inline float32_t
25890 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25891 vrsqrtes_f32 (float32_t __a)
25892 {
25893 return __builtin_aarch64_rsqrtesf (__a);
25894 }
25895
25896 __extension__ extern __inline float64_t
25897 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25898 vrsqrted_f64 (float64_t __a)
25899 {
25900 return __builtin_aarch64_rsqrtedf (__a);
25901 }
25902
25903 __extension__ extern __inline float32x2_t
25904 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25905 vrsqrte_f32 (float32x2_t __a)
25906 {
25907 return __builtin_aarch64_rsqrtev2sf (__a);
25908 }
25909
25910 __extension__ extern __inline float64x1_t
25911 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25912 vrsqrte_f64 (float64x1_t __a)
25913 {
25914 return (float64x1_t) {vrsqrted_f64 (vget_lane_f64 (__a, 0))};
25915 }
25916
25917 __extension__ extern __inline float32x4_t
25918 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25919 vrsqrteq_f32 (float32x4_t __a)
25920 {
25921 return __builtin_aarch64_rsqrtev4sf (__a);
25922 }
25923
25924 __extension__ extern __inline float64x2_t
25925 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25926 vrsqrteq_f64 (float64x2_t __a)
25927 {
25928 return __builtin_aarch64_rsqrtev2df (__a);
25929 }
25930
25931 /* vrsqrts */
25932
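/* Reciprocal square root step: FRSQRTS computes (3.0 - __a * __b) / 2.0,
   the correction factor for one Newton-Raphson refinement of an estimate.
   A minimal sketch of a refined 1/sqrt(x), assuming two refinement steps
   are enough (illustrative only, not part of this header):

     float32x2_t
     rsqrt_f32 (float32x2_t __x)
     {
       float32x2_t __e = vrsqrte_f32 (__x);
       __e = vmul_f32 (__e, vrsqrts_f32 (__x, vmul_f32 (__e, __e)));
       __e = vmul_f32 (__e, vrsqrts_f32 (__x, vmul_f32 (__e, __e)));
       return __e;
     }  */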
25933 __extension__ extern __inline float32_t
25934 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25935 vrsqrtss_f32 (float32_t __a, float32_t __b)
25936 {
25937 return __builtin_aarch64_rsqrtssf (__a, __b);
25938 }
25939
25940 __extension__ extern __inline float64_t
25941 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25942 vrsqrtsd_f64 (float64_t __a, float64_t __b)
25943 {
25944 return __builtin_aarch64_rsqrtsdf (__a, __b);
25945 }
25946
25947 __extension__ extern __inline float32x2_t
25948 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25949 vrsqrts_f32 (float32x2_t __a, float32x2_t __b)
25950 {
25951 return __builtin_aarch64_rsqrtsv2sf (__a, __b);
25952 }
25953
25954 __extension__ extern __inline float64x1_t
25955 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25956 vrsqrts_f64 (float64x1_t __a, float64x1_t __b)
25957 {
25958 return (float64x1_t) {vrsqrtsd_f64 (vget_lane_f64 (__a, 0),
25959 vget_lane_f64 (__b, 0))};
25960 }
25961
25962 __extension__ extern __inline float32x4_t
25963 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25964 vrsqrtsq_f32 (float32x4_t __a, float32x4_t __b)
25965 {
25966 return __builtin_aarch64_rsqrtsv4sf (__a, __b);
25967 }
25968
25969 __extension__ extern __inline float64x2_t
25970 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25971 vrsqrtsq_f64 (float64x2_t __a, float64x2_t __b)
25972 {
25973 return __builtin_aarch64_rsqrtsv2df (__a, __b);
25974 }
25975
25976 /* vrsra */
25977
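/* Rounding shift right by an immediate and accumulate: each lane of __b is
   shifted right with rounding as for vrshr_n and then added to the
   corresponding lane of __a.  */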
25978 __extension__ extern __inline int8x8_t
25979 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25980 vrsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
25981 {
25982 return (int8x8_t) __builtin_aarch64_srsra_nv8qi (__a, __b, __c);
25983 }
25984
25985 __extension__ extern __inline int16x4_t
25986 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25987 vrsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
25988 {
25989 return (int16x4_t) __builtin_aarch64_srsra_nv4hi (__a, __b, __c);
25990 }
25991
25992 __extension__ extern __inline int32x2_t
25993 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
25994 vrsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
25995 {
25996 return (int32x2_t) __builtin_aarch64_srsra_nv2si (__a, __b, __c);
25997 }
25998
25999 __extension__ extern __inline int64x1_t
26000 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26001 vrsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
26002 {
26003 return (int64x1_t) {__builtin_aarch64_srsra_ndi (__a[0], __b[0], __c)};
26004 }
26005
26006 __extension__ extern __inline uint8x8_t
26007 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26008 vrsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
26009 {
26010 return __builtin_aarch64_ursra_nv8qi_uuus (__a, __b, __c);
26011 }
26012
26013 __extension__ extern __inline uint16x4_t
26014 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26015 vrsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
26016 {
26017 return __builtin_aarch64_ursra_nv4hi_uuus (__a, __b, __c);
26018 }
26019
26020 __extension__ extern __inline uint32x2_t
26021 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26022 vrsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
26023 {
26024 return __builtin_aarch64_ursra_nv2si_uuus (__a, __b, __c);
26025 }
26026
26027 __extension__ extern __inline uint64x1_t
26028 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26029 vrsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
26030 {
26031 return (uint64x1_t) {__builtin_aarch64_ursra_ndi_uuus (__a[0], __b[0], __c)};
26032 }
26033
26034 __extension__ extern __inline int8x16_t
26035 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26036 vrsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
26037 {
26038 return (int8x16_t) __builtin_aarch64_srsra_nv16qi (__a, __b, __c);
26039 }
26040
26041 __extension__ extern __inline int16x8_t
26042 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26043 vrsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
26044 {
26045 return (int16x8_t) __builtin_aarch64_srsra_nv8hi (__a, __b, __c);
26046 }
26047
26048 __extension__ extern __inline int32x4_t
26049 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26050 vrsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
26051 {
26052 return (int32x4_t) __builtin_aarch64_srsra_nv4si (__a, __b, __c);
26053 }
26054
26055 __extension__ extern __inline int64x2_t
26056 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26057 vrsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
26058 {
26059 return (int64x2_t) __builtin_aarch64_srsra_nv2di (__a, __b, __c);
26060 }
26061
26062 __extension__ extern __inline uint8x16_t
26063 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26064 vrsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
26065 {
26066 return __builtin_aarch64_ursra_nv16qi_uuus (__a, __b, __c);
26067 }
26068
26069 __extension__ extern __inline uint16x8_t
26070 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26071 vrsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
26072 {
26073 return __builtin_aarch64_ursra_nv8hi_uuus (__a, __b, __c);
26074 }
26075
26076 __extension__ extern __inline uint32x4_t
26077 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26078 vrsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
26079 {
26080 return __builtin_aarch64_ursra_nv4si_uuus (__a, __b, __c);
26081 }
26082
26083 __extension__ extern __inline uint64x2_t
26084 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26085 vrsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
26086 {
26087 return __builtin_aarch64_ursra_nv2di_uuus (__a, __b, __c);
26088 }
26089
26090 __extension__ extern __inline int64_t
26091 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26092 vrsrad_n_s64 (int64_t __a, int64_t __b, const int __c)
26093 {
26094 return __builtin_aarch64_srsra_ndi (__a, __b, __c);
26095 }
26096
26097 __extension__ extern __inline uint64_t
26098 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26099 vrsrad_n_u64 (uint64_t __a, uint64_t __b, const int __c)
26100 {
26101 return __builtin_aarch64_ursra_ndi_uuus (__a, __b, __c);
26102 }
26103
26104 #pragma GCC push_options
26105 #pragma GCC target ("+nothing+crypto")
26106
26107 /* vsha1 */
26108
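/* SHA-1 and SHA-256 hash-update and schedule-update intrinsics, plus the
   64x64-bit polynomial multiplies further below, all of which require the
   +crypto extension enabled by the target pragma above.  */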
26109 __extension__ extern __inline uint32x4_t
26110 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26111 vsha1cq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
26112 {
26113 return __builtin_aarch64_crypto_sha1cv4si_uuuu (__hash_abcd, __hash_e, __wk);
26114 }
26115
26116 __extension__ extern __inline uint32x4_t
26117 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26118 vsha1mq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
26119 {
26120 return __builtin_aarch64_crypto_sha1mv4si_uuuu (__hash_abcd, __hash_e, __wk);
26121 }
26122
26123 __extension__ extern __inline uint32x4_t
26124 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26125 vsha1pq_u32 (uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
26126 {
26127 return __builtin_aarch64_crypto_sha1pv4si_uuuu (__hash_abcd, __hash_e, __wk);
26128 }
26129
26130 __extension__ extern __inline uint32_t
26131 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26132 vsha1h_u32 (uint32_t __hash_e)
26133 {
26134 return __builtin_aarch64_crypto_sha1hsi_uu (__hash_e);
26135 }
26136
26137 __extension__ extern __inline uint32x4_t
26138 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26139 vsha1su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7, uint32x4_t __w8_11)
26140 {
26141 return __builtin_aarch64_crypto_sha1su0v4si_uuuu (__w0_3, __w4_7, __w8_11);
26142 }
26143
26144 __extension__ extern __inline uint32x4_t
26145 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26146 vsha1su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w12_15)
26147 {
26148 return __builtin_aarch64_crypto_sha1su1v4si_uuu (__tw0_3, __w12_15);
26149 }
26150
26151 __extension__ extern __inline uint32x4_t
26152 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26153 vsha256hq_u32 (uint32x4_t __hash_abcd, uint32x4_t __hash_efgh, uint32x4_t __wk)
26154 {
26155 return __builtin_aarch64_crypto_sha256hv4si_uuuu (__hash_abcd, __hash_efgh,
26156 __wk);
26157 }
26158
26159 __extension__ extern __inline uint32x4_t
26160 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26161 vsha256h2q_u32 (uint32x4_t __hash_efgh, uint32x4_t __hash_abcd, uint32x4_t __wk)
26162 {
26163 return __builtin_aarch64_crypto_sha256h2v4si_uuuu (__hash_efgh, __hash_abcd,
26164 __wk);
26165 }
26166
26167 __extension__ extern __inline uint32x4_t
26168 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26169 vsha256su0q_u32 (uint32x4_t __w0_3, uint32x4_t __w4_7)
26170 {
26171 return __builtin_aarch64_crypto_sha256su0v4si_uuu (__w0_3, __w4_7);
26172 }
26173
26174 __extension__ extern __inline uint32x4_t
26175 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26176 vsha256su1q_u32 (uint32x4_t __tw0_3, uint32x4_t __w8_11, uint32x4_t __w12_15)
26177 {
26178 return __builtin_aarch64_crypto_sha256su1v4si_uuuu (__tw0_3, __w8_11,
26179 __w12_15);
26180 }
26181
26182 __extension__ extern __inline poly128_t
26183 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26184 vmull_p64 (poly64_t __a, poly64_t __b)
26185 {
26186 return
26187 __builtin_aarch64_crypto_pmulldi_ppp (__a, __b);
26188 }
26189
26190 __extension__ extern __inline poly128_t
26191 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26192 vmull_high_p64 (poly64x2_t __a, poly64x2_t __b)
26193 {
26194 return __builtin_aarch64_crypto_pmullv2di_ppp (__a, __b);
26195 }
26196
26197 #pragma GCC pop_options
26198
26199 /* vshl */
26200
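/* Non-rounding shifts: the vshl_n* forms shift left by an immediate, while
   the vshl* forms shift by a signed, per-lane variable count (SSHL/USHL)
   where a negative count shifts right, truncating the bits shifted out.  */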
26201 __extension__ extern __inline int8x8_t
26202 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26203 vshl_n_s8 (int8x8_t __a, const int __b)
26204 {
26205 return (int8x8_t) __builtin_aarch64_ashlv8qi (__a, __b);
26206 }
26207
26208 __extension__ extern __inline int16x4_t
26209 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26210 vshl_n_s16 (int16x4_t __a, const int __b)
26211 {
26212 return (int16x4_t) __builtin_aarch64_ashlv4hi (__a, __b);
26213 }
26214
26215 __extension__ extern __inline int32x2_t
26216 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26217 vshl_n_s32 (int32x2_t __a, const int __b)
26218 {
26219 return (int32x2_t) __builtin_aarch64_ashlv2si (__a, __b);
26220 }
26221
26222 __extension__ extern __inline int64x1_t
26223 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26224 vshl_n_s64 (int64x1_t __a, const int __b)
26225 {
26226 return (int64x1_t) {__builtin_aarch64_ashldi (__a[0], __b)};
26227 }
26228
26229 __extension__ extern __inline uint8x8_t
26230 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26231 vshl_n_u8 (uint8x8_t __a, const int __b)
26232 {
26233 return (uint8x8_t) __builtin_aarch64_ashlv8qi ((int8x8_t) __a, __b);
26234 }
26235
26236 __extension__ extern __inline uint16x4_t
26237 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26238 vshl_n_u16 (uint16x4_t __a, const int __b)
26239 {
26240 return (uint16x4_t) __builtin_aarch64_ashlv4hi ((int16x4_t) __a, __b);
26241 }
26242
26243 __extension__ extern __inline uint32x2_t
26244 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26245 vshl_n_u32 (uint32x2_t __a, const int __b)
26246 {
26247 return (uint32x2_t) __builtin_aarch64_ashlv2si ((int32x2_t) __a, __b);
26248 }
26249
26250 __extension__ extern __inline uint64x1_t
26251 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26252 vshl_n_u64 (uint64x1_t __a, const int __b)
26253 {
26254 return (uint64x1_t) {__builtin_aarch64_ashldi ((int64_t) __a[0], __b)};
26255 }
26256
26257 __extension__ extern __inline int8x16_t
26258 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26259 vshlq_n_s8 (int8x16_t __a, const int __b)
26260 {
26261 return (int8x16_t) __builtin_aarch64_ashlv16qi (__a, __b);
26262 }
26263
26264 __extension__ extern __inline int16x8_t
26265 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26266 vshlq_n_s16 (int16x8_t __a, const int __b)
26267 {
26268 return (int16x8_t) __builtin_aarch64_ashlv8hi (__a, __b);
26269 }
26270
26271 __extension__ extern __inline int32x4_t
26272 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26273 vshlq_n_s32 (int32x4_t __a, const int __b)
26274 {
26275 return (int32x4_t) __builtin_aarch64_ashlv4si (__a, __b);
26276 }
26277
26278 __extension__ extern __inline int64x2_t
26279 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26280 vshlq_n_s64 (int64x2_t __a, const int __b)
26281 {
26282 return (int64x2_t) __builtin_aarch64_ashlv2di (__a, __b);
26283 }
26284
26285 __extension__ extern __inline uint8x16_t
26286 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26287 vshlq_n_u8 (uint8x16_t __a, const int __b)
26288 {
26289 return (uint8x16_t) __builtin_aarch64_ashlv16qi ((int8x16_t) __a, __b);
26290 }
26291
26292 __extension__ extern __inline uint16x8_t
26293 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26294 vshlq_n_u16 (uint16x8_t __a, const int __b)
26295 {
26296 return (uint16x8_t) __builtin_aarch64_ashlv8hi ((int16x8_t) __a, __b);
26297 }
26298
26299 __extension__ extern __inline uint32x4_t
26300 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26301 vshlq_n_u32 (uint32x4_t __a, const int __b)
26302 {
26303 return (uint32x4_t) __builtin_aarch64_ashlv4si ((int32x4_t) __a, __b);
26304 }
26305
26306 __extension__ extern __inline uint64x2_t
26307 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26308 vshlq_n_u64 (uint64x2_t __a, const int __b)
26309 {
26310 return (uint64x2_t) __builtin_aarch64_ashlv2di ((int64x2_t) __a, __b);
26311 }
26312
26313 __extension__ extern __inline int64_t
26314 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26315 vshld_n_s64 (int64_t __a, const int __b)
26316 {
26317 return __builtin_aarch64_ashldi (__a, __b);
26318 }
26319
26320 __extension__ extern __inline uint64_t
26321 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26322 vshld_n_u64 (uint64_t __a, const int __b)
26323 {
26324 return (uint64_t) __builtin_aarch64_ashldi (__a, __b);
26325 }
26326
26327 __extension__ extern __inline int8x8_t
26328 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26329 vshl_s8 (int8x8_t __a, int8x8_t __b)
26330 {
26331 return __builtin_aarch64_sshlv8qi (__a, __b);
26332 }
26333
26334 __extension__ extern __inline int16x4_t
26335 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26336 vshl_s16 (int16x4_t __a, int16x4_t __b)
26337 {
26338 return __builtin_aarch64_sshlv4hi (__a, __b);
26339 }
26340
26341 __extension__ extern __inline int32x2_t
26342 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26343 vshl_s32 (int32x2_t __a, int32x2_t __b)
26344 {
26345 return __builtin_aarch64_sshlv2si (__a, __b);
26346 }
26347
26348 __extension__ extern __inline int64x1_t
26349 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26350 vshl_s64 (int64x1_t __a, int64x1_t __b)
26351 {
26352 return (int64x1_t) {__builtin_aarch64_sshldi (__a[0], __b[0])};
26353 }
26354
26355 __extension__ extern __inline uint8x8_t
26356 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26357 vshl_u8 (uint8x8_t __a, int8x8_t __b)
26358 {
26359 return __builtin_aarch64_ushlv8qi_uus (__a, __b);
26360 }
26361
26362 __extension__ extern __inline uint16x4_t
26363 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26364 vshl_u16 (uint16x4_t __a, int16x4_t __b)
26365 {
26366 return __builtin_aarch64_ushlv4hi_uus (__a, __b);
26367 }
26368
26369 __extension__ extern __inline uint32x2_t
26370 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26371 vshl_u32 (uint32x2_t __a, int32x2_t __b)
26372 {
26373 return __builtin_aarch64_ushlv2si_uus (__a, __b);
26374 }
26375
26376 __extension__ extern __inline uint64x1_t
26377 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26378 vshl_u64 (uint64x1_t __a, int64x1_t __b)
26379 {
26380 return (uint64x1_t) {__builtin_aarch64_ushldi_uus (__a[0], __b[0])};
26381 }
26382
26383 __extension__ extern __inline int8x16_t
26384 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26385 vshlq_s8 (int8x16_t __a, int8x16_t __b)
26386 {
26387 return __builtin_aarch64_sshlv16qi (__a, __b);
26388 }
26389
26390 __extension__ extern __inline int16x8_t
26391 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26392 vshlq_s16 (int16x8_t __a, int16x8_t __b)
26393 {
26394 return __builtin_aarch64_sshlv8hi (__a, __b);
26395 }
26396
26397 __extension__ extern __inline int32x4_t
26398 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26399 vshlq_s32 (int32x4_t __a, int32x4_t __b)
26400 {
26401 return __builtin_aarch64_sshlv4si (__a, __b);
26402 }
26403
26404 __extension__ extern __inline int64x2_t
26405 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26406 vshlq_s64 (int64x2_t __a, int64x2_t __b)
26407 {
26408 return __builtin_aarch64_sshlv2di (__a, __b);
26409 }
26410
26411 __extension__ extern __inline uint8x16_t
26412 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26413 vshlq_u8 (uint8x16_t __a, int8x16_t __b)
26414 {
26415 return __builtin_aarch64_ushlv16qi_uus (__a, __b);
26416 }
26417
26418 __extension__ extern __inline uint16x8_t
26419 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26420 vshlq_u16 (uint16x8_t __a, int16x8_t __b)
26421 {
26422 return __builtin_aarch64_ushlv8hi_uus (__a, __b);
26423 }
26424
26425 __extension__ extern __inline uint32x4_t
26426 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26427 vshlq_u32 (uint32x4_t __a, int32x4_t __b)
26428 {
26429 return __builtin_aarch64_ushlv4si_uus (__a, __b);
26430 }
26431
26432 __extension__ extern __inline uint64x2_t
26433 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26434 vshlq_u64 (uint64x2_t __a, int64x2_t __b)
26435 {
26436 return __builtin_aarch64_ushlv2di_uus (__a, __b);
26437 }
26438
26439 __extension__ extern __inline int64_t
26440 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26441 vshld_s64 (int64_t __a, int64_t __b)
26442 {
26443 return __builtin_aarch64_sshldi (__a, __b);
26444 }
26445
26446 __extension__ extern __inline uint64_t
26447 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26448 vshld_u64 (uint64_t __a, int64_t __b)
26449 {
26450 return __builtin_aarch64_ushldi_uus (__a, __b);
26451 }
26452
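/* Illustrative usage sketch, not part of this header: the register forms
   of vshl take the shift count per lane from the second operand, and a
   negative count shifts right instead of left.  The helper name
   __example_per_lane_shift is hypothetical.  */
static __inline int32x2_t
__example_per_lane_shift (int32x2_t __x)
{
  /* Lane 0 is shifted left by 4; lane 1 is shifted right by 4.  */
  int32x2_t __counts = {4, -4};
  return vshl_s32 (__x, __counts);
}
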
26453 __extension__ extern __inline int16x8_t
26454 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26455 vshll_high_n_s8 (int8x16_t __a, const int __b)
26456 {
26457 return __builtin_aarch64_sshll2_nv16qi (__a, __b);
26458 }
26459
26460 __extension__ extern __inline int32x4_t
26461 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26462 vshll_high_n_s16 (int16x8_t __a, const int __b)
26463 {
26464 return __builtin_aarch64_sshll2_nv8hi (__a, __b);
26465 }
26466
26467 __extension__ extern __inline int64x2_t
26468 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26469 vshll_high_n_s32 (int32x4_t __a, const int __b)
26470 {
26471 return __builtin_aarch64_sshll2_nv4si (__a, __b);
26472 }
26473
26474 __extension__ extern __inline uint16x8_t
26475 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26476 vshll_high_n_u8 (uint8x16_t __a, const int __b)
26477 {
26478 return (uint16x8_t) __builtin_aarch64_ushll2_nv16qi ((int8x16_t) __a, __b);
26479 }
26480
26481 __extension__ extern __inline uint32x4_t
26482 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26483 vshll_high_n_u16 (uint16x8_t __a, const int __b)
26484 {
26485 return (uint32x4_t) __builtin_aarch64_ushll2_nv8hi ((int16x8_t) __a, __b);
26486 }
26487
26488 __extension__ extern __inline uint64x2_t
26489 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26490 vshll_high_n_u32 (uint32x4_t __a, const int __b)
26491 {
26492 return (uint64x2_t) __builtin_aarch64_ushll2_nv4si ((int32x4_t) __a, __b);
26493 }
26494
26495 __extension__ extern __inline int16x8_t
26496 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26497 vshll_n_s8 (int8x8_t __a, const int __b)
26498 {
26499 return __builtin_aarch64_sshll_nv8qi (__a, __b);
26500 }
26501
26502 __extension__ extern __inline int32x4_t
26503 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26504 vshll_n_s16 (int16x4_t __a, const int __b)
26505 {
26506 return __builtin_aarch64_sshll_nv4hi (__a, __b);
26507 }
26508
26509 __extension__ extern __inline int64x2_t
26510 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26511 vshll_n_s32 (int32x2_t __a, const int __b)
26512 {
26513 return __builtin_aarch64_sshll_nv2si (__a, __b);
26514 }
26515
26516 __extension__ extern __inline uint16x8_t
26517 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26518 vshll_n_u8 (uint8x8_t __a, const int __b)
26519 {
26520 return __builtin_aarch64_ushll_nv8qi_uus (__a, __b);
26521 }
26522
26523 __extension__ extern __inline uint32x4_t
26524 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26525 vshll_n_u16 (uint16x4_t __a, const int __b)
26526 {
26527 return __builtin_aarch64_ushll_nv4hi_uus (__a, __b);
26528 }
26529
26530 __extension__ extern __inline uint64x2_t
26531 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26532 vshll_n_u32 (uint32x2_t __a, const int __b)
26533 {
26534 return __builtin_aarch64_ushll_nv2si_uus (__a, __b);
26535 }
26536
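/* Illustrative usage sketch, not part of this header: vshll_n_u8 widens
   each byte to 16 bits and shifts it left, which promotes 8-bit samples
   to a wider fixed-point format in one instruction.  The helper name
   __example_widen_and_scale is hypothetical.  */
static __inline uint16x8_t
__example_widen_and_scale (uint8x8_t __samples)
{
  /* Each 8-bit lane becomes a 16-bit lane holding sample * 8.  */
  return vshll_n_u8 (__samples, 3);
}
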
26537 /* vshr */
26538
26539 __extension__ extern __inline int8x8_t
26540 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26541 vshr_n_s8 (int8x8_t __a, const int __b)
26542 {
26543 return (int8x8_t) __builtin_aarch64_ashrv8qi (__a, __b);
26544 }
26545
26546 __extension__ extern __inline int16x4_t
26547 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26548 vshr_n_s16 (int16x4_t __a, const int __b)
26549 {
26550 return (int16x4_t) __builtin_aarch64_ashrv4hi (__a, __b);
26551 }
26552
26553 __extension__ extern __inline int32x2_t
26554 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26555 vshr_n_s32 (int32x2_t __a, const int __b)
26556 {
26557 return (int32x2_t) __builtin_aarch64_ashrv2si (__a, __b);
26558 }
26559
26560 __extension__ extern __inline int64x1_t
26561 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26562 vshr_n_s64 (int64x1_t __a, const int __b)
26563 {
26564 return (int64x1_t) {__builtin_aarch64_ashr_simddi (__a[0], __b)};
26565 }
26566
26567 __extension__ extern __inline uint8x8_t
26568 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26569 vshr_n_u8 (uint8x8_t __a, const int __b)
26570 {
26571 return (uint8x8_t) __builtin_aarch64_lshrv8qi ((int8x8_t) __a, __b);
26572 }
26573
26574 __extension__ extern __inline uint16x4_t
26575 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26576 vshr_n_u16 (uint16x4_t __a, const int __b)
26577 {
26578 return (uint16x4_t) __builtin_aarch64_lshrv4hi ((int16x4_t) __a, __b);
26579 }
26580
26581 __extension__ extern __inline uint32x2_t
26582 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26583 vshr_n_u32 (uint32x2_t __a, const int __b)
26584 {
26585 return (uint32x2_t) __builtin_aarch64_lshrv2si ((int32x2_t) __a, __b);
26586 }
26587
26588 __extension__ extern __inline uint64x1_t
26589 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26590 vshr_n_u64 (uint64x1_t __a, const int __b)
26591 {
26592 return (uint64x1_t) {__builtin_aarch64_lshr_simddi_uus (__a[0], __b)};
26593 }
26594
26595 __extension__ extern __inline int8x16_t
26596 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26597 vshrq_n_s8 (int8x16_t __a, const int __b)
26598 {
26599 return (int8x16_t) __builtin_aarch64_ashrv16qi (__a, __b);
26600 }
26601
26602 __extension__ extern __inline int16x8_t
26603 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26604 vshrq_n_s16 (int16x8_t __a, const int __b)
26605 {
26606 return (int16x8_t) __builtin_aarch64_ashrv8hi (__a, __b);
26607 }
26608
26609 __extension__ extern __inline int32x4_t
26610 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26611 vshrq_n_s32 (int32x4_t __a, const int __b)
26612 {
26613 return (int32x4_t) __builtin_aarch64_ashrv4si (__a, __b);
26614 }
26615
26616 __extension__ extern __inline int64x2_t
26617 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26618 vshrq_n_s64 (int64x2_t __a, const int __b)
26619 {
26620 return (int64x2_t) __builtin_aarch64_ashrv2di (__a, __b);
26621 }
26622
26623 __extension__ extern __inline uint8x16_t
26624 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26625 vshrq_n_u8 (uint8x16_t __a, const int __b)
26626 {
26627 return (uint8x16_t) __builtin_aarch64_lshrv16qi ((int8x16_t) __a, __b);
26628 }
26629
26630 __extension__ extern __inline uint16x8_t
26631 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26632 vshrq_n_u16 (uint16x8_t __a, const int __b)
26633 {
26634 return (uint16x8_t) __builtin_aarch64_lshrv8hi ((int16x8_t) __a, __b);
26635 }
26636
26637 __extension__ extern __inline uint32x4_t
26638 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26639 vshrq_n_u32 (uint32x4_t __a, const int __b)
26640 {
26641 return (uint32x4_t) __builtin_aarch64_lshrv4si ((int32x4_t) __a, __b);
26642 }
26643
26644 __extension__ extern __inline uint64x2_t
26645 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26646 vshrq_n_u64 (uint64x2_t __a, const int __b)
26647 {
26648 return (uint64x2_t) __builtin_aarch64_lshrv2di ((int64x2_t) __a, __b);
26649 }
26650
26651 __extension__ extern __inline int64_t
26652 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26653 vshrd_n_s64 (int64_t __a, const int __b)
26654 {
26655 return __builtin_aarch64_ashr_simddi (__a, __b);
26656 }
26657
26658 __extension__ extern __inline uint64_t
26659 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26660 vshrd_n_u64 (uint64_t __a, const int __b)
26661 {
26662 return __builtin_aarch64_lshr_simddi_uus (__a, __b);
26663 }
26664
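/* Illustrative usage sketch, not part of this header: the vshr*_n forms
   take an immediate in the range 1 .. element width; the signed variants
   shift in copies of the sign bit and the unsigned variants shift in
   zeros.  The helper name __example_halve_signed is hypothetical.  */
static __inline int16x8_t
__example_halve_signed (int16x8_t __x)
{
  /* Arithmetic shift right by 1: a divide by 2 that rounds towards
     minus infinity for negative lanes.  */
  return vshrq_n_s16 (__x, 1);
}
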
26665 /* vsli */
26666
26667 __extension__ extern __inline int8x8_t
26668 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26669 vsli_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
26670 {
26671 return (int8x8_t) __builtin_aarch64_ssli_nv8qi (__a, __b, __c);
26672 }
26673
26674 __extension__ extern __inline int16x4_t
26675 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26676 vsli_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
26677 {
26678 return (int16x4_t) __builtin_aarch64_ssli_nv4hi (__a, __b, __c);
26679 }
26680
26681 __extension__ extern __inline int32x2_t
26682 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26683 vsli_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
26684 {
26685 return (int32x2_t) __builtin_aarch64_ssli_nv2si (__a, __b, __c);
26686 }
26687
26688 __extension__ extern __inline int64x1_t
26689 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26690 vsli_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
26691 {
26692 return (int64x1_t) {__builtin_aarch64_ssli_ndi (__a[0], __b[0], __c)};
26693 }
26694
26695 __extension__ extern __inline uint8x8_t
26696 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26697 vsli_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
26698 {
26699 return __builtin_aarch64_usli_nv8qi_uuus (__a, __b, __c);
26700 }
26701
26702 __extension__ extern __inline uint16x4_t
26703 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26704 vsli_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
26705 {
26706 return __builtin_aarch64_usli_nv4hi_uuus (__a, __b, __c);
26707 }
26708
26709 __extension__ extern __inline uint32x2_t
26710 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26711 vsli_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
26712 {
26713 return __builtin_aarch64_usli_nv2si_uuus (__a, __b, __c);
26714 }
26715
26716 __extension__ extern __inline uint64x1_t
26717 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26718 vsli_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
26719 {
26720 return (uint64x1_t) {__builtin_aarch64_usli_ndi_uuus (__a[0], __b[0], __c)};
26721 }
26722
26723 __extension__ extern __inline poly64x1_t
26724 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26725 vsli_n_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
26726 {
26727 return (poly64x1_t) {__builtin_aarch64_ssli_ndi_ppps (__a[0], __b[0], __c)};
26728 }
26729
26730 __extension__ extern __inline int8x16_t
26731 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26732 vsliq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
26733 {
26734 return (int8x16_t) __builtin_aarch64_ssli_nv16qi (__a, __b, __c);
26735 }
26736
26737 __extension__ extern __inline int16x8_t
26738 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26739 vsliq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
26740 {
26741 return (int16x8_t) __builtin_aarch64_ssli_nv8hi (__a, __b, __c);
26742 }
26743
26744 __extension__ extern __inline int32x4_t
26745 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26746 vsliq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
26747 {
26748 return (int32x4_t) __builtin_aarch64_ssli_nv4si (__a, __b, __c);
26749 }
26750
26751 __extension__ extern __inline int64x2_t
26752 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26753 vsliq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
26754 {
26755 return (int64x2_t) __builtin_aarch64_ssli_nv2di (__a, __b, __c);
26756 }
26757
26758 __extension__ extern __inline uint8x16_t
26759 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26760 vsliq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
26761 {
26762 return __builtin_aarch64_usli_nv16qi_uuus (__a, __b, __c);
26763 }
26764
26765 __extension__ extern __inline uint16x8_t
26766 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26767 vsliq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
26768 {
26769 return __builtin_aarch64_usli_nv8hi_uuus (__a, __b, __c);
26770 }
26771
26772 __extension__ extern __inline uint32x4_t
26773 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26774 vsliq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
26775 {
26776 return __builtin_aarch64_usli_nv4si_uuus (__a, __b, __c);
26777 }
26778
26779 __extension__ extern __inline uint64x2_t
26780 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26781 vsliq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
26782 {
26783 return __builtin_aarch64_usli_nv2di_uuus (__a, __b, __c);
26784 }
26785
26786 __extension__ extern __inline poly64x2_t
26787 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26788 vsliq_n_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
26789 {
26790 return __builtin_aarch64_ssli_nv2di_ppps (__a, __b, __c);
26791 }
26792
26793 __extension__ extern __inline int64_t
26794 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26795 vslid_n_s64 (int64_t __a, int64_t __b, const int __c)
26796 {
26797 return __builtin_aarch64_ssli_ndi (__a, __b, __c);
26798 }
26799
26800 __extension__ extern __inline uint64_t
26801 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26802 vslid_n_u64 (uint64_t __a, uint64_t __b, const int __c)
26803 {
26804 return __builtin_aarch64_usli_ndi_uuus (__a, __b, __c);
26805 }
26806
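/* Illustrative usage sketch, not part of this header: vsli_n shifts the
   second operand left by the immediate and inserts it into the first,
   preserving the low bits of the first operand below the shift amount.
   The helper name __example_pack_nibbles is hypothetical.  */
static __inline uint8x8_t
__example_pack_nibbles (uint8x8_t __lo, uint8x8_t __hi)
{
  /* Result per byte: (__hi << 4) | (__lo & 0x0f), i.e. the low nibble of
     __hi lands in the upper nibble while the low nibble of __lo is
     kept.  */
  return vsli_n_u8 (__lo, __hi, 4);
}
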
26807 /* vsqadd */
26808
26809 __extension__ extern __inline uint8x8_t
26810 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26811 vsqadd_u8 (uint8x8_t __a, int8x8_t __b)
26812 {
26813 return __builtin_aarch64_usqaddv8qi_uus (__a, __b);
26814 }
26815
26816 __extension__ extern __inline uint16x4_t
26817 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26818 vsqadd_u16 (uint16x4_t __a, int16x4_t __b)
26819 {
26820 return __builtin_aarch64_usqaddv4hi_uus (__a, __b);
26821 }
26822
26823 __extension__ extern __inline uint32x2_t
26824 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26825 vsqadd_u32 (uint32x2_t __a, int32x2_t __b)
26826 {
26827 return __builtin_aarch64_usqaddv2si_uus (__a, __b);
26828 }
26829
26830 __extension__ extern __inline uint64x1_t
26831 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26832 vsqadd_u64 (uint64x1_t __a, int64x1_t __b)
26833 {
26834 return (uint64x1_t) {__builtin_aarch64_usqadddi_uus (__a[0], __b[0])};
26835 }
26836
26837 __extension__ extern __inline uint8x16_t
26838 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26839 vsqaddq_u8 (uint8x16_t __a, int8x16_t __b)
26840 {
26841 return __builtin_aarch64_usqaddv16qi_uus (__a, __b);
26842 }
26843
26844 __extension__ extern __inline uint16x8_t
26845 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26846 vsqaddq_u16 (uint16x8_t __a, int16x8_t __b)
26847 {
26848 return __builtin_aarch64_usqaddv8hi_uus (__a, __b);
26849 }
26850
26851 __extension__ extern __inline uint32x4_t
26852 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26853 vsqaddq_u32 (uint32x4_t __a, int32x4_t __b)
26854 {
26855 return __builtin_aarch64_usqaddv4si_uus (__a, __b);
26856 }
26857
26858 __extension__ extern __inline uint64x2_t
26859 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26860 vsqaddq_u64 (uint64x2_t __a, int64x2_t __b)
26861 {
26862 return __builtin_aarch64_usqaddv2di_uus (__a, __b);
26863 }
26864
26865 __extension__ extern __inline uint8_t
26866 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26867 vsqaddb_u8 (uint8_t __a, int8_t __b)
26868 {
26869 return __builtin_aarch64_usqaddqi_uus (__a, __b);
26870 }
26871
26872 __extension__ extern __inline uint16_t
26873 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26874 vsqaddh_u16 (uint16_t __a, int16_t __b)
26875 {
26876 return __builtin_aarch64_usqaddhi_uus (__a, __b);
26877 }
26878
26879 __extension__ extern __inline uint32_t
26880 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26881 vsqadds_u32 (uint32_t __a, int32_t __b)
26882 {
26883 return __builtin_aarch64_usqaddsi_uus (__a, __b);
26884 }
26885
26886 __extension__ extern __inline uint64_t
26887 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26888 vsqaddd_u64 (uint64_t __a, int64_t __b)
26889 {
26890 return __builtin_aarch64_usqadddi_uus (__a, __b);
26891 }
26892
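/* Illustrative usage sketch, not part of this header: vsqadd adds a
   signed adjustment to an unsigned accumulator with saturation, so each
   lane is clamped to [0, 255] instead of wrapping.  The helper name
   __example_adjust_brightness is hypothetical.  */
static __inline uint8x8_t
__example_adjust_brightness (uint8x8_t __pixels, int8x8_t __delta)
{
  return vsqadd_u8 (__pixels, __delta);
}
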
26893 /* vsqrt */
26894 __extension__ extern __inline float32x2_t
26895 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26896 vsqrt_f32 (float32x2_t __a)
26897 {
26898 return __builtin_aarch64_sqrtv2sf (__a);
26899 }
26900
26901 __extension__ extern __inline float32x4_t
26902 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26903 vsqrtq_f32 (float32x4_t __a)
26904 {
26905 return __builtin_aarch64_sqrtv4sf (__a);
26906 }
26907
26908 __extension__ extern __inline float64x1_t
26909 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26910 vsqrt_f64 (float64x1_t __a)
26911 {
26912 return (float64x1_t) { __builtin_aarch64_sqrtdf (__a[0]) };
26913 }
26914
26915 __extension__ extern __inline float64x2_t
26916 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26917 vsqrtq_f64 (float64x2_t __a)
26918 {
26919 return __builtin_aarch64_sqrtv2df (__a);
26920 }
26921
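/* Illustrative usage sketch, not part of this header: per-lane square
   root, shown computing the magnitude of two (x, y) pairs held in
   separate vectors.  vmul_f32 and vadd_f32 are defined earlier in this
   file; the helper name __example_magnitude is hypothetical.  */
static __inline float32x2_t
__example_magnitude (float32x2_t __x, float32x2_t __y)
{
  return vsqrt_f32 (vadd_f32 (vmul_f32 (__x, __x), vmul_f32 (__y, __y)));
}
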
26922 /* vsra */
26923
26924 __extension__ extern __inline int8x8_t
26925 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26926 vsra_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
26927 {
26928 return (int8x8_t) __builtin_aarch64_ssra_nv8qi (__a, __b, __c);
26929 }
26930
26931 __extension__ extern __inline int16x4_t
26932 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26933 vsra_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
26934 {
26935 return (int16x4_t) __builtin_aarch64_ssra_nv4hi (__a, __b, __c);
26936 }
26937
26938 __extension__ extern __inline int32x2_t
26939 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26940 vsra_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
26941 {
26942 return (int32x2_t) __builtin_aarch64_ssra_nv2si (__a, __b, __c);
26943 }
26944
26945 __extension__ extern __inline int64x1_t
26946 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26947 vsra_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
26948 {
26949 return (int64x1_t) {__builtin_aarch64_ssra_ndi (__a[0], __b[0], __c)};
26950 }
26951
26952 __extension__ extern __inline uint8x8_t
26953 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26954 vsra_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
26955 {
26956 return __builtin_aarch64_usra_nv8qi_uuus (__a, __b, __c);
26957 }
26958
26959 __extension__ extern __inline uint16x4_t
26960 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26961 vsra_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
26962 {
26963 return __builtin_aarch64_usra_nv4hi_uuus (__a, __b, __c);
26964 }
26965
26966 __extension__ extern __inline uint32x2_t
26967 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26968 vsra_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
26969 {
26970 return __builtin_aarch64_usra_nv2si_uuus (__a, __b, __c);
26971 }
26972
26973 __extension__ extern __inline uint64x1_t
26974 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26975 vsra_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
26976 {
26977 return (uint64x1_t) {__builtin_aarch64_usra_ndi_uuus (__a[0], __b[0], __c)};
26978 }
26979
26980 __extension__ extern __inline int8x16_t
26981 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26982 vsraq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
26983 {
26984 return (int8x16_t) __builtin_aarch64_ssra_nv16qi (__a, __b, __c);
26985 }
26986
26987 __extension__ extern __inline int16x8_t
26988 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26989 vsraq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
26990 {
26991 return (int16x8_t) __builtin_aarch64_ssra_nv8hi (__a, __b, __c);
26992 }
26993
26994 __extension__ extern __inline int32x4_t
26995 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
26996 vsraq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
26997 {
26998 return (int32x4_t) __builtin_aarch64_ssra_nv4si (__a, __b, __c);
26999 }
27000
27001 __extension__ extern __inline int64x2_t
27002 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27003 vsraq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
27004 {
27005 return (int64x2_t) __builtin_aarch64_ssra_nv2di (__a, __b, __c);
27006 }
27007
27008 __extension__ extern __inline uint8x16_t
27009 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27010 vsraq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
27011 {
27012 return __builtin_aarch64_usra_nv16qi_uuus (__a, __b, __c);
27013 }
27014
27015 __extension__ extern __inline uint16x8_t
27016 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27017 vsraq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
27018 {
27019 return __builtin_aarch64_usra_nv8hi_uuus (__a, __b, __c);
27020 }
27021
27022 __extension__ extern __inline uint32x4_t
27023 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27024 vsraq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
27025 {
27026 return __builtin_aarch64_usra_nv4si_uuus (__a, __b, __c);
27027 }
27028
27029 __extension__ extern __inline uint64x2_t
27030 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27031 vsraq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
27032 {
27033 return __builtin_aarch64_usra_nv2di_uuus (__a, __b, __c);
27034 }
27035
27036 __extension__ extern __inline int64_t
27037 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27038 vsrad_n_s64 (int64_t __a, int64_t __b, const int __c)
27039 {
27040 return __builtin_aarch64_ssra_ndi (__a, __b, __c);
27041 }
27042
27043 __extension__ extern __inline uint64_t
27044 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27045 vsrad_n_u64 (uint64_t __a, uint64_t __b, const int __c)
27046 {
27047 return __builtin_aarch64_usra_ndi_uuus (__a, __b, __c);
27048 }
27049
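/* Illustrative usage sketch, not part of this header: vsra_n shifts the
   second operand right by the immediate and accumulates the result into
   the first operand.  The helper name __example_accumulate_scaled is
   hypothetical.  */
static __inline uint16x8_t
__example_accumulate_scaled (uint16x8_t __acc, uint16x8_t __term)
{
  /* Per lane: __acc += __term >> 2.  */
  return vsraq_n_u16 (__acc, __term, 2);
}
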
27050 /* vsri */
27051
27052 __extension__ extern __inline int8x8_t
27053 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27054 vsri_n_s8 (int8x8_t __a, int8x8_t __b, const int __c)
27055 {
27056 return (int8x8_t) __builtin_aarch64_ssri_nv8qi (__a, __b, __c);
27057 }
27058
27059 __extension__ extern __inline int16x4_t
27060 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27061 vsri_n_s16 (int16x4_t __a, int16x4_t __b, const int __c)
27062 {
27063 return (int16x4_t) __builtin_aarch64_ssri_nv4hi (__a, __b, __c);
27064 }
27065
27066 __extension__ extern __inline int32x2_t
27067 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27068 vsri_n_s32 (int32x2_t __a, int32x2_t __b, const int __c)
27069 {
27070 return (int32x2_t) __builtin_aarch64_ssri_nv2si (__a, __b, __c);
27071 }
27072
27073 __extension__ extern __inline int64x1_t
27074 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27075 vsri_n_s64 (int64x1_t __a, int64x1_t __b, const int __c)
27076 {
27077 return (int64x1_t) {__builtin_aarch64_ssri_ndi (__a[0], __b[0], __c)};
27078 }
27079
27080 __extension__ extern __inline uint8x8_t
27081 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27082 vsri_n_u8 (uint8x8_t __a, uint8x8_t __b, const int __c)
27083 {
27084 return __builtin_aarch64_usri_nv8qi_uuus (__a, __b, __c);
27085 }
27086
27087 __extension__ extern __inline uint16x4_t
27088 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27089 vsri_n_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
27090 {
27091 return __builtin_aarch64_usri_nv4hi_uuus (__a, __b, __c);
27092 }
27093
27094 __extension__ extern __inline uint32x2_t
27095 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27096 vsri_n_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
27097 {
27098 return __builtin_aarch64_usri_nv2si_uuus (__a, __b, __c);
27099 }
27100
27101 __extension__ extern __inline uint64x1_t
27102 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27103 vsri_n_u64 (uint64x1_t __a, uint64x1_t __b, const int __c)
27104 {
27105 return (uint64x1_t) {__builtin_aarch64_usri_ndi_uuus (__a[0], __b[0], __c)};
27106 }
27107
27108 __extension__ extern __inline int8x16_t
27109 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27110 vsriq_n_s8 (int8x16_t __a, int8x16_t __b, const int __c)
27111 {
27112 return (int8x16_t) __builtin_aarch64_ssri_nv16qi (__a, __b, __c);
27113 }
27114
27115 __extension__ extern __inline int16x8_t
27116 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27117 vsriq_n_s16 (int16x8_t __a, int16x8_t __b, const int __c)
27118 {
27119 return (int16x8_t) __builtin_aarch64_ssri_nv8hi (__a, __b, __c);
27120 }
27121
27122 __extension__ extern __inline int32x4_t
27123 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27124 vsriq_n_s32 (int32x4_t __a, int32x4_t __b, const int __c)
27125 {
27126 return (int32x4_t) __builtin_aarch64_ssri_nv4si (__a, __b, __c);
27127 }
27128
27129 __extension__ extern __inline int64x2_t
27130 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27131 vsriq_n_s64 (int64x2_t __a, int64x2_t __b, const int __c)
27132 {
27133 return (int64x2_t) __builtin_aarch64_ssri_nv2di (__a, __b, __c);
27134 }
27135
27136 __extension__ extern __inline uint8x16_t
27137 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27138 vsriq_n_u8 (uint8x16_t __a, uint8x16_t __b, const int __c)
27139 {
27140 return __builtin_aarch64_usri_nv16qi_uuus (__a, __b, __c);
27141 }
27142
27143 __extension__ extern __inline uint16x8_t
27144 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27145 vsriq_n_u16 (uint16x8_t __a, uint16x8_t __b, const int __c)
27146 {
27147 return __builtin_aarch64_usri_nv8hi_uuus (__a, __b, __c);
27148 }
27149
27150 __extension__ extern __inline uint32x4_t
27151 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27152 vsriq_n_u32 (uint32x4_t __a, uint32x4_t __b, const int __c)
27153 {
27154 return __builtin_aarch64_usri_nv4si_uuus (__a, __b, __c);
27155 }
27156
27157 __extension__ extern __inline uint64x2_t
27158 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27159 vsriq_n_u64 (uint64x2_t __a, uint64x2_t __b, const int __c)
27160 {
27161 return __builtin_aarch64_usri_nv2di_uuus (__a, __b, __c);
27162 }
27163
27164 __extension__ extern __inline int64_t
27165 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27166 vsrid_n_s64 (int64_t __a, int64_t __b, const int __c)
27167 {
27168 return __builtin_aarch64_ssri_ndi (__a, __b, __c);
27169 }
27170
27171 __extension__ extern __inline uint64_t
27172 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27173 vsrid_n_u64 (uint64_t __a, uint64_t __b, const int __c)
27174 {
27175 return __builtin_aarch64_usri_ndi_uuus (__a, __b, __c);
27176 }
27177
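/* Illustrative usage sketch, not part of this header: vsri_n shifts the
   second operand right by the immediate and inserts it into the first,
   leaving the top bits of the first operand intact.  The helper name
   __example_merge_high_nibbles is hypothetical.  */
static __inline uint8x8_t
__example_merge_high_nibbles (uint8x8_t __a, uint8x8_t __b)
{
  /* Result per byte: (__a & 0xf0) | (__b >> 4), i.e. the high nibble of
     __a is kept and the high nibble of __b fills the low nibble.  */
  return vsri_n_u8 (__a, __b, 4);
}
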
27178 /* vst1 */
27179
27180 __extension__ extern __inline void
27181 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27182 vst1_f16 (float16_t *__a, float16x4_t __b)
27183 {
27184 __builtin_aarch64_st1v4hf (__a, __b);
27185 }
27186
27187 __extension__ extern __inline void
27188 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27189 vst1_f32 (float32_t *__a, float32x2_t __b)
27190 {
27191 __builtin_aarch64_st1v2sf ((__builtin_aarch64_simd_sf *) __a, __b);
27192 }
27193
27194 __extension__ extern __inline void
27195 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27196 vst1_f64 (float64_t *__a, float64x1_t __b)
27197 {
27198 *__a = __b[0];
27199 }
27200
27201 __extension__ extern __inline void
27202 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27203 vst1_p8 (poly8_t *__a, poly8x8_t __b)
27204 {
27205 __builtin_aarch64_st1v8qi ((__builtin_aarch64_simd_qi *) __a,
27206 (int8x8_t) __b);
27207 }
27208
27209 __extension__ extern __inline void
27210 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27211 vst1_p16 (poly16_t *__a, poly16x4_t __b)
27212 {
27213 __builtin_aarch64_st1v4hi ((__builtin_aarch64_simd_hi *) __a,
27214 (int16x4_t) __b);
27215 }
27216
27217 __extension__ extern __inline void
27218 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27219 vst1_p64 (poly64_t *__a, poly64x1_t __b)
27220 {
27221 *__a = __b[0];
27222 }
27223
27224 __extension__ extern __inline void
27225 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27226 vst1_s8 (int8_t *__a, int8x8_t __b)
27227 {
27228 __builtin_aarch64_st1v8qi ((__builtin_aarch64_simd_qi *) __a, __b);
27229 }
27230
27231 __extension__ extern __inline void
27232 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27233 vst1_s16 (int16_t *__a, int16x4_t __b)
27234 {
27235 __builtin_aarch64_st1v4hi ((__builtin_aarch64_simd_hi *) __a, __b);
27236 }
27237
27238 __extension__ extern __inline void
27239 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27240 vst1_s32 (int32_t *__a, int32x2_t __b)
27241 {
27242 __builtin_aarch64_st1v2si ((__builtin_aarch64_simd_si *) __a, __b);
27243 }
27244
27245 __extension__ extern __inline void
27246 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27247 vst1_s64 (int64_t *__a, int64x1_t __b)
27248 {
27249 *__a = __b[0];
27250 }
27251
27252 __extension__ extern __inline void
27253 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27254 vst1_u8 (uint8_t *__a, uint8x8_t __b)
27255 {
27256 __builtin_aarch64_st1v8qi ((__builtin_aarch64_simd_qi *) __a,
27257 (int8x8_t) __b);
27258 }
27259
27260 __extension__ extern __inline void
27261 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27262 vst1_u16 (uint16_t *__a, uint16x4_t __b)
27263 {
27264 __builtin_aarch64_st1v4hi ((__builtin_aarch64_simd_hi *) __a,
27265 (int16x4_t) __b);
27266 }
27267
27268 __extension__ extern __inline void
27269 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27270 vst1_u32 (uint32_t *__a, uint32x2_t __b)
27271 {
27272 __builtin_aarch64_st1v2si ((__builtin_aarch64_simd_si *) __a,
27273 (int32x2_t) __b);
27274 }
27275
27276 __extension__ extern __inline void
27277 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27278 vst1_u64 (uint64_t *__a, uint64x1_t __b)
27279 {
27280 *__a = __b[0];
27281 }
27282
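/* Illustrative usage sketch, not part of this header: vst1 writes a whole
   64-bit vector to memory, and the pointer only needs the alignment of
   the element type.  vld1_u8, vdup_n_u8 and vadd_u8 are defined earlier
   in this file; the helper name __example_add_bias_64 is hypothetical.  */
static __inline void
__example_add_bias_64 (uint8_t *__dst, const uint8_t *__src, uint8_t __bias)
{
  uint8x8_t __v = vadd_u8 (vld1_u8 (__src), vdup_n_u8 (__bias));
  vst1_u8 (__dst, __v);
}
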
27283 /* vst1q */
27284
27285 __extension__ extern __inline void
27286 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27287 vst1q_f16 (float16_t *__a, float16x8_t __b)
27288 {
27289 __builtin_aarch64_st1v8hf (__a, __b);
27290 }
27291
27292 __extension__ extern __inline void
27293 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27294 vst1q_f32 (float32_t *__a, float32x4_t __b)
27295 {
27296 __builtin_aarch64_st1v4sf ((__builtin_aarch64_simd_sf *) __a, __b);
27297 }
27298
27299 __extension__ extern __inline void
27300 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27301 vst1q_f64 (float64_t *__a, float64x2_t __b)
27302 {
27303 __builtin_aarch64_st1v2df ((__builtin_aarch64_simd_df *) __a, __b);
27304 }
27305
27306 __extension__ extern __inline void
27307 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27308 vst1q_p8 (poly8_t *__a, poly8x16_t __b)
27309 {
27310 __builtin_aarch64_st1v16qi ((__builtin_aarch64_simd_qi *) __a,
27311 (int8x16_t) __b);
27312 }
27313
27314 __extension__ extern __inline void
27315 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27316 vst1q_p16 (poly16_t *__a, poly16x8_t __b)
27317 {
27318 __builtin_aarch64_st1v8hi ((__builtin_aarch64_simd_hi *) __a,
27319 (int16x8_t) __b);
27320 }
27321
27322 __extension__ extern __inline void
27323 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27324 vst1q_p64 (poly64_t *__a, poly64x2_t __b)
27325 {
27326 __builtin_aarch64_st1v2di_sp ((__builtin_aarch64_simd_di *) __a,
27327 (poly64x2_t) __b);
27328 }
27329
27330 __extension__ extern __inline void
27331 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27332 vst1q_s8 (int8_t *__a, int8x16_t __b)
27333 {
27334 __builtin_aarch64_st1v16qi ((__builtin_aarch64_simd_qi *) __a, __b);
27335 }
27336
27337 __extension__ extern __inline void
27338 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27339 vst1q_s16 (int16_t *__a, int16x8_t __b)
27340 {
27341 __builtin_aarch64_st1v8hi ((__builtin_aarch64_simd_hi *) __a, __b);
27342 }
27343
27344 __extension__ extern __inline void
27345 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27346 vst1q_s32 (int32_t *__a, int32x4_t __b)
27347 {
27348 __builtin_aarch64_st1v4si ((__builtin_aarch64_simd_si *) __a, __b);
27349 }
27350
27351 __extension__ extern __inline void
27352 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27353 vst1q_s64 (int64_t *__a, int64x2_t __b)
27354 {
27355 __builtin_aarch64_st1v2di ((__builtin_aarch64_simd_di *) __a, __b);
27356 }
27357
27358 __extension__ extern __inline void
27359 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27360 vst1q_u8 (uint8_t *__a, uint8x16_t __b)
27361 {
27362 __builtin_aarch64_st1v16qi ((__builtin_aarch64_simd_qi *) __a,
27363 (int8x16_t) __b);
27364 }
27365
27366 __extension__ extern __inline void
27367 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27368 vst1q_u16 (uint16_t *__a, uint16x8_t __b)
27369 {
27370 __builtin_aarch64_st1v8hi ((__builtin_aarch64_simd_hi *) __a,
27371 (int16x8_t) __b);
27372 }
27373
27374 __extension__ extern __inline void
27375 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27376 vst1q_u32 (uint32_t *__a, uint32x4_t __b)
27377 {
27378 __builtin_aarch64_st1v4si ((__builtin_aarch64_simd_si *) __a,
27379 (int32x4_t) __b);
27380 }
27381
27382 __extension__ extern __inline void
27383 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27384 vst1q_u64 (uint64_t *__a, uint64x2_t __b)
27385 {
27386 __builtin_aarch64_st1v2di ((__builtin_aarch64_simd_di *) __a,
27387 (int64x2_t) __b);
27388 }
27389
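/* Illustrative usage sketch, not part of this header: the q-form stores
   write a full 128-bit register.  A simple block fill, assuming __n is a
   multiple of 16 bytes; vdupq_n_u8 is defined earlier in this file and
   the helper name __example_fill_bytes is hypothetical.  */
static __inline void
__example_fill_bytes (uint8_t *__dst, uint8_t __value, uint64_t __n)
{
  uint8x16_t __v = vdupq_n_u8 (__value);
  uint64_t __i;
  for (__i = 0; __i < __n; __i += 16)
    vst1q_u8 (__dst + __i, __v);
}
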
27390 /* vst1_lane */
27391
27392 __extension__ extern __inline void
27393 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27394 vst1_lane_f16 (float16_t *__a, float16x4_t __b, const int __lane)
27395 {
27396 *__a = __aarch64_vget_lane_any (__b, __lane);
27397 }
27398
27399 __extension__ extern __inline void
27400 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27401 vst1_lane_f32 (float32_t *__a, float32x2_t __b, const int __lane)
27402 {
27403 *__a = __aarch64_vget_lane_any (__b, __lane);
27404 }
27405
27406 __extension__ extern __inline void
27407 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27408 vst1_lane_f64 (float64_t *__a, float64x1_t __b, const int __lane)
27409 {
27410 *__a = __aarch64_vget_lane_any (__b, __lane);
27411 }
27412
27413 __extension__ extern __inline void
27414 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27415 vst1_lane_p8 (poly8_t *__a, poly8x8_t __b, const int __lane)
27416 {
27417 *__a = __aarch64_vget_lane_any (__b, __lane);
27418 }
27419
27420 __extension__ extern __inline void
27421 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27422 vst1_lane_p16 (poly16_t *__a, poly16x4_t __b, const int __lane)
27423 {
27424 *__a = __aarch64_vget_lane_any (__b, __lane);
27425 }
27426
27427 __extension__ extern __inline void
27428 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27429 vst1_lane_p64 (poly64_t *__a, poly64x1_t __b, const int __lane)
27430 {
27431 *__a = __aarch64_vget_lane_any (__b, __lane);
27432 }
27433
27434 __extension__ extern __inline void
27435 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27436 vst1_lane_s8 (int8_t *__a, int8x8_t __b, const int __lane)
27437 {
27438 *__a = __aarch64_vget_lane_any (__b, __lane);
27439 }
27440
27441 __extension__ extern __inline void
27442 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27443 vst1_lane_s16 (int16_t *__a, int16x4_t __b, const int __lane)
27444 {
27445 *__a = __aarch64_vget_lane_any (__b, __lane);
27446 }
27447
27448 __extension__ extern __inline void
27449 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27450 vst1_lane_s32 (int32_t *__a, int32x2_t __b, const int __lane)
27451 {
27452 *__a = __aarch64_vget_lane_any (__b, __lane);
27453 }
27454
27455 __extension__ extern __inline void
27456 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27457 vst1_lane_s64 (int64_t *__a, int64x1_t __b, const int __lane)
27458 {
27459 *__a = __aarch64_vget_lane_any (__b, __lane);
27460 }
27461
27462 __extension__ extern __inline void
27463 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27464 vst1_lane_u8 (uint8_t *__a, uint8x8_t __b, const int __lane)
27465 {
27466 *__a = __aarch64_vget_lane_any (__b, __lane);
27467 }
27468
27469 __extension__ extern __inline void
27470 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27471 vst1_lane_u16 (uint16_t *__a, uint16x4_t __b, const int __lane)
27472 {
27473 *__a = __aarch64_vget_lane_any (__b, __lane);
27474 }
27475
27476 __extension__ extern __inline void
27477 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27478 vst1_lane_u32 (uint32_t *__a, uint32x2_t __b, const int __lane)
27479 {
27480 *__a = __aarch64_vget_lane_any (__b, __lane);
27481 }
27482
27483 __extension__ extern __inline void
27484 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27485 vst1_lane_u64 (uint64_t *__a, uint64x1_t __b, const int __lane)
27486 {
27487 *__a = __aarch64_vget_lane_any (__b, __lane);
27488 }
27489
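/* Illustrative usage sketch, not part of this header: the lane stores
   write a single element of a vector to memory, and the lane index must
   be a compile-time constant.  Here lane 0 of a float32x2_t is spilled.
   The helper name __example_store_first_lane is hypothetical.  */
static __inline void
__example_store_first_lane (float32_t *__dst, float32x2_t __v)
{
  vst1_lane_f32 (__dst, __v, 0);
}
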
27490 /* vst1q_lane */
27491
27492 __extension__ extern __inline void
27493 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27494 vst1q_lane_f16 (float16_t *__a, float16x8_t __b, const int __lane)
27495 {
27496 *__a = __aarch64_vget_lane_any (__b, __lane);
27497 }
27498
27499 __extension__ extern __inline void
27500 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27501 vst1q_lane_f32 (float32_t *__a, float32x4_t __b, const int __lane)
27502 {
27503 *__a = __aarch64_vget_lane_any (__b, __lane);
27504 }
27505
27506 __extension__ extern __inline void
27507 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27508 vst1q_lane_f64 (float64_t *__a, float64x2_t __b, const int __lane)
27509 {
27510 *__a = __aarch64_vget_lane_any (__b, __lane);
27511 }
27512
27513 __extension__ extern __inline void
27514 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27515 vst1q_lane_p8 (poly8_t *__a, poly8x16_t __b, const int __lane)
27516 {
27517 *__a = __aarch64_vget_lane_any (__b, __lane);
27518 }
27519
27520 __extension__ extern __inline void
27521 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27522 vst1q_lane_p16 (poly16_t *__a, poly16x8_t __b, const int __lane)
27523 {
27524 *__a = __aarch64_vget_lane_any (__b, __lane);
27525 }
27526
27527 __extension__ extern __inline void
27528 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27529 vst1q_lane_p64 (poly64_t *__a, poly64x2_t __b, const int __lane)
27530 {
27531 *__a = __aarch64_vget_lane_any (__b, __lane);
27532 }
27533
27534 __extension__ extern __inline void
27535 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27536 vst1q_lane_s8 (int8_t *__a, int8x16_t __b, const int __lane)
27537 {
27538 *__a = __aarch64_vget_lane_any (__b, __lane);
27539 }
27540
27541 __extension__ extern __inline void
27542 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27543 vst1q_lane_s16 (int16_t *__a, int16x8_t __b, const int __lane)
27544 {
27545 *__a = __aarch64_vget_lane_any (__b, __lane);
27546 }
27547
27548 __extension__ extern __inline void
27549 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27550 vst1q_lane_s32 (int32_t *__a, int32x4_t __b, const int __lane)
27551 {
27552 *__a = __aarch64_vget_lane_any (__b, __lane);
27553 }
27554
27555 __extension__ extern __inline void
27556 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27557 vst1q_lane_s64 (int64_t *__a, int64x2_t __b, const int __lane)
27558 {
27559 *__a = __aarch64_vget_lane_any (__b, __lane);
27560 }
27561
27562 __extension__ extern __inline void
27563 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27564 vst1q_lane_u8 (uint8_t *__a, uint8x16_t __b, const int __lane)
27565 {
27566 *__a = __aarch64_vget_lane_any (__b, __lane);
27567 }
27568
27569 __extension__ extern __inline void
27570 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27571 vst1q_lane_u16 (uint16_t *__a, uint16x8_t __b, const int __lane)
27572 {
27573 *__a = __aarch64_vget_lane_any (__b, __lane);
27574 }
27575
27576 __extension__ extern __inline void
27577 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27578 vst1q_lane_u32 (uint32_t *__a, uint32x4_t __b, const int __lane)
27579 {
27580 *__a = __aarch64_vget_lane_any (__b, __lane);
27581 }
27582
27583 __extension__ extern __inline void
27584 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27585 vst1q_lane_u64 (uint64_t *__a, uint64x2_t __b, const int __lane)
27586 {
27587 *__a = __aarch64_vget_lane_any (__b, __lane);
27588 }
27589
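/* Illustrative usage sketch, not part of this header: the q-form lane
   stores pick one element out of a 128-bit register, here the upper
   64-bit half of a uint64x2_t.  The helper name
   __example_store_upper_half is hypothetical.  */
static __inline void
__example_store_upper_half (uint64_t *__dst, uint64x2_t __v)
{
  vst1q_lane_u64 (__dst, __v, 1);
}
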
27590 /* vst1x2 */
27591
27592 __extension__ extern __inline void
27593 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27594 vst1_s64_x2 (int64_t * __a, int64x1x2_t __val)
27595 {
27596 __builtin_aarch64_simd_oi __o;
27597 int64x2x2_t __temp;
27598 __temp.val[0]
27599 = vcombine_s64 (__val.val[0], vcreate_s64 (__AARCH64_INT64_C (0)));
27600 __temp.val[1]
27601 = vcombine_s64 (__val.val[1], vcreate_s64 (__AARCH64_INT64_C (0)));
27602 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[0], 0);
27603 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[1], 1);
27604 __builtin_aarch64_st1x2di ((__builtin_aarch64_simd_di *) __a, __o);
27605 }
27606
27607 __extension__ extern __inline void
27608 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27609 vst1_u64_x2 (uint64_t * __a, uint64x1x2_t __val)
27610 {
27611 __builtin_aarch64_simd_oi __o;
27612 uint64x2x2_t __temp;
27613 __temp.val[0]
27614 = vcombine_u64 (__val.val[0], vcreate_u64 (__AARCH64_UINT64_C (0)));
27615 __temp.val[1]
27616 = vcombine_u64 (__val.val[1], vcreate_u64 (__AARCH64_UINT64_C (0)));
27617 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[0], 0);
27618 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[1], 1);
27619 __builtin_aarch64_st1x2di ((__builtin_aarch64_simd_di *) __a, __o);
27620 }
27621
27622 __extension__ extern __inline void
27623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27624 vst1_f64_x2 (float64_t * __a, float64x1x2_t __val)
27625 {
27626 __builtin_aarch64_simd_oi __o;
27627 float64x2x2_t __temp;
27628 __temp.val[0]
27629 = vcombine_f64 (__val.val[0], vcreate_f64 (__AARCH64_UINT64_C (0)));
27630 __temp.val[1]
27631 = vcombine_f64 (__val.val[1], vcreate_f64 (__AARCH64_UINT64_C (0)));
27632 __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) __temp.val[0], 0);
27633 __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) __temp.val[1], 1);
27634 __builtin_aarch64_st1x2df ((__builtin_aarch64_simd_df *) __a, __o);
27635 }
27636
27637 __extension__ extern __inline void
27638 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27639 vst1_s8_x2 (int8_t * __a, int8x8x2_t __val)
27640 {
27641 __builtin_aarch64_simd_oi __o;
27642 int8x16x2_t __temp;
27643 __temp.val[0]
27644 = vcombine_s8 (__val.val[0], vcreate_s8 (__AARCH64_INT64_C (0)));
27645 __temp.val[1]
27646 = vcombine_s8 (__val.val[1], vcreate_s8 (__AARCH64_INT64_C (0)));
27647 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[0], 0);
27648 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[1], 1);
27649 __builtin_aarch64_st1x2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
27650 }
27651
27652 __extension__ extern __inline void
27653 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27654 vst1_p8_x2 (poly8_t * __a, poly8x8x2_t __val)
27655 {
27656 __builtin_aarch64_simd_oi __o;
27657 poly8x16x2_t __temp;
27658 __temp.val[0]
27659 = vcombine_p8 (__val.val[0], vcreate_p8 (__AARCH64_UINT64_C (0)));
27660 __temp.val[1]
27661 = vcombine_p8 (__val.val[1], vcreate_p8 (__AARCH64_UINT64_C (0)));
27662 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[0], 0);
27663 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[1], 1);
27664 __builtin_aarch64_st1x2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
27665 }
27666
27667 __extension__ extern __inline void
27668 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27669 vst1_s16_x2 (int16_t * __a, int16x4x2_t __val)
27670 {
27671 __builtin_aarch64_simd_oi __o;
27672 int16x8x2_t __temp;
27673 __temp.val[0]
27674 = vcombine_s16 (__val.val[0], vcreate_s16 (__AARCH64_INT64_C (0)));
27675 __temp.val[1]
27676 = vcombine_s16 (__val.val[1], vcreate_s16 (__AARCH64_INT64_C (0)));
27677 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[0], 0);
27678 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[1], 1);
27679 __builtin_aarch64_st1x2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
27680 }
27681
27682 __extension__ extern __inline void
27683 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27684 vst1_p16_x2 (poly16_t * __a, poly16x4x2_t __val)
27685 {
27686 __builtin_aarch64_simd_oi __o;
27687 poly16x8x2_t __temp;
27688 __temp.val[0]
27689 = vcombine_p16 (__val.val[0], vcreate_p16 (__AARCH64_UINT64_C (0)));
27690 __temp.val[1]
27691 = vcombine_p16 (__val.val[1], vcreate_p16 (__AARCH64_UINT64_C (0)));
27692 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[0], 0);
27693 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[1], 1);
27694 __builtin_aarch64_st1x2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
27695 }
27696
27697 __extension__ extern __inline void
27698 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27699 vst1_s32_x2 (int32_t * __a, int32x2x2_t __val)
27700 {
27701 __builtin_aarch64_simd_oi __o;
27702 int32x4x2_t __temp;
27703 __temp.val[0]
27704 = vcombine_s32 (__val.val[0], vcreate_s32 (__AARCH64_INT64_C (0)));
27705 __temp.val[1]
27706 = vcombine_s32 (__val.val[1], vcreate_s32 (__AARCH64_INT64_C (0)));
27707 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[0], 0);
27708 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[1], 1);
27709 __builtin_aarch64_st1x2v2si ((__builtin_aarch64_simd_si *) __a, __o);
27710 }
27711
27712 __extension__ extern __inline void
27713 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27714 vst1_u8_x2 (uint8_t * __a, uint8x8x2_t __val)
27715 {
27716 __builtin_aarch64_simd_oi __o;
27717 uint8x16x2_t __temp;
27718 __temp.val[0] = vcombine_u8 (__val.val[0], vcreate_u8 (__AARCH64_UINT64_C (0)));
27719 __temp.val[1] = vcombine_u8 (__val.val[1], vcreate_u8 (__AARCH64_UINT64_C (0)));
27720 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[0], 0);
27721 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[1], 1);
27722 __builtin_aarch64_st1x2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
27723 }
27724
27725 __extension__ extern __inline void
27726 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27727 vst1_u16_x2 (uint16_t * __a, uint16x4x2_t __val)
27728 {
27729 __builtin_aarch64_simd_oi __o;
27730 uint16x8x2_t __temp;
27731 __temp.val[0] = vcombine_u16 (__val.val[0], vcreate_u16 (__AARCH64_UINT64_C (0)));
27732 __temp.val[1] = vcombine_u16 (__val.val[1], vcreate_u16 (__AARCH64_UINT64_C (0)));
27733 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[0], 0);
27734 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[1], 1);
27735 __builtin_aarch64_st1x2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
27736 }
27737
27738 __extension__ extern __inline void
27739 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27740 vst1_u32_x2 (uint32_t * __a, uint32x2x2_t __val)
27741 {
27742 __builtin_aarch64_simd_oi __o;
27743 uint32x4x2_t __temp;
27744 __temp.val[0] = vcombine_u32 (__val.val[0], vcreate_u32 (__AARCH64_UINT64_C (0)));
27745 __temp.val[1] = vcombine_u32 (__val.val[1], vcreate_u32 (__AARCH64_UINT64_C (0)));
27746 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[0], 0);
27747 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[1], 1);
27748 __builtin_aarch64_st1x2v2si ((__builtin_aarch64_simd_si *) __a, __o);
27749 }
27750
27751 __extension__ extern __inline void
27752 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27753 vst1_f16_x2 (float16_t * __a, float16x4x2_t __val)
27754 {
27755 __builtin_aarch64_simd_oi __o;
27756 float16x8x2_t __temp;
27757 __temp.val[0] = vcombine_f16 (__val.val[0], vcreate_f16 (__AARCH64_UINT64_C (0)));
27758 __temp.val[1] = vcombine_f16 (__val.val[1], vcreate_f16 (__AARCH64_UINT64_C (0)));
27759 __o = __builtin_aarch64_set_qregoiv8hf (__o, __temp.val[0], 0);
27760 __o = __builtin_aarch64_set_qregoiv8hf (__o, __temp.val[1], 1);
27761 __builtin_aarch64_st1x2v4hf (__a, __o);
27762 }
27763
27764 __extension__ extern __inline void
27765 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27766 vst1_f32_x2 (float32_t * __a, float32x2x2_t __val)
27767 {
27768 __builtin_aarch64_simd_oi __o;
27769 float32x4x2_t __temp;
27770 __temp.val[0] = vcombine_f32 (__val.val[0], vcreate_f32 (__AARCH64_UINT64_C (0)));
27771 __temp.val[1] = vcombine_f32 (__val.val[1], vcreate_f32 (__AARCH64_UINT64_C (0)));
27772 __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) __temp.val[0], 0);
27773 __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) __temp.val[1], 1);
27774 __builtin_aarch64_st1x2v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
27775 }
27776
27777 __extension__ extern __inline void
27778 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27779 vst1_p64_x2 (poly64_t * __a, poly64x1x2_t __val)
27780 {
27781 __builtin_aarch64_simd_oi __o;
27782 poly64x2x2_t __temp;
27783 __temp.val[0] = vcombine_p64 (__val.val[0], vcreate_p64 (__AARCH64_UINT64_C (0)));
27784 __temp.val[1] = vcombine_p64 (__val.val[1], vcreate_p64 (__AARCH64_UINT64_C (0)));
27785 __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
27786 (poly64x2_t) __temp.val[0], 0);
27787 __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
27788 (poly64x2_t) __temp.val[1], 1);
27789 __builtin_aarch64_st1x2di ((__builtin_aarch64_simd_di *) __a, __o);
27790 }
27791
27792 __extension__ extern __inline void
27793 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27794 vst1q_s8_x2 (int8_t * __a, int8x16x2_t __val)
27795 {
27796 __builtin_aarch64_simd_oi __o;
27797 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __val.val[0], 0);
27798 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __val.val[1], 1);
27799 __builtin_aarch64_st1x2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
27800 }
27801
27802 __extension__ extern __inline void
27803 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27804 vst1q_p8_x2 (poly8_t * __a, poly8x16x2_t __val)
27805 {
27806 __builtin_aarch64_simd_oi __o;
27807 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __val.val[0], 0);
27808 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __val.val[1], 1);
27809 __builtin_aarch64_st1x2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
27810 }
27811
27812 __extension__ extern __inline void
27813 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27814 vst1q_s16_x2 (int16_t * __a, int16x8x2_t __val)
27815 {
27816 __builtin_aarch64_simd_oi __o;
27817 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __val.val[0], 0);
27818 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __val.val[1], 1);
27819 __builtin_aarch64_st1x2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
27820 }
27821
27822 __extension__ extern __inline void
27823 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27824 vst1q_p16_x2 (poly16_t * __a, poly16x8x2_t __val)
27825 {
27826 __builtin_aarch64_simd_oi __o;
27827 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __val.val[0], 0);
27828 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __val.val[1], 1);
27829 __builtin_aarch64_st1x2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
27830 }
27831
27832 __extension__ extern __inline void
27833 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27834 vst1q_s32_x2 (int32_t * __a, int32x4x2_t __val)
27835 {
27836 __builtin_aarch64_simd_oi __o;
27837 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __val.val[0], 0);
27838 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __val.val[1], 1);
27839 __builtin_aarch64_st1x2v4si ((__builtin_aarch64_simd_si *) __a, __o);
27840 }
27841
27842 __extension__ extern __inline void
27843 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27844 vst1q_s64_x2 (int64_t * __a, int64x2x2_t __val)
27845 {
27846 __builtin_aarch64_simd_oi __o;
27847 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __val.val[0], 0);
27848 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __val.val[1], 1);
27849 __builtin_aarch64_st1x2v2di ((__builtin_aarch64_simd_di *) __a, __o);
27850 }
27851
27852 __extension__ extern __inline void
27853 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27854 vst1q_u8_x2 (uint8_t * __a, uint8x16x2_t __val)
27855 {
27856 __builtin_aarch64_simd_oi __o;
27857 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __val.val[0], 0);
27858 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __val.val[1], 1);
27859 __builtin_aarch64_st1x2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
27860 }
27861
27862 __extension__ extern __inline void
27863 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27864 vst1q_u16_x2 (uint16_t * __a, uint16x8x2_t __val)
27865 {
27866 __builtin_aarch64_simd_oi __o;
27867 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __val.val[0], 0);
27868 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __val.val[1], 1);
27869 __builtin_aarch64_st1x2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
27870 }
27871
27872 __extension__ extern __inline void
27873 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27874 vst1q_u32_x2 (uint32_t * __a, uint32x4x2_t __val)
27875 {
27876 __builtin_aarch64_simd_oi __o;
27877 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __val.val[0], 0);
27878 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __val.val[1], 1);
27879 __builtin_aarch64_st1x2v4si ((__builtin_aarch64_simd_si *) __a, __o);
27880 }
27881
27882 __extension__ extern __inline void
27883 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27884 vst1q_u64_x2 (uint64_t * __a, uint64x2x2_t __val)
27885 {
27886 __builtin_aarch64_simd_oi __o;
27887 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __val.val[0], 0);
27888 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __val.val[1], 1);
27889 __builtin_aarch64_st1x2v2di ((__builtin_aarch64_simd_di *) __a, __o);
27890 }
27891
27892 __extension__ extern __inline void
27893 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27894 vst1q_f16_x2 (float16_t * __a, float16x8x2_t __val)
27895 {
27896 __builtin_aarch64_simd_oi __o;
27897 __o = __builtin_aarch64_set_qregoiv8hf (__o, __val.val[0], 0);
27898 __o = __builtin_aarch64_set_qregoiv8hf (__o, __val.val[1], 1);
27899 __builtin_aarch64_st1x2v8hf (__a, __o);
27900 }
27901
27902 __extension__ extern __inline void
27903 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27904 vst1q_f32_x2 (float32_t * __a, float32x4x2_t __val)
27905 {
27906 __builtin_aarch64_simd_oi __o;
27907 __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) __val.val[0], 0);
27908 __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) __val.val[1], 1);
27909 __builtin_aarch64_st1x2v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
27910 }
27911
27912 __extension__ extern __inline void
27913 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27914 vst1q_f64_x2 (float64_t * __a, float64x2x2_t __val)
27915 {
27916 __builtin_aarch64_simd_oi __o;
27917 __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) __val.val[0], 0);
27918 __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) __val.val[1], 1);
27919 __builtin_aarch64_st1x2v2df ((__builtin_aarch64_simd_df *) __a, __o);
27920 }
27921
27922 __extension__ extern __inline void
27923 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27924 vst1q_p64_x2 (poly64_t * __a, poly64x2x2_t __val)
27925 {
27926 __builtin_aarch64_simd_oi __o;
27927 __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
27928 (poly64x2_t) __val.val[0], 0);
27929 __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
27930 (poly64x2_t) __val.val[1], 1);
27931 __builtin_aarch64_st1x2v2di ((__builtin_aarch64_simd_di *) __a, __o);
27932 }
27933
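/* Note on the 64-bit (d-register) _x2 store variants above: because the
   register-tuple builtins operate on q registers, each 64-bit input vector
   is first widened with vcombine/vcreate (zero upper half), the two q
   registers are packed into an OI tuple with __builtin_aarch64_set_qregoi*,
   and the matching st1x2 builtin for the 64-bit element mode performs the
   store.  A minimal, illustrative usage sketch (buf and pair are
   hypothetical names; assumes <arm_neon.h> is included and the code sits
   inside a function):

     float32_t buf[4];
     float32x2x2_t pair = { { vdup_n_f32 (1.0f), vdup_n_f32 (2.0f) } };
     vst1_f32_x2 (buf, pair);   buf becomes { 1, 1, 2, 2 }  */
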
27934 /* vst1x3 */
27935
27936 __extension__ extern __inline void
27937 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27938 vst1_s64_x3 (int64_t * __a, int64x1x3_t __val)
27939 {
27940 __builtin_aarch64_simd_ci __o;
27941 int64x2x3_t __temp;
27942 __temp.val[0] = vcombine_s64 (__val.val[0], vcreate_s64 (__AARCH64_INT64_C (0)));
27943 __temp.val[1] = vcombine_s64 (__val.val[1], vcreate_s64 (__AARCH64_INT64_C (0)));
27944 __temp.val[2] = vcombine_s64 (__val.val[2], vcreate_s64 (__AARCH64_INT64_C (0)));
27945 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[0], 0);
27946 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[1], 1);
27947 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[2], 2);
27948 __builtin_aarch64_st1x3di ((__builtin_aarch64_simd_di *) __a, __o);
27949 }
27950
27951 __extension__ extern __inline void
27952 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27953 vst1_u64_x3 (uint64_t * __a, uint64x1x3_t __val)
27954 {
27955 __builtin_aarch64_simd_ci __o;
27956 uint64x2x3_t __temp;
27957 __temp.val[0] = vcombine_u64 (__val.val[0], vcreate_u64 (__AARCH64_UINT64_C (0)));
27958 __temp.val[1] = vcombine_u64 (__val.val[1], vcreate_u64 (__AARCH64_UINT64_C (0)));
27959 __temp.val[2] = vcombine_u64 (__val.val[2], vcreate_u64 (__AARCH64_UINT64_C (0)));
27960 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[0], 0);
27961 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[1], 1);
27962 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[2], 2);
27963 __builtin_aarch64_st1x3di ((__builtin_aarch64_simd_di *) __a, __o);
27964 }
27965
27966 __extension__ extern __inline void
27967 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27968 vst1_f64_x3 (float64_t * __a, float64x1x3_t __val)
27969 {
27970 __builtin_aarch64_simd_ci __o;
27971 float64x2x3_t __temp;
27972 __temp.val[0] = vcombine_f64 (__val.val[0], vcreate_f64 (__AARCH64_UINT64_C (0)));
27973 __temp.val[1] = vcombine_f64 (__val.val[1], vcreate_f64 (__AARCH64_UINT64_C (0)));
27974 __temp.val[2] = vcombine_f64 (__val.val[2], vcreate_f64 (__AARCH64_UINT64_C (0)));
27975 __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __temp.val[0], 0);
27976 __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __temp.val[1], 1);
27977 __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __temp.val[2], 2);
27978 __builtin_aarch64_st1x3df ((__builtin_aarch64_simd_df *) __a, __o);
27979 }
27980
27981 __extension__ extern __inline void
27982 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27983 vst1_s8_x3 (int8_t * __a, int8x8x3_t __val)
27984 {
27985 __builtin_aarch64_simd_ci __o;
27986 int8x16x3_t __temp;
27987 __temp.val[0] = vcombine_s8 (__val.val[0], vcreate_s8 (__AARCH64_INT64_C (0)));
27988 __temp.val[1] = vcombine_s8 (__val.val[1], vcreate_s8 (__AARCH64_INT64_C (0)));
27989 __temp.val[2] = vcombine_s8 (__val.val[2], vcreate_s8 (__AARCH64_INT64_C (0)));
27990 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[0], 0);
27991 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[1], 1);
27992 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[2], 2);
27993 __builtin_aarch64_st1x3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
27994 }
27995
27996 __extension__ extern __inline void
27997 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
27998 vst1_p8_x3 (poly8_t * __a, poly8x8x3_t __val)
27999 {
28000 __builtin_aarch64_simd_ci __o;
28001 poly8x16x3_t __temp;
28002 __temp.val[0] = vcombine_p8 (__val.val[0], vcreate_p8 (__AARCH64_UINT64_C (0)));
28003 __temp.val[1] = vcombine_p8 (__val.val[1], vcreate_p8 (__AARCH64_UINT64_C (0)));
28004 __temp.val[2] = vcombine_p8 (__val.val[2], vcreate_p8 (__AARCH64_UINT64_C (0)));
28005 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[0], 0);
28006 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[1], 1);
28007 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[2], 2);
28008 __builtin_aarch64_st1x3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
28009 }
28010
28011 __extension__ extern __inline void
28012 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28013 vst1_s16_x3 (int16_t * __a, int16x4x3_t __val)
28014 {
28015 __builtin_aarch64_simd_ci __o;
28016 int16x8x3_t __temp;
28017 __temp.val[0] = vcombine_s16 (__val.val[0], vcreate_s16 (__AARCH64_INT64_C (0)));
28018 __temp.val[1] = vcombine_s16 (__val.val[1], vcreate_s16 (__AARCH64_INT64_C (0)));
28019 __temp.val[2] = vcombine_s16 (__val.val[2], vcreate_s16 (__AARCH64_INT64_C (0)));
28020 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[0], 0);
28021 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[1], 1);
28022 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[2], 2);
28023 __builtin_aarch64_st1x3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
28024 }
28025
28026 __extension__ extern __inline void
28027 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28028 vst1_p16_x3 (poly16_t * __a, poly16x4x3_t __val)
28029 {
28030 __builtin_aarch64_simd_ci __o;
28031 poly16x8x3_t __temp;
28032 __temp.val[0] = vcombine_p16 (__val.val[0], vcreate_p16 (__AARCH64_UINT64_C (0)));
28033 __temp.val[1] = vcombine_p16 (__val.val[1], vcreate_p16 (__AARCH64_UINT64_C (0)));
28034 __temp.val[2] = vcombine_p16 (__val.val[2], vcreate_p16 (__AARCH64_UINT64_C (0)));
28035 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[0], 0);
28036 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[1], 1);
28037 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[2], 2);
28038 __builtin_aarch64_st1x3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
28039 }
28040
28041 __extension__ extern __inline void
28042 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28043 vst1_s32_x3 (int32_t * __a, int32x2x3_t __val)
28044 {
28045 __builtin_aarch64_simd_ci __o;
28046 int32x4x3_t __temp;
28047 __temp.val[0] = vcombine_s32 (__val.val[0], vcreate_s32 (__AARCH64_INT64_C (0)));
28048 __temp.val[1] = vcombine_s32 (__val.val[1], vcreate_s32 (__AARCH64_INT64_C (0)));
28049 __temp.val[2] = vcombine_s32 (__val.val[2], vcreate_s32 (__AARCH64_INT64_C (0)));
28050 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[0], 0);
28051 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[1], 1);
28052 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[2], 2);
28053 __builtin_aarch64_st1x3v2si ((__builtin_aarch64_simd_si *) __a, __o);
28054 }
28055
28056 __extension__ extern __inline void
28057 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28058 vst1_u8_x3 (uint8_t * __a, uint8x8x3_t __val)
28059 {
28060 __builtin_aarch64_simd_ci __o;
28061 uint8x16x3_t __temp;
28062 __temp.val[0] = vcombine_u8 (__val.val[0], vcreate_u8 (__AARCH64_UINT64_C (0)));
28063 __temp.val[1] = vcombine_u8 (__val.val[1], vcreate_u8 (__AARCH64_UINT64_C (0)));
28064 __temp.val[2] = vcombine_u8 (__val.val[2], vcreate_u8 (__AARCH64_UINT64_C (0)));
28065 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[0], 0);
28066 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[1], 1);
28067 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[2], 2);
28068 __builtin_aarch64_st1x3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
28069 }
28070
28071 __extension__ extern __inline void
28072 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28073 vst1_u16_x3 (uint16_t * __a, uint16x4x3_t __val)
28074 {
28075 __builtin_aarch64_simd_ci __o;
28076 uint16x8x3_t __temp;
28077 __temp.val[0] = vcombine_u16 (__val.val[0], vcreate_u16 (__AARCH64_UINT64_C (0)));
28078 __temp.val[1] = vcombine_u16 (__val.val[1], vcreate_u16 (__AARCH64_UINT64_C (0)));
28079 __temp.val[2] = vcombine_u16 (__val.val[2], vcreate_u16 (__AARCH64_UINT64_C (0)));
28080 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[0], 0);
28081 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[1], 1);
28082 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[2], 2);
28083 __builtin_aarch64_st1x3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
28084 }
28085
28086 __extension__ extern __inline void
28087 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28088 vst1_u32_x3 (uint32_t * __a, uint32x2x3_t __val)
28089 {
28090 __builtin_aarch64_simd_ci __o;
28091 uint32x4x3_t __temp;
28092 __temp.val[0] = vcombine_u32 (__val.val[0], vcreate_u32 (__AARCH64_UINT64_C (0)));
28093 __temp.val[1] = vcombine_u32 (__val.val[1], vcreate_u32 (__AARCH64_UINT64_C (0)));
28094 __temp.val[2] = vcombine_u32 (__val.val[2], vcreate_u32 (__AARCH64_UINT64_C (0)));
28095 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[0], 0);
28096 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[1], 1);
28097 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[2], 2);
28098 __builtin_aarch64_st1x3v2si ((__builtin_aarch64_simd_si *) __a, __o);
28099 }
28100
28101 __extension__ extern __inline void
28102 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28103 vst1_f16_x3 (float16_t * __a, float16x4x3_t __val)
28104 {
28105 __builtin_aarch64_simd_ci __o;
28106 float16x8x3_t __temp;
28107 __temp.val[0] = vcombine_f16 (__val.val[0], vcreate_f16 (__AARCH64_UINT64_C (0)));
28108 __temp.val[1] = vcombine_f16 (__val.val[1], vcreate_f16 (__AARCH64_UINT64_C (0)));
28109 __temp.val[2] = vcombine_f16 (__val.val[2], vcreate_f16 (__AARCH64_UINT64_C (0)));
28110 __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __temp.val[0], 0);
28111 __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __temp.val[1], 1);
28112 __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __temp.val[2], 2);
28113 __builtin_aarch64_st1x3v4hf ((__builtin_aarch64_simd_hf *) __a, __o);
28114 }
28115
28116 __extension__ extern __inline void
28117 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28118 vst1_f32_x3 (float32_t * __a, float32x2x3_t __val)
28119 {
28120 __builtin_aarch64_simd_ci __o;
28121 float32x4x3_t __temp;
28122 __temp.val[0] = vcombine_f32 (__val.val[0], vcreate_f32 (__AARCH64_UINT64_C (0)));
28123 __temp.val[1] = vcombine_f32 (__val.val[1], vcreate_f32 (__AARCH64_UINT64_C (0)));
28124 __temp.val[2] = vcombine_f32 (__val.val[2], vcreate_f32 (__AARCH64_UINT64_C (0)));
28125 __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __temp.val[0], 0);
28126 __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __temp.val[1], 1);
28127 __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __temp.val[2], 2);
28128 __builtin_aarch64_st1x3v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
28129 }
28130
28131 __extension__ extern __inline void
28132 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28133 vst1_p64_x3 (poly64_t * __a, poly64x1x3_t __val)
28134 {
28135 __builtin_aarch64_simd_ci __o;
28136 poly64x2x3_t __temp;
28137 __temp.val[0] = vcombine_p64 (__val.val[0], vcreate_p64 (__AARCH64_UINT64_C (0)));
28138 __temp.val[1] = vcombine_p64 (__val.val[1], vcreate_p64 (__AARCH64_UINT64_C (0)));
28139 __temp.val[2] = vcombine_p64 (__val.val[2], vcreate_p64 (__AARCH64_UINT64_C (0)));
28140 __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
28141 (poly64x2_t) __temp.val[0], 0);
28142 __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
28143 (poly64x2_t) __temp.val[1], 1);
28144 __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
28145 (poly64x2_t) __temp.val[2], 2);
28146 __builtin_aarch64_st1x3di ((__builtin_aarch64_simd_di *) __a, __o);
28147 }
28148
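/* The q-register (128-bit) _x3 variants below need no widening step: the
   three full vectors are packed directly into the CI tuple and stored.
   Illustrative sketch (hypothetical names; assumes <arm_neon.h>):

     int32_t buf[12];
     int32x4x3_t v = { { vdupq_n_s32 (1), vdupq_n_s32 (2), vdupq_n_s32 (3) } };
     vst1q_s32_x3 (buf, v);   buf becomes four 1s, then four 2s, then four 3s  */
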
28149 __extension__ extern __inline void
28150 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28151 vst1q_s8_x3 (int8_t * __a, int8x16x3_t __val)
28152 {
28153 __builtin_aarch64_simd_ci __o;
28154 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[0], 0);
28155 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[1], 1);
28156 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[2], 2);
28157 __builtin_aarch64_st1x3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
28158 }
28159
28160 __extension__ extern __inline void
28161 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28162 vst1q_p8_x3 (poly8_t * __a, poly8x16x3_t __val)
28163 {
28164 __builtin_aarch64_simd_ci __o;
28165 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[0], 0);
28166 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[1], 1);
28167 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[2], 2);
28168 __builtin_aarch64_st1x3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
28169 }
28170
28171 __extension__ extern __inline void
28172 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28173 vst1q_s16_x3 (int16_t * __a, int16x8x3_t __val)
28174 {
28175 __builtin_aarch64_simd_ci __o;
28176 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[0], 0);
28177 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[1], 1);
28178 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[2], 2);
28179 __builtin_aarch64_st1x3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
28180 }
28181
28182 __extension__ extern __inline void
28183 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28184 vst1q_p16_x3 (poly16_t * __a, poly16x8x3_t __val)
28185 {
28186 __builtin_aarch64_simd_ci __o;
28187 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[0], 0);
28188 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[1], 1);
28189 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[2], 2);
28190 __builtin_aarch64_st1x3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
28191 }
28192
28193 __extension__ extern __inline void
28194 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28195 vst1q_s32_x3 (int32_t * __a, int32x4x3_t __val)
28196 {
28197 __builtin_aarch64_simd_ci __o;
28198 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __val.val[0], 0);
28199 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __val.val[1], 1);
28200 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __val.val[2], 2);
28201 __builtin_aarch64_st1x3v4si ((__builtin_aarch64_simd_si *) __a, __o);
28202 }
28203
28204 __extension__ extern __inline void
28205 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28206 vst1q_s64_x3 (int64_t * __a, int64x2x3_t __val)
28207 {
28208 __builtin_aarch64_simd_ci __o;
28209 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __val.val[0], 0);
28210 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __val.val[1], 1);
28211 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __val.val[2], 2);
28212 __builtin_aarch64_st1x3v2di ((__builtin_aarch64_simd_di *) __a, __o);
28213 }
28214
28215 __extension__ extern __inline void
28216 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28217 vst1q_u8_x3 (uint8_t * __a, uint8x16x3_t __val)
28218 {
28219 __builtin_aarch64_simd_ci __o;
28220 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[0], 0);
28221 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[1], 1);
28222 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[2], 2);
28223 __builtin_aarch64_st1x3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
28224 }
28225
28226 __extension__ extern __inline void
28227 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28228 vst1q_u16_x3 (uint16_t * __a, uint16x8x3_t __val)
28229 {
28230 __builtin_aarch64_simd_ci __o;
28231 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[0], 0);
28232 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[1], 1);
28233 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[2], 2);
28234 __builtin_aarch64_st1x3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
28235 }
28236
28237 __extension__ extern __inline void
28238 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28239 vst1q_u32_x3 (uint32_t * __a, uint32x4x3_t __val)
28240 {
28241 __builtin_aarch64_simd_ci __o;
28242 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __val.val[0], 0);
28243 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __val.val[1], 1);
28244 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __val.val[2], 2);
28245 __builtin_aarch64_st1x3v4si ((__builtin_aarch64_simd_si *) __a, __o);
28246 }
28247
28248 __extension__ extern __inline void
28249 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28250 vst1q_u64_x3 (uint64_t * __a, uint64x2x3_t __val)
28251 {
28252 __builtin_aarch64_simd_ci __o;
28253 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __val.val[0], 0);
28254 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __val.val[1], 1);
28255 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __val.val[2], 2);
28256 __builtin_aarch64_st1x3v2di ((__builtin_aarch64_simd_di *) __a, __o);
28257 }
28258
28259 __extension__ extern __inline void
28260 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28261 vst1q_f16_x3 (float16_t * __a, float16x8x3_t __val)
28262 {
28263 __builtin_aarch64_simd_ci __o;
28264 __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __val.val[0], 0);
28265 __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __val.val[1], 1);
28266 __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __val.val[2], 2);
28267 __builtin_aarch64_st1x3v8hf ((__builtin_aarch64_simd_hf *) __a, __o);
28268 }
28269
28270 __extension__ extern __inline void
28271 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28272 vst1q_f32_x3 (float32_t * __a, float32x4x3_t __val)
28273 {
28274 __builtin_aarch64_simd_ci __o;
28275 __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __val.val[0], 0);
28276 __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __val.val[1], 1);
28277 __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __val.val[2], 2);
28278 __builtin_aarch64_st1x3v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
28279 }
28280
28281 __extension__ extern __inline void
28282 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28283 vst1q_f64_x3 (float64_t * __a, float64x2x3_t __val)
28284 {
28285 __builtin_aarch64_simd_ci __o;
28286 __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __val.val[0], 0);
28287 __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __val.val[1], 1);
28288 __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __val.val[2], 2);
28289 __builtin_aarch64_st1x3v2df ((__builtin_aarch64_simd_df *) __a, __o);
28290 }
28291
28292 __extension__ extern __inline void
28293 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28294 vst1q_p64_x3 (poly64_t * __a, poly64x2x3_t __val)
28295 {
28296 __builtin_aarch64_simd_ci __o;
28297 __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
28298 (poly64x2_t) __val.val[0], 0);
28299 __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
28300 (poly64x2_t) __val.val[1], 1);
28301 __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
28302 (poly64x2_t) __val.val[2], 2);
28303 __builtin_aarch64_st1x3v2di ((__builtin_aarch64_simd_di *) __a, __o);
28304 }
28305
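/* End of the vst1(q)_x3 group.  As with the _x2 forms, the 64-bit variants
   widen each input to a q register before packing the CI tuple, while the
   128-bit variants pack their inputs directly.  Illustrative sketch
   (hypothetical names; assumes <arm_neon.h>):

     uint8_t buf[24];
     uint8x8x3_t v = { { vdup_n_u8 (1), vdup_n_u8 (2), vdup_n_u8 (3) } };
     vst1_u8_x3 (buf, v);   writes 8 bytes of 1, then 8 of 2, then 8 of 3  */
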
28306 /* vst1(q)_x4. */
28307
28308 __extension__ extern __inline void
28309 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28310 vst1_s8_x4 (int8_t * __a, int8x8x4_t val)
28311 {
28312 union { int8x8x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28313 __builtin_aarch64_st1x4v8qi ((__builtin_aarch64_simd_qi *) __a, __u.__o);
28314 }
28315
28316 __extension__ extern __inline void
28317 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28318 vst1q_s8_x4 (int8_t * __a, int8x16x4_t val)
28319 {
28320 union { int8x16x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28321 __builtin_aarch64_st1x4v16qi ((__builtin_aarch64_simd_qi *) __a, __u.__o);
28322 }
28323
28324 __extension__ extern __inline void
28325 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28326 vst1_s16_x4 (int16_t * __a, int16x4x4_t val)
28327 {
28328 union { int16x4x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28329 __builtin_aarch64_st1x4v4hi ((__builtin_aarch64_simd_hi *) __a, __u.__o);
28330 }
28331
28332 __extension__ extern __inline void
28333 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28334 vst1q_s16_x4 (int16_t * __a, int16x8x4_t val)
28335 {
28336 union { int16x8x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28337 __builtin_aarch64_st1x4v8hi ((__builtin_aarch64_simd_hi *) __a, __u.__o);
28338 }
28339
28340 __extension__ extern __inline void
28341 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28342 vst1_s32_x4 (int32_t * __a, int32x2x4_t val)
28343 {
28344 union { int32x2x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28345 __builtin_aarch64_st1x4v2si ((__builtin_aarch64_simd_si *) __a, __u.__o);
28346 }
28347
28348 __extension__ extern __inline void
28349 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28350 vst1q_s32_x4 (int32_t * __a, int32x4x4_t val)
28351 {
28352 union { int32x4x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28353 __builtin_aarch64_st1x4v4si ((__builtin_aarch64_simd_si *) __a, __u.__o);
28354 }
28355
28356 __extension__ extern __inline void
28357 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28358 vst1_u8_x4 (uint8_t * __a, uint8x8x4_t val)
28359 {
28360 union { uint8x8x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28361 __builtin_aarch64_st1x4v8qi ((__builtin_aarch64_simd_qi *) __a, __u.__o);
28362 }
28363
28364 __extension__ extern __inline void
28365 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28366 vst1q_u8_x4 (uint8_t * __a, uint8x16x4_t val)
28367 {
28368 union { uint8x16x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28369 __builtin_aarch64_st1x4v16qi ((__builtin_aarch64_simd_qi *) __a, __u.__o);
28370 }
28371
28372 __extension__ extern __inline void
28373 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28374 vst1_u16_x4 (uint16_t * __a, uint16x4x4_t val)
28375 {
28376 union { uint16x4x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28377 __builtin_aarch64_st1x4v4hi ((__builtin_aarch64_simd_hi *) __a, __u.__o);
28378 }
28379
28380 __extension__ extern __inline void
28381 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28382 vst1q_u16_x4 (uint16_t * __a, uint16x8x4_t val)
28383 {
28384 union { uint16x8x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28385 __builtin_aarch64_st1x4v8hi ((__builtin_aarch64_simd_hi *) __a, __u.__o);
28386 }
28387
28388 __extension__ extern __inline void
28389 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28390 vst1_u32_x4 (uint32_t * __a, uint32x2x4_t val)
28391 {
28392 union { uint32x2x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28393 __builtin_aarch64_st1x4v2si ((__builtin_aarch64_simd_si *) __a, __u.__o);
28394 }
28395
28396 __extension__ extern __inline void
28397 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28398 vst1q_u32_x4 (uint32_t * __a, uint32x4x4_t val)
28399 {
28400 union { uint32x4x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28401 __builtin_aarch64_st1x4v4si ((__builtin_aarch64_simd_si *) __a, __u.__o);
28402 }
28403
28404 __extension__ extern __inline void
28405 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28406 vst1_f16_x4 (float16_t * __a, float16x4x4_t val)
28407 {
28408 union { float16x4x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28409 __builtin_aarch64_st1x4v4hf ((__builtin_aarch64_simd_hf *) __a, __u.__o);
28410 }
28411
28412 __extension__ extern __inline void
28413 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28414 vst1q_f16_x4 (float16_t * __a, float16x8x4_t val)
28415 {
28416 union { float16x8x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28417 __builtin_aarch64_st1x4v8hf ((__builtin_aarch64_simd_hf *) __a, __u.__o);
28418 }
28419
28420 __extension__ extern __inline void
28421 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28422 vst1_f32_x4 (float32_t * __a, float32x2x4_t val)
28423 {
28424 union { float32x2x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28425 __builtin_aarch64_st1x4v2sf ((__builtin_aarch64_simd_sf *) __a, __u.__o);
28426 }
28427
28428 __extension__ extern __inline void
28429 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28430 vst1q_f32_x4 (float32_t * __a, float32x4x4_t val)
28431 {
28432 union { float32x4x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28433 __builtin_aarch64_st1x4v4sf ((__builtin_aarch64_simd_sf *) __a, __u.__o);
28434 }
28435
28436 __extension__ extern __inline void
28437 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28438 vst1_p8_x4 (poly8_t * __a, poly8x8x4_t val)
28439 {
28440 union { poly8x8x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28441 __builtin_aarch64_st1x4v8qi ((__builtin_aarch64_simd_qi *) __a, __u.__o);
28442 }
28443
28444 __extension__ extern __inline void
28445 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28446 vst1q_p8_x4 (poly8_t * __a, poly8x16x4_t val)
28447 {
28448 union { poly8x16x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28449 __builtin_aarch64_st1x4v16qi ((__builtin_aarch64_simd_qi *) __a, __u.__o);
28450 }
28451
28452 __extension__ extern __inline void
28453 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28454 vst1_p16_x4 (poly16_t * __a, poly16x4x4_t val)
28455 {
28456 union { poly16x4x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28457 __builtin_aarch64_st1x4v4hi ((__builtin_aarch64_simd_hi *) __a, __u.__o);
28458 }
28459
28460 __extension__ extern __inline void
28461 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28462 vst1q_p16_x4 (poly16_t * __a, poly16x8x4_t val)
28463 {
28464 union { poly16x8x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28465 __builtin_aarch64_st1x4v8hi ((__builtin_aarch64_simd_hi *) __a, __u.__o);
28466 }
28467
28468 __extension__ extern __inline void
28469 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28470 vst1_s64_x4 (int64_t * __a, int64x1x4_t val)
28471 {
28472 union { int64x1x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28473 __builtin_aarch64_st1x4di ((__builtin_aarch64_simd_di *) __a, __u.__o);
28474 }
28475
28476 __extension__ extern __inline void
28477 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28478 vst1_u64_x4 (uint64_t * __a, uint64x1x4_t val)
28479 {
28480 union { uint64x1x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28481 __builtin_aarch64_st1x4di ((__builtin_aarch64_simd_di *) __a, __u.__o);
28482 }
28483
28484 __extension__ extern __inline void
28485 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28486 vst1_p64_x4 (poly64_t * __a, poly64x1x4_t val)
28487 {
28488 union { poly64x1x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28489 __builtin_aarch64_st1x4di ((__builtin_aarch64_simd_di *) __a, __u.__o);
28490 }
28491
28492 __extension__ extern __inline void
28493 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28494 vst1q_s64_x4 (int64_t * __a, int64x2x4_t val)
28495 {
28496 union { int64x2x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28497 __builtin_aarch64_st1x4v2di ((__builtin_aarch64_simd_di *) __a, __u.__o);
28498 }
28499
28500 __extension__ extern __inline void
28501 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28502 vst1q_u64_x4 (uint64_t * __a, uint64x2x4_t val)
28503 {
28504 union { uint64x2x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28505 __builtin_aarch64_st1x4v2di ((__builtin_aarch64_simd_di *) __a, __u.__o);
28506 }
28507
28508 __extension__ extern __inline void
28509 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28510 vst1q_p64_x4 (poly64_t * __a, poly64x2x4_t val)
28511 {
28512 union { poly64x2x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28513 __builtin_aarch64_st1x4v2di ((__builtin_aarch64_simd_di *) __a, __u.__o);
28514 }
28515
28516 __extension__ extern __inline void
28517 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28518 vst1_f64_x4 (float64_t * __a, float64x1x4_t val)
28519 {
28520 union { float64x1x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28521 __builtin_aarch64_st1x4df ((__builtin_aarch64_simd_df *) __a, __u.__o);
28522 }
28523
28524 __extension__ extern __inline void
28525 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28526 vst1q_f64_x4 (float64_t * __a, float64x2x4_t val)
28527 {
28528 union { float64x2x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
28529 __builtin_aarch64_st1x4v2df ((__builtin_aarch64_simd_df *) __a, __u.__o);
28530 }
28531
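/* The _x4 variants above take a different route from the _x2/_x3 forms:
   the NEON structure type is reinterpreted as the XI register-tuple type
   through a union, so no per-lane set_qreg packing (and, for the 64-bit
   forms, no zero-widening) is needed.  Illustrative sketch (hypothetical
   names; assumes <arm_neon.h>):

     uint8_t buf[64];
     uint8x16x4_t v = { { vdupq_n_u8 (0), vdupq_n_u8 (1),
                          vdupq_n_u8 (2), vdupq_n_u8 (3) } };
     vst1q_u8_x4 (buf, v);   writes 64 bytes, 16 of each value in turn  */
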
28532 /* vstn: vst2/vst3/vst4 multi-vector stores. */
28533
28534 __extension__ extern __inline void
28535 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28536 vst2_s64 (int64_t * __a, int64x1x2_t __val)
28537 {
28538 __builtin_aarch64_simd_oi __o;
28539 int64x2x2_t __temp;
28540 __temp.val[0] = vcombine_s64 (__val.val[0], vcreate_s64 (__AARCH64_INT64_C (0)));
28541 __temp.val[1] = vcombine_s64 (__val.val[1], vcreate_s64 (__AARCH64_INT64_C (0)));
28542 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[0], 0);
28543 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[1], 1);
28544 __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
28545 }
28546
28547 __extension__ extern __inline void
28548 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28549 vst2_u64 (uint64_t * __a, uint64x1x2_t __val)
28550 {
28551 __builtin_aarch64_simd_oi __o;
28552 uint64x2x2_t __temp;
28553 __temp.val[0] = vcombine_u64 (__val.val[0], vcreate_u64 (__AARCH64_UINT64_C (0)));
28554 __temp.val[1] = vcombine_u64 (__val.val[1], vcreate_u64 (__AARCH64_UINT64_C (0)));
28555 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[0], 0);
28556 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __temp.val[1], 1);
28557 __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
28558 }
28559
28560 __extension__ extern __inline void
28561 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28562 vst2_f64 (float64_t * __a, float64x1x2_t __val)
28563 {
28564 __builtin_aarch64_simd_oi __o;
28565 float64x2x2_t __temp;
28566 __temp.val[0] = vcombine_f64 (__val.val[0], vcreate_f64 (__AARCH64_UINT64_C (0)));
28567 __temp.val[1] = vcombine_f64 (__val.val[1], vcreate_f64 (__AARCH64_UINT64_C (0)));
28568 __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) __temp.val[0], 0);
28569 __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) __temp.val[1], 1);
28570 __builtin_aarch64_st2df ((__builtin_aarch64_simd_df *) __a, __o);
28571 }
28572
28573 __extension__ extern __inline void
28574 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28575 vst2_s8 (int8_t * __a, int8x8x2_t __val)
28576 {
28577 __builtin_aarch64_simd_oi __o;
28578 int8x16x2_t __temp;
28579 __temp.val[0] = vcombine_s8 (__val.val[0], vcreate_s8 (__AARCH64_INT64_C (0)));
28580 __temp.val[1] = vcombine_s8 (__val.val[1], vcreate_s8 (__AARCH64_INT64_C (0)));
28581 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[0], 0);
28582 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[1], 1);
28583 __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
28584 }
28585
28586 __extension__ extern __inline void
28587 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28588 vst2_p8 (poly8_t * __a, poly8x8x2_t __val)
28589 {
28590 __builtin_aarch64_simd_oi __o;
28591 poly8x16x2_t __temp;
28592 __temp.val[0] = vcombine_p8 (__val.val[0], vcreate_p8 (__AARCH64_UINT64_C (0)));
28593 __temp.val[1] = vcombine_p8 (__val.val[1], vcreate_p8 (__AARCH64_UINT64_C (0)));
28594 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[0], 0);
28595 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[1], 1);
28596 __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
28597 }
28598
28599 __extension__ extern __inline void
28600 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28601 vst2_s16 (int16_t * __a, int16x4x2_t __val)
28602 {
28603 __builtin_aarch64_simd_oi __o;
28604 int16x8x2_t __temp;
28605 __temp.val[0] = vcombine_s16 (__val.val[0], vcreate_s16 (__AARCH64_INT64_C (0)));
28606 __temp.val[1] = vcombine_s16 (__val.val[1], vcreate_s16 (__AARCH64_INT64_C (0)));
28607 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[0], 0);
28608 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[1], 1);
28609 __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
28610 }
28611
28612 __extension__ extern __inline void
28613 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28614 vst2_p16 (poly16_t * __a, poly16x4x2_t __val)
28615 {
28616 __builtin_aarch64_simd_oi __o;
28617 poly16x8x2_t __temp;
28618 __temp.val[0] = vcombine_p16 (__val.val[0], vcreate_p16 (__AARCH64_UINT64_C (0)));
28619 __temp.val[1] = vcombine_p16 (__val.val[1], vcreate_p16 (__AARCH64_UINT64_C (0)));
28620 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[0], 0);
28621 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[1], 1);
28622 __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
28623 }
28624
28625 __extension__ extern __inline void
28626 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28627 vst2_s32 (int32_t * __a, int32x2x2_t __val)
28628 {
28629 __builtin_aarch64_simd_oi __o;
28630 int32x4x2_t __temp;
28631 __temp.val[0] = vcombine_s32 (__val.val[0], vcreate_s32 (__AARCH64_INT64_C (0)));
28632 __temp.val[1] = vcombine_s32 (__val.val[1], vcreate_s32 (__AARCH64_INT64_C (0)));
28633 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[0], 0);
28634 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[1], 1);
28635 __builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
28636 }
28637
28638 __extension__ extern __inline void
28639 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28640 vst2_u8 (uint8_t * __a, uint8x8x2_t __val)
28641 {
28642 __builtin_aarch64_simd_oi __o;
28643 uint8x16x2_t __temp;
28644 __temp.val[0] = vcombine_u8 (__val.val[0], vcreate_u8 (__AARCH64_UINT64_C (0)));
28645 __temp.val[1] = vcombine_u8 (__val.val[1], vcreate_u8 (__AARCH64_UINT64_C (0)));
28646 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[0], 0);
28647 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __temp.val[1], 1);
28648 __builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
28649 }
28650
28651 __extension__ extern __inline void
28652 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28653 vst2_u16 (uint16_t * __a, uint16x4x2_t __val)
28654 {
28655 __builtin_aarch64_simd_oi __o;
28656 uint16x8x2_t __temp;
28657 __temp.val[0] = vcombine_u16 (__val.val[0], vcreate_u16 (__AARCH64_UINT64_C (0)));
28658 __temp.val[1] = vcombine_u16 (__val.val[1], vcreate_u16 (__AARCH64_UINT64_C (0)));
28659 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[0], 0);
28660 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __temp.val[1], 1);
28661 __builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
28662 }
28663
28664 __extension__ extern __inline void
28665 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28666 vst2_u32 (uint32_t * __a, uint32x2x2_t __val)
28667 {
28668 __builtin_aarch64_simd_oi __o;
28669 uint32x4x2_t __temp;
28670 __temp.val[0] = vcombine_u32 (__val.val[0], vcreate_u32 (__AARCH64_UINT64_C (0)));
28671 __temp.val[1] = vcombine_u32 (__val.val[1], vcreate_u32 (__AARCH64_UINT64_C (0)));
28672 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[0], 0);
28673 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __temp.val[1], 1);
28674 __builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
28675 }
28676
28677 __extension__ extern __inline void
28678 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28679 vst2_f16 (float16_t * __a, float16x4x2_t __val)
28680 {
28681 __builtin_aarch64_simd_oi __o;
28682 float16x8x2_t __temp;
28683 __temp.val[0] = vcombine_f16 (__val.val[0], vcreate_f16 (__AARCH64_UINT64_C (0)));
28684 __temp.val[1] = vcombine_f16 (__val.val[1], vcreate_f16 (__AARCH64_UINT64_C (0)));
28685 __o = __builtin_aarch64_set_qregoiv8hf (__o, __temp.val[0], 0);
28686 __o = __builtin_aarch64_set_qregoiv8hf (__o, __temp.val[1], 1);
28687 __builtin_aarch64_st2v4hf (__a, __o);
28688 }
28689
28690 __extension__ extern __inline void
28691 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28692 vst2_f32 (float32_t * __a, float32x2x2_t __val)
28693 {
28694 __builtin_aarch64_simd_oi __o;
28695 float32x4x2_t __temp;
28696 __temp.val[0] = vcombine_f32 (__val.val[0], vcreate_f32 (__AARCH64_UINT64_C (0)));
28697 __temp.val[1] = vcombine_f32 (__val.val[1], vcreate_f32 (__AARCH64_UINT64_C (0)));
28698 __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) __temp.val[0], 0);
28699 __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) __temp.val[1], 1);
28700 __builtin_aarch64_st2v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
28701 }
28702
28703 __extension__ extern __inline void
28704 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28705 vst2_p64 (poly64_t * __a, poly64x1x2_t __val)
28706 {
28707 __builtin_aarch64_simd_oi __o;
28708 poly64x2x2_t __temp;
28709 __temp.val[0] = vcombine_p64 (__val.val[0], vcreate_p64 (__AARCH64_UINT64_C (0)));
28710 __temp.val[1] = vcombine_p64 (__val.val[1], vcreate_p64 (__AARCH64_UINT64_C (0)));
28711 __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
28712 (poly64x2_t) __temp.val[0], 0);
28713 __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
28714 (poly64x2_t) __temp.val[1], 1);
28715 __builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
28716 }
28717
28718 __extension__ extern __inline void
28719 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28720 vst2q_s8 (int8_t * __a, int8x16x2_t __val)
28721 {
28722 __builtin_aarch64_simd_oi __o;
28723 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __val.val[0], 0);
28724 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __val.val[1], 1);
28725 __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
28726 }
28727
28728 __extension__ extern __inline void
28729 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28730 vst2q_p8 (poly8_t * __a, poly8x16x2_t __val)
28731 {
28732 __builtin_aarch64_simd_oi __o;
28733 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __val.val[0], 0);
28734 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __val.val[1], 1);
28735 __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
28736 }
28737
28738 __extension__ extern __inline void
28739 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28740 vst2q_s16 (int16_t * __a, int16x8x2_t __val)
28741 {
28742 __builtin_aarch64_simd_oi __o;
28743 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __val.val[0], 0);
28744 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __val.val[1], 1);
28745 __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
28746 }
28747
28748 __extension__ extern __inline void
28749 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28750 vst2q_p16 (poly16_t * __a, poly16x8x2_t __val)
28751 {
28752 __builtin_aarch64_simd_oi __o;
28753 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __val.val[0], 0);
28754 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __val.val[1], 1);
28755 __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
28756 }
28757
28758 __extension__ extern __inline void
28759 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28760 vst2q_s32 (int32_t * __a, int32x4x2_t __val)
28761 {
28762 __builtin_aarch64_simd_oi __o;
28763 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __val.val[0], 0);
28764 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __val.val[1], 1);
28765 __builtin_aarch64_st2v4si ((__builtin_aarch64_simd_si *) __a, __o);
28766 }
28767
28768 __extension__ extern __inline void
28769 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28770 vst2q_s64 (int64_t * __a, int64x2x2_t __val)
28771 {
28772 __builtin_aarch64_simd_oi __o;
28773 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __val.val[0], 0);
28774 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __val.val[1], 1);
28775 __builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o);
28776 }
28777
28778 __extension__ extern __inline void
28779 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28780 vst2q_u8 (uint8_t * __a, uint8x16x2_t __val)
28781 {
28782 __builtin_aarch64_simd_oi __o;
28783 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __val.val[0], 0);
28784 __o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) __val.val[1], 1);
28785 __builtin_aarch64_st2v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
28786 }
28787
28788 __extension__ extern __inline void
28789 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28790 vst2q_u16 (uint16_t * __a, uint16x8x2_t __val)
28791 {
28792 __builtin_aarch64_simd_oi __o;
28793 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __val.val[0], 0);
28794 __o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) __val.val[1], 1);
28795 __builtin_aarch64_st2v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
28796 }
28797
28798 __extension__ extern __inline void
28799 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28800 vst2q_u32 (uint32_t * __a, uint32x4x2_t __val)
28801 {
28802 __builtin_aarch64_simd_oi __o;
28803 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __val.val[0], 0);
28804 __o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) __val.val[1], 1);
28805 __builtin_aarch64_st2v4si ((__builtin_aarch64_simd_si *) __a, __o);
28806 }
28807
28808 __extension__ extern __inline void
28809 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28810 vst2q_u64 (uint64_t * __a, uint64x2x2_t __val)
28811 {
28812 __builtin_aarch64_simd_oi __o;
28813 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __val.val[0], 0);
28814 __o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) __val.val[1], 1);
28815 __builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o);
28816 }
28817
28818 __extension__ extern __inline void
28819 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28820 vst2q_f16 (float16_t * __a, float16x8x2_t __val)
28821 {
28822 __builtin_aarch64_simd_oi __o;
28823 __o = __builtin_aarch64_set_qregoiv8hf (__o, __val.val[0], 0);
28824 __o = __builtin_aarch64_set_qregoiv8hf (__o, __val.val[1], 1);
28825 __builtin_aarch64_st2v8hf (__a, __o);
28826 }
28827
28828 __extension__ extern __inline void
28829 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28830 vst2q_f32 (float32_t * __a, float32x4x2_t __val)
28831 {
28832 __builtin_aarch64_simd_oi __o;
28833 __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) __val.val[0], 0);
28834 __o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) __val.val[1], 1);
28835 __builtin_aarch64_st2v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
28836 }
28837
28838 __extension__ extern __inline void
28839 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28840 vst2q_f64 (float64_t * __a, float64x2x2_t __val)
28841 {
28842 __builtin_aarch64_simd_oi __o;
28843 __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) __val.val[0], 0);
28844 __o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) __val.val[1], 1);
28845 __builtin_aarch64_st2v2df ((__builtin_aarch64_simd_df *) __a, __o);
28846 }
28847
28848 __extension__ extern __inline void
28849 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28850 vst2q_p64 (poly64_t * __a, poly64x2x2_t __val)
28851 {
28852 __builtin_aarch64_simd_oi __o;
28853 __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
28854 (poly64x2_t) __val.val[0], 0);
28855 __o = __builtin_aarch64_set_qregoiv2di_ssps (__o,
28856 (poly64x2_t) __val.val[1], 1);
28857 __builtin_aarch64_st2v2di ((__builtin_aarch64_simd_di *) __a, __o);
28858 }
28859
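/* Unlike the vst1 family, vst2/vst3/vst4 store with element interleaving
   (ST2/ST3/ST4): vst2 writes a[0], b[0], a[1], b[1], ... rather than the
   two vectors back to back.  Illustrative sketch (hypothetical names;
   assumes <arm_neon.h>):

     float32_t buf[4];
     float32x2x2_t ab = { { vdup_n_f32 (1.0f), vdup_n_f32 (2.0f) } };
     vst2_f32 (buf, ab);   buf becomes { 1, 2, 1, 2 }  */
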
28860 __extension__ extern __inline void
28861 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28862 vst3_s64 (int64_t * __a, int64x1x3_t __val)
28863 {
28864 __builtin_aarch64_simd_ci __o;
28865 int64x2x3_t __temp;
28866 __temp.val[0] = vcombine_s64 (__val.val[0], vcreate_s64 (__AARCH64_INT64_C (0)));
28867 __temp.val[1] = vcombine_s64 (__val.val[1], vcreate_s64 (__AARCH64_INT64_C (0)));
28868 __temp.val[2] = vcombine_s64 (__val.val[2], vcreate_s64 (__AARCH64_INT64_C (0)));
28869 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[0], 0);
28870 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[1], 1);
28871 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[2], 2);
28872 __builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o);
28873 }
28874
28875 __extension__ extern __inline void
28876 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28877 vst3_u64 (uint64_t * __a, uint64x1x3_t __val)
28878 {
28879 __builtin_aarch64_simd_ci __o;
28880 uint64x2x3_t __temp;
28881 __temp.val[0] = vcombine_u64 (__val.val[0], vcreate_u64 (__AARCH64_UINT64_C (0)));
28882 __temp.val[1] = vcombine_u64 (__val.val[1], vcreate_u64 (__AARCH64_UINT64_C (0)));
28883 __temp.val[2] = vcombine_u64 (__val.val[2], vcreate_u64 (__AARCH64_UINT64_C (0)));
28884 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[0], 0);
28885 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[1], 1);
28886 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __temp.val[2], 2);
28887 __builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o);
28888 }
28889
28890 __extension__ extern __inline void
28891 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28892 vst3_f64 (float64_t * __a, float64x1x3_t __val)
28893 {
28894 __builtin_aarch64_simd_ci __o;
28895 float64x2x3_t __temp;
28896 __temp.val[0] = vcombine_f64 (__val.val[0], vcreate_f64 (__AARCH64_UINT64_C (0)));
28897 __temp.val[1] = vcombine_f64 (__val.val[1], vcreate_f64 (__AARCH64_UINT64_C (0)));
28898 __temp.val[2] = vcombine_f64 (__val.val[2], vcreate_f64 (__AARCH64_UINT64_C (0)));
28899 __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __temp.val[0], 0);
28900 __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __temp.val[1], 1);
28901 __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __temp.val[2], 2);
28902 __builtin_aarch64_st3df ((__builtin_aarch64_simd_df *) __a, __o);
28903 }
28904
28905 __extension__ extern __inline void
28906 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28907 vst3_s8 (int8_t * __a, int8x8x3_t __val)
28908 {
28909 __builtin_aarch64_simd_ci __o;
28910 int8x16x3_t __temp;
28911 __temp.val[0] = vcombine_s8 (__val.val[0], vcreate_s8 (__AARCH64_INT64_C (0)));
28912 __temp.val[1] = vcombine_s8 (__val.val[1], vcreate_s8 (__AARCH64_INT64_C (0)));
28913 __temp.val[2] = vcombine_s8 (__val.val[2], vcreate_s8 (__AARCH64_INT64_C (0)));
28914 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[0], 0);
28915 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[1], 1);
28916 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[2], 2);
28917 __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
28918 }
28919
28920 __extension__ extern __inline void
28921 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28922 vst3_p8 (poly8_t * __a, poly8x8x3_t __val)
28923 {
28924 __builtin_aarch64_simd_ci __o;
28925 poly8x16x3_t __temp;
28926 __temp.val[0] = vcombine_p8 (__val.val[0], vcreate_p8 (__AARCH64_UINT64_C (0)));
28927 __temp.val[1] = vcombine_p8 (__val.val[1], vcreate_p8 (__AARCH64_UINT64_C (0)));
28928 __temp.val[2] = vcombine_p8 (__val.val[2], vcreate_p8 (__AARCH64_UINT64_C (0)));
28929 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[0], 0);
28930 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[1], 1);
28931 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[2], 2);
28932 __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
28933 }
28934
28935 __extension__ extern __inline void
28936 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28937 vst3_s16 (int16_t * __a, int16x4x3_t __val)
28938 {
28939 __builtin_aarch64_simd_ci __o;
28940 int16x8x3_t __temp;
28941 __temp.val[0] = vcombine_s16 (__val.val[0], vcreate_s16 (__AARCH64_INT64_C (0)));
28942 __temp.val[1] = vcombine_s16 (__val.val[1], vcreate_s16 (__AARCH64_INT64_C (0)));
28943 __temp.val[2] = vcombine_s16 (__val.val[2], vcreate_s16 (__AARCH64_INT64_C (0)));
28944 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[0], 0);
28945 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[1], 1);
28946 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[2], 2);
28947 __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
28948 }
28949
28950 __extension__ extern __inline void
28951 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28952 vst3_p16 (poly16_t * __a, poly16x4x3_t __val)
28953 {
28954 __builtin_aarch64_simd_ci __o;
28955 poly16x8x3_t __temp;
28956 __temp.val[0] = vcombine_p16 (__val.val[0], vcreate_p16 (__AARCH64_UINT64_C (0)));
28957 __temp.val[1] = vcombine_p16 (__val.val[1], vcreate_p16 (__AARCH64_UINT64_C (0)));
28958 __temp.val[2] = vcombine_p16 (__val.val[2], vcreate_p16 (__AARCH64_UINT64_C (0)));
28959 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[0], 0);
28960 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[1], 1);
28961 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[2], 2);
28962 __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
28963 }
28964
28965 __extension__ extern __inline void
28966 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28967 vst3_s32 (int32_t * __a, int32x2x3_t __val)
28968 {
28969 __builtin_aarch64_simd_ci __o;
28970 int32x4x3_t __temp;
28971 __temp.val[0] = vcombine_s32 (__val.val[0], vcreate_s32 (__AARCH64_INT64_C (0)));
28972 __temp.val[1] = vcombine_s32 (__val.val[1], vcreate_s32 (__AARCH64_INT64_C (0)));
28973 __temp.val[2] = vcombine_s32 (__val.val[2], vcreate_s32 (__AARCH64_INT64_C (0)));
28974 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[0], 0);
28975 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[1], 1);
28976 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[2], 2);
28977 __builtin_aarch64_st3v2si ((__builtin_aarch64_simd_si *) __a, __o);
28978 }
28979
28980 __extension__ extern __inline void
28981 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28982 vst3_u8 (uint8_t * __a, uint8x8x3_t __val)
28983 {
28984 __builtin_aarch64_simd_ci __o;
28985 uint8x16x3_t __temp;
28986 __temp.val[0] = vcombine_u8 (__val.val[0], vcreate_u8 (__AARCH64_UINT64_C (0)));
28987 __temp.val[1] = vcombine_u8 (__val.val[1], vcreate_u8 (__AARCH64_UINT64_C (0)));
28988 __temp.val[2] = vcombine_u8 (__val.val[2], vcreate_u8 (__AARCH64_UINT64_C (0)));
28989 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[0], 0);
28990 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[1], 1);
28991 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __temp.val[2], 2);
28992 __builtin_aarch64_st3v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
28993 }
28994
28995 __extension__ extern __inline void
28996 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
28997 vst3_u16 (uint16_t * __a, uint16x4x3_t __val)
28998 {
28999 __builtin_aarch64_simd_ci __o;
29000 uint16x8x3_t __temp;
29001 __temp.val[0] = vcombine_u16 (__val.val[0], vcreate_u16 (__AARCH64_UINT64_C (0)));
29002 __temp.val[1] = vcombine_u16 (__val.val[1], vcreate_u16 (__AARCH64_UINT64_C (0)));
29003 __temp.val[2] = vcombine_u16 (__val.val[2], vcreate_u16 (__AARCH64_UINT64_C (0)));
29004 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[0], 0);
29005 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[1], 1);
29006 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __temp.val[2], 2);
29007 __builtin_aarch64_st3v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
29008 }
29009
29010 __extension__ extern __inline void
29011 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29012 vst3_u32 (uint32_t * __a, uint32x2x3_t __val)
29013 {
29014 __builtin_aarch64_simd_ci __o;
29015 uint32x4x3_t __temp;
29016 __temp.val[0] = vcombine_u32 (__val.val[0], vcreate_u32 (__AARCH64_UINT64_C (0)));
29017 __temp.val[1] = vcombine_u32 (__val.val[1], vcreate_u32 (__AARCH64_UINT64_C (0)));
29018 __temp.val[2] = vcombine_u32 (__val.val[2], vcreate_u32 (__AARCH64_UINT64_C (0)));
29019 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[0], 0);
29020 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[1], 1);
29021 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __temp.val[2], 2);
29022 __builtin_aarch64_st3v2si ((__builtin_aarch64_simd_si *) __a, __o);
29023 }
29024
29025 __extension__ extern __inline void
29026 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29027 vst3_f16 (float16_t * __a, float16x4x3_t __val)
29028 {
29029 __builtin_aarch64_simd_ci __o;
29030 float16x8x3_t __temp;
29031 __temp.val[0] = vcombine_f16 (__val.val[0], vcreate_f16 (__AARCH64_UINT64_C (0)));
29032 __temp.val[1] = vcombine_f16 (__val.val[1], vcreate_f16 (__AARCH64_UINT64_C (0)));
29033 __temp.val[2] = vcombine_f16 (__val.val[2], vcreate_f16 (__AARCH64_UINT64_C (0)));
29034 __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __temp.val[0], 0);
29035 __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __temp.val[1], 1);
29036 __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __temp.val[2], 2);
29037 __builtin_aarch64_st3v4hf ((__builtin_aarch64_simd_hf *) __a, __o);
29038 }
29039
29040 __extension__ extern __inline void
29041 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29042 vst3_f32 (float32_t * __a, float32x2x3_t __val)
29043 {
29044 __builtin_aarch64_simd_ci __o;
29045 float32x4x3_t __temp;
29046 __temp.val[0] = vcombine_f32 (__val.val[0], vcreate_f32 (__AARCH64_UINT64_C (0)));
29047 __temp.val[1] = vcombine_f32 (__val.val[1], vcreate_f32 (__AARCH64_UINT64_C (0)));
29048 __temp.val[2] = vcombine_f32 (__val.val[2], vcreate_f32 (__AARCH64_UINT64_C (0)));
29049 __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __temp.val[0], 0);
29050 __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __temp.val[1], 1);
29051 __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __temp.val[2], 2);
29052 __builtin_aarch64_st3v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
29053 }
29054
29055 __extension__ extern __inline void
29056 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29057 vst3_p64 (poly64_t * __a, poly64x1x3_t __val)
29058 {
29059 __builtin_aarch64_simd_ci __o;
29060 poly64x2x3_t __temp;
29061 __temp.val[0] = vcombine_p64 (__val.val[0], vcreate_p64 (__AARCH64_UINT64_C (0)));
29062 __temp.val[1] = vcombine_p64 (__val.val[1], vcreate_p64 (__AARCH64_UINT64_C (0)));
29063 __temp.val[2] = vcombine_p64 (__val.val[2], vcreate_p64 (__AARCH64_UINT64_C (0)));
29064 __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
29065 (poly64x2_t) __temp.val[0], 0);
29066 __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
29067 (poly64x2_t) __temp.val[1], 1);
29068 __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
29069 (poly64x2_t) __temp.val[2], 2);
29070 __builtin_aarch64_st3di ((__builtin_aarch64_simd_di *) __a, __o);
29071 }
29072
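/* Note: the vst3q_* variants below take full 128-bit inputs, so no widening
   step is needed; the three Q registers are packed straight into the CI
   tuple and stored with the matching st3 builtin.  */
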
29073 __extension__ extern __inline void
29074 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29075 vst3q_s8 (int8_t * __a, int8x16x3_t __val)
29076 {
29077 __builtin_aarch64_simd_ci __o;
29078 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[0], 0);
29079 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[1], 1);
29080 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[2], 2);
29081 __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
29082 }
29083
29084 __extension__ extern __inline void
29085 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29086 vst3q_p8 (poly8_t * __a, poly8x16x3_t __val)
29087 {
29088 __builtin_aarch64_simd_ci __o;
29089 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[0], 0);
29090 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[1], 1);
29091 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[2], 2);
29092 __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
29093 }
29094
29095 __extension__ extern __inline void
29096 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29097 vst3q_s16 (int16_t * __a, int16x8x3_t __val)
29098 {
29099 __builtin_aarch64_simd_ci __o;
29100 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[0], 0);
29101 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[1], 1);
29102 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[2], 2);
29103 __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
29104 }
29105
29106 __extension__ extern __inline void
29107 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29108 vst3q_p16 (poly16_t * __a, poly16x8x3_t __val)
29109 {
29110 __builtin_aarch64_simd_ci __o;
29111 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[0], 0);
29112 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[1], 1);
29113 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[2], 2);
29114 __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
29115 }
29116
29117 __extension__ extern __inline void
29118 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29119 vst3q_s32 (int32_t * __a, int32x4x3_t __val)
29120 {
29121 __builtin_aarch64_simd_ci __o;
29122 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __val.val[0], 0);
29123 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __val.val[1], 1);
29124 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __val.val[2], 2);
29125 __builtin_aarch64_st3v4si ((__builtin_aarch64_simd_si *) __a, __o);
29126 }
29127
29128 __extension__ extern __inline void
29129 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29130 vst3q_s64 (int64_t * __a, int64x2x3_t __val)
29131 {
29132 __builtin_aarch64_simd_ci __o;
29133 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __val.val[0], 0);
29134 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __val.val[1], 1);
29135 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __val.val[2], 2);
29136 __builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o);
29137 }
29138
29139 __extension__ extern __inline void
29140 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29141 vst3q_u8 (uint8_t * __a, uint8x16x3_t __val)
29142 {
29143 __builtin_aarch64_simd_ci __o;
29144 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[0], 0);
29145 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[1], 1);
29146 __o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) __val.val[2], 2);
29147 __builtin_aarch64_st3v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
29148 }
29149
29150 __extension__ extern __inline void
29151 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29152 vst3q_u16 (uint16_t * __a, uint16x8x3_t __val)
29153 {
29154 __builtin_aarch64_simd_ci __o;
29155 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[0], 0);
29156 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[1], 1);
29157 __o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) __val.val[2], 2);
29158 __builtin_aarch64_st3v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
29159 }
29160
29161 __extension__ extern __inline void
29162 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29163 vst3q_u32 (uint32_t * __a, uint32x4x3_t __val)
29164 {
29165 __builtin_aarch64_simd_ci __o;
29166 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __val.val[0], 0);
29167 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __val.val[1], 1);
29168 __o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) __val.val[2], 2);
29169 __builtin_aarch64_st3v4si ((__builtin_aarch64_simd_si *) __a, __o);
29170 }
29171
29172 __extension__ extern __inline void
29173 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29174 vst3q_u64 (uint64_t * __a, uint64x2x3_t __val)
29175 {
29176 __builtin_aarch64_simd_ci __o;
29177 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __val.val[0], 0);
29178 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __val.val[1], 1);
29179 __o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) __val.val[2], 2);
29180 __builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o);
29181 }
29182
29183 __extension__ extern __inline void
29184 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29185 vst3q_f16 (float16_t * __a, float16x8x3_t __val)
29186 {
29187 __builtin_aarch64_simd_ci __o;
29188 __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __val.val[0], 0);
29189 __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __val.val[1], 1);
29190 __o = __builtin_aarch64_set_qregciv8hf (__o, (float16x8_t) __val.val[2], 2);
29191 __builtin_aarch64_st3v8hf ((__builtin_aarch64_simd_hf *) __a, __o);
29192 }
29193
29194 __extension__ extern __inline void
29195 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29196 vst3q_f32 (float32_t * __a, float32x4x3_t __val)
29197 {
29198 __builtin_aarch64_simd_ci __o;
29199 __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __val.val[0], 0);
29200 __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __val.val[1], 1);
29201 __o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) __val.val[2], 2);
29202 __builtin_aarch64_st3v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
29203 }
29204
29205 __extension__ extern __inline void
29206 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29207 vst3q_f64 (float64_t * __a, float64x2x3_t __val)
29208 {
29209 __builtin_aarch64_simd_ci __o;
29210 __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __val.val[0], 0);
29211 __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __val.val[1], 1);
29212 __o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) __val.val[2], 2);
29213 __builtin_aarch64_st3v2df ((__builtin_aarch64_simd_df *) __a, __o);
29214 }
29215
29216 __extension__ extern __inline void
29217 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29218 vst3q_p64 (poly64_t * __a, poly64x2x3_t __val)
29219 {
29220 __builtin_aarch64_simd_ci __o;
29221 __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
29222 (poly64x2_t) __val.val[0], 0);
29223 __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
29224 (poly64x2_t) __val.val[1], 1);
29225 __o = __builtin_aarch64_set_qregciv2di_ssps (__o,
29226 (poly64x2_t) __val.val[2], 2);
29227 __builtin_aarch64_st3v2di ((__builtin_aarch64_simd_di *) __a, __o);
29228 }
29229
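/* Note: the vst4 variants below use the same scheme with four vectors: the
   inputs are packed into a __builtin_aarch64_simd_xi tuple and stored with
   the st4 builtins, the 64-bit-vector forms again widening each input with
   a zeroed upper half first while the vst4q_* forms pack the 128-bit inputs
   directly.  A minimal usage sketch, illustrative only (variable names are
   hypothetical):

     uint8x16x4_t __v = { { vdupq_n_u8 (0), vdupq_n_u8 (1),
			    vdupq_n_u8 (2), vdupq_n_u8 (3) } };
     uint8_t __buf[64];
     vst4q_u8 (__buf, __v);

   after which __buf holds the repeating byte pattern 0,1,2,3.  */
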
29230 __extension__ extern __inline void
29231 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29232 vst4_s64 (int64_t * __a, int64x1x4_t __val)
29233 {
29234 __builtin_aarch64_simd_xi __o;
29235 int64x2x4_t __temp;
29236 __temp.val[0] = vcombine_s64 (__val.val[0], vcreate_s64 (__AARCH64_INT64_C (0)));
29237 __temp.val[1] = vcombine_s64 (__val.val[1], vcreate_s64 (__AARCH64_INT64_C (0)));
29238 __temp.val[2] = vcombine_s64 (__val.val[2], vcreate_s64 (__AARCH64_INT64_C (0)));
29239 __temp.val[3] = vcombine_s64 (__val.val[3], vcreate_s64 (__AARCH64_INT64_C (0)));
29240 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[0], 0);
29241 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[1], 1);
29242 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[2], 2);
29243 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[3], 3);
29244 __builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o);
29245 }
29246
29247 __extension__ extern __inline void
29248 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29249 vst4_u64 (uint64_t * __a, uint64x1x4_t __val)
29250 {
29251 __builtin_aarch64_simd_xi __o;
29252 uint64x2x4_t __temp;
29253 __temp.val[0] = vcombine_u64 (__val.val[0], vcreate_u64 (__AARCH64_UINT64_C (0)));
29254 __temp.val[1] = vcombine_u64 (__val.val[1], vcreate_u64 (__AARCH64_UINT64_C (0)));
29255 __temp.val[2] = vcombine_u64 (__val.val[2], vcreate_u64 (__AARCH64_UINT64_C (0)));
29256 __temp.val[3] = vcombine_u64 (__val.val[3], vcreate_u64 (__AARCH64_UINT64_C (0)));
29257 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[0], 0);
29258 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[1], 1);
29259 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[2], 2);
29260 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __temp.val[3], 3);
29261 __builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o);
29262 }
29263
29264 __extension__ extern __inline void
29265 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29266 vst4_f64 (float64_t * __a, float64x1x4_t __val)
29267 {
29268 __builtin_aarch64_simd_xi __o;
29269 float64x2x4_t __temp;
29270 __temp.val[0] = vcombine_f64 (__val.val[0], vcreate_f64 (__AARCH64_UINT64_C (0)));
29271 __temp.val[1] = vcombine_f64 (__val.val[1], vcreate_f64 (__AARCH64_UINT64_C (0)));
29272 __temp.val[2] = vcombine_f64 (__val.val[2], vcreate_f64 (__AARCH64_UINT64_C (0)));
29273 __temp.val[3] = vcombine_f64 (__val.val[3], vcreate_f64 (__AARCH64_UINT64_C (0)));
29274 __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) __temp.val[0], 0);
29275 __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) __temp.val[1], 1);
29276 __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) __temp.val[2], 2);
29277 __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) __temp.val[3], 3);
29278 __builtin_aarch64_st4df ((__builtin_aarch64_simd_df *) __a, __o);
29279 }
29280
29281 __extension__ extern __inline void
29282 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29283 vst4_s8 (int8_t * __a, int8x8x4_t __val)
29284 {
29285 __builtin_aarch64_simd_xi __o;
29286 int8x16x4_t __temp;
29287 __temp.val[0] = vcombine_s8 (__val.val[0], vcreate_s8 (__AARCH64_INT64_C (0)));
29288 __temp.val[1] = vcombine_s8 (__val.val[1], vcreate_s8 (__AARCH64_INT64_C (0)));
29289 __temp.val[2] = vcombine_s8 (__val.val[2], vcreate_s8 (__AARCH64_INT64_C (0)));
29290 __temp.val[3] = vcombine_s8 (__val.val[3], vcreate_s8 (__AARCH64_INT64_C (0)));
29291 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[0], 0);
29292 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[1], 1);
29293 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[2], 2);
29294 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[3], 3);
29295 __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
29296 }
29297
29298 __extension__ extern __inline void
29299 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29300 vst4_p8 (poly8_t * __a, poly8x8x4_t __val)
29301 {
29302 __builtin_aarch64_simd_xi __o;
29303 poly8x16x4_t __temp;
29304 __temp.val[0] = vcombine_p8 (__val.val[0], vcreate_p8 (__AARCH64_UINT64_C (0)));
29305 __temp.val[1] = vcombine_p8 (__val.val[1], vcreate_p8 (__AARCH64_UINT64_C (0)));
29306 __temp.val[2] = vcombine_p8 (__val.val[2], vcreate_p8 (__AARCH64_UINT64_C (0)));
29307 __temp.val[3] = vcombine_p8 (__val.val[3], vcreate_p8 (__AARCH64_UINT64_C (0)));
29308 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[0], 0);
29309 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[1], 1);
29310 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[2], 2);
29311 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[3], 3);
29312 __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
29313 }
29314
29315 __extension__ extern __inline void
29316 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29317 vst4_s16 (int16_t * __a, int16x4x4_t __val)
29318 {
29319 __builtin_aarch64_simd_xi __o;
29320 int16x8x4_t __temp;
29321 __temp.val[0] = vcombine_s16 (__val.val[0], vcreate_s16 (__AARCH64_INT64_C (0)));
29322 __temp.val[1] = vcombine_s16 (__val.val[1], vcreate_s16 (__AARCH64_INT64_C (0)));
29323 __temp.val[2] = vcombine_s16 (__val.val[2], vcreate_s16 (__AARCH64_INT64_C (0)));
29324 __temp.val[3] = vcombine_s16 (__val.val[3], vcreate_s16 (__AARCH64_INT64_C (0)));
29325 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[0], 0);
29326 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[1], 1);
29327 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[2], 2);
29328 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[3], 3);
29329 __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
29330 }
29331
29332 __extension__ extern __inline void
29333 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29334 vst4_p16 (poly16_t * __a, poly16x4x4_t __val)
29335 {
29336 __builtin_aarch64_simd_xi __o;
29337 poly16x8x4_t __temp;
29338 __temp.val[0] = vcombine_p16 (__val.val[0], vcreate_p16 (__AARCH64_UINT64_C (0)));
29339 __temp.val[1] = vcombine_p16 (__val.val[1], vcreate_p16 (__AARCH64_UINT64_C (0)));
29340 __temp.val[2] = vcombine_p16 (__val.val[2], vcreate_p16 (__AARCH64_UINT64_C (0)));
29341 __temp.val[3] = vcombine_p16 (__val.val[3], vcreate_p16 (__AARCH64_UINT64_C (0)));
29342 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[0], 0);
29343 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[1], 1);
29344 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[2], 2);
29345 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[3], 3);
29346 __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
29347 }
29348
29349 __extension__ extern __inline void
29350 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29351 vst4_s32 (int32_t * __a, int32x2x4_t __val)
29352 {
29353 __builtin_aarch64_simd_xi __o;
29354 int32x4x4_t __temp;
29355 __temp.val[0] = vcombine_s32 (__val.val[0], vcreate_s32 (__AARCH64_INT64_C (0)));
29356 __temp.val[1] = vcombine_s32 (__val.val[1], vcreate_s32 (__AARCH64_INT64_C (0)));
29357 __temp.val[2] = vcombine_s32 (__val.val[2], vcreate_s32 (__AARCH64_INT64_C (0)));
29358 __temp.val[3] = vcombine_s32 (__val.val[3], vcreate_s32 (__AARCH64_INT64_C (0)));
29359 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[0], 0);
29360 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[1], 1);
29361 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[2], 2);
29362 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[3], 3);
29363 __builtin_aarch64_st4v2si ((__builtin_aarch64_simd_si *) __a, __o);
29364 }
29365
29366 __extension__ extern __inline void
29367 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29368 vst4_u8 (uint8_t * __a, uint8x8x4_t __val)
29369 {
29370 __builtin_aarch64_simd_xi __o;
29371 uint8x16x4_t __temp;
29372 __temp.val[0] = vcombine_u8 (__val.val[0], vcreate_u8 (__AARCH64_UINT64_C (0)));
29373 __temp.val[1] = vcombine_u8 (__val.val[1], vcreate_u8 (__AARCH64_UINT64_C (0)));
29374 __temp.val[2] = vcombine_u8 (__val.val[2], vcreate_u8 (__AARCH64_UINT64_C (0)));
29375 __temp.val[3] = vcombine_u8 (__val.val[3], vcreate_u8 (__AARCH64_UINT64_C (0)));
29376 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[0], 0);
29377 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[1], 1);
29378 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[2], 2);
29379 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __temp.val[3], 3);
29380 __builtin_aarch64_st4v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
29381 }
29382
29383 __extension__ extern __inline void
29384 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29385 vst4_u16 (uint16_t * __a, uint16x4x4_t __val)
29386 {
29387 __builtin_aarch64_simd_xi __o;
29388 uint16x8x4_t __temp;
29389 __temp.val[0] = vcombine_u16 (__val.val[0], vcreate_u16 (__AARCH64_UINT64_C (0)));
29390 __temp.val[1] = vcombine_u16 (__val.val[1], vcreate_u16 (__AARCH64_UINT64_C (0)));
29391 __temp.val[2] = vcombine_u16 (__val.val[2], vcreate_u16 (__AARCH64_UINT64_C (0)));
29392 __temp.val[3] = vcombine_u16 (__val.val[3], vcreate_u16 (__AARCH64_UINT64_C (0)));
29393 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[0], 0);
29394 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[1], 1);
29395 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[2], 2);
29396 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __temp.val[3], 3);
29397 __builtin_aarch64_st4v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
29398 }
29399
29400 __extension__ extern __inline void
29401 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29402 vst4_u32 (uint32_t * __a, uint32x2x4_t __val)
29403 {
29404 __builtin_aarch64_simd_xi __o;
29405 uint32x4x4_t __temp;
29406 __temp.val[0] = vcombine_u32 (__val.val[0], vcreate_u32 (__AARCH64_UINT64_C (0)));
29407 __temp.val[1] = vcombine_u32 (__val.val[1], vcreate_u32 (__AARCH64_UINT64_C (0)));
29408 __temp.val[2] = vcombine_u32 (__val.val[2], vcreate_u32 (__AARCH64_UINT64_C (0)));
29409 __temp.val[3] = vcombine_u32 (__val.val[3], vcreate_u32 (__AARCH64_UINT64_C (0)));
29410 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[0], 0);
29411 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[1], 1);
29412 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[2], 2);
29413 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __temp.val[3], 3);
29414 __builtin_aarch64_st4v2si ((__builtin_aarch64_simd_si *) __a, __o);
29415 }
29416
29417 __extension__ extern __inline void
29418 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29419 vst4_f16 (float16_t * __a, float16x4x4_t __val)
29420 {
29421 __builtin_aarch64_simd_xi __o;
29422 float16x8x4_t __temp;
29423 __temp.val[0] = vcombine_f16 (__val.val[0], vcreate_f16 (__AARCH64_UINT64_C (0)));
29424 __temp.val[1] = vcombine_f16 (__val.val[1], vcreate_f16 (__AARCH64_UINT64_C (0)));
29425 __temp.val[2] = vcombine_f16 (__val.val[2], vcreate_f16 (__AARCH64_UINT64_C (0)));
29426 __temp.val[3] = vcombine_f16 (__val.val[3], vcreate_f16 (__AARCH64_UINT64_C (0)));
29427 __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __temp.val[0], 0);
29428 __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __temp.val[1], 1);
29429 __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __temp.val[2], 2);
29430 __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __temp.val[3], 3);
29431 __builtin_aarch64_st4v4hf ((__builtin_aarch64_simd_hf *) __a, __o);
29432 }
29433
29434 __extension__ extern __inline void
29435 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29436 vst4_f32 (float32_t * __a, float32x2x4_t __val)
29437 {
29438 __builtin_aarch64_simd_xi __o;
29439 float32x4x4_t __temp;
29440 __temp.val[0] = vcombine_f32 (__val.val[0], vcreate_f32 (__AARCH64_UINT64_C (0)));
29441 __temp.val[1] = vcombine_f32 (__val.val[1], vcreate_f32 (__AARCH64_UINT64_C (0)));
29442 __temp.val[2] = vcombine_f32 (__val.val[2], vcreate_f32 (__AARCH64_UINT64_C (0)));
29443 __temp.val[3] = vcombine_f32 (__val.val[3], vcreate_f32 (__AARCH64_UINT64_C (0)));
29444 __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __temp.val[0], 0);
29445 __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __temp.val[1], 1);
29446 __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __temp.val[2], 2);
29447 __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __temp.val[3], 3);
29448 __builtin_aarch64_st4v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
29449 }
29450
29451 __extension__ extern __inline void
29452 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29453 vst4_p64 (poly64_t * __a, poly64x1x4_t __val)
29454 {
29455 __builtin_aarch64_simd_xi __o;
29456 poly64x2x4_t __temp;
29457 __temp.val[0] = vcombine_p64 (__val.val[0], vcreate_p64 (__AARCH64_UINT64_C (0)));
29458 __temp.val[1] = vcombine_p64 (__val.val[1], vcreate_p64 (__AARCH64_UINT64_C (0)));
29459 __temp.val[2] = vcombine_p64 (__val.val[2], vcreate_p64 (__AARCH64_UINT64_C (0)));
29460 __temp.val[3] = vcombine_p64 (__val.val[3], vcreate_p64 (__AARCH64_UINT64_C (0)));
29461 __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
29462 (poly64x2_t) __temp.val[0], 0);
29463 __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
29464 (poly64x2_t) __temp.val[1], 1);
29465 __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
29466 (poly64x2_t) __temp.val[2], 2);
29467 __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
29468 (poly64x2_t) __temp.val[3], 3);
29469 __builtin_aarch64_st4di ((__builtin_aarch64_simd_di *) __a, __o);
29470 }
29471
29472 __extension__ extern __inline void
29473 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29474 vst4q_s8 (int8_t * __a, int8x16x4_t __val)
29475 {
29476 __builtin_aarch64_simd_xi __o;
29477 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __val.val[0], 0);
29478 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __val.val[1], 1);
29479 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __val.val[2], 2);
29480 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __val.val[3], 3);
29481 __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
29482 }
29483
29484 __extension__ extern __inline void
29485 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29486 vst4q_p8 (poly8_t * __a, poly8x16x4_t __val)
29487 {
29488 __builtin_aarch64_simd_xi __o;
29489 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __val.val[0], 0);
29490 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __val.val[1], 1);
29491 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __val.val[2], 2);
29492 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __val.val[3], 3);
29493 __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
29494 }
29495
29496 __extension__ extern __inline void
29497 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29498 vst4q_s16 (int16_t * __a, int16x8x4_t __val)
29499 {
29500 __builtin_aarch64_simd_xi __o;
29501 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __val.val[0], 0);
29502 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __val.val[1], 1);
29503 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __val.val[2], 2);
29504 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __val.val[3], 3);
29505 __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
29506 }
29507
29508 __extension__ extern __inline void
29509 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29510 vst4q_p16 (poly16_t * __a, poly16x8x4_t __val)
29511 {
29512 __builtin_aarch64_simd_xi __o;
29513 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __val.val[0], 0);
29514 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __val.val[1], 1);
29515 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __val.val[2], 2);
29516 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __val.val[3], 3);
29517 __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
29518 }
29519
29520 __extension__ extern __inline void
29521 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29522 vst4q_s32 (int32_t * __a, int32x4x4_t __val)
29523 {
29524 __builtin_aarch64_simd_xi __o;
29525 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __val.val[0], 0);
29526 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __val.val[1], 1);
29527 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __val.val[2], 2);
29528 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __val.val[3], 3);
29529 __builtin_aarch64_st4v4si ((__builtin_aarch64_simd_si *) __a, __o);
29530 }
29531
29532 __extension__ extern __inline void
29533 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29534 vst4q_s64 (int64_t * __a, int64x2x4_t __val)
29535 {
29536 __builtin_aarch64_simd_xi __o;
29537 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __val.val[0], 0);
29538 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __val.val[1], 1);
29539 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __val.val[2], 2);
29540 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __val.val[3], 3);
29541 __builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o);
29542 }
29543
29544 __extension__ extern __inline void
29545 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29546 vst4q_u8 (uint8_t * __a, uint8x16x4_t __val)
29547 {
29548 __builtin_aarch64_simd_xi __o;
29549 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __val.val[0], 0);
29550 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __val.val[1], 1);
29551 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __val.val[2], 2);
29552 __o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) __val.val[3], 3);
29553 __builtin_aarch64_st4v16qi ((__builtin_aarch64_simd_qi *) __a, __o);
29554 }
29555
29556 __extension__ extern __inline void
29557 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29558 vst4q_u16 (uint16_t * __a, uint16x8x4_t __val)
29559 {
29560 __builtin_aarch64_simd_xi __o;
29561 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __val.val[0], 0);
29562 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __val.val[1], 1);
29563 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __val.val[2], 2);
29564 __o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) __val.val[3], 3);
29565 __builtin_aarch64_st4v8hi ((__builtin_aarch64_simd_hi *) __a, __o);
29566 }
29567
29568 __extension__ extern __inline void
29569 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29570 vst4q_u32 (uint32_t * __a, uint32x4x4_t __val)
29571 {
29572 __builtin_aarch64_simd_xi __o;
29573 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __val.val[0], 0);
29574 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __val.val[1], 1);
29575 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __val.val[2], 2);
29576 __o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) __val.val[3], 3);
29577 __builtin_aarch64_st4v4si ((__builtin_aarch64_simd_si *) __a, __o);
29578 }
29579
29580 __extension__ extern __inline void
29581 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29582 vst4q_u64 (uint64_t * __a, uint64x2x4_t __val)
29583 {
29584 __builtin_aarch64_simd_xi __o;
29585 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __val.val[0], 0);
29586 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __val.val[1], 1);
29587 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __val.val[2], 2);
29588 __o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) __val.val[3], 3);
29589 __builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o);
29590 }
29591
29592 __extension__ extern __inline void
29593 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29594 vst4q_f16 (float16_t * __a, float16x8x4_t __val)
29595 {
29596 __builtin_aarch64_simd_xi __o;
29597 __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __val.val[0], 0);
29598 __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __val.val[1], 1);
29599 __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __val.val[2], 2);
29600 __o = __builtin_aarch64_set_qregxiv8hf (__o, (float16x8_t) __val.val[3], 3);
29601 __builtin_aarch64_st4v8hf ((__builtin_aarch64_simd_hf *) __a, __o);
29602 }
29603
29604 __extension__ extern __inline void
29605 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29606 vst4q_f32 (float32_t * __a, float32x4x4_t __val)
29607 {
29608 __builtin_aarch64_simd_xi __o;
29609 __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __val.val[0], 0);
29610 __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __val.val[1], 1);
29611 __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __val.val[2], 2);
29612 __o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) __val.val[3], 3);
29613 __builtin_aarch64_st4v4sf ((__builtin_aarch64_simd_sf *) __a, __o);
29614 }
29615
29616 __extension__ extern __inline void
29617 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29618 vst4q_f64 (float64_t * __a, float64x2x4_t __val)
29619 {
29620 __builtin_aarch64_simd_xi __o;
29621 __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) __val.val[0], 0);
29622 __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) __val.val[1], 1);
29623 __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) __val.val[2], 2);
29624 __o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) __val.val[3], 3);
29625 __builtin_aarch64_st4v2df ((__builtin_aarch64_simd_df *) __a, __o);
29626 }
29627
29628 __extension__ extern __inline void
29629 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29630 vst4q_p64 (poly64_t * __a, poly64x2x4_t __val)
29631 {
29632 __builtin_aarch64_simd_xi __o;
29633 __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
29634 (poly64x2_t) __val.val[0], 0);
29635 __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
29636 (poly64x2_t) __val.val[1], 1);
29637 __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
29638 (poly64x2_t) __val.val[2], 2);
29639 __o = __builtin_aarch64_set_qregxiv2di_ssps (__o,
29640 (poly64x2_t) __val.val[3], 3);
29641 __builtin_aarch64_st4v2di ((__builtin_aarch64_simd_di *) __a, __o);
29642 }
29643
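/* Note: vstrq_p128 below is a plain 128-bit store; the poly128_t value is
   written through the pointer by direct assignment rather than via a
   builtin.  */
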
29644 __extension__ extern __inline void
29645 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29646 vstrq_p128 (poly128_t * __ptr, poly128_t __val)
29647 {
29648 *__ptr = __val;
29649 }
29650
29651 /* vsub */
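/* Note: the scalar vsubd_s64/vsubd_u64 forms below are plain 64-bit
   subtractions with the usual modulo-2^64 wrap-around, e.g.
   vsubd_u64 (5, 7) yields 0xfffffffffffffffe.  */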
29652
29653 __extension__ extern __inline int64_t
29654 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29655 vsubd_s64 (int64_t __a, int64_t __b)
29656 {
29657 return __a - __b;
29658 }
29659
29660 __extension__ extern __inline uint64_t
29661 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29662 vsubd_u64 (uint64_t __a, uint64_t __b)
29663 {
29664 return __a - __b;
29665 }
29666
29667 /* vtbx1 */
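/* Note: AArch64 TBX indexes a 128-bit table register, so the single
   64-bit-table vtbx1 forms below are emulated: vtbl1 performs the lookup
   (returning 0 for out-of-range indices) and a vclt/vbsl pair then keeps
   the original destination lane wherever the index is >= 8.  Illustrative
   sketch only (the variable names are hypothetical):

     uint8x8_t __r = vtbx1_u8 (__dst, __tab, __idx);

   gives, per lane, __tab[__idx[i]] when __idx[i] < 8 and __dst[i]
   otherwise.  */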
29668
29669 __extension__ extern __inline int8x8_t
29670 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29671 vtbx1_s8 (int8x8_t __r, int8x8_t __tab, int8x8_t __idx)
29672 {
29673 uint8x8_t __mask = vclt_u8 (vreinterpret_u8_s8 (__idx),
29674 vmov_n_u8 (8));
29675 int8x8_t __tbl = vtbl1_s8 (__tab, __idx);
29676
29677 return vbsl_s8 (__mask, __tbl, __r);
29678 }
29679
29680 __extension__ extern __inline uint8x8_t
29681 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29682 vtbx1_u8 (uint8x8_t __r, uint8x8_t __tab, uint8x8_t __idx)
29683 {
29684 uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (8));
29685 uint8x8_t __tbl = vtbl1_u8 (__tab, __idx);
29686
29687 return vbsl_u8 (__mask, __tbl, __r);
29688 }
29689
29690 __extension__ extern __inline poly8x8_t
29691 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29692 vtbx1_p8 (poly8x8_t __r, poly8x8_t __tab, uint8x8_t __idx)
29693 {
29694 uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (8));
29695 poly8x8_t __tbl = vtbl1_p8 (__tab, __idx);
29696
29697 return vbsl_p8 (__mask, __tbl, __r);
29698 }
29699
29700 /* vtbx3 */
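/* Note: the vtbx3 forms below use the same emulation as vtbx1, looking up
   through vtbl3 and selecting against an index bound of 24 (three 8-byte
   tables).  */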
29701
29702 __extension__ extern __inline int8x8_t
29703 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29704 vtbx3_s8 (int8x8_t __r, int8x8x3_t __tab, int8x8_t __idx)
29705 {
29706 uint8x8_t __mask = vclt_u8 (vreinterpret_u8_s8 (__idx),
29707 vmov_n_u8 (24));
29708 int8x8_t __tbl = vtbl3_s8 (__tab, __idx);
29709
29710 return vbsl_s8 (__mask, __tbl, __r);
29711 }
29712
29713 __extension__ extern __inline uint8x8_t
29714 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29715 vtbx3_u8 (uint8x8_t __r, uint8x8x3_t __tab, uint8x8_t __idx)
29716 {
29717 uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (24));
29718 uint8x8_t __tbl = vtbl3_u8 (__tab, __idx);
29719
29720 return vbsl_u8 (__mask, __tbl, __r);
29721 }
29722
29723 __extension__ extern __inline poly8x8_t
29724 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29725 vtbx3_p8 (poly8x8_t __r, poly8x8x3_t __tab, uint8x8_t __idx)
29726 {
29727 uint8x8_t __mask = vclt_u8 (__idx, vmov_n_u8 (24));
29728 poly8x8_t __tbl = vtbl3_p8 (__tab, __idx);
29729
29730 return vbsl_p8 (__mask, __tbl, __r);
29731 }
29732
29733 /* vtbx4 */
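/* Note: a four-register table fills exactly two Q registers, so the vtbx4
   forms below combine the inputs into a __builtin_aarch64_simd_oi tuple and
   use the TBX builtin directly; no separate mask/select is needed because
   TBX itself leaves lanes with out-of-range indices (>= 32) unchanged.  */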
29734
29735 __extension__ extern __inline int8x8_t
29736 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29737 vtbx4_s8 (int8x8_t __r, int8x8x4_t __tab, int8x8_t __idx)
29738 {
29739 int8x8_t __result;
29740 int8x16x2_t __temp;
29741 __builtin_aarch64_simd_oi __o;
29742 __temp.val[0] = vcombine_s8 (__tab.val[0], __tab.val[1]);
29743 __temp.val[1] = vcombine_s8 (__tab.val[2], __tab.val[3]);
29744 __o = __builtin_aarch64_set_qregoiv16qi (__o,
29745 (int8x16_t) __temp.val[0], 0);
29746 __o = __builtin_aarch64_set_qregoiv16qi (__o,
29747 (int8x16_t) __temp.val[1], 1);
29748 __result = __builtin_aarch64_tbx4v8qi (__r, __o, __idx);
29749 return __result;
29750 }
29751
29752 __extension__ extern __inline uint8x8_t
29753 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29754 vtbx4_u8 (uint8x8_t __r, uint8x8x4_t __tab, uint8x8_t __idx)
29755 {
29756 uint8x8_t __result;
29757 uint8x16x2_t __temp;
29758 __builtin_aarch64_simd_oi __o;
29759 __temp.val[0] = vcombine_u8 (__tab.val[0], __tab.val[1]);
29760 __temp.val[1] = vcombine_u8 (__tab.val[2], __tab.val[3]);
29761 __o = __builtin_aarch64_set_qregoiv16qi (__o,
29762 (int8x16_t) __temp.val[0], 0);
29763 __o = __builtin_aarch64_set_qregoiv16qi (__o,
29764 (int8x16_t) __temp.val[1], 1);
29765 __result = (uint8x8_t)__builtin_aarch64_tbx4v8qi ((int8x8_t)__r, __o,
29766 (int8x8_t)__idx);
29767 return __result;
29768 }
29769
29770 __extension__ extern __inline poly8x8_t
29771 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29772 vtbx4_p8 (poly8x8_t __r, poly8x8x4_t __tab, uint8x8_t __idx)
29773 {
29774 poly8x8_t __result;
29775 poly8x16x2_t __temp;
29776 __builtin_aarch64_simd_oi __o;
29777 __temp.val[0] = vcombine_p8 (__tab.val[0], __tab.val[1]);
29778 __temp.val[1] = vcombine_p8 (__tab.val[2], __tab.val[3]);
29779 __o = __builtin_aarch64_set_qregoiv16qi (__o,
29780 (int8x16_t) __temp.val[0], 0);
29781 __o = __builtin_aarch64_set_qregoiv16qi (__o,
29782 (int8x16_t) __temp.val[1], 1);
29783 __result = (poly8x8_t)__builtin_aarch64_tbx4v8qi ((int8x8_t)__r, __o,
29784 (int8x8_t)__idx);
29785 return __result;
29786 }
29787
29788 /* vtrn */
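/* Note: vtrn1 picks the even-indexed lane pairs and vtrn2 the odd-indexed
   ones, both expressed through __builtin_shuffle; the alternative masks
   under __AARCH64EB__ compensate for the reversed element numbering GCC
   uses on big-endian targets, so both arms describe the same architectural
   TRN1/TRN2 operation.  Illustrative sketch only (variable names are
   hypothetical):

     int16x4_t __a = {0, 1, 2, 3}, __b = {4, 5, 6, 7};
     int16x4_t __lo = vtrn1_s16 (__a, __b);
     int16x4_t __hi = vtrn2_s16 (__a, __b);

   after which __lo holds lanes 0,4,2,6 and __hi holds lanes 1,5,3,7.  */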
29789
29790 __extension__ extern __inline float16x4_t
29791 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29792 vtrn1_f16 (float16x4_t __a, float16x4_t __b)
29793 {
29794 #ifdef __AARCH64EB__
29795 return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 1, 7, 3});
29796 #else
29797 return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 2, 6});
29798 #endif
29799 }
29800
29801 __extension__ extern __inline float32x2_t
29802 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29803 vtrn1_f32 (float32x2_t __a, float32x2_t __b)
29804 {
29805 #ifdef __AARCH64EB__
29806 return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
29807 #else
29808 return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
29809 #endif
29810 }
29811
29812 __extension__ extern __inline poly8x8_t
29813 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29814 vtrn1_p8 (poly8x8_t __a, poly8x8_t __b)
29815 {
29816 #ifdef __AARCH64EB__
29817 return __builtin_shuffle (__a, __b, (uint8x8_t) {9, 1, 11, 3, 13, 5, 15, 7});
29818 #else
29819 return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 8, 2, 10, 4, 12, 6, 14});
29820 #endif
29821 }
29822
29823 __extension__ extern __inline poly16x4_t
29824 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29825 vtrn1_p16 (poly16x4_t __a, poly16x4_t __b)
29826 {
29827 #ifdef __AARCH64EB__
29828 return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 1, 7, 3});
29829 #else
29830 return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 2, 6});
29831 #endif
29832 }
29833
29834 __extension__ extern __inline int8x8_t
29835 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29836 vtrn1_s8 (int8x8_t __a, int8x8_t __b)
29837 {
29838 #ifdef __AARCH64EB__
29839 return __builtin_shuffle (__a, __b, (uint8x8_t) {9, 1, 11, 3, 13, 5, 15, 7});
29840 #else
29841 return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 8, 2, 10, 4, 12, 6, 14});
29842 #endif
29843 }
29844
29845 __extension__ extern __inline int16x4_t
29846 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29847 vtrn1_s16 (int16x4_t __a, int16x4_t __b)
29848 {
29849 #ifdef __AARCH64EB__
29850 return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 1, 7, 3});
29851 #else
29852 return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 2, 6});
29853 #endif
29854 }
29855
29856 __extension__ extern __inline int32x2_t
29857 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29858 vtrn1_s32 (int32x2_t __a, int32x2_t __b)
29859 {
29860 #ifdef __AARCH64EB__
29861 return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
29862 #else
29863 return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
29864 #endif
29865 }
29866
29867 __extension__ extern __inline uint8x8_t
29868 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29869 vtrn1_u8 (uint8x8_t __a, uint8x8_t __b)
29870 {
29871 #ifdef __AARCH64EB__
29872 return __builtin_shuffle (__a, __b, (uint8x8_t) {9, 1, 11, 3, 13, 5, 15, 7});
29873 #else
29874 return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 8, 2, 10, 4, 12, 6, 14});
29875 #endif
29876 }
29877
29878 __extension__ extern __inline uint16x4_t
29879 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29880 vtrn1_u16 (uint16x4_t __a, uint16x4_t __b)
29881 {
29882 #ifdef __AARCH64EB__
29883 return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 1, 7, 3});
29884 #else
29885 return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 2, 6});
29886 #endif
29887 }
29888
29889 __extension__ extern __inline uint32x2_t
29890 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29891 vtrn1_u32 (uint32x2_t __a, uint32x2_t __b)
29892 {
29893 #ifdef __AARCH64EB__
29894 return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
29895 #else
29896 return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
29897 #endif
29898 }
29899
29900 __extension__ extern __inline float16x8_t
29901 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29902 vtrn1q_f16 (float16x8_t __a, float16x8_t __b)
29903 {
29904 #ifdef __AARCH64EB__
29905 return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 1, 11, 3, 13, 5, 15, 7});
29906 #else
29907 return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 2, 10, 4, 12, 6, 14});
29908 #endif
29909 }
29910
29911 __extension__ extern __inline float32x4_t
29912 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29913 vtrn1q_f32 (float32x4_t __a, float32x4_t __b)
29914 {
29915 #ifdef __AARCH64EB__
29916 return __builtin_shuffle (__a, __b, (uint32x4_t) {5, 1, 7, 3});
29917 #else
29918 return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 4, 2, 6});
29919 #endif
29920 }
29921
29922 __extension__ extern __inline float64x2_t
29923 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29924 vtrn1q_f64 (float64x2_t __a, float64x2_t __b)
29925 {
29926 #ifdef __AARCH64EB__
29927 return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
29928 #else
29929 return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
29930 #endif
29931 }
29932
29933 __extension__ extern __inline poly8x16_t
29934 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29935 vtrn1q_p8 (poly8x16_t __a, poly8x16_t __b)
29936 {
29937 #ifdef __AARCH64EB__
29938 return __builtin_shuffle (__a, __b,
29939 (uint8x16_t) {17, 1, 19, 3, 21, 5, 23, 7, 25, 9, 27, 11, 29, 13, 31, 15});
29940 #else
29941 return __builtin_shuffle (__a, __b,
29942 (uint8x16_t) {0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30});
29943 #endif
29944 }
29945
29946 __extension__ extern __inline poly16x8_t
29947 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29948 vtrn1q_p16 (poly16x8_t __a, poly16x8_t __b)
29949 {
29950 #ifdef __AARCH64EB__
29951 return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 1, 11, 3, 13, 5, 15, 7});
29952 #else
29953 return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 2, 10, 4, 12, 6, 14});
29954 #endif
29955 }
29956
29957 __extension__ extern __inline int8x16_t
29958 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29959 vtrn1q_s8 (int8x16_t __a, int8x16_t __b)
29960 {
29961 #ifdef __AARCH64EB__
29962 return __builtin_shuffle (__a, __b,
29963 (uint8x16_t) {17, 1, 19, 3, 21, 5, 23, 7, 25, 9, 27, 11, 29, 13, 31, 15});
29964 #else
29965 return __builtin_shuffle (__a, __b,
29966 (uint8x16_t) {0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30});
29967 #endif
29968 }
29969
29970 __extension__ extern __inline int16x8_t
29971 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29972 vtrn1q_s16 (int16x8_t __a, int16x8_t __b)
29973 {
29974 #ifdef __AARCH64EB__
29975 return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 1, 11, 3, 13, 5, 15, 7});
29976 #else
29977 return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 2, 10, 4, 12, 6, 14});
29978 #endif
29979 }
29980
29981 __extension__ extern __inline int32x4_t
29982 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29983 vtrn1q_s32 (int32x4_t __a, int32x4_t __b)
29984 {
29985 #ifdef __AARCH64EB__
29986 return __builtin_shuffle (__a, __b, (uint32x4_t) {5, 1, 7, 3});
29987 #else
29988 return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 4, 2, 6});
29989 #endif
29990 }
29991
29992 __extension__ extern __inline int64x2_t
29993 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
29994 vtrn1q_s64 (int64x2_t __a, int64x2_t __b)
29995 {
29996 #ifdef __AARCH64EB__
29997 return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
29998 #else
29999 return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
30000 #endif
30001 }
30002
30003 __extension__ extern __inline uint8x16_t
30004 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30005 vtrn1q_u8 (uint8x16_t __a, uint8x16_t __b)
30006 {
30007 #ifdef __AARCH64EB__
30008 return __builtin_shuffle (__a, __b,
30009 (uint8x16_t) {17, 1, 19, 3, 21, 5, 23, 7, 25, 9, 27, 11, 29, 13, 31, 15});
30010 #else
30011 return __builtin_shuffle (__a, __b,
30012 (uint8x16_t) {0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30});
30013 #endif
30014 }
30015
30016 __extension__ extern __inline uint16x8_t
30017 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30018 vtrn1q_u16 (uint16x8_t __a, uint16x8_t __b)
30019 {
30020 #ifdef __AARCH64EB__
30021 return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 1, 11, 3, 13, 5, 15, 7});
30022 #else
30023 return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 2, 10, 4, 12, 6, 14});
30024 #endif
30025 }
30026
30027 __extension__ extern __inline uint32x4_t
30028 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30029 vtrn1q_u32 (uint32x4_t __a, uint32x4_t __b)
30030 {
30031 #ifdef __AARCH64EB__
30032 return __builtin_shuffle (__a, __b, (uint32x4_t) {5, 1, 7, 3});
30033 #else
30034 return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 4, 2, 6});
30035 #endif
30036 }
30037
30038 __extension__ extern __inline poly64x2_t
30039 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30040 vtrn1q_p64 (poly64x2_t __a, poly64x2_t __b)
30041 {
30042 #ifdef __AARCH64EB__
30043 return __builtin_shuffle (__a, __b, (poly64x2_t) {3, 1});
30044 #else
30045 return __builtin_shuffle (__a, __b, (poly64x2_t) {0, 2});
30046 #endif
30047 }
30048
30049 __extension__ extern __inline uint64x2_t
30050 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30051 vtrn1q_u64 (uint64x2_t __a, uint64x2_t __b)
30052 {
30053 #ifdef __AARCH64EB__
30054 return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
30055 #else
30056 return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
30057 #endif
30058 }
30059
30060 __extension__ extern __inline float16x4_t
30061 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30062 vtrn2_f16 (float16x4_t __a, float16x4_t __b)
30063 {
30064 #ifdef __AARCH64EB__
30065 return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 0, 6, 2});
30066 #else
30067 return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 5, 3, 7});
30068 #endif
30069 }
30070
30071 __extension__ extern __inline float32x2_t
30072 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30073 vtrn2_f32 (float32x2_t __a, float32x2_t __b)
30074 {
30075 #ifdef __AARCH64EB__
30076 return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
30077 #else
30078 return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
30079 #endif
30080 }
30081
30082 __extension__ extern __inline poly8x8_t
30083 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30084 vtrn2_p8 (poly8x8_t __a, poly8x8_t __b)
30085 {
30086 #ifdef __AARCH64EB__
30087 return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 0, 10, 2, 12, 4, 14, 6});
30088 #else
30089 return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 9, 3, 11, 5, 13, 7, 15});
30090 #endif
30091 }
30092
30093 __extension__ extern __inline poly16x4_t
30094 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30095 vtrn2_p16 (poly16x4_t __a, poly16x4_t __b)
30096 {
30097 #ifdef __AARCH64EB__
30098 return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 0, 6, 2});
30099 #else
30100 return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 5, 3, 7});
30101 #endif
30102 }
30103
30104 __extension__ extern __inline int8x8_t
30105 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30106 vtrn2_s8 (int8x8_t __a, int8x8_t __b)
30107 {
30108 #ifdef __AARCH64EB__
30109 return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 0, 10, 2, 12, 4, 14, 6});
30110 #else
30111 return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 9, 3, 11, 5, 13, 7, 15});
30112 #endif
30113 }
30114
30115 __extension__ extern __inline int16x4_t
30116 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30117 vtrn2_s16 (int16x4_t __a, int16x4_t __b)
30118 {
30119 #ifdef __AARCH64EB__
30120 return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 0, 6, 2});
30121 #else
30122 return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 5, 3, 7});
30123 #endif
30124 }
30125
30126 __extension__ extern __inline int32x2_t
30127 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30128 vtrn2_s32 (int32x2_t __a, int32x2_t __b)
30129 {
30130 #ifdef __AARCH64EB__
30131 return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
30132 #else
30133 return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
30134 #endif
30135 }
30136
30137 __extension__ extern __inline uint8x8_t
30138 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30139 vtrn2_u8 (uint8x8_t __a, uint8x8_t __b)
30140 {
30141 #ifdef __AARCH64EB__
30142 return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 0, 10, 2, 12, 4, 14, 6});
30143 #else
30144 return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 9, 3, 11, 5, 13, 7, 15});
30145 #endif
30146 }
30147
30148 __extension__ extern __inline uint16x4_t
30149 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30150 vtrn2_u16 (uint16x4_t __a, uint16x4_t __b)
30151 {
30152 #ifdef __AARCH64EB__
30153 return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 0, 6, 2});
30154 #else
30155 return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 5, 3, 7});
30156 #endif
30157 }
30158
30159 __extension__ extern __inline uint32x2_t
30160 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30161 vtrn2_u32 (uint32x2_t __a, uint32x2_t __b)
30162 {
30163 #ifdef __AARCH64EB__
30164 return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
30165 #else
30166 return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
30167 #endif
30168 }
30169
30170 __extension__ extern __inline float16x8_t
30171 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30172 vtrn2q_f16 (float16x8_t __a, float16x8_t __b)
30173 {
30174 #ifdef __AARCH64EB__
30175 return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 0, 10, 2, 12, 4, 14, 6});
30176 #else
30177 return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 9, 3, 11, 5, 13, 7, 15});
30178 #endif
30179 }
30180
30181 __extension__ extern __inline float32x4_t
30182 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30183 vtrn2q_f32 (float32x4_t __a, float32x4_t __b)
30184 {
30185 #ifdef __AARCH64EB__
30186 return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 0, 6, 2});
30187 #else
30188 return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 5, 3, 7});
30189 #endif
30190 }
30191
30192 __extension__ extern __inline float64x2_t
30193 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30194 vtrn2q_f64 (float64x2_t __a, float64x2_t __b)
30195 {
30196 #ifdef __AARCH64EB__
30197 return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
30198 #else
30199 return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
30200 #endif
30201 }
30202
30203 __extension__ extern __inline poly8x16_t
30204 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30205 vtrn2q_p8 (poly8x16_t __a, poly8x16_t __b)
30206 {
30207 #ifdef __AARCH64EB__
30208 return __builtin_shuffle (__a, __b,
30209 (uint8x16_t) {16, 0, 18, 2, 20, 4, 22, 6, 24, 8, 26, 10, 28, 12, 30, 14});
30210 #else
30211 return __builtin_shuffle (__a, __b,
30212 (uint8x16_t) {1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31});
30213 #endif
30214 }
30215
30216 __extension__ extern __inline poly16x8_t
30217 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30218 vtrn2q_p16 (poly16x8_t __a, poly16x8_t __b)
30219 {
30220 #ifdef __AARCH64EB__
30221 return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 0, 10, 2, 12, 4, 14, 6});
30222 #else
30223 return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 9, 3, 11, 5, 13, 7, 15});
30224 #endif
30225 }
30226
30227 __extension__ extern __inline int8x16_t
30228 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30229 vtrn2q_s8 (int8x16_t __a, int8x16_t __b)
30230 {
30231 #ifdef __AARCH64EB__
30232 return __builtin_shuffle (__a, __b,
30233 (uint8x16_t) {16, 0, 18, 2, 20, 4, 22, 6, 24, 8, 26, 10, 28, 12, 30, 14});
30234 #else
30235 return __builtin_shuffle (__a, __b,
30236 (uint8x16_t) {1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31});
30237 #endif
30238 }
30239
30240 __extension__ extern __inline int16x8_t
30241 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30242 vtrn2q_s16 (int16x8_t __a, int16x8_t __b)
30243 {
30244 #ifdef __AARCH64EB__
30245 return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 0, 10, 2, 12, 4, 14, 6});
30246 #else
30247 return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 9, 3, 11, 5, 13, 7, 15});
30248 #endif
30249 }
30250
30251 __extension__ extern __inline int32x4_t
30252 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30253 vtrn2q_s32 (int32x4_t __a, int32x4_t __b)
30254 {
30255 #ifdef __AARCH64EB__
30256 return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 0, 6, 2});
30257 #else
30258 return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 5, 3, 7});
30259 #endif
30260 }
30261
30262 __extension__ extern __inline int64x2_t
30263 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30264 vtrn2q_s64 (int64x2_t __a, int64x2_t __b)
30265 {
30266 #ifdef __AARCH64EB__
30267 return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
30268 #else
30269 return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
30270 #endif
30271 }
30272
30273 __extension__ extern __inline uint8x16_t
30274 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30275 vtrn2q_u8 (uint8x16_t __a, uint8x16_t __b)
30276 {
30277 #ifdef __AARCH64EB__
30278 return __builtin_shuffle (__a, __b,
30279 (uint8x16_t) {16, 0, 18, 2, 20, 4, 22, 6, 24, 8, 26, 10, 28, 12, 30, 14});
30280 #else
30281 return __builtin_shuffle (__a, __b,
30282 (uint8x16_t) {1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31});
30283 #endif
30284 }
30285
30286 __extension__ extern __inline uint16x8_t
30287 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30288 vtrn2q_u16 (uint16x8_t __a, uint16x8_t __b)
30289 {
30290 #ifdef __AARCH64EB__
30291 return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 0, 10, 2, 12, 4, 14, 6});
30292 #else
30293 return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 9, 3, 11, 5, 13, 7, 15});
30294 #endif
30295 }
30296
30297 __extension__ extern __inline uint32x4_t
30298 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30299 vtrn2q_u32 (uint32x4_t __a, uint32x4_t __b)
30300 {
30301 #ifdef __AARCH64EB__
30302 return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 0, 6, 2});
30303 #else
30304 return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 5, 3, 7});
30305 #endif
30306 }
30307
30308 __extension__ extern __inline uint64x2_t
30309 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30310 vtrn2q_u64 (uint64x2_t __a, uint64x2_t __b)
30311 {
30312 #ifdef __AARCH64EB__
30313 return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
30314 #else
30315 return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
30316 #endif
30317 }
30318
30320 __extension__ extern __inline poly64x2_t
30321 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30322 vtrn2q_p64 (poly64x2_t __a, poly64x2_t __b)
30323 {
30324 #ifdef __AARCH64EB__
30325 return __builtin_shuffle (__a, __b, (poly64x2_t) {2, 0});
30326 #else
30327 return __builtin_shuffle (__a, __b, (poly64x2_t) {1, 3});
30328 #endif
30329 }
30330
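/* The combined vtrn intrinsics below simply pair the vtrn1/vtrn2 halves
   defined above into a two-vector struct; for example, vtrn_s8 returns
   {vtrn1_s8 (a, b), vtrn2_s8 (a, b)}.  */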
30331 __extension__ extern __inline float16x4x2_t
30332 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30333 vtrn_f16 (float16x4_t __a, float16x4_t __b)
30334 {
30335 return (float16x4x2_t) {vtrn1_f16 (__a, __b), vtrn2_f16 (__a, __b)};
30336 }
30337
30338 __extension__ extern __inline float32x2x2_t
30339 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30340 vtrn_f32 (float32x2_t __a, float32x2_t __b)
30341 {
30342 return (float32x2x2_t) {vtrn1_f32 (__a, __b), vtrn2_f32 (__a, __b)};
30343 }
30344
30345 __extension__ extern __inline poly8x8x2_t
30346 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30347 vtrn_p8 (poly8x8_t __a, poly8x8_t __b)
30348 {
30349 return (poly8x8x2_t) {vtrn1_p8 (__a, __b), vtrn2_p8 (__a, __b)};
30350 }
30351
30352 __extension__ extern __inline poly16x4x2_t
30353 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30354 vtrn_p16 (poly16x4_t __a, poly16x4_t __b)
30355 {
30356 return (poly16x4x2_t) {vtrn1_p16 (__a, __b), vtrn2_p16 (__a, __b)};
30357 }
30358
30359 __extension__ extern __inline int8x8x2_t
30360 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30361 vtrn_s8 (int8x8_t __a, int8x8_t __b)
30362 {
30363 return (int8x8x2_t) {vtrn1_s8 (__a, __b), vtrn2_s8 (__a, __b)};
30364 }
30365
30366 __extension__ extern __inline int16x4x2_t
30367 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30368 vtrn_s16 (int16x4_t __a, int16x4_t __b)
30369 {
30370 return (int16x4x2_t) {vtrn1_s16 (__a, __b), vtrn2_s16 (__a, __b)};
30371 }
30372
30373 __extension__ extern __inline int32x2x2_t
30374 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30375 vtrn_s32 (int32x2_t __a, int32x2_t __b)
30376 {
30377 return (int32x2x2_t) {vtrn1_s32 (__a, __b), vtrn2_s32 (__a, __b)};
30378 }
30379
30380 __extension__ extern __inline uint8x8x2_t
30381 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30382 vtrn_u8 (uint8x8_t __a, uint8x8_t __b)
30383 {
30384 return (uint8x8x2_t) {vtrn1_u8 (__a, __b), vtrn2_u8 (__a, __b)};
30385 }
30386
30387 __extension__ extern __inline uint16x4x2_t
30388 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30389 vtrn_u16 (uint16x4_t __a, uint16x4_t __b)
30390 {
30391 return (uint16x4x2_t) {vtrn1_u16 (__a, __b), vtrn2_u16 (__a, __b)};
30392 }
30393
30394 __extension__ extern __inline uint32x2x2_t
30395 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30396 vtrn_u32 (uint32x2_t __a, uint32x2_t __b)
30397 {
30398 return (uint32x2x2_t) {vtrn1_u32 (__a, __b), vtrn2_u32 (__a, __b)};
30399 }
30400
30401 __extension__ extern __inline float16x8x2_t
30402 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30403 vtrnq_f16 (float16x8_t __a, float16x8_t __b)
30404 {
30405 return (float16x8x2_t) {vtrn1q_f16 (__a, __b), vtrn2q_f16 (__a, __b)};
30406 }
30407
30408 __extension__ extern __inline float32x4x2_t
30409 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30410 vtrnq_f32 (float32x4_t __a, float32x4_t __b)
30411 {
30412 return (float32x4x2_t) {vtrn1q_f32 (__a, __b), vtrn2q_f32 (__a, __b)};
30413 }
30414
30415 __extension__ extern __inline poly8x16x2_t
30416 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30417 vtrnq_p8 (poly8x16_t __a, poly8x16_t __b)
30418 {
30419 return (poly8x16x2_t) {vtrn1q_p8 (__a, __b), vtrn2q_p8 (__a, __b)};
30420 }
30421
30422 __extension__ extern __inline poly16x8x2_t
30423 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30424 vtrnq_p16 (poly16x8_t __a, poly16x8_t __b)
30425 {
30426 return (poly16x8x2_t) {vtrn1q_p16 (__a, __b), vtrn2q_p16 (__a, __b)};
30427 }
30428
30429 __extension__ extern __inline int8x16x2_t
30430 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30431 vtrnq_s8 (int8x16_t __a, int8x16_t __b)
30432 {
30433 return (int8x16x2_t) {vtrn1q_s8 (__a, __b), vtrn2q_s8 (__a, __b)};
30434 }
30435
30436 __extension__ extern __inline int16x8x2_t
30437 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30438 vtrnq_s16 (int16x8_t __a, int16x8_t __b)
30439 {
30440 return (int16x8x2_t) {vtrn1q_s16 (__a, __b), vtrn2q_s16 (__a, __b)};
30441 }
30442
30443 __extension__ extern __inline int32x4x2_t
30444 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30445 vtrnq_s32 (int32x4_t __a, int32x4_t __b)
30446 {
30447 return (int32x4x2_t) {vtrn1q_s32 (__a, __b), vtrn2q_s32 (__a, __b)};
30448 }
30449
30450 __extension__ extern __inline uint8x16x2_t
30451 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30452 vtrnq_u8 (uint8x16_t __a, uint8x16_t __b)
30453 {
30454 return (uint8x16x2_t) {vtrn1q_u8 (__a, __b), vtrn2q_u8 (__a, __b)};
30455 }
30456
30457 __extension__ extern __inline uint16x8x2_t
30458 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30459 vtrnq_u16 (uint16x8_t __a, uint16x8_t __b)
30460 {
30461 return (uint16x8x2_t) {vtrn1q_u16 (__a, __b), vtrn2q_u16 (__a, __b)};
30462 }
30463
30464 __extension__ extern __inline uint32x4x2_t
30465 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30466 vtrnq_u32 (uint32x4_t __a, uint32x4_t __b)
30467 {
30468 return (uint32x4x2_t) {vtrn1q_u32 (__a, __b), vtrn2q_u32 (__a, __b)};
30469 }
30470
30471 /* vtst */
30472
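/* The vtst intrinsics perform a bitwise test: each lane of the result is
   all ones when the corresponding lanes of __a & __b share at least one set
   bit, and zero otherwise.  Illustrative usage (not part of this header):

     uint8x8_t mask = vtst_u8 (vdup_n_u8 (0x0f), vdup_n_u8 (0x10));
     ... every lane of mask is 0, because 0x0f & 0x10 == 0.  */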
30473 __extension__ extern __inline uint8x8_t
30474 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30475 vtst_s8 (int8x8_t __a, int8x8_t __b)
30476 {
30477 return (uint8x8_t) ((__a & __b) != 0);
30478 }
30479
30480 __extension__ extern __inline uint16x4_t
30481 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30482 vtst_s16 (int16x4_t __a, int16x4_t __b)
30483 {
30484 return (uint16x4_t) ((__a & __b) != 0);
30485 }
30486
30487 __extension__ extern __inline uint32x2_t
30488 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30489 vtst_s32 (int32x2_t __a, int32x2_t __b)
30490 {
30491 return (uint32x2_t) ((__a & __b) != 0);
30492 }
30493
30494 __extension__ extern __inline uint64x1_t
30495 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30496 vtst_s64 (int64x1_t __a, int64x1_t __b)
30497 {
30498 return (uint64x1_t) ((__a & __b) != __AARCH64_INT64_C (0));
30499 }
30500
30501 __extension__ extern __inline uint8x8_t
30502 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30503 vtst_u8 (uint8x8_t __a, uint8x8_t __b)
30504 {
30505 return ((__a & __b) != 0);
30506 }
30507
30508 __extension__ extern __inline uint16x4_t
30509 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30510 vtst_u16 (uint16x4_t __a, uint16x4_t __b)
30511 {
30512 return ((__a & __b) != 0);
30513 }
30514
30515 __extension__ extern __inline uint32x2_t
30516 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30517 vtst_u32 (uint32x2_t __a, uint32x2_t __b)
30518 {
30519 return ((__a & __b) != 0);
30520 }
30521
30522 __extension__ extern __inline uint64x1_t
30523 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30524 vtst_u64 (uint64x1_t __a, uint64x1_t __b)
30525 {
30526 return ((__a & __b) != __AARCH64_UINT64_C (0));
30527 }
30528
30529 __extension__ extern __inline uint8x16_t
30530 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30531 vtstq_s8 (int8x16_t __a, int8x16_t __b)
30532 {
30533 return (uint8x16_t) ((__a & __b) != 0);
30534 }
30535
30536 __extension__ extern __inline uint16x8_t
30537 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30538 vtstq_s16 (int16x8_t __a, int16x8_t __b)
30539 {
30540 return (uint16x8_t) ((__a & __b) != 0);
30541 }
30542
30543 __extension__ extern __inline uint32x4_t
30544 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30545 vtstq_s32 (int32x4_t __a, int32x4_t __b)
30546 {
30547 return (uint32x4_t) ((__a & __b) != 0);
30548 }
30549
30550 __extension__ extern __inline uint64x2_t
30551 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30552 vtstq_s64 (int64x2_t __a, int64x2_t __b)
30553 {
30554 return (uint64x2_t) ((__a & __b) != __AARCH64_INT64_C (0));
30555 }
30556
30557 __extension__ extern __inline uint8x16_t
30558 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30559 vtstq_u8 (uint8x16_t __a, uint8x16_t __b)
30560 {
30561 return ((__a & __b) != 0);
30562 }
30563
30564 __extension__ extern __inline uint16x8_t
30565 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30566 vtstq_u16 (uint16x8_t __a, uint16x8_t __b)
30567 {
30568 return ((__a & __b) != 0);
30569 }
30570
30571 __extension__ extern __inline uint32x4_t
30572 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30573 vtstq_u32 (uint32x4_t __a, uint32x4_t __b)
30574 {
30575 return ((__a & __b) != 0);
30576 }
30577
30578 __extension__ extern __inline uint64x2_t
30579 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30580 vtstq_u64 (uint64x2_t __a, uint64x2_t __b)
30581 {
30582 return ((__a & __b) != __AARCH64_UINT64_C (0));
30583 }
30584
30585 __extension__ extern __inline uint64_t
30586 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30587 vtstd_s64 (int64_t __a, int64_t __b)
30588 {
30589 return (__a & __b) ? -1ll : 0ll;
30590 }
30591
30592 __extension__ extern __inline uint64_t
30593 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30594 vtstd_u64 (uint64_t __a, uint64_t __b)
30595 {
30596 return (__a & __b) ? -1ll : 0ll;
30597 }
30598
30599 /* vuqadd */
30600
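/* The vuqadd intrinsics perform a signed saturating accumulate of an
   unsigned value (the SUQADD instruction): the unsigned operand __b is
   added to the signed operand __a and the result saturates to the signed
   range of the element type.  For example (illustrative only),
   vuqaddb_s8 (127, 1) yields 127 rather than wrapping to -128.  */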
30601 __extension__ extern __inline int8x8_t
30602 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30603 vuqadd_s8 (int8x8_t __a, uint8x8_t __b)
30604 {
30605 return __builtin_aarch64_suqaddv8qi_ssu (__a, __b);
30606 }
30607
30608 __extension__ extern __inline int16x4_t
30609 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30610 vuqadd_s16 (int16x4_t __a, uint16x4_t __b)
30611 {
30612 return __builtin_aarch64_suqaddv4hi_ssu (__a, __b);
30613 }
30614
30615 __extension__ extern __inline int32x2_t
30616 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30617 vuqadd_s32 (int32x2_t __a, uint32x2_t __b)
30618 {
30619 return __builtin_aarch64_suqaddv2si_ssu (__a, __b);
30620 }
30621
30622 __extension__ extern __inline int64x1_t
30623 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30624 vuqadd_s64 (int64x1_t __a, uint64x1_t __b)
30625 {
30626 return (int64x1_t) {__builtin_aarch64_suqadddi_ssu (__a[0], __b[0])};
30627 }
30628
30629 __extension__ extern __inline int8x16_t
30630 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30631 vuqaddq_s8 (int8x16_t __a, uint8x16_t __b)
30632 {
30633 return __builtin_aarch64_suqaddv16qi_ssu (__a, __b);
30634 }
30635
30636 __extension__ extern __inline int16x8_t
30637 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30638 vuqaddq_s16 (int16x8_t __a, uint16x8_t __b)
30639 {
30640 return __builtin_aarch64_suqaddv8hi_ssu (__a, __b);
30641 }
30642
30643 __extension__ extern __inline int32x4_t
30644 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30645 vuqaddq_s32 (int32x4_t __a, uint32x4_t __b)
30646 {
30647 return __builtin_aarch64_suqaddv4si_ssu (__a, __b);
30648 }
30649
30650 __extension__ extern __inline int64x2_t
30651 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30652 vuqaddq_s64 (int64x2_t __a, uint64x2_t __b)
30653 {
30654 return __builtin_aarch64_suqaddv2di_ssu (__a, __b);
30655 }
30656
30657 __extension__ extern __inline int8_t
30658 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30659 vuqaddb_s8 (int8_t __a, uint8_t __b)
30660 {
30661 return __builtin_aarch64_suqaddqi_ssu (__a, __b);
30662 }
30663
30664 __extension__ extern __inline int16_t
30665 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30666 vuqaddh_s16 (int16_t __a, uint16_t __b)
30667 {
30668 return __builtin_aarch64_suqaddhi_ssu (__a, __b);
30669 }
30670
30671 __extension__ extern __inline int32_t
30672 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30673 vuqadds_s32 (int32_t __a, uint32_t __b)
30674 {
30675 return __builtin_aarch64_suqaddsi_ssu (__a, __b);
30676 }
30677
30678 __extension__ extern __inline int64_t
30679 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30680 vuqaddd_s64 (int64_t __a, uint64_t __b)
30681 {
30682 return __builtin_aarch64_suqadddi_ssu (__a, __b);
30683 }
30684
30685 #define __DEFINTERLEAVE(op, rettype, intype, funcsuffix, Q) \
30686 __extension__ extern __inline rettype \
30687 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) \
30688 v ## op ## Q ## _ ## funcsuffix (intype a, intype b) \
30689 { \
30690 return (rettype) {v ## op ## 1 ## Q ## _ ## funcsuffix (a, b), \
30691 v ## op ## 2 ## Q ## _ ## funcsuffix (a, b)}; \
30692 }
30693
30694 #define __INTERLEAVE_LIST(op) \
30695 __DEFINTERLEAVE (op, float16x4x2_t, float16x4_t, f16,) \
30696 __DEFINTERLEAVE (op, float32x2x2_t, float32x2_t, f32,) \
30697 __DEFINTERLEAVE (op, poly8x8x2_t, poly8x8_t, p8,) \
30698 __DEFINTERLEAVE (op, poly16x4x2_t, poly16x4_t, p16,) \
30699 __DEFINTERLEAVE (op, int8x8x2_t, int8x8_t, s8,) \
30700 __DEFINTERLEAVE (op, int16x4x2_t, int16x4_t, s16,) \
30701 __DEFINTERLEAVE (op, int32x2x2_t, int32x2_t, s32,) \
30702 __DEFINTERLEAVE (op, uint8x8x2_t, uint8x8_t, u8,) \
30703 __DEFINTERLEAVE (op, uint16x4x2_t, uint16x4_t, u16,) \
30704 __DEFINTERLEAVE (op, uint32x2x2_t, uint32x2_t, u32,) \
30705 __DEFINTERLEAVE (op, float16x8x2_t, float16x8_t, f16, q) \
30706 __DEFINTERLEAVE (op, float32x4x2_t, float32x4_t, f32, q) \
30707 __DEFINTERLEAVE (op, poly8x16x2_t, poly8x16_t, p8, q) \
30708 __DEFINTERLEAVE (op, poly16x8x2_t, poly16x8_t, p16, q) \
30709 __DEFINTERLEAVE (op, int8x16x2_t, int8x16_t, s8, q) \
30710 __DEFINTERLEAVE (op, int16x8x2_t, int16x8_t, s16, q) \
30711 __DEFINTERLEAVE (op, int32x4x2_t, int32x4_t, s32, q) \
30712 __DEFINTERLEAVE (op, uint8x16x2_t, uint8x16_t, u8, q) \
30713 __DEFINTERLEAVE (op, uint16x8x2_t, uint16x8_t, u16, q) \
30714 __DEFINTERLEAVE (op, uint32x4x2_t, uint32x4_t, u32, q)
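/* __DEFINTERLEAVE builds the full two-vector interleave intrinsics from
   their vOP1/vOP2 halves, and __INTERLEAVE_LIST instantiates it for every
   element type in both the 64-bit and 128-bit ("q") forms.  For example,
   __DEFINTERLEAVE (zip, int8x8x2_t, int8x8_t, s8,) expands to roughly:

     __extension__ extern __inline int8x8x2_t
     __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
     vzip_s8 (int8x8_t a, int8x8_t b)
     {
       return (int8x8x2_t) {vzip1_s8 (a, b), vzip2_s8 (a, b)};
     }  */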
30715
30716 /* vuzp */
30717
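/* The vuzp1/vuzp2 intrinsics de-interleave: treating __a followed by __b as
   one long vector, vuzp1 gathers the even-indexed elements and vuzp2 the
   odd-indexed ones.  The __AARCH64EB__ variants use different shuffle masks
   only because __builtin_shuffle lane indices follow GCC's reversed lane
   numbering on big-endian targets; both forms select the same architectural
   elements.  Illustrative usage:

     int16x4_t lo = vuzp1_s16 ((int16x4_t) {0, 1, 2, 3},
			       (int16x4_t) {4, 5, 6, 7});
     ... lo is {0, 2, 4, 6}.  */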
30718 __extension__ extern __inline float16x4_t
30719 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30720 vuzp1_f16 (float16x4_t __a, float16x4_t __b)
30721 {
30722 #ifdef __AARCH64EB__
30723 return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 7, 1, 3});
30724 #else
30725 return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6});
30726 #endif
30727 }
30728
30729 __extension__ extern __inline float32x2_t
30730 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30731 vuzp1_f32 (float32x2_t __a, float32x2_t __b)
30732 {
30733 #ifdef __AARCH64EB__
30734 return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
30735 #else
30736 return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
30737 #endif
30738 }
30739
30740 __extension__ extern __inline poly8x8_t
30741 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30742 vuzp1_p8 (poly8x8_t __a, poly8x8_t __b)
30743 {
30744 #ifdef __AARCH64EB__
30745 return __builtin_shuffle (__a, __b, (uint8x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
30746 #else
30747 return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
30748 #endif
30749 }
30750
30751 __extension__ extern __inline poly16x4_t
30752 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30753 vuzp1_p16 (poly16x4_t __a, poly16x4_t __b)
30754 {
30755 #ifdef __AARCH64EB__
30756 return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 7, 1, 3});
30757 #else
30758 return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6});
30759 #endif
30760 }
30761
30762 __extension__ extern __inline int8x8_t
30763 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30764 vuzp1_s8 (int8x8_t __a, int8x8_t __b)
30765 {
30766 #ifdef __AARCH64EB__
30767 return __builtin_shuffle (__a, __b, (uint8x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
30768 #else
30769 return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
30770 #endif
30771 }
30772
30773 __extension__ extern __inline int16x4_t
30774 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30775 vuzp1_s16 (int16x4_t __a, int16x4_t __b)
30776 {
30777 #ifdef __AARCH64EB__
30778 return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 7, 1, 3});
30779 #else
30780 return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6});
30781 #endif
30782 }
30783
30784 __extension__ extern __inline int32x2_t
30785 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30786 vuzp1_s32 (int32x2_t __a, int32x2_t __b)
30787 {
30788 #ifdef __AARCH64EB__
30789 return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
30790 #else
30791 return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
30792 #endif
30793 }
30794
30795 __extension__ extern __inline uint8x8_t
30796 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30797 vuzp1_u8 (uint8x8_t __a, uint8x8_t __b)
30798 {
30799 #ifdef __AARCH64EB__
30800 return __builtin_shuffle (__a, __b, (uint8x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
30801 #else
30802 return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
30803 #endif
30804 }
30805
30806 __extension__ extern __inline uint16x4_t
30807 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30808 vuzp1_u16 (uint16x4_t __a, uint16x4_t __b)
30809 {
30810 #ifdef __AARCH64EB__
30811 return __builtin_shuffle (__a, __b, (uint16x4_t) {5, 7, 1, 3});
30812 #else
30813 return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 2, 4, 6});
30814 #endif
30815 }
30816
30817 __extension__ extern __inline uint32x2_t
30818 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30819 vuzp1_u32 (uint32x2_t __a, uint32x2_t __b)
30820 {
30821 #ifdef __AARCH64EB__
30822 return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
30823 #else
30824 return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
30825 #endif
30826 }
30827
30828 __extension__ extern __inline float16x8_t
30829 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30830 vuzp1q_f16 (float16x8_t __a, float16x8_t __b)
30831 {
30832 #ifdef __AARCH64EB__
30833 return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
30834 #else
30835 return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
30836 #endif
30837 }
30838
30839 __extension__ extern __inline float32x4_t
30840 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30841 vuzp1q_f32 (float32x4_t __a, float32x4_t __b)
30842 {
30843 #ifdef __AARCH64EB__
30844 return __builtin_shuffle (__a, __b, (uint32x4_t) {5, 7, 1, 3});
30845 #else
30846 return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 2, 4, 6});
30847 #endif
30848 }
30849
30850 __extension__ extern __inline float64x2_t
30851 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30852 vuzp1q_f64 (float64x2_t __a, float64x2_t __b)
30853 {
30854 #ifdef __AARCH64EB__
30855 return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
30856 #else
30857 return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
30858 #endif
30859 }
30860
30861 __extension__ extern __inline poly8x16_t
30862 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30863 vuzp1q_p8 (poly8x16_t __a, poly8x16_t __b)
30864 {
30865 #ifdef __AARCH64EB__
30866 return __builtin_shuffle (__a, __b, (uint8x16_t)
30867 {17, 19, 21, 23, 25, 27, 29, 31, 1, 3, 5, 7, 9, 11, 13, 15});
30868 #else
30869 return __builtin_shuffle (__a, __b, (uint8x16_t)
30870 {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
30871 #endif
30872 }
30873
30874 __extension__ extern __inline poly16x8_t
30875 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30876 vuzp1q_p16 (poly16x8_t __a, poly16x8_t __b)
30877 {
30878 #ifdef __AARCH64EB__
30879 return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
30880 #else
30881 return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
30882 #endif
30883 }
30884
30885 __extension__ extern __inline int8x16_t
30886 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30887 vuzp1q_s8 (int8x16_t __a, int8x16_t __b)
30888 {
30889 #ifdef __AARCH64EB__
30890 return __builtin_shuffle (__a, __b,
30891 (uint8x16_t) {17, 19, 21, 23, 25, 27, 29, 31, 1, 3, 5, 7, 9, 11, 13, 15});
30892 #else
30893 return __builtin_shuffle (__a, __b,
30894 (uint8x16_t) {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
30895 #endif
30896 }
30897
30898 __extension__ extern __inline int16x8_t
30899 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30900 vuzp1q_s16 (int16x8_t __a, int16x8_t __b)
30901 {
30902 #ifdef __AARCH64EB__
30903 return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
30904 #else
30905 return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
30906 #endif
30907 }
30908
30909 __extension__ extern __inline int32x4_t
30910 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30911 vuzp1q_s32 (int32x4_t __a, int32x4_t __b)
30912 {
30913 #ifdef __AARCH64EB__
30914 return __builtin_shuffle (__a, __b, (uint32x4_t) {5, 7, 1, 3});
30915 #else
30916 return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 2, 4, 6});
30917 #endif
30918 }
30919
30920 __extension__ extern __inline int64x2_t
30921 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30922 vuzp1q_s64 (int64x2_t __a, int64x2_t __b)
30923 {
30924 #ifdef __AARCH64EB__
30925 return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
30926 #else
30927 return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
30928 #endif
30929 }
30930
30931 __extension__ extern __inline uint8x16_t
30932 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30933 vuzp1q_u8 (uint8x16_t __a, uint8x16_t __b)
30934 {
30935 #ifdef __AARCH64EB__
30936 return __builtin_shuffle (__a, __b,
30937 (uint8x16_t) {17, 19, 21, 23, 25, 27, 29, 31, 1, 3, 5, 7, 9, 11, 13, 15});
30938 #else
30939 return __builtin_shuffle (__a, __b,
30940 (uint8x16_t) {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
30941 #endif
30942 }
30943
30944 __extension__ extern __inline uint16x8_t
30945 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30946 vuzp1q_u16 (uint16x8_t __a, uint16x8_t __b)
30947 {
30948 #ifdef __AARCH64EB__
30949 return __builtin_shuffle (__a, __b, (uint16x8_t) {9, 11, 13, 15, 1, 3, 5, 7});
30950 #else
30951 return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 2, 4, 6, 8, 10, 12, 14});
30952 #endif
30953 }
30954
30955 __extension__ extern __inline uint32x4_t
30956 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30957 vuzp1q_u32 (uint32x4_t __a, uint32x4_t __b)
30958 {
30959 #ifdef __AARCH64EB__
30960 return __builtin_shuffle (__a, __b, (uint32x4_t) {5, 7, 1, 3});
30961 #else
30962 return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 2, 4, 6});
30963 #endif
30964 }
30965
30966 __extension__ extern __inline uint64x2_t
30967 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30968 vuzp1q_u64 (uint64x2_t __a, uint64x2_t __b)
30969 {
30970 #ifdef __AARCH64EB__
30971 return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
30972 #else
30973 return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
30974 #endif
30975 }
30976
30977 __extension__ extern __inline poly64x2_t
30978 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30979 vuzp1q_p64 (poly64x2_t __a, poly64x2_t __b)
30980 {
30981 #ifdef __AARCH64EB__
30982 return __builtin_shuffle (__a, __b, (poly64x2_t) {3, 1});
30983 #else
30984 return __builtin_shuffle (__a, __b, (poly64x2_t) {0, 2});
30985 #endif
30986 }
30987
30988 __extension__ extern __inline float16x4_t
30989 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
30990 vuzp2_f16 (float16x4_t __a, float16x4_t __b)
30991 {
30992 #ifdef __AARCH64EB__
30993 return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 6, 0, 2});
30994 #else
30995 return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7});
30996 #endif
30997 }
30998
30999 __extension__ extern __inline float32x2_t
31000 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31001 vuzp2_f32 (float32x2_t __a, float32x2_t __b)
31002 {
31003 #ifdef __AARCH64EB__
31004 return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
31005 #else
31006 return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
31007 #endif
31008 }
31009
31010 __extension__ extern __inline poly8x8_t
31011 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31012 vuzp2_p8 (poly8x8_t __a, poly8x8_t __b)
31013 {
31014 #ifdef __AARCH64EB__
31015 return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
31016 #else
31017 return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
31018 #endif
31019 }
31020
31021 __extension__ extern __inline poly16x4_t
31022 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31023 vuzp2_p16 (poly16x4_t __a, poly16x4_t __b)
31024 {
31025 #ifdef __AARCH64EB__
31026 return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 6, 0, 2});
31027 #else
31028 return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7});
31029 #endif
31030 }
31031
31032 __extension__ extern __inline int8x8_t
31033 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31034 vuzp2_s8 (int8x8_t __a, int8x8_t __b)
31035 {
31036 #ifdef __AARCH64EB__
31037 return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
31038 #else
31039 return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
31040 #endif
31041 }
31042
31043 __extension__ extern __inline int16x4_t
31044 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31045 vuzp2_s16 (int16x4_t __a, int16x4_t __b)
31046 {
31047 #ifdef __AARCH64EB__
31048 return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 6, 0, 2});
31049 #else
31050 return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7});
31051 #endif
31052 }
31053
31054 __extension__ extern __inline int32x2_t
31055 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31056 vuzp2_s32 (int32x2_t __a, int32x2_t __b)
31057 {
31058 #ifdef __AARCH64EB__
31059 return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
31060 #else
31061 return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
31062 #endif
31063 }
31064
31065 __extension__ extern __inline uint8x8_t
31066 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31067 vuzp2_u8 (uint8x8_t __a, uint8x8_t __b)
31068 {
31069 #ifdef __AARCH64EB__
31070 return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
31071 #else
31072 return __builtin_shuffle (__a, __b, (uint8x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
31073 #endif
31074 }
31075
31076 __extension__ extern __inline uint16x4_t
31077 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31078 vuzp2_u16 (uint16x4_t __a, uint16x4_t __b)
31079 {
31080 #ifdef __AARCH64EB__
31081 return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 6, 0, 2});
31082 #else
31083 return __builtin_shuffle (__a, __b, (uint16x4_t) {1, 3, 5, 7});
31084 #endif
31085 }
31086
31087 __extension__ extern __inline uint32x2_t
31088 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31089 vuzp2_u32 (uint32x2_t __a, uint32x2_t __b)
31090 {
31091 #ifdef __AARCH64EB__
31092 return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
31093 #else
31094 return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
31095 #endif
31096 }
31097
31098 __extension__ extern __inline float16x8_t
31099 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31100 vuzp2q_f16 (float16x8_t __a, float16x8_t __b)
31101 {
31102 #ifdef __AARCH64EB__
31103 return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
31104 #else
31105 return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
31106 #endif
31107 }
31108
31109 __extension__ extern __inline float32x4_t
31110 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31111 vuzp2q_f32 (float32x4_t __a, float32x4_t __b)
31112 {
31113 #ifdef __AARCH64EB__
31114 return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 6, 0, 2});
31115 #else
31116 return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 3, 5, 7});
31117 #endif
31118 }
31119
31120 __extension__ extern __inline float64x2_t
31121 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31122 vuzp2q_f64 (float64x2_t __a, float64x2_t __b)
31123 {
31124 #ifdef __AARCH64EB__
31125 return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
31126 #else
31127 return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
31128 #endif
31129 }
31130
31131 __extension__ extern __inline poly8x16_t
31132 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31133 vuzp2q_p8 (poly8x16_t __a, poly8x16_t __b)
31134 {
31135 #ifdef __AARCH64EB__
31136 return __builtin_shuffle (__a, __b,
31137 (uint8x16_t) {16, 18, 20, 22, 24, 26, 28, 30, 0, 2, 4, 6, 8, 10, 12, 14});
31138 #else
31139 return __builtin_shuffle (__a, __b,
31140 (uint8x16_t) {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31});
31141 #endif
31142 }
31143
31144 __extension__ extern __inline poly16x8_t
31145 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31146 vuzp2q_p16 (poly16x8_t __a, poly16x8_t __b)
31147 {
31148 #ifdef __AARCH64EB__
31149 return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
31150 #else
31151 return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
31152 #endif
31153 }
31154
31155 __extension__ extern __inline int8x16_t
31156 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31157 vuzp2q_s8 (int8x16_t __a, int8x16_t __b)
31158 {
31159 #ifdef __AARCH64EB__
31160 return __builtin_shuffle (__a, __b,
31161 (uint8x16_t) {16, 18, 20, 22, 24, 26, 28, 30, 0, 2, 4, 6, 8, 10, 12, 14});
31162 #else
31163 return __builtin_shuffle (__a, __b,
31164 (uint8x16_t) {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31});
31165 #endif
31166 }
31167
31168 __extension__ extern __inline int16x8_t
31169 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31170 vuzp2q_s16 (int16x8_t __a, int16x8_t __b)
31171 {
31172 #ifdef __AARCH64EB__
31173 return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
31174 #else
31175 return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
31176 #endif
31177 }
31178
31179 __extension__ extern __inline int32x4_t
31180 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31181 vuzp2q_s32 (int32x4_t __a, int32x4_t __b)
31182 {
31183 #ifdef __AARCH64EB__
31184 return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 6, 0, 2});
31185 #else
31186 return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 3, 5, 7});
31187 #endif
31188 }
31189
31190 __extension__ extern __inline int64x2_t
31191 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31192 vuzp2q_s64 (int64x2_t __a, int64x2_t __b)
31193 {
31194 #ifdef __AARCH64EB__
31195 return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
31196 #else
31197 return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
31198 #endif
31199 }
31200
31201 __extension__ extern __inline uint8x16_t
31202 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31203 vuzp2q_u8 (uint8x16_t __a, uint8x16_t __b)
31204 {
31205 #ifdef __AARCH64EB__
31206 return __builtin_shuffle (__a, __b, (uint8x16_t)
31207 {16, 18, 20, 22, 24, 26, 28, 30, 0, 2, 4, 6, 8, 10, 12, 14});
31208 #else
31209 return __builtin_shuffle (__a, __b, (uint8x16_t)
31210 {1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31});
31211 #endif
31212 }
31213
31214 __extension__ extern __inline uint16x8_t
31215 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31216 vuzp2q_u16 (uint16x8_t __a, uint16x8_t __b)
31217 {
31218 #ifdef __AARCH64EB__
31219 return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 10, 12, 14, 0, 2, 4, 6});
31220 #else
31221 return __builtin_shuffle (__a, __b, (uint16x8_t) {1, 3, 5, 7, 9, 11, 13, 15});
31222 #endif
31223 }
31224
31225 __extension__ extern __inline uint32x4_t
31226 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31227 vuzp2q_u32 (uint32x4_t __a, uint32x4_t __b)
31228 {
31229 #ifdef __AARCH64EB__
31230 return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 6, 0, 2});
31231 #else
31232 return __builtin_shuffle (__a, __b, (uint32x4_t) {1, 3, 5, 7});
31233 #endif
31234 }
31235
31236 __extension__ extern __inline uint64x2_t
31237 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31238 vuzp2q_u64 (uint64x2_t __a, uint64x2_t __b)
31239 {
31240 #ifdef __AARCH64EB__
31241 return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
31242 #else
31243 return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
31244 #endif
31245 }
31246
31247 __extension__ extern __inline poly64x2_t
31248 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31249 vuzp2q_p64 (poly64x2_t __a, poly64x2_t __b)
31250 {
31251 #ifdef __AARCH64EB__
31252 return __builtin_shuffle (__a, __b, (poly64x2_t) {2, 0});
31253 #else
31254 return __builtin_shuffle (__a, __b, (poly64x2_t) {1, 3});
31255 #endif
31256 }
31257
31258 __INTERLEAVE_LIST (uzp)
31259
31260 /* vzip */
31261
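/* The vzip1/vzip2 intrinsics interleave: vzip1 alternates the elements of
   the low halves of __a and __b, vzip2 those of the high halves.
   Illustrative usage:

     int32x2_t lo = vzip1_s32 ((int32x2_t) {0, 1}, (int32x2_t) {10, 11});
     ... lo is {0, 10}.  */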
31262 __extension__ extern __inline float16x4_t
31263 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31264 vzip1_f16 (float16x4_t __a, float16x4_t __b)
31265 {
31266 #ifdef __AARCH64EB__
31267 return __builtin_shuffle (__a, __b, (uint16x4_t) {6, 2, 7, 3});
31268 #else
31269 return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 1, 5});
31270 #endif
31271 }
31272
31273 __extension__ extern __inline float32x2_t
31274 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31275 vzip1_f32 (float32x2_t __a, float32x2_t __b)
31276 {
31277 #ifdef __AARCH64EB__
31278 return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
31279 #else
31280 return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
31281 #endif
31282 }
31283
31284 __extension__ extern __inline poly8x8_t
31285 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31286 vzip1_p8 (poly8x8_t __a, poly8x8_t __b)
31287 {
31288 #ifdef __AARCH64EB__
31289 return __builtin_shuffle (__a, __b, (uint8x8_t) {12, 4, 13, 5, 14, 6, 15, 7});
31290 #else
31291 return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 8, 1, 9, 2, 10, 3, 11});
31292 #endif
31293 }
31294
31295 __extension__ extern __inline poly16x4_t
31296 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31297 vzip1_p16 (poly16x4_t __a, poly16x4_t __b)
31298 {
31299 #ifdef __AARCH64EB__
31300 return __builtin_shuffle (__a, __b, (uint16x4_t) {6, 2, 7, 3});
31301 #else
31302 return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 1, 5});
31303 #endif
31304 }
31305
31306 __extension__ extern __inline int8x8_t
31307 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31308 vzip1_s8 (int8x8_t __a, int8x8_t __b)
31309 {
31310 #ifdef __AARCH64EB__
31311 return __builtin_shuffle (__a, __b, (uint8x8_t) {12, 4, 13, 5, 14, 6, 15, 7});
31312 #else
31313 return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 8, 1, 9, 2, 10, 3, 11});
31314 #endif
31315 }
31316
31317 __extension__ extern __inline int16x4_t
31318 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31319 vzip1_s16 (int16x4_t __a, int16x4_t __b)
31320 {
31321 #ifdef __AARCH64EB__
31322 return __builtin_shuffle (__a, __b, (uint16x4_t) {6, 2, 7, 3});
31323 #else
31324 return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 1, 5});
31325 #endif
31326 }
31327
31328 __extension__ extern __inline int32x2_t
31329 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31330 vzip1_s32 (int32x2_t __a, int32x2_t __b)
31331 {
31332 #ifdef __AARCH64EB__
31333 return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
31334 #else
31335 return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
31336 #endif
31337 }
31338
31339 __extension__ extern __inline uint8x8_t
31340 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31341 vzip1_u8 (uint8x8_t __a, uint8x8_t __b)
31342 {
31343 #ifdef __AARCH64EB__
31344 return __builtin_shuffle (__a, __b, (uint8x8_t) {12, 4, 13, 5, 14, 6, 15, 7});
31345 #else
31346 return __builtin_shuffle (__a, __b, (uint8x8_t) {0, 8, 1, 9, 2, 10, 3, 11});
31347 #endif
31348 }
31349
31350 __extension__ extern __inline uint16x4_t
31351 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31352 vzip1_u16 (uint16x4_t __a, uint16x4_t __b)
31353 {
31354 #ifdef __AARCH64EB__
31355 return __builtin_shuffle (__a, __b, (uint16x4_t) {6, 2, 7, 3});
31356 #else
31357 return __builtin_shuffle (__a, __b, (uint16x4_t) {0, 4, 1, 5});
31358 #endif
31359 }
31360
31361 __extension__ extern __inline uint32x2_t
31362 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31363 vzip1_u32 (uint32x2_t __a, uint32x2_t __b)
31364 {
31365 #ifdef __AARCH64EB__
31366 return __builtin_shuffle (__a, __b, (uint32x2_t) {3, 1});
31367 #else
31368 return __builtin_shuffle (__a, __b, (uint32x2_t) {0, 2});
31369 #endif
31370 }
31371
31372 __extension__ extern __inline float16x8_t
31373 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31374 vzip1q_f16 (float16x8_t __a, float16x8_t __b)
31375 {
31376 #ifdef __AARCH64EB__
31377 return __builtin_shuffle (__a, __b,
31378 (uint16x8_t) {12, 4, 13, 5, 14, 6, 15, 7});
31379 #else
31380 return __builtin_shuffle (__a, __b,
31381 (uint16x8_t) {0, 8, 1, 9, 2, 10, 3, 11});
31382 #endif
31383 }
31384
31385 __extension__ extern __inline float32x4_t
31386 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31387 vzip1q_f32 (float32x4_t __a, float32x4_t __b)
31388 {
31389 #ifdef __AARCH64EB__
31390 return __builtin_shuffle (__a, __b, (uint32x4_t) {6, 2, 7, 3});
31391 #else
31392 return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 4, 1, 5});
31393 #endif
31394 }
31395
31396 __extension__ extern __inline float64x2_t
31397 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31398 vzip1q_f64 (float64x2_t __a, float64x2_t __b)
31399 {
31400 #ifdef __AARCH64EB__
31401 return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
31402 #else
31403 return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
31404 #endif
31405 }
31406
31407 __extension__ extern __inline poly8x16_t
31408 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31409 vzip1q_p8 (poly8x16_t __a, poly8x16_t __b)
31410 {
31411 #ifdef __AARCH64EB__
31412 return __builtin_shuffle (__a, __b, (uint8x16_t)
31413 {24, 8, 25, 9, 26, 10, 27, 11, 28, 12, 29, 13, 30, 14, 31, 15});
31414 #else
31415 return __builtin_shuffle (__a, __b, (uint8x16_t)
31416 {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23});
31417 #endif
31418 }
31419
31420 __extension__ extern __inline poly16x8_t
31421 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31422 vzip1q_p16 (poly16x8_t __a, poly16x8_t __b)
31423 {
31424 #ifdef __AARCH64EB__
31425 return __builtin_shuffle (__a, __b, (uint16x8_t)
31426 {12, 4, 13, 5, 14, 6, 15, 7});
31427 #else
31428 return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 1, 9, 2, 10, 3, 11});
31429 #endif
31430 }
31431
31432 __extension__ extern __inline int8x16_t
31433 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31434 vzip1q_s8 (int8x16_t __a, int8x16_t __b)
31435 {
31436 #ifdef __AARCH64EB__
31437 return __builtin_shuffle (__a, __b, (uint8x16_t)
31438 {24, 8, 25, 9, 26, 10, 27, 11, 28, 12, 29, 13, 30, 14, 31, 15});
31439 #else
31440 return __builtin_shuffle (__a, __b, (uint8x16_t)
31441 {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23});
31442 #endif
31443 }
31444
31445 __extension__ extern __inline int16x8_t
31446 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31447 vzip1q_s16 (int16x8_t __a, int16x8_t __b)
31448 {
31449 #ifdef __AARCH64EB__
31450 return __builtin_shuffle (__a, __b, (uint16x8_t)
31451 {12, 4, 13, 5, 14, 6, 15, 7});
31452 #else
31453 return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 1, 9, 2, 10, 3, 11});
31454 #endif
31455 }
31456
31457 __extension__ extern __inline int32x4_t
31458 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31459 vzip1q_s32 (int32x4_t __a, int32x4_t __b)
31460 {
31461 #ifdef __AARCH64EB__
31462 return __builtin_shuffle (__a, __b, (uint32x4_t) {6, 2, 7, 3});
31463 #else
31464 return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 4, 1, 5});
31465 #endif
31466 }
31467
31468 __extension__ extern __inline int64x2_t
31469 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31470 vzip1q_s64 (int64x2_t __a, int64x2_t __b)
31471 {
31472 #ifdef __AARCH64EB__
31473 return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
31474 #else
31475 return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
31476 #endif
31477 }
31478
31479 __extension__ extern __inline uint8x16_t
31480 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31481 vzip1q_u8 (uint8x16_t __a, uint8x16_t __b)
31482 {
31483 #ifdef __AARCH64EB__
31484 return __builtin_shuffle (__a, __b, (uint8x16_t)
31485 {24, 8, 25, 9, 26, 10, 27, 11, 28, 12, 29, 13, 30, 14, 31, 15});
31486 #else
31487 return __builtin_shuffle (__a, __b, (uint8x16_t)
31488 {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23});
31489 #endif
31490 }
31491
31492 __extension__ extern __inline uint16x8_t
31493 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31494 vzip1q_u16 (uint16x8_t __a, uint16x8_t __b)
31495 {
31496 #ifdef __AARCH64EB__
31497 return __builtin_shuffle (__a, __b, (uint16x8_t)
31498 {12, 4, 13, 5, 14, 6, 15, 7});
31499 #else
31500 return __builtin_shuffle (__a, __b, (uint16x8_t) {0, 8, 1, 9, 2, 10, 3, 11});
31501 #endif
31502 }
31503
31504 __extension__ extern __inline uint32x4_t
31505 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31506 vzip1q_u32 (uint32x4_t __a, uint32x4_t __b)
31507 {
31508 #ifdef __AARCH64EB__
31509 return __builtin_shuffle (__a, __b, (uint32x4_t) {6, 2, 7, 3});
31510 #else
31511 return __builtin_shuffle (__a, __b, (uint32x4_t) {0, 4, 1, 5});
31512 #endif
31513 }
31514
31515 __extension__ extern __inline uint64x2_t
31516 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31517 vzip1q_u64 (uint64x2_t __a, uint64x2_t __b)
31518 {
31519 #ifdef __AARCH64EB__
31520 return __builtin_shuffle (__a, __b, (uint64x2_t) {3, 1});
31521 #else
31522 return __builtin_shuffle (__a, __b, (uint64x2_t) {0, 2});
31523 #endif
31524 }
31525
31526 __extension__ extern __inline poly64x2_t
31527 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31528 vzip1q_p64 (poly64x2_t __a, poly64x2_t __b)
31529 {
31530 #ifdef __AARCH64EB__
31531 return __builtin_shuffle (__a, __b, (poly64x2_t) {3, 1});
31532 #else
31533 return __builtin_shuffle (__a, __b, (poly64x2_t) {0, 2});
31534 #endif
31535 }
31536
31537 __extension__ extern __inline float16x4_t
31538 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31539 vzip2_f16 (float16x4_t __a, float16x4_t __b)
31540 {
31541 #ifdef __AARCH64EB__
31542 return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 0, 5, 1});
31543 #else
31544 return __builtin_shuffle (__a, __b, (uint16x4_t) {2, 6, 3, 7});
31545 #endif
31546 }
31547
31548 __extension__ extern __inline float32x2_t
31549 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31550 vzip2_f32 (float32x2_t __a, float32x2_t __b)
31551 {
31552 #ifdef __AARCH64EB__
31553 return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
31554 #else
31555 return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
31556 #endif
31557 }
31558
31559 __extension__ extern __inline poly8x8_t
31560 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31561 vzip2_p8 (poly8x8_t __a, poly8x8_t __b)
31562 {
31563 #ifdef __AARCH64EB__
31564 return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 0, 9, 1, 10, 2, 11, 3});
31565 #else
31566 return __builtin_shuffle (__a, __b, (uint8x8_t) {4, 12, 5, 13, 6, 14, 7, 15});
31567 #endif
31568 }
31569
31570 __extension__ extern __inline poly16x4_t
31571 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31572 vzip2_p16 (poly16x4_t __a, poly16x4_t __b)
31573 {
31574 #ifdef __AARCH64EB__
31575 return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 0, 5, 1});
31576 #else
31577 return __builtin_shuffle (__a, __b, (uint16x4_t) {2, 6, 3, 7});
31578 #endif
31579 }
31580
31581 __extension__ extern __inline int8x8_t
31582 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31583 vzip2_s8 (int8x8_t __a, int8x8_t __b)
31584 {
31585 #ifdef __AARCH64EB__
31586 return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 0, 9, 1, 10, 2, 11, 3});
31587 #else
31588 return __builtin_shuffle (__a, __b, (uint8x8_t) {4, 12, 5, 13, 6, 14, 7, 15});
31589 #endif
31590 }
31591
31592 __extension__ extern __inline int16x4_t
31593 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31594 vzip2_s16 (int16x4_t __a, int16x4_t __b)
31595 {
31596 #ifdef __AARCH64EB__
31597 return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 0, 5, 1});
31598 #else
31599 return __builtin_shuffle (__a, __b, (uint16x4_t) {2, 6, 3, 7});
31600 #endif
31601 }
31602
31603 __extension__ extern __inline int32x2_t
31604 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31605 vzip2_s32 (int32x2_t __a, int32x2_t __b)
31606 {
31607 #ifdef __AARCH64EB__
31608 return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
31609 #else
31610 return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
31611 #endif
31612 }
31613
31614 __extension__ extern __inline uint8x8_t
31615 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31616 vzip2_u8 (uint8x8_t __a, uint8x8_t __b)
31617 {
31618 #ifdef __AARCH64EB__
31619 return __builtin_shuffle (__a, __b, (uint8x8_t) {8, 0, 9, 1, 10, 2, 11, 3});
31620 #else
31621 return __builtin_shuffle (__a, __b, (uint8x8_t) {4, 12, 5, 13, 6, 14, 7, 15});
31622 #endif
31623 }
31624
31625 __extension__ extern __inline uint16x4_t
31626 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31627 vzip2_u16 (uint16x4_t __a, uint16x4_t __b)
31628 {
31629 #ifdef __AARCH64EB__
31630 return __builtin_shuffle (__a, __b, (uint16x4_t) {4, 0, 5, 1});
31631 #else
31632 return __builtin_shuffle (__a, __b, (uint16x4_t) {2, 6, 3, 7});
31633 #endif
31634 }
31635
31636 __extension__ extern __inline uint32x2_t
31637 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31638 vzip2_u32 (uint32x2_t __a, uint32x2_t __b)
31639 {
31640 #ifdef __AARCH64EB__
31641 return __builtin_shuffle (__a, __b, (uint32x2_t) {2, 0});
31642 #else
31643 return __builtin_shuffle (__a, __b, (uint32x2_t) {1, 3});
31644 #endif
31645 }
31646
31647 __extension__ extern __inline float16x8_t
31648 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31649 vzip2q_f16 (float16x8_t __a, float16x8_t __b)
31650 {
31651 #ifdef __AARCH64EB__
31652 return __builtin_shuffle (__a, __b,
31653 (uint16x8_t) {8, 0, 9, 1, 10, 2, 11, 3});
31654 #else
31655 return __builtin_shuffle (__a, __b,
31656 (uint16x8_t) {4, 12, 5, 13, 6, 14, 7, 15});
31657 #endif
31658 }
31659
31660 __extension__ extern __inline float32x4_t
31661 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31662 vzip2q_f32 (float32x4_t __a, float32x4_t __b)
31663 {
31664 #ifdef __AARCH64EB__
31665 return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 0, 5, 1});
31666 #else
31667 return __builtin_shuffle (__a, __b, (uint32x4_t) {2, 6, 3, 7});
31668 #endif
31669 }
31670
31671 __extension__ extern __inline float64x2_t
31672 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31673 vzip2q_f64 (float64x2_t __a, float64x2_t __b)
31674 {
31675 #ifdef __AARCH64EB__
31676 return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
31677 #else
31678 return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
31679 #endif
31680 }
31681
31682 __extension__ extern __inline poly8x16_t
31683 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31684 vzip2q_p8 (poly8x16_t __a, poly8x16_t __b)
31685 {
31686 #ifdef __AARCH64EB__
31687 return __builtin_shuffle (__a, __b, (uint8x16_t)
31688 {16, 0, 17, 1, 18, 2, 19, 3, 20, 4, 21, 5, 22, 6, 23, 7});
31689 #else
31690 return __builtin_shuffle (__a, __b, (uint8x16_t)
31691 {8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31});
31692 #endif
31693 }
31694
31695 __extension__ extern __inline poly16x8_t
31696 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31697 vzip2q_p16 (poly16x8_t __a, poly16x8_t __b)
31698 {
31699 #ifdef __AARCH64EB__
31700 return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 0, 9, 1, 10, 2, 11, 3});
31701 #else
31702 return __builtin_shuffle (__a, __b, (uint16x8_t)
31703 {4, 12, 5, 13, 6, 14, 7, 15});
31704 #endif
31705 }
31706
31707 __extension__ extern __inline int8x16_t
31708 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31709 vzip2q_s8 (int8x16_t __a, int8x16_t __b)
31710 {
31711 #ifdef __AARCH64EB__
31712 return __builtin_shuffle (__a, __b, (uint8x16_t)
31713 {16, 0, 17, 1, 18, 2, 19, 3, 20, 4, 21, 5, 22, 6, 23, 7});
31714 #else
31715 return __builtin_shuffle (__a, __b, (uint8x16_t)
31716 {8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31});
31717 #endif
31718 }
31719
31720 __extension__ extern __inline int16x8_t
31721 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31722 vzip2q_s16 (int16x8_t __a, int16x8_t __b)
31723 {
31724 #ifdef __AARCH64EB__
31725 return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 0, 9, 1, 10, 2, 11, 3});
31726 #else
31727 return __builtin_shuffle (__a, __b, (uint16x8_t)
31728 {4, 12, 5, 13, 6, 14, 7, 15});
31729 #endif
31730 }
31731
31732 __extension__ extern __inline int32x4_t
31733 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31734 vzip2q_s32 (int32x4_t __a, int32x4_t __b)
31735 {
31736 #ifdef __AARCH64EB__
31737 return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 0, 5, 1});
31738 #else
31739 return __builtin_shuffle (__a, __b, (uint32x4_t) {2, 6, 3, 7});
31740 #endif
31741 }
31742
31743 __extension__ extern __inline int64x2_t
31744 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31745 vzip2q_s64 (int64x2_t __a, int64x2_t __b)
31746 {
31747 #ifdef __AARCH64EB__
31748 return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
31749 #else
31750 return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
31751 #endif
31752 }
31753
31754 __extension__ extern __inline uint8x16_t
31755 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31756 vzip2q_u8 (uint8x16_t __a, uint8x16_t __b)
31757 {
31758 #ifdef __AARCH64EB__
31759 return __builtin_shuffle (__a, __b, (uint8x16_t)
31760 {16, 0, 17, 1, 18, 2, 19, 3, 20, 4, 21, 5, 22, 6, 23, 7});
31761 #else
31762 return __builtin_shuffle (__a, __b, (uint8x16_t)
31763 {8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31});
31764 #endif
31765 }
31766
31767 __extension__ extern __inline uint16x8_t
31768 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31769 vzip2q_u16 (uint16x8_t __a, uint16x8_t __b)
31770 {
31771 #ifdef __AARCH64EB__
31772 return __builtin_shuffle (__a, __b, (uint16x8_t) {8, 0, 9, 1, 10, 2, 11, 3});
31773 #else
31774 return __builtin_shuffle (__a, __b, (uint16x8_t)
31775 {4, 12, 5, 13, 6, 14, 7, 15});
31776 #endif
31777 }
31778
31779 __extension__ extern __inline uint32x4_t
31780 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31781 vzip2q_u32 (uint32x4_t __a, uint32x4_t __b)
31782 {
31783 #ifdef __AARCH64EB__
31784 return __builtin_shuffle (__a, __b, (uint32x4_t) {4, 0, 5, 1});
31785 #else
31786 return __builtin_shuffle (__a, __b, (uint32x4_t) {2, 6, 3, 7});
31787 #endif
31788 }
31789
31790 __extension__ extern __inline uint64x2_t
31791 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31792 vzip2q_u64 (uint64x2_t __a, uint64x2_t __b)
31793 {
31794 #ifdef __AARCH64EB__
31795 return __builtin_shuffle (__a, __b, (uint64x2_t) {2, 0});
31796 #else
31797 return __builtin_shuffle (__a, __b, (uint64x2_t) {1, 3});
31798 #endif
31799 }
31800
31801 __extension__ extern __inline poly64x2_t
31802 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31803 vzip2q_p64 (poly64x2_t __a, poly64x2_t __b)
31804 {
31805 #ifdef __AARCH64EB__
31806 return __builtin_shuffle (__a, __b, (poly64x2_t) {2, 0});
31807 #else
31808 return __builtin_shuffle (__a, __b, (poly64x2_t) {1, 3});
31809 #endif
31810 }
31811
31812 __INTERLEAVE_LIST (zip)
31813
31814 #undef __INTERLEAVE_LIST
31815 #undef __DEFINTERLEAVE
31816
31817 /* End of optimal implementations in approved order. */
31818
31819 #pragma GCC pop_options
31820
31821 /* ARMv8.2-A FP16 intrinsics. */
31822
31823 #include "arm_fp16.h"
31824
31825 #pragma GCC push_options
31826 #pragma GCC target ("arch=armv8.2-a+fp16")
31827
31828 /* ARMv8.2-A FP16 one operand vector intrinsics. */
31829
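/* These one-operand intrinsics map onto the corresponding ARMv8.2-A FP16
   instructions; the zero-comparison forms (vceqz, vcgez, vcgtz, vclez,
   vcltz) are expressed as comparisons against a vector of 0.0f built with
   vdup_n_f16/vdupq_n_f16, and all of them are only available under the
   surrounding "+fp16" target pragma.  Illustrative usage:

     uint16x4_t nonneg = vcgez_f16 (vdup_n_f16 (1.0f));
     ... every lane of nonneg is 0xffff.  */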
31830 __extension__ extern __inline float16x4_t
31831 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31832 vabs_f16 (float16x4_t __a)
31833 {
31834 return __builtin_aarch64_absv4hf (__a);
31835 }
31836
31837 __extension__ extern __inline float16x8_t
31838 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31839 vabsq_f16 (float16x8_t __a)
31840 {
31841 return __builtin_aarch64_absv8hf (__a);
31842 }
31843
31844 __extension__ extern __inline uint16x4_t
31845 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31846 vceqz_f16 (float16x4_t __a)
31847 {
31848 return __builtin_aarch64_cmeqv4hf_uss (__a, vdup_n_f16 (0.0f));
31849 }
31850
31851 __extension__ extern __inline uint16x8_t
31852 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31853 vceqzq_f16 (float16x8_t __a)
31854 {
31855 return __builtin_aarch64_cmeqv8hf_uss (__a, vdupq_n_f16 (0.0f));
31856 }
31857
31858 __extension__ extern __inline uint16x4_t
31859 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31860 vcgez_f16 (float16x4_t __a)
31861 {
31862 return __builtin_aarch64_cmgev4hf_uss (__a, vdup_n_f16 (0.0f));
31863 }
31864
31865 __extension__ extern __inline uint16x8_t
31866 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31867 vcgezq_f16 (float16x8_t __a)
31868 {
31869 return __builtin_aarch64_cmgev8hf_uss (__a, vdupq_n_f16 (0.0f));
31870 }
31871
31872 __extension__ extern __inline uint16x4_t
31873 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31874 vcgtz_f16 (float16x4_t __a)
31875 {
31876 return __builtin_aarch64_cmgtv4hf_uss (__a, vdup_n_f16 (0.0f));
31877 }
31878
31879 __extension__ extern __inline uint16x8_t
31880 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31881 vcgtzq_f16 (float16x8_t __a)
31882 {
31883 return __builtin_aarch64_cmgtv8hf_uss (__a, vdupq_n_f16 (0.0f));
31884 }
31885
31886 __extension__ extern __inline uint16x4_t
31887 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31888 vclez_f16 (float16x4_t __a)
31889 {
31890 return __builtin_aarch64_cmlev4hf_uss (__a, vdup_n_f16 (0.0f));
31891 }
31892
31893 __extension__ extern __inline uint16x8_t
31894 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31895 vclezq_f16 (float16x8_t __a)
31896 {
31897 return __builtin_aarch64_cmlev8hf_uss (__a, vdupq_n_f16 (0.0f));
31898 }
31899
31900 __extension__ extern __inline uint16x4_t
31901 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31902 vcltz_f16 (float16x4_t __a)
31903 {
31904 return __builtin_aarch64_cmltv4hf_uss (__a, vdup_n_f16 (0.0f));
31905 }
31906
31907 __extension__ extern __inline uint16x8_t
31908 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31909 vcltzq_f16 (float16x8_t __a)
31910 {
31911 return __builtin_aarch64_cmltv8hf_uss (__a, vdupq_n_f16 (0.0f));
31912 }
31913
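/* Usage sketch (illustrative only): the vc*z_f16 comparisons return an
   all-ones (0xffff) lane where the predicate holds and zero elsewhere,
   which composes naturally with bit-select.  Assuming vbsl_f16 and
   vdup_n_f16 from earlier in this header:
     uint16x4_t keep = vcgez_f16 (x);                        // x >= 0.0 ?
     float16x4_t relu = vbsl_f16 (keep, x, vdup_n_f16 (0.0f));
*/
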
31914 __extension__ extern __inline float16x4_t
31915 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31916 vcvt_f16_s16 (int16x4_t __a)
31917 {
31918 return __builtin_aarch64_floatv4hiv4hf (__a);
31919 }
31920
31921 __extension__ extern __inline float16x8_t
31922 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31923 vcvtq_f16_s16 (int16x8_t __a)
31924 {
31925 return __builtin_aarch64_floatv8hiv8hf (__a);
31926 }
31927
31928 __extension__ extern __inline float16x4_t
31929 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31930 vcvt_f16_u16 (uint16x4_t __a)
31931 {
31932 return __builtin_aarch64_floatunsv4hiv4hf ((int16x4_t) __a);
31933 }
31934
31935 __extension__ extern __inline float16x8_t
31936 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31937 vcvtq_f16_u16 (uint16x8_t __a)
31938 {
31939 return __builtin_aarch64_floatunsv8hiv8hf ((int16x8_t) __a);
31940 }
31941
31942 __extension__ extern __inline int16x4_t
31943 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31944 vcvt_s16_f16 (float16x4_t __a)
31945 {
31946 return __builtin_aarch64_lbtruncv4hfv4hi (__a);
31947 }
31948
31949 __extension__ extern __inline int16x8_t
31950 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31951 vcvtq_s16_f16 (float16x8_t __a)
31952 {
31953 return __builtin_aarch64_lbtruncv8hfv8hi (__a);
31954 }
31955
31956 __extension__ extern __inline uint16x4_t
31957 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31958 vcvt_u16_f16 (float16x4_t __a)
31959 {
31960 return __builtin_aarch64_lbtruncuv4hfv4hi_us (__a);
31961 }
31962
31963 __extension__ extern __inline uint16x8_t
31964 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31965 vcvtq_u16_f16 (float16x8_t __a)
31966 {
31967 return __builtin_aarch64_lbtruncuv8hfv8hi_us (__a);
31968 }
31969
31970 __extension__ extern __inline int16x4_t
31971 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31972 vcvta_s16_f16 (float16x4_t __a)
31973 {
31974 return __builtin_aarch64_lroundv4hfv4hi (__a);
31975 }
31976
31977 __extension__ extern __inline int16x8_t
31978 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31979 vcvtaq_s16_f16 (float16x8_t __a)
31980 {
31981 return __builtin_aarch64_lroundv8hfv8hi (__a);
31982 }
31983
31984 __extension__ extern __inline uint16x4_t
31985 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31986 vcvta_u16_f16 (float16x4_t __a)
31987 {
31988 return __builtin_aarch64_lrounduv4hfv4hi_us (__a);
31989 }
31990
31991 __extension__ extern __inline uint16x8_t
31992 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
31993 vcvtaq_u16_f16 (float16x8_t __a)
31994 {
31995 return __builtin_aarch64_lrounduv8hfv8hi_us (__a);
31996 }
31997
31998 __extension__ extern __inline int16x4_t
31999 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32000 vcvtm_s16_f16 (float16x4_t __a)
32001 {
32002 return __builtin_aarch64_lfloorv4hfv4hi (__a);
32003 }
32004
32005 __extension__ extern __inline int16x8_t
32006 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32007 vcvtmq_s16_f16 (float16x8_t __a)
32008 {
32009 return __builtin_aarch64_lfloorv8hfv8hi (__a);
32010 }
32011
32012 __extension__ extern __inline uint16x4_t
32013 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32014 vcvtm_u16_f16 (float16x4_t __a)
32015 {
32016 return __builtin_aarch64_lflooruv4hfv4hi_us (__a);
32017 }
32018
32019 __extension__ extern __inline uint16x8_t
32020 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32021 vcvtmq_u16_f16 (float16x8_t __a)
32022 {
32023 return __builtin_aarch64_lflooruv8hfv8hi_us (__a);
32024 }
32025
32026 __extension__ extern __inline int16x4_t
32027 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32028 vcvtn_s16_f16 (float16x4_t __a)
32029 {
32030 return __builtin_aarch64_lfrintnv4hfv4hi (__a);
32031 }
32032
32033 __extension__ extern __inline int16x8_t
32034 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32035 vcvtnq_s16_f16 (float16x8_t __a)
32036 {
32037 return __builtin_aarch64_lfrintnv8hfv8hi (__a);
32038 }
32039
32040 __extension__ extern __inline uint16x4_t
32041 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32042 vcvtn_u16_f16 (float16x4_t __a)
32043 {
32044 return __builtin_aarch64_lfrintnuv4hfv4hi_us (__a);
32045 }
32046
32047 __extension__ extern __inline uint16x8_t
32048 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32049 vcvtnq_u16_f16 (float16x8_t __a)
32050 {
32051 return __builtin_aarch64_lfrintnuv8hfv8hi_us (__a);
32052 }
32053
32054 __extension__ extern __inline int16x4_t
32055 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32056 vcvtp_s16_f16 (float16x4_t __a)
32057 {
32058 return __builtin_aarch64_lceilv4hfv4hi (__a);
32059 }
32060
32061 __extension__ extern __inline int16x8_t
32062 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32063 vcvtpq_s16_f16 (float16x8_t __a)
32064 {
32065 return __builtin_aarch64_lceilv8hfv8hi (__a);
32066 }
32067
32068 __extension__ extern __inline uint16x4_t
32069 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32070 vcvtp_u16_f16 (float16x4_t __a)
32071 {
32072 return __builtin_aarch64_lceiluv4hfv4hi_us (__a);
32073 }
32074
32075 __extension__ extern __inline uint16x8_t
32076 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32077 vcvtpq_u16_f16 (float16x8_t __a)
32078 {
32079 return __builtin_aarch64_lceiluv8hfv8hi_us (__a);
32080 }
32081
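/* Illustrative sketch of the rounding behaviour of the conversions above,
   shown for a lane holding 2.5:
     vcvt_s16_f16  (x)  ->  2    round toward zero (truncate)
     vcvta_s16_f16 (x)  ->  3    round to nearest, ties away from zero
     vcvtn_s16_f16 (x)  ->  2    round to nearest, ties to even
     vcvtm_s16_f16 (x)  ->  2    round toward minus infinity (floor)
     vcvtp_s16_f16 (x)  ->  3    round toward plus infinity (ceiling)
*/
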
32082 __extension__ extern __inline float16x4_t
32083 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32084 vneg_f16 (float16x4_t __a)
32085 {
32086 return -__a;
32087 }
32088
32089 __extension__ extern __inline float16x8_t
32090 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32091 vnegq_f16 (float16x8_t __a)
32092 {
32093 return -__a;
32094 }
32095
32096 __extension__ extern __inline float16x4_t
32097 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32098 vrecpe_f16 (float16x4_t __a)
32099 {
32100 return __builtin_aarch64_frecpev4hf (__a);
32101 }
32102
32103 __extension__ extern __inline float16x8_t
32104 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32105 vrecpeq_f16 (float16x8_t __a)
32106 {
32107 return __builtin_aarch64_frecpev8hf (__a);
32108 }
32109
32110 __extension__ extern __inline float16x4_t
32111 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32112 vrnd_f16 (float16x4_t __a)
32113 {
32114 return __builtin_aarch64_btruncv4hf (__a);
32115 }
32116
32117 __extension__ extern __inline float16x8_t
32118 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32119 vrndq_f16 (float16x8_t __a)
32120 {
32121 return __builtin_aarch64_btruncv8hf (__a);
32122 }
32123
32124 __extension__ extern __inline float16x4_t
32125 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32126 vrnda_f16 (float16x4_t __a)
32127 {
32128 return __builtin_aarch64_roundv4hf (__a);
32129 }
32130
32131 __extension__ extern __inline float16x8_t
32132 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32133 vrndaq_f16 (float16x8_t __a)
32134 {
32135 return __builtin_aarch64_roundv8hf (__a);
32136 }
32137
32138 __extension__ extern __inline float16x4_t
32139 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32140 vrndi_f16 (float16x4_t __a)
32141 {
32142 return __builtin_aarch64_nearbyintv4hf (__a);
32143 }
32144
32145 __extension__ extern __inline float16x8_t
32146 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32147 vrndiq_f16 (float16x8_t __a)
32148 {
32149 return __builtin_aarch64_nearbyintv8hf (__a);
32150 }
32151
32152 __extension__ extern __inline float16x4_t
32153 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32154 vrndm_f16 (float16x4_t __a)
32155 {
32156 return __builtin_aarch64_floorv4hf (__a);
32157 }
32158
32159 __extension__ extern __inline float16x8_t
32160 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32161 vrndmq_f16 (float16x8_t __a)
32162 {
32163 return __builtin_aarch64_floorv8hf (__a);
32164 }
32165
32166 __extension__ extern __inline float16x4_t
32167 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32168 vrndn_f16 (float16x4_t __a)
32169 {
32170 return __builtin_aarch64_frintnv4hf (__a);
32171 }
32172
32173 __extension__ extern __inline float16x8_t
32174 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32175 vrndnq_f16 (float16x8_t __a)
32176 {
32177 return __builtin_aarch64_frintnv8hf (__a);
32178 }
32179
32180 __extension__ extern __inline float16x4_t
32181 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32182 vrndp_f16 (float16x4_t __a)
32183 {
32184 return __builtin_aarch64_ceilv4hf (__a);
32185 }
32186
32187 __extension__ extern __inline float16x8_t
32188 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32189 vrndpq_f16 (float16x8_t __a)
32190 {
32191 return __builtin_aarch64_ceilv8hf (__a);
32192 }
32193
32194 __extension__ extern __inline float16x4_t
32195 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32196 vrndx_f16 (float16x4_t __a)
32197 {
32198 return __builtin_aarch64_rintv4hf (__a);
32199 }
32200
32201 __extension__ extern __inline float16x8_t
32202 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32203 vrndxq_f16 (float16x8_t __a)
32204 {
32205 return __builtin_aarch64_rintv8hf (__a);
32206 }
32207
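/* Illustrative sketch: the vrnd* family rounds to an integral value while
   keeping the floating-point type.  For a lane holding -1.5:
     vrnd_f16  (x) -> -1.0   toward zero
     vrnda_f16 (x) -> -2.0   nearest, ties away from zero
     vrndm_f16 (x) -> -2.0   toward minus infinity
     vrndn_f16 (x) -> -2.0   nearest, ties to even
     vrndp_f16 (x) -> -1.0   toward plus infinity
   vrndi_f16 and vrndx_f16 round using the current FPCR rounding mode;
   vrndx_f16 additionally raises the inexact exception when the value
   changes.  */
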
32208 __extension__ extern __inline float16x4_t
32209 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32210 vrsqrte_f16 (float16x4_t __a)
32211 {
32212 return __builtin_aarch64_rsqrtev4hf (__a);
32213 }
32214
32215 __extension__ extern __inline float16x8_t
32216 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32217 vrsqrteq_f16 (float16x8_t __a)
32218 {
32219 return __builtin_aarch64_rsqrtev8hf (__a);
32220 }
32221
32222 __extension__ extern __inline float16x4_t
32223 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32224 vsqrt_f16 (float16x4_t __a)
32225 {
32226 return __builtin_aarch64_sqrtv4hf (__a);
32227 }
32228
32229 __extension__ extern __inline float16x8_t
32230 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32231 vsqrtq_f16 (float16x8_t __a)
32232 {
32233 return __builtin_aarch64_sqrtv8hf (__a);
32234 }
32235
32236 /* ARMv8.2-A FP16 two-operand vector intrinsics. */
32237
32238 __extension__ extern __inline float16x4_t
32239 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32240 vadd_f16 (float16x4_t __a, float16x4_t __b)
32241 {
32242 return __a + __b;
32243 }
32244
32245 __extension__ extern __inline float16x8_t
32246 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32247 vaddq_f16 (float16x8_t __a, float16x8_t __b)
32248 {
32249 return __a + __b;
32250 }
32251
32252 __extension__ extern __inline float16x4_t
32253 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32254 vabd_f16 (float16x4_t __a, float16x4_t __b)
32255 {
32256 return __builtin_aarch64_fabdv4hf (__a, __b);
32257 }
32258
32259 __extension__ extern __inline float16x8_t
32260 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32261 vabdq_f16 (float16x8_t __a, float16x8_t __b)
32262 {
32263 return __builtin_aarch64_fabdv8hf (__a, __b);
32264 }
32265
32266 __extension__ extern __inline uint16x4_t
32267 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32268 vcage_f16 (float16x4_t __a, float16x4_t __b)
32269 {
32270 return __builtin_aarch64_facgev4hf_uss (__a, __b);
32271 }
32272
32273 __extension__ extern __inline uint16x8_t
32274 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32275 vcageq_f16 (float16x8_t __a, float16x8_t __b)
32276 {
32277 return __builtin_aarch64_facgev8hf_uss (__a, __b);
32278 }
32279
32280 __extension__ extern __inline uint16x4_t
32281 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32282 vcagt_f16 (float16x4_t __a, float16x4_t __b)
32283 {
32284 return __builtin_aarch64_facgtv4hf_uss (__a, __b);
32285 }
32286
32287 __extension__ extern __inline uint16x8_t
32288 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32289 vcagtq_f16 (float16x8_t __a, float16x8_t __b)
32290 {
32291 return __builtin_aarch64_facgtv8hf_uss (__a, __b);
32292 }
32293
32294 __extension__ extern __inline uint16x4_t
32295 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32296 vcale_f16 (float16x4_t __a, float16x4_t __b)
32297 {
32298 return __builtin_aarch64_faclev4hf_uss (__a, __b);
32299 }
32300
32301 __extension__ extern __inline uint16x8_t
32302 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32303 vcaleq_f16 (float16x8_t __a, float16x8_t __b)
32304 {
32305 return __builtin_aarch64_faclev8hf_uss (__a, __b);
32306 }
32307
32308 __extension__ extern __inline uint16x4_t
32309 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32310 vcalt_f16 (float16x4_t __a, float16x4_t __b)
32311 {
32312 return __builtin_aarch64_facltv4hf_uss (__a, __b);
32313 }
32314
32315 __extension__ extern __inline uint16x8_t
32316 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32317 vcaltq_f16 (float16x8_t __a, float16x8_t __b)
32318 {
32319 return __builtin_aarch64_facltv8hf_uss (__a, __b);
32320 }
32321
32322 __extension__ extern __inline uint16x4_t
32323 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32324 vceq_f16 (float16x4_t __a, float16x4_t __b)
32325 {
32326 return __builtin_aarch64_cmeqv4hf_uss (__a, __b);
32327 }
32328
32329 __extension__ extern __inline uint16x8_t
32330 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32331 vceqq_f16 (float16x8_t __a, float16x8_t __b)
32332 {
32333 return __builtin_aarch64_cmeqv8hf_uss (__a, __b);
32334 }
32335
32336 __extension__ extern __inline uint16x4_t
32337 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32338 vcge_f16 (float16x4_t __a, float16x4_t __b)
32339 {
32340 return __builtin_aarch64_cmgev4hf_uss (__a, __b);
32341 }
32342
32343 __extension__ extern __inline uint16x8_t
32344 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32345 vcgeq_f16 (float16x8_t __a, float16x8_t __b)
32346 {
32347 return __builtin_aarch64_cmgev8hf_uss (__a, __b);
32348 }
32349
32350 __extension__ extern __inline uint16x4_t
32351 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32352 vcgt_f16 (float16x4_t __a, float16x4_t __b)
32353 {
32354 return __builtin_aarch64_cmgtv4hf_uss (__a, __b);
32355 }
32356
32357 __extension__ extern __inline uint16x8_t
32358 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32359 vcgtq_f16 (float16x8_t __a, float16x8_t __b)
32360 {
32361 return __builtin_aarch64_cmgtv8hf_uss (__a, __b);
32362 }
32363
32364 __extension__ extern __inline uint16x4_t
32365 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32366 vcle_f16 (float16x4_t __a, float16x4_t __b)
32367 {
32368 return __builtin_aarch64_cmlev4hf_uss (__a, __b);
32369 }
32370
32371 __extension__ extern __inline uint16x8_t
32372 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32373 vcleq_f16 (float16x8_t __a, float16x8_t __b)
32374 {
32375 return __builtin_aarch64_cmlev8hf_uss (__a, __b);
32376 }
32377
32378 __extension__ extern __inline uint16x4_t
32379 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32380 vclt_f16 (float16x4_t __a, float16x4_t __b)
32381 {
32382 return __builtin_aarch64_cmltv4hf_uss (__a, __b);
32383 }
32384
32385 __extension__ extern __inline uint16x8_t
32386 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32387 vcltq_f16 (float16x8_t __a, float16x8_t __b)
32388 {
32389 return __builtin_aarch64_cmltv8hf_uss (__a, __b);
32390 }
32391
32392 __extension__ extern __inline float16x4_t
32393 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32394 vcvt_n_f16_s16 (int16x4_t __a, const int __b)
32395 {
32396 return __builtin_aarch64_scvtfv4hi (__a, __b);
32397 }
32398
32399 __extension__ extern __inline float16x8_t
32400 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32401 vcvtq_n_f16_s16 (int16x8_t __a, const int __b)
32402 {
32403 return __builtin_aarch64_scvtfv8hi (__a, __b);
32404 }
32405
32406 __extension__ extern __inline float16x4_t
32407 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32408 vcvt_n_f16_u16 (uint16x4_t __a, const int __b)
32409 {
32410 return __builtin_aarch64_ucvtfv4hi_sus (__a, __b);
32411 }
32412
32413 __extension__ extern __inline float16x8_t
32414 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32415 vcvtq_n_f16_u16 (uint16x8_t __a, const int __b)
32416 {
32417 return __builtin_aarch64_ucvtfv8hi_sus (__a, __b);
32418 }
32419
32420 __extension__ extern __inline int16x4_t
32421 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32422 vcvt_n_s16_f16 (float16x4_t __a, const int __b)
32423 {
32424 return __builtin_aarch64_fcvtzsv4hf (__a, __b);
32425 }
32426
32427 __extension__ extern __inline int16x8_t
32428 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32429 vcvtq_n_s16_f16 (float16x8_t __a, const int __b)
32430 {
32431 return __builtin_aarch64_fcvtzsv8hf (__a, __b);
32432 }
32433
32434 __extension__ extern __inline uint16x4_t
32435 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32436 vcvt_n_u16_f16 (float16x4_t __a, const int __b)
32437 {
32438 return __builtin_aarch64_fcvtzuv4hf_uss (__a, __b);
32439 }
32440
32441 __extension__ extern __inline uint16x8_t
32442 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32443 vcvtq_n_u16_f16 (float16x8_t __a, const int __b)
32444 {
32445 return __builtin_aarch64_fcvtzuv8hf_uss (__a, __b);
32446 }
32447
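/* Usage sketch (illustrative only): the immediate in the vcvt*_n_*
   conversions is the number of fractional bits (1..16).  Given some
   int16x4_t q8_8 holding Q8.8 fixed-point samples:
     float16x4_t f = vcvt_n_f16_s16 (q8_8, 8);   // f = q8_8 / 256.0
     int16x4_t   q = vcvt_n_s16_f16 (f, 8);      // q = trunc (f * 256.0)
*/
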
32448 __extension__ extern __inline float16x4_t
32449 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32450 vdiv_f16 (float16x4_t __a, float16x4_t __b)
32451 {
32452 return __a / __b;
32453 }
32454
32455 __extension__ extern __inline float16x8_t
32456 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32457 vdivq_f16 (float16x8_t __a, float16x8_t __b)
32458 {
32459 return __a / __b;
32460 }
32461
32462 __extension__ extern __inline float16x4_t
32463 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32464 vmax_f16 (float16x4_t __a, float16x4_t __b)
32465 {
32466 return __builtin_aarch64_smax_nanv4hf (__a, __b);
32467 }
32468
32469 __extension__ extern __inline float16x8_t
32470 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32471 vmaxq_f16 (float16x8_t __a, float16x8_t __b)
32472 {
32473 return __builtin_aarch64_smax_nanv8hf (__a, __b);
32474 }
32475
32476 __extension__ extern __inline float16x4_t
32477 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32478 vmaxnm_f16 (float16x4_t __a, float16x4_t __b)
32479 {
32480 return __builtin_aarch64_fmaxv4hf (__a, __b);
32481 }
32482
32483 __extension__ extern __inline float16x8_t
32484 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32485 vmaxnmq_f16 (float16x8_t __a, float16x8_t __b)
32486 {
32487 return __builtin_aarch64_fmaxv8hf (__a, __b);
32488 }
32489
32490 __extension__ extern __inline float16x4_t
32491 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32492 vmin_f16 (float16x4_t __a, float16x4_t __b)
32493 {
32494 return __builtin_aarch64_smin_nanv4hf (__a, __b);
32495 }
32496
32497 __extension__ extern __inline float16x8_t
32498 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32499 vminq_f16 (float16x8_t __a, float16x8_t __b)
32500 {
32501 return __builtin_aarch64_smin_nanv8hf (__a, __b);
32502 }
32503
32504 __extension__ extern __inline float16x4_t
32505 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32506 vminnm_f16 (float16x4_t __a, float16x4_t __b)
32507 {
32508 return __builtin_aarch64_fminv4hf (__a, __b);
32509 }
32510
32511 __extension__ extern __inline float16x8_t
32512 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32513 vminnmq_f16 (float16x8_t __a, float16x8_t __b)
32514 {
32515 return __builtin_aarch64_fminv8hf (__a, __b);
32516 }
32517
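/* Illustrative note: vmax_f16/vmin_f16 map to FMAX/FMIN, which produce a
   NaN lane whenever either input lane is NaN, whereas vmaxnm_f16/vminnm_f16
   map to FMAXNM/FMINNM, which return the numeric operand when exactly one
   input is NaN (IEEE 754-2008 maxNum/minNum semantics).  For example:
     vmax_f16   (nan_vec, ones) -> NaN lanes
     vmaxnm_f16 (nan_vec, ones) -> 1.0 lanes
*/
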
32518 __extension__ extern __inline float16x4_t
32519 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32520 vmul_f16 (float16x4_t __a, float16x4_t __b)
32521 {
32522 return __a * __b;
32523 }
32524
32525 __extension__ extern __inline float16x8_t
32526 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32527 vmulq_f16 (float16x8_t __a, float16x8_t __b)
32528 {
32529 return __a * __b;
32530 }
32531
32532 __extension__ extern __inline float16x4_t
32533 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32534 vmulx_f16 (float16x4_t __a, float16x4_t __b)
32535 {
32536 return __builtin_aarch64_fmulxv4hf (__a, __b);
32537 }
32538
32539 __extension__ extern __inline float16x8_t
32540 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32541 vmulxq_f16 (float16x8_t __a, float16x8_t __b)
32542 {
32543 return __builtin_aarch64_fmulxv8hf (__a, __b);
32544 }
32545
32546 __extension__ extern __inline float16x4_t
32547 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32548 vpadd_f16 (float16x4_t __a, float16x4_t __b)
32549 {
32550 return __builtin_aarch64_faddpv4hf (__a, __b);
32551 }
32552
32553 __extension__ extern __inline float16x8_t
32554 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32555 vpaddq_f16 (float16x8_t __a, float16x8_t __b)
32556 {
32557 return __builtin_aarch64_faddpv8hf (__a, __b);
32558 }
32559
32560 __extension__ extern __inline float16x4_t
32561 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32562 vpmax_f16 (float16x4_t __a, float16x4_t __b)
32563 {
32564 return __builtin_aarch64_smax_nanpv4hf (__a, __b);
32565 }
32566
32567 __extension__ extern __inline float16x8_t
32568 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32569 vpmaxq_f16 (float16x8_t __a, float16x8_t __b)
32570 {
32571 return __builtin_aarch64_smax_nanpv8hf (__a, __b);
32572 }
32573
32574 __extension__ extern __inline float16x4_t
32575 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32576 vpmaxnm_f16 (float16x4_t __a, float16x4_t __b)
32577 {
32578 return __builtin_aarch64_smaxpv4hf (__a, __b);
32579 }
32580
32581 __extension__ extern __inline float16x8_t
32582 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32583 vpmaxnmq_f16 (float16x8_t __a, float16x8_t __b)
32584 {
32585 return __builtin_aarch64_smaxpv8hf (__a, __b);
32586 }
32587
32588 __extension__ extern __inline float16x4_t
32589 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32590 vpmin_f16 (float16x4_t __a, float16x4_t __b)
32591 {
32592 return __builtin_aarch64_smin_nanpv4hf (__a, __b);
32593 }
32594
32595 __extension__ extern __inline float16x8_t
32596 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32597 vpminq_f16 (float16x8_t __a, float16x8_t __b)
32598 {
32599 return __builtin_aarch64_smin_nanpv8hf (__a, __b);
32600 }
32601
32602 __extension__ extern __inline float16x4_t
32603 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32604 vpminnm_f16 (float16x4_t __a, float16x4_t __b)
32605 {
32606 return __builtin_aarch64_sminpv4hf (__a, __b);
32607 }
32608
32609 __extension__ extern __inline float16x8_t
32610 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32611 vpminnmq_f16 (float16x8_t __a, float16x8_t __b)
32612 {
32613 return __builtin_aarch64_sminpv8hf (__a, __b);
32614 }
32615
32616 __extension__ extern __inline float16x4_t
32617 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32618 vrecps_f16 (float16x4_t __a, float16x4_t __b)
32619 {
32620 return __builtin_aarch64_frecpsv4hf (__a, __b);
32621 }
32622
32623 __extension__ extern __inline float16x8_t
32624 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32625 vrecpsq_f16 (float16x8_t __a, float16x8_t __b)
32626 {
32627 return __builtin_aarch64_frecpsv8hf (__a, __b);
32628 }
32629
32630 __extension__ extern __inline float16x4_t
32631 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32632 vrsqrts_f16 (float16x4_t __a, float16x4_t __b)
32633 {
32634 return __builtin_aarch64_rsqrtsv4hf (__a, __b);
32635 }
32636
32637 __extension__ extern __inline float16x8_t
32638 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32639 vrsqrtsq_f16 (float16x8_t __a, float16x8_t __b)
32640 {
32641 return __builtin_aarch64_rsqrtsv8hf (__a, __b);
32642 }
32643
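/* Usage sketch (illustrative only): vrecpe/vrecps and vrsqrte/vrsqrts are
   intended to be paired as Newton-Raphson estimate and refinement steps,
   e.g. an approximate reciprocal of a refined twice:
     float16x4_t r = vrecpe_f16 (a);
     r = vmul_f16 (r, vrecps_f16 (a, r));   // r *= 2 - a*r
     r = vmul_f16 (r, vrecps_f16 (a, r));
   and similarly for an approximate 1/sqrt(a):
     float16x4_t s = vrsqrte_f16 (a);
     s = vmul_f16 (s, vrsqrts_f16 (vmul_f16 (a, s), s));
*/
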
32644 __extension__ extern __inline float16x4_t
32645 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32646 vsub_f16 (float16x4_t __a, float16x4_t __b)
32647 {
32648 return __a - __b;
32649 }
32650
32651 __extension__ extern __inline float16x8_t
32652 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32653 vsubq_f16 (float16x8_t __a, float16x8_t __b)
32654 {
32655 return __a - __b;
32656 }
32657
32658 /* ARMv8.2-A FP16 three-operand vector intrinsics. */
32659
32660 __extension__ extern __inline float16x4_t
32661 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32662 vfma_f16 (float16x4_t __a, float16x4_t __b, float16x4_t __c)
32663 {
32664 return __builtin_aarch64_fmav4hf (__b, __c, __a);
32665 }
32666
32667 __extension__ extern __inline float16x8_t
32668 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32669 vfmaq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
32670 {
32671 return __builtin_aarch64_fmav8hf (__b, __c, __a);
32672 }
32673
32674 __extension__ extern __inline float16x4_t
32675 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32676 vfms_f16 (float16x4_t __a, float16x4_t __b, float16x4_t __c)
32677 {
32678 return __builtin_aarch64_fnmav4hf (__b, __c, __a);
32679 }
32680
32681 __extension__ extern __inline float16x8_t
32682 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32683 vfmsq_f16 (float16x8_t __a, float16x8_t __b, float16x8_t __c)
32684 {
32685 return __builtin_aarch64_fnmav8hf (__b, __c, __a);
32686 }
32687
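/* Illustrative sketch: the addend is the first argument, so
   vfma_f16 (acc, x, y) computes acc + x*y with a single rounding and
   vfms_f16 (acc, x, y) computes acc - x*y:
     float16x4_t acc = vdup_n_f16 (0.0f);
     acc = vfma_f16 (acc, x, y);   // acc += x * y (fused)
*/
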
32688 /* ARMv8.2-A FP16 lane vector intrinsics. */
32689
32690 __extension__ extern __inline float16_t
32691 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32692 vfmah_lane_f16 (float16_t __a, float16_t __b,
32693 float16x4_t __c, const int __lane)
32694 {
32695 return vfmah_f16 (__a, __b, __aarch64_vget_lane_any (__c, __lane));
32696 }
32697
32698 __extension__ extern __inline float16_t
32699 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32700 vfmah_laneq_f16 (float16_t __a, float16_t __b,
32701 float16x8_t __c, const int __lane)
32702 {
32703 return vfmah_f16 (__a, __b, __aarch64_vget_lane_any (__c, __lane));
32704 }
32705
32706 __extension__ extern __inline float16x4_t
32707 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32708 vfma_lane_f16 (float16x4_t __a, float16x4_t __b,
32709 float16x4_t __c, const int __lane)
32710 {
32711 return vfma_f16 (__a, __b, __aarch64_vdup_lane_f16 (__c, __lane));
32712 }
32713
32714 __extension__ extern __inline float16x8_t
32715 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32716 vfmaq_lane_f16 (float16x8_t __a, float16x8_t __b,
32717 float16x4_t __c, const int __lane)
32718 {
32719 return vfmaq_f16 (__a, __b, __aarch64_vdupq_lane_f16 (__c, __lane));
32720 }
32721
32722 __extension__ extern __inline float16x4_t
32723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32724 vfma_laneq_f16 (float16x4_t __a, float16x4_t __b,
32725 float16x8_t __c, const int __lane)
32726 {
32727 return vfma_f16 (__a, __b, __aarch64_vdup_laneq_f16 (__c, __lane));
32728 }
32729
32730 __extension__ extern __inline float16x8_t
32731 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32732 vfmaq_laneq_f16 (float16x8_t __a, float16x8_t __b,
32733 float16x8_t __c, const int __lane)
32734 {
32735 return vfmaq_f16 (__a, __b, __aarch64_vdupq_laneq_f16 (__c, __lane));
32736 }
32737
32738 __extension__ extern __inline float16x4_t
32739 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32740 vfma_n_f16 (float16x4_t __a, float16x4_t __b, float16_t __c)
32741 {
32742 return vfma_f16 (__a, __b, vdup_n_f16 (__c));
32743 }
32744
32745 __extension__ extern __inline float16x8_t
32746 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32747 vfmaq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
32748 {
32749 return vfmaq_f16 (__a, __b, vdupq_n_f16 (__c));
32750 }
32751
32752 __extension__ extern __inline float16_t
32753 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32754 vfmsh_lane_f16 (float16_t __a, float16_t __b,
32755 float16x4_t __c, const int __lane)
32756 {
32757 return vfmsh_f16 (__a, __b, __aarch64_vget_lane_any (__c, __lane));
32758 }
32759
32760 __extension__ extern __inline float16_t
32761 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32762 vfmsh_laneq_f16 (float16_t __a, float16_t __b,
32763 float16x8_t __c, const int __lane)
32764 {
32765 return vfmsh_f16 (__a, __b, __aarch64_vget_lane_any (__c, __lane));
32766 }
32767
32768 __extension__ extern __inline float16x4_t
32769 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32770 vfms_lane_f16 (float16x4_t __a, float16x4_t __b,
32771 float16x4_t __c, const int __lane)
32772 {
32773 return vfms_f16 (__a, __b, __aarch64_vdup_lane_f16 (__c, __lane));
32774 }
32775
32776 __extension__ extern __inline float16x8_t
32777 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32778 vfmsq_lane_f16 (float16x8_t __a, float16x8_t __b,
32779 float16x4_t __c, const int __lane)
32780 {
32781 return vfmsq_f16 (__a, __b, __aarch64_vdupq_lane_f16 (__c, __lane));
32782 }
32783
32784 __extension__ extern __inline float16x4_t
32785 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32786 vfms_laneq_f16 (float16x4_t __a, float16x4_t __b,
32787 float16x8_t __c, const int __lane)
32788 {
32789 return vfms_f16 (__a, __b, __aarch64_vdup_laneq_f16 (__c, __lane));
32790 }
32791
32792 __extension__ extern __inline float16x8_t
32793 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32794 vfmsq_laneq_f16 (float16x8_t __a, float16x8_t __b,
32795 float16x8_t __c, const int __lane)
32796 {
32797 return vfmsq_f16 (__a, __b, __aarch64_vdupq_laneq_f16 (__c, __lane));
32798 }
32799
32800 __extension__ extern __inline float16x4_t
32801 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32802 vfms_n_f16 (float16x4_t __a, float16x4_t __b, float16_t __c)
32803 {
32804 return vfms_f16 (__a, __b, vdup_n_f16 (__c));
32805 }
32806
32807 __extension__ extern __inline float16x8_t
32808 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32809 vfmsq_n_f16 (float16x8_t __a, float16x8_t __b, float16_t __c)
32810 {
32811 return vfmsq_f16 (__a, __b, vdupq_n_f16 (__c));
32812 }
32813
32814 __extension__ extern __inline float16_t
32815 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32816 vmulh_lane_f16 (float16_t __a, float16x4_t __b, const int __lane)
32817 {
32818 return __a * __aarch64_vget_lane_any (__b, __lane);
32819 }
32820
32821 __extension__ extern __inline float16x4_t
32822 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32823 vmul_lane_f16 (float16x4_t __a, float16x4_t __b, const int __lane)
32824 {
32825 return vmul_f16 (__a, vdup_n_f16 (__aarch64_vget_lane_any (__b, __lane)));
32826 }
32827
32828 __extension__ extern __inline float16x8_t
32829 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32830 vmulq_lane_f16 (float16x8_t __a, float16x4_t __b, const int __lane)
32831 {
32832 return vmulq_f16 (__a, vdupq_n_f16 (__aarch64_vget_lane_any (__b, __lane)));
32833 }
32834
32835 __extension__ extern __inline float16_t
32836 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32837 vmulh_laneq_f16 (float16_t __a, float16x8_t __b, const int __lane)
32838 {
32839 return __a * __aarch64_vget_lane_any (__b, __lane);
32840 }
32841
32842 __extension__ extern __inline float16x4_t
32843 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32844 vmul_laneq_f16 (float16x4_t __a, float16x8_t __b, const int __lane)
32845 {
32846 return vmul_f16 (__a, vdup_n_f16 (__aarch64_vget_lane_any (__b, __lane)));
32847 }
32848
32849 __extension__ extern __inline float16x8_t
32850 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32851 vmulq_laneq_f16 (float16x8_t __a, float16x8_t __b, const int __lane)
32852 {
32853 return vmulq_f16 (__a, vdupq_n_f16 (__aarch64_vget_lane_any (__b, __lane)));
32854 }
32855
32856 __extension__ extern __inline float16x4_t
32857 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32858 vmul_n_f16 (float16x4_t __a, float16_t __b)
32859 {
32860 return vmul_lane_f16 (__a, vdup_n_f16 (__b), 0);
32861 }
32862
32863 __extension__ extern __inline float16x8_t
32864 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32865 vmulq_n_f16 (float16x8_t __a, float16_t __b)
32866 {
32867 return vmulq_laneq_f16 (__a, vdupq_n_f16 (__b), 0);
32868 }
32869
32870 __extension__ extern __inline float16_t
32871 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32872 vmulxh_lane_f16 (float16_t __a, float16x4_t __b, const int __lane)
32873 {
32874 return vmulxh_f16 (__a, __aarch64_vget_lane_any (__b, __lane));
32875 }
32876
32877 __extension__ extern __inline float16x4_t
32878 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32879 vmulx_lane_f16 (float16x4_t __a, float16x4_t __b, const int __lane)
32880 {
32881 return vmulx_f16 (__a, __aarch64_vdup_lane_f16 (__b, __lane));
32882 }
32883
32884 __extension__ extern __inline float16x8_t
32885 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32886 vmulxq_lane_f16 (float16x8_t __a, float16x4_t __b, const int __lane)
32887 {
32888 return vmulxq_f16 (__a, __aarch64_vdupq_lane_f16 (__b, __lane));
32889 }
32890
32891 __extension__ extern __inline float16_t
32892 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32893 vmulxh_laneq_f16 (float16_t __a, float16x8_t __b, const int __lane)
32894 {
32895 return vmulxh_f16 (__a, __aarch64_vget_lane_any (__b, __lane));
32896 }
32897
32898 __extension__ extern __inline float16x4_t
32899 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32900 vmulx_laneq_f16 (float16x4_t __a, float16x8_t __b, const int __lane)
32901 {
32902 return vmulx_f16 (__a, __aarch64_vdup_laneq_f16 (__b, __lane));
32903 }
32904
32905 __extension__ extern __inline float16x8_t
32906 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32907 vmulxq_laneq_f16 (float16x8_t __a, float16x8_t __b, const int __lane)
32908 {
32909 return vmulxq_f16 (__a, __aarch64_vdupq_laneq_f16 (__b, __lane));
32910 }
32911
32912 __extension__ extern __inline float16x4_t
32913 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32914 vmulx_n_f16 (float16x4_t __a, float16_t __b)
32915 {
32916 return vmulx_f16 (__a, vdup_n_f16 (__b));
32917 }
32918
32919 __extension__ extern __inline float16x8_t
32920 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32921 vmulxq_n_f16 (float16x8_t __a, float16_t __b)
32922 {
32923 return vmulxq_f16 (__a, vdupq_n_f16 (__b));
32924 }
32925
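/* Usage sketch (illustrative only): the *_lane_* forms broadcast one lane
   of the coefficient vector, which keeps e.g. a small FIR kernel entirely
   in registers (x0..x3 are successive input windows, coef holds 4 taps):
     acc = vfma_lane_f16 (acc, x0, coef, 0);
     acc = vfma_lane_f16 (acc, x1, coef, 1);
     acc = vfma_lane_f16 (acc, x2, coef, 2);
     acc = vfma_lane_f16 (acc, x3, coef, 3);
*/
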
32926 /* ARMv8.2-A FP16 reduction vector intrinsics. */
32927
32928 __extension__ extern __inline float16_t
32929 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32930 vmaxv_f16 (float16x4_t __a)
32931 {
32932 return __builtin_aarch64_reduc_smax_nan_scal_v4hf (__a);
32933 }
32934
32935 __extension__ extern __inline float16_t
32936 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32937 vmaxvq_f16 (float16x8_t __a)
32938 {
32939 return __builtin_aarch64_reduc_smax_nan_scal_v8hf (__a);
32940 }
32941
32942 __extension__ extern __inline float16_t
32943 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32944 vminv_f16 (float16x4_t __a)
32945 {
32946 return __builtin_aarch64_reduc_smin_nan_scal_v4hf (__a);
32947 }
32948
32949 __extension__ extern __inline float16_t
32950 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32951 vminvq_f16 (float16x8_t __a)
32952 {
32953 return __builtin_aarch64_reduc_smin_nan_scal_v8hf (__a);
32954 }
32955
32956 __extension__ extern __inline float16_t
32957 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32958 vmaxnmv_f16 (float16x4_t __a)
32959 {
32960 return __builtin_aarch64_reduc_smax_scal_v4hf (__a);
32961 }
32962
32963 __extension__ extern __inline float16_t
32964 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32965 vmaxnmvq_f16 (float16x8_t __a)
32966 {
32967 return __builtin_aarch64_reduc_smax_scal_v8hf (__a);
32968 }
32969
32970 __extension__ extern __inline float16_t
32971 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32972 vminnmv_f16 (float16x4_t __a)
32973 {
32974 return __builtin_aarch64_reduc_smin_scal_v4hf (__a);
32975 }
32976
32977 __extension__ extern __inline float16_t
32978 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32979 vminnmvq_f16 (float16x8_t __a)
32980 {
32981 return __builtin_aarch64_reduc_smin_scal_v8hf (__a);
32982 }
32983
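/* Illustrative sketch: the reductions fold one vector to a scalar, with the
   same NaN distinction as the element-wise forms:
     float16_t peak    = vmaxvq_f16 (v);    // NaN if any lane is NaN
     float16_t peak_nm = vmaxnmvq_f16 (v);  // ignores NaN lanes when a
                                            // numeric lane is present
*/
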
32984 #pragma GCC pop_options
32985
32986 /* AdvSIMD Dot Product intrinsics. */
32987
32988 #pragma GCC push_options
32989 #pragma GCC target ("arch=armv8.2-a+dotprod")
32990
32991 __extension__ extern __inline uint32x2_t
32992 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32993 vdot_u32 (uint32x2_t __r, uint8x8_t __a, uint8x8_t __b)
32994 {
32995 return __builtin_aarch64_udotv8qi_uuuu (__r, __a, __b);
32996 }
32997
32998 __extension__ extern __inline uint32x4_t
32999 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33000 vdotq_u32 (uint32x4_t __r, uint8x16_t __a, uint8x16_t __b)
33001 {
33002 return __builtin_aarch64_udotv16qi_uuuu (__r, __a, __b);
33003 }
33004
33005 __extension__ extern __inline int32x2_t
33006 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33007 vdot_s32 (int32x2_t __r, int8x8_t __a, int8x8_t __b)
33008 {
33009 return __builtin_aarch64_sdotv8qi (__r, __a, __b);
33010 }
33011
33012 __extension__ extern __inline int32x4_t
33013 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33014 vdotq_s32 (int32x4_t __r, int8x16_t __a, int8x16_t __b)
33015 {
33016 return __builtin_aarch64_sdotv16qi (__r, __a, __b);
33017 }
33018
33019 __extension__ extern __inline uint32x2_t
33020 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33021 vdot_lane_u32 (uint32x2_t __r, uint8x8_t __a, uint8x8_t __b, const int __index)
33022 {
33023 return __builtin_aarch64_udot_lanev8qi_uuuus (__r, __a, __b, __index);
33024 }
33025
33026 __extension__ extern __inline uint32x2_t
33027 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33028 vdot_laneq_u32 (uint32x2_t __r, uint8x8_t __a, uint8x16_t __b,
33029 const int __index)
33030 {
33031 return __builtin_aarch64_udot_laneqv8qi_uuuus (__r, __a, __b, __index);
33032 }
33033
33034 __extension__ extern __inline uint32x4_t
33035 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33036 vdotq_lane_u32 (uint32x4_t __r, uint8x16_t __a, uint8x8_t __b,
33037 const int __index)
33038 {
33039 return __builtin_aarch64_udot_lanev16qi_uuuus (__r, __a, __b, __index);
33040 }
33041
33042 __extension__ extern __inline uint32x4_t
33043 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33044 vdotq_laneq_u32 (uint32x4_t __r, uint8x16_t __a, uint8x16_t __b,
33045 const int __index)
33046 {
33047 return __builtin_aarch64_udot_laneqv16qi_uuuus (__r, __a, __b, __index);
33048 }
33049
33050 __extension__ extern __inline int32x2_t
33051 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33052 vdot_lane_s32 (int32x2_t __r, int8x8_t __a, int8x8_t __b, const int __index)
33053 {
33054 return __builtin_aarch64_sdot_lanev8qi (__r, __a, __b, __index);
33055 }
33056
33057 __extension__ extern __inline int32x2_t
33058 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33059 vdot_laneq_s32 (int32x2_t __r, int8x8_t __a, int8x16_t __b, const int __index)
33060 {
33061 return __builtin_aarch64_sdot_laneqv8qi (__r, __a, __b, __index);
33062 }
33063
33064 __extension__ extern __inline int32x4_t
33065 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33066 vdotq_lane_s32 (int32x4_t __r, int8x16_t __a, int8x8_t __b, const int __index)
33067 {
33068 return __builtin_aarch64_sdot_lanev16qi (__r, __a, __b, __index);
33069 }
33070
33071 __extension__ extern __inline int32x4_t
33072 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33073 vdotq_laneq_s32 (int32x4_t __r, int8x16_t __a, int8x16_t __b, const int __index)
33074 {
33075 return __builtin_aarch64_sdot_laneqv16qi (__r, __a, __b, __index);
33076 }
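
/* Usage sketch (illustrative only): each 32-bit accumulator lane receives
   the dot product of the corresponding four 8-bit lanes, so a full 16-byte
   dot product can be written as:
     uint32x4_t acc = vdupq_n_u32 (0);
     acc = vdotq_u32 (acc, a, b);   // acc[i] += a[4i+0]*b[4i+0] + ... + a[4i+3]*b[4i+3]
     uint32_t sum = vaddvq_u32 (acc);
   The _lane_ forms reuse one selected 32-bit group of b against every group
   of a.  */
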
33077 #pragma GCC pop_options
33078
33079 #pragma GCC push_options
33080 #pragma GCC target ("arch=armv8.2-a+sm4")
33081
33082 __extension__ extern __inline uint32x4_t
33083 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33084 vsm3ss1q_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
33085 {
33086 return __builtin_aarch64_sm3ss1qv4si_uuuu (__a, __b, __c);
33087 }
33088
33089 __extension__ extern __inline uint32x4_t
33090 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33091 vsm3tt1aq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c, const int __imm2)
33092 {
33093 return __builtin_aarch64_sm3tt1aqv4si_uuuus (__a, __b, __c, __imm2);
33094 }
33095
33096 __extension__ extern __inline uint32x4_t
33097 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33098 vsm3tt1bq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c, const int __imm2)
33099 {
33100 return __builtin_aarch64_sm3tt1bqv4si_uuuus (__a, __b, __c, __imm2);
33101 }
33102
33103 __extension__ extern __inline uint32x4_t
33104 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33105 vsm3tt2aq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c, const int __imm2)
33106 {
33107 return __builtin_aarch64_sm3tt2aqv4si_uuuus (__a, __b, __c, __imm2);
33108 }
33109
33110 __extension__ extern __inline uint32x4_t
33111 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33112 vsm3tt2bq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c, const int __imm2)
33113 {
33114 return __builtin_aarch64_sm3tt2bqv4si_uuuus (__a, __b, __c, __imm2);
33115 }
33116
33117 __extension__ extern __inline uint32x4_t
33118 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33119 vsm3partw1q_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
33120 {
33121 return __builtin_aarch64_sm3partw1qv4si_uuuu (__a, __b, __c);
33122 }

33123 __extension__ extern __inline uint32x4_t
33124 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33125 vsm3partw2q_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
33126 {
33127 return __builtin_aarch64_sm3partw2qv4si_uuuu (__a, __b, __c);
33128 }
33129
33130 __extension__ extern __inline uint32x4_t
33131 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33132 vsm4eq_u32 (uint32x4_t __a, uint32x4_t __b)
33133 {
33134 return __builtin_aarch64_sm4eqv4si_uuu (__a, __b);
33135 }
33136
33137 __extension__ extern __inline uint32x4_t
33138 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33139 vsm4ekeyq_u32 (uint32x4_t __a, uint32x4_t __b)
33140 {
33141 return __builtin_aarch64_sm4ekeyqv4si_uuu (__a, __b);
33142 }
33143
33144 #pragma GCC pop_options
33145
33146 #pragma GCC push_options
33147 #pragma GCC target ("arch=armv8.2-a+sha3")
33148
33149 __extension__ extern __inline uint64x2_t
33150 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33151 vsha512hq_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
33152 {
33153 return __builtin_aarch64_crypto_sha512hqv2di_uuuu (__a, __b, __c);
33154 }
33155
33156 __extension__ extern __inline uint64x2_t
33157 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33158 vsha512h2q_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
33159 {
33160 return __builtin_aarch64_crypto_sha512h2qv2di_uuuu (__a, __b, __c);
33161 }
33162
33163 __extension__ extern __inline uint64x2_t
33164 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33165 vsha512su0q_u64 (uint64x2_t __a, uint64x2_t __b)
33166 {
33167 return __builtin_aarch64_crypto_sha512su0qv2di_uuu (__a, __b);
33168 }
33169
33170 __extension__ extern __inline uint64x2_t
33171 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33172 vsha512su1q_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
33173 {
33174 return __builtin_aarch64_crypto_sha512su1qv2di_uuuu (__a, __b, __c);
33175 }
33176
33177 __extension__ extern __inline uint8x16_t
33178 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33179 veor3q_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
33180 {
33181 return __builtin_aarch64_eor3qv16qi_uuuu (__a, __b, __c);
33182 }
33183
33184 __extension__ extern __inline uint16x8_t
33185 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33186 veor3q_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
33187 {
33188 return __builtin_aarch64_eor3qv8hi_uuuu (__a, __b, __c);
33189 }
33190
33191 __extension__ extern __inline uint32x4_t
33192 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33193 veor3q_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
33194 {
33195 return __builtin_aarch64_eor3qv4si_uuuu (__a, __b, __c);
33196 }
33197
33198 __extension__ extern __inline uint64x2_t
33199 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33200 veor3q_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
33201 {
33202 return __builtin_aarch64_eor3qv2di_uuuu (__a, __b, __c);
33203 }
33204
33206 __extension__ extern __inline int8x16_t
33207 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33208 veor3q_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
33209 {
33210 return __builtin_aarch64_eor3qv16qi (__a, __b, __c);
33211 }
33212
33213 __extension__ extern __inline int16x8_t
33214 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33215 veor3q_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
33216 {
33217 return __builtin_aarch64_eor3qv8hi (__a, __b, __c);
33218 }
33219
33220 __extension__ extern __inline int32x4_t
33221 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33222 veor3q_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
33223 {
33224 return __builtin_aarch64_eor3qv4si (__a, __b, __c);
33225 }
33226
33227 __extension__ extern __inline int64x2_t
33228 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33229 veor3q_s64 (int64x2_t __a, int64x2_t __b, int64x2_t __c)
33230 {
33231 return __builtin_aarch64_eor3qv2di (__a, __b, __c);
33232 }
33233
33234 __extension__ extern __inline uint64x2_t
33235 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33236 vrax1q_u64 (uint64x2_t __a, uint64x2_t __b)
33237 {
33238 return __builtin_aarch64_rax1qv2di_uuu (__a, __b);
33239 }
33240
33241 __extension__ extern __inline uint64x2_t
33242 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33243 vxarq_u64 (uint64x2_t __a, uint64x2_t __b, const int __imm6)
33244 {
33245 return __builtin_aarch64_xarqv2di_uuus (__a, __b, __imm6);
33246 }
33247
33248 __extension__ extern __inline uint8x16_t
33249 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33250 vbcaxq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
33251 {
33252 return __builtin_aarch64_bcaxqv16qi_uuuu (__a, __b, __c);
33253 }
33254
33255 __extension__ extern __inline uint16x8_t
33256 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33257 vbcaxq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
33258 {
33259 return __builtin_aarch64_bcaxqv8hi_uuuu (__a, __b, __c);
33260 }
33261
33262 __extension__ extern __inline uint32x4_t
33263 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33264 vbcaxq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
33265 {
33266 return __builtin_aarch64_bcaxqv4si_uuuu (__a, __b, __c);
33267 }
33268
33269 __extension__ extern __inline uint64x2_t
33270 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33271 vbcaxq_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
33272 {
33273 return __builtin_aarch64_bcaxqv2di_uuuu (__a, __b, __c);
33274 }
33275
33276 __extension__ extern __inline int8x16_t
33277 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33278 vbcaxq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
33279 {
33280 return __builtin_aarch64_bcaxqv16qi (__a, __b, __c);
33281 }
33282
33283 __extension__ extern __inline int16x8_t
33284 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33285 vbcaxq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
33286 {
33287 return __builtin_aarch64_bcaxqv8hi (__a, __b, __c);
33288 }
33289
33290 __extension__ extern __inline int32x4_t
33291 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33292 vbcaxq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
33293 {
33294 return __builtin_aarch64_bcaxqv4si (__a, __b, __c);
33295 }
33296
33297 __extension__ extern __inline int64x2_t
33298 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33299 vbcaxq_s64 (int64x2_t __a, int64x2_t __b, int64x2_t __c)
33300 {
33301 return __builtin_aarch64_bcaxqv2di (__a, __b, __c);
33302 }
33303
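/* Illustrative sketch of the SHA-3 helper semantics above:
     veor3q_u64 (a, b, c)   // a ^ b ^ c          (EOR3)
     vbcaxq_u64 (a, b, c)   // a ^ (b & ~c)       (BCAX)
     vrax1q_u64 (a, b)      // a ^ rol64 (b, 1)   (RAX1)
     vxarq_u64  (a, b, n)   // ror64 (a ^ b, n)   (XAR)
   e.g. the Keccak rotation by one, rol64 (x ^ y, 1), can be written as
   vxarq_u64 (x, y, 63).  */
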
33304 #pragma GCC pop_options
33305
33306 /* AdvSIMD Complex Number intrinsics. */
33307
33308 #pragma GCC push_options
33309 #pragma GCC target ("arch=armv8.3-a")
33310
33311 #pragma GCC push_options
33312 #pragma GCC target ("+fp16")
33313 __extension__ extern __inline float16x4_t
33314 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33315 vcadd_rot90_f16 (float16x4_t __a, float16x4_t __b)
33316 {
33317 return __builtin_aarch64_fcadd90v4hf (__a, __b);
33318 }
33319
33320 __extension__ extern __inline float16x8_t
33321 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33322 vcaddq_rot90_f16 (float16x8_t __a, float16x8_t __b)
33323 {
33324 return __builtin_aarch64_fcadd90v8hf (__a, __b);
33325 }
33326
33327 __extension__ extern __inline float16x4_t
33328 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33329 vcadd_rot270_f16 (float16x4_t __a, float16x4_t __b)
33330 {
33331 return __builtin_aarch64_fcadd270v4hf (__a, __b);
33332 }
33333
33334 __extension__ extern __inline float16x8_t
33335 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33336 vcaddq_rot270_f16 (float16x8_t __a, float16x8_t __b)
33337 {
33338 return __builtin_aarch64_fcadd270v8hf (__a, __b);
33339 }
33340
33341 __extension__ extern __inline float16x4_t
33342 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33343 vcmla_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b)
33344 {
33345 return __builtin_aarch64_fcmla0v4hf (__r, __a, __b);
33346 }
33347
33348 __extension__ extern __inline float16x8_t
33349 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33350 vcmlaq_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b)
33351 {
33352 return __builtin_aarch64_fcmla0v8hf (__r, __a, __b);
33353 }
33354
33355 __extension__ extern __inline float16x4_t
33356 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33357 vcmla_lane_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b,
33358 const int __index)
33359 {
33360 return __builtin_aarch64_fcmla_lane0v4hf (__r, __a, __b, __index);
33361 }
33362
33363 __extension__ extern __inline float16x4_t
33364 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33365 vcmla_laneq_f16 (float16x4_t __r, float16x4_t __a, float16x8_t __b,
33366 const int __index)
33367 {
33368 return __builtin_aarch64_fcmla_laneq0v4hf (__r, __a, __b, __index);
33369 }
33370
33371 __extension__ extern __inline float16x8_t
33372 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33373 vcmlaq_lane_f16 (float16x8_t __r, float16x8_t __a, float16x4_t __b,
33374 const int __index)
33375 {
33376 return __builtin_aarch64_fcmlaq_lane0v8hf (__r, __a, __b, __index);
33377 }
33378
33379 __extension__ extern __inline float16x8_t
33380 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33381 vcmlaq_rot90_lane_f16 (float16x8_t __r, float16x8_t __a, float16x4_t __b,
33382 const int __index)
33383 {
33384 return __builtin_aarch64_fcmlaq_lane90v8hf (__r, __a, __b, __index);
33385 }
33386
33387 __extension__ extern __inline float16x4_t
33388 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33389 vcmla_rot90_laneq_f16 (float16x4_t __r, float16x4_t __a, float16x8_t __b,
33390 const int __index)
33391 {
33392 return __builtin_aarch64_fcmla_laneq90v4hf (__r, __a, __b, __index);
33393 }
33394
33395 __extension__ extern __inline float16x4_t
33396 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33397 vcmla_rot90_lane_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b,
33398 const int __index)
33399 {
33400 return __builtin_aarch64_fcmla_lane90v4hf (__r, __a, __b, __index);
33401 }
33402
33403 __extension__ extern __inline float16x8_t
33404 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33405 vcmlaq_rot90_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b)
33406 {
33407 return __builtin_aarch64_fcmla90v8hf (__r, __a, __b);
33408 }
33409
33410 __extension__ extern __inline float16x4_t
33411 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33412 vcmla_rot90_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b)
33413 {
33414 return __builtin_aarch64_fcmla90v4hf (__r, __a, __b);
33415 }
33416
33417 __extension__ extern __inline float16x8_t
33418 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33419 vcmlaq_laneq_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b,
33420 const int __index)
33421 {
33422 return __builtin_aarch64_fcmla_lane0v8hf (__r, __a, __b, __index);
33423 }
33424
33425 __extension__ extern __inline float16x4_t
33426 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33427 vcmla_rot180_laneq_f16 (float16x4_t __r, float16x4_t __a, float16x8_t __b,
33428 const int __index)
33429 {
33430 return __builtin_aarch64_fcmla_laneq180v4hf (__r, __a, __b, __index);
33431 }
33432
33433 __extension__ extern __inline float16x4_t
33434 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33435 vcmla_rot180_lane_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b,
33436 const int __index)
33437 {
33438 return __builtin_aarch64_fcmla_lane180v4hf (__r, __a, __b, __index);
33439 }
33440
33441 __extension__ extern __inline float16x8_t
33442 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33443 vcmlaq_rot180_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b)
33444 {
33445 return __builtin_aarch64_fcmla180v8hf (__r, __a, __b);
33446 }
33447
33448 __extension__ extern __inline float16x4_t
33449 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33450 vcmla_rot180_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b)
33451 {
33452 return __builtin_aarch64_fcmla180v4hf (__r, __a, __b);
33453 }
33454
33455 __extension__ extern __inline float16x8_t
33456 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33457 vcmlaq_rot90_laneq_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b,
33458 const int __index)
33459 {
33460 return __builtin_aarch64_fcmla_lane90v8hf (__r, __a, __b, __index);
33461 }
33462
33463 __extension__ extern __inline float16x8_t
33464 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33465 vcmlaq_rot270_laneq_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b,
33466 const int __index)
33467 {
33468 return __builtin_aarch64_fcmla_lane270v8hf (__r, __a, __b, __index);
33469 }
33470
33471 __extension__ extern __inline float16x8_t
33472 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33473 vcmlaq_rot270_lane_f16 (float16x8_t __r, float16x8_t __a, float16x4_t __b,
33474 const int __index)
33475 {
33476 return __builtin_aarch64_fcmlaq_lane270v8hf (__r, __a, __b, __index);
33477 }
33478
33479 __extension__ extern __inline float16x4_t
33480 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33481 vcmla_rot270_laneq_f16 (float16x4_t __r, float16x4_t __a, float16x8_t __b,
33482 const int __index)
33483 {
33484 return __builtin_aarch64_fcmla_laneq270v4hf (__r, __a, __b, __index);
33485 }
33486
33487 __extension__ extern __inline float16x8_t
33488 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33489 vcmlaq_rot270_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b)
33490 {
33491 return __builtin_aarch64_fcmla270v8hf (__r, __a, __b);
33492 }
33493
33494 __extension__ extern __inline float16x4_t
33495 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33496 vcmla_rot270_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b)
33497 {
33498 return __builtin_aarch64_fcmla270v4hf (__r, __a, __b);
33499 }
33500
33501 __extension__ extern __inline float16x8_t
33502 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33503 vcmlaq_rot180_laneq_f16 (float16x8_t __r, float16x8_t __a, float16x8_t __b,
33504 const int __index)
33505 {
33506 return __builtin_aarch64_fcmla_lane180v8hf (__r, __a, __b, __index);
33507 }
33508
33509 __extension__ extern __inline float16x8_t
33510 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33511 vcmlaq_rot180_lane_f16 (float16x8_t __r, float16x8_t __a, float16x4_t __b,
33512 const int __index)
33513 {
33514 return __builtin_aarch64_fcmlaq_lane180v8hf (__r, __a, __b, __index);
33515 }
33516
33517 __extension__ extern __inline float16x4_t
33518 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33519 vcmla_rot270_lane_f16 (float16x4_t __r, float16x4_t __a, float16x4_t __b,
33520 const int __index)
33521 {
33522 return __builtin_aarch64_fcmla_lane270v4hf (__r, __a, __b, __index);
33523 }
33524 #pragma GCC pop_options
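/* Editorial usage note (illustrative sketch, not part of the original
   header): in the _lane_ and _laneq_ forms above the index selects a
   complex (real, imaginary) pair rather than a single element, so a
   64-bit half-precision __b gives lane indices 0-1 and a 128-bit __b
   gives 0-3.  Multiplying every complex pair of __a by pair 1 of __b and
   accumulating can be sketched, assuming a compile-time constant index, as

     __r = vcmla_lane_f16 (__r, __a, __b, 1);
     __r = vcmla_rot90_lane_f16 (__r, __a, __b, 1);  */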
33525
33526 __extension__ extern __inline float32x2_t
33527 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33528 vcadd_rot90_f32 (float32x2_t __a, float32x2_t __b)
33529 {
33530 return __builtin_aarch64_fcadd90v2sf (__a, __b);
33531 }
33532
33533 __extension__ extern __inline float32x4_t
33534 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33535 vcaddq_rot90_f32 (float32x4_t __a, float32x4_t __b)
33536 {
33537 return __builtin_aarch64_fcadd90v4sf (__a, __b);
33538 }
33539
33540 __extension__ extern __inline float64x2_t
33541 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33542 vcaddq_rot90_f64 (float64x2_t __a, float64x2_t __b)
33543 {
33544 return __builtin_aarch64_fcadd90v2df (__a, __b);
33545 }
33546
33547 __extension__ extern __inline float32x2_t
33548 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33549 vcadd_rot270_f32 (float32x2_t __a, float32x2_t __b)
33550 {
33551 return __builtin_aarch64_fcadd270v2sf (__a, __b);
33552 }
33553
33554 __extension__ extern __inline float32x4_t
33555 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33556 vcaddq_rot270_f32 (float32x4_t __a, float32x4_t __b)
33557 {
33558 return __builtin_aarch64_fcadd270v4sf (__a, __b);
33559 }
33560
33561 __extension__ extern __inline float64x2_t
33562 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33563 vcaddq_rot270_f64 (float64x2_t __a, float64x2_t __b)
33564 {
33565 return __builtin_aarch64_fcadd270v2df (__a, __b);
33566 }
33567
33568 __extension__ extern __inline float32x2_t
33569 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33570 vcmla_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b)
33571 {
33572 return __builtin_aarch64_fcmla0v2sf (__r, __a, __b);
33573 }
33574
33575 __extension__ extern __inline float32x4_t
33576 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33577 vcmlaq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b)
33578 {
33579 return __builtin_aarch64_fcmla0v4sf (__r, __a, __b);
33580 }
33581
33582 __extension__ extern __inline float64x2_t
33583 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33584 vcmlaq_f64 (float64x2_t __r, float64x2_t __a, float64x2_t __b)
33585 {
33586 return __builtin_aarch64_fcmla0v2df (__r, __a, __b);
33587 }
33588
33589 __extension__ extern __inline float32x2_t
33590 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33591 vcmla_lane_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b,
33592 const int __index)
33593 {
33594 return __builtin_aarch64_fcmla_lane0v2sf (__r, __a, __b, __index);
33595 }
33596
33597 __extension__ extern __inline float32x2_t
33598 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33599 vcmla_laneq_f32 (float32x2_t __r, float32x2_t __a, float32x4_t __b,
33600 const int __index)
33601 {
33602 return __builtin_aarch64_fcmla_laneq0v2sf (__r, __a, __b, __index);
33603 }
33604
33605 __extension__ extern __inline float32x4_t
33606 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33607 vcmlaq_lane_f32 (float32x4_t __r, float32x4_t __a, float32x2_t __b,
33608 const int __index)
33609 {
33610 return __builtin_aarch64_fcmlaq_lane0v4sf (__r, __a, __b, __index);
33611 }
33612
33613 __extension__ extern __inline float32x4_t
33614 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33615 vcmlaq_laneq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b,
33616 const int __index)
33617 {
33618 return __builtin_aarch64_fcmla_lane0v4sf (__r, __a, __b, __index);
33619 }
33620
33621 __extension__ extern __inline float32x2_t
33622 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33623 vcmla_rot90_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b)
33624 {
33625 return __builtin_aarch64_fcmla90v2sf (__r, __a, __b);
33626 }
33627
33628 __extension__ extern __inline float32x4_t
33629 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33630 vcmlaq_rot90_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b)
33631 {
33632 return __builtin_aarch64_fcmla90v4sf (__r, __a, __b);
33633 }
33634
33635 __extension__ extern __inline float64x2_t
33636 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33637 vcmlaq_rot90_f64 (float64x2_t __r, float64x2_t __a, float64x2_t __b)
33638 {
33639 return __builtin_aarch64_fcmla90v2df (__r, __a, __b);
33640 }
33641
33642 __extension__ extern __inline float32x2_t
33643 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33644 vcmla_rot90_lane_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b,
33645 const int __index)
33646 {
33647 return __builtin_aarch64_fcmla_lane90v2sf (__r, __a, __b, __index);
33648 }
33649
33650 __extension__ extern __inline float32x2_t
33651 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33652 vcmla_rot90_laneq_f32 (float32x2_t __r, float32x2_t __a, float32x4_t __b,
33653 const int __index)
33654 {
33655 return __builtin_aarch64_fcmla_laneq90v2sf (__r, __a, __b, __index);
33656 }
33657
33658 __extension__ extern __inline float32x4_t
33659 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33660 vcmlaq_rot90_lane_f32 (float32x4_t __r, float32x4_t __a, float32x2_t __b,
33661 const int __index)
33662 {
33663 return __builtin_aarch64_fcmlaq_lane90v4sf (__r, __a, __b, __index);
33664 }
33665
33666 __extension__ extern __inline float32x4_t
33667 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33668 vcmlaq_rot90_laneq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b,
33669 const int __index)
33670 {
33671 return __builtin_aarch64_fcmla_lane90v4sf (__r, __a, __b, __index);
33672 }
33673
33674 __extension__ extern __inline float32x2_t
33675 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33676 vcmla_rot180_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b)
33677 {
33678 return __builtin_aarch64_fcmla180v2sf (__r, __a, __b);
33679 }
33680
33681 __extension__ extern __inline float32x4_t
33682 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33683 vcmlaq_rot180_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b)
33684 {
33685 return __builtin_aarch64_fcmla180v4sf (__r, __a, __b);
33686 }
33687
33688 __extension__ extern __inline float64x2_t
33689 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33690 vcmlaq_rot180_f64 (float64x2_t __r, float64x2_t __a, float64x2_t __b)
33691 {
33692 return __builtin_aarch64_fcmla180v2df (__r, __a, __b);
33693 }
33694
33695 __extension__ extern __inline float32x2_t
33696 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33697 vcmla_rot180_lane_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b,
33698 const int __index)
33699 {
33700 return __builtin_aarch64_fcmla_lane180v2sf (__r, __a, __b, __index);
33701 }
33702
33703 __extension__ extern __inline float32x2_t
33704 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33705 vcmla_rot180_laneq_f32 (float32x2_t __r, float32x2_t __a, float32x4_t __b,
33706 const int __index)
33707 {
33708 return __builtin_aarch64_fcmla_laneq180v2sf (__r, __a, __b, __index);
33709 }
33710
33711 __extension__ extern __inline float32x4_t
33712 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33713 vcmlaq_rot180_lane_f32 (float32x4_t __r, float32x4_t __a, float32x2_t __b,
33714 const int __index)
33715 {
33716 return __builtin_aarch64_fcmlaq_lane180v4sf (__r, __a, __b, __index);
33717 }
33718
33719 __extension__ extern __inline float32x4_t
33720 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33721 vcmlaq_rot180_laneq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b,
33722 const int __index)
33723 {
33724 return __builtin_aarch64_fcmla_lane180v4sf (__r, __a, __b, __index);
33725 }
33726
33727 __extension__ extern __inline float32x2_t
33728 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33729 vcmla_rot270_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b)
33730 {
33731 return __builtin_aarch64_fcmla270v2sf (__r, __a, __b);
33732 }
33733
33734 __extension__ extern __inline float32x4_t
33735 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33736 vcmlaq_rot270_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b)
33737 {
33738 return __builtin_aarch64_fcmla270v4sf (__r, __a, __b);
33739 }
33740
33741 __extension__ extern __inline float64x2_t
33742 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33743 vcmlaq_rot270_f64 (float64x2_t __r, float64x2_t __a, float64x2_t __b)
33744 {
33745 return __builtin_aarch64_fcmla270v2df (__r, __a, __b);
33746 }
33747
33748 __extension__ extern __inline float32x2_t
33749 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33750 vcmla_rot270_lane_f32 (float32x2_t __r, float32x2_t __a, float32x2_t __b,
33751 const int __index)
33752 {
33753 return __builtin_aarch64_fcmla_lane270v2sf (__r, __a, __b, __index);
33754 }
33755
33756 __extension__ extern __inline float32x2_t
33757 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33758 vcmla_rot270_laneq_f32 (float32x2_t __r, float32x2_t __a, float32x4_t __b,
33759 const int __index)
33760 {
33761 return __builtin_aarch64_fcmla_laneq270v2sf (__r, __a, __b, __index);
33762 }
33763
33764 __extension__ extern __inline float32x4_t
33765 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33766 vcmlaq_rot270_lane_f32 (float32x4_t __r, float32x4_t __a, float32x2_t __b,
33767 const int __index)
33768 {
33769 return __builtin_aarch64_fcmlaq_lane270v4sf (__r, __a, __b, __index);
33770 }
33771
33772 __extension__ extern __inline float32x4_t
33773 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33774 vcmlaq_rot270_laneq_f32 (float32x4_t __r, float32x4_t __a, float32x4_t __b,
33775 const int __index)
33776 {
33777 return __builtin_aarch64_fcmla_lane270v4sf (__r, __a, __b, __index);
33778 }
33779
33780 #pragma GCC pop_options
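/* Editorial usage sketch (illustrative, not part of the original header):
   pairing the 0- and 90-degree rotations yields a full complex
   multiply-accumulate on interleaved (real, imaginary) data, e.g. for two
   single-precision complex numbers per 128-bit vector:

     static inline float32x4_t
     complex_mla (float32x4_t __acc, float32x4_t __a, float32x4_t __b)
     {
       __acc = vcmlaq_f32 (__acc, __a, __b);
       return vcmlaq_rot90_f32 (__acc, __a, __b);
     }

   This adds the complex product of __a and __b, pair by pair, to __acc.  */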
33781
33782 #pragma GCC push_options
33783 #pragma GCC target ("arch=armv8.2-a+fp16fml")
33784
33785 __extension__ extern __inline float32x2_t
33786 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33787 vfmlal_low_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
33788 {
33789 return __builtin_aarch64_fmlal_lowv2sf (__r, __a, __b);
33790 }
33791
33792 __extension__ extern __inline float32x2_t
33793 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33794 vfmlsl_low_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
33795 {
33796 return __builtin_aarch64_fmlsl_lowv2sf (__r, __a, __b);
33797 }
33798
33799 __extension__ extern __inline float32x4_t
33800 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33801 vfmlalq_low_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
33802 {
33803 return __builtin_aarch64_fmlalq_lowv4sf (__r, __a, __b);
33804 }
33805
33806 __extension__ extern __inline float32x4_t
33807 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33808 vfmlslq_low_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
33809 {
33810 return __builtin_aarch64_fmlslq_lowv4sf (__r, __a, __b);
33811 }
33812
33813 __extension__ extern __inline float32x2_t
33814 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33815 vfmlal_high_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
33816 {
33817 return __builtin_aarch64_fmlal_highv2sf (__r, __a, __b);
33818 }
33819
33820 __extension__ extern __inline float32x2_t
33821 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33822 vfmlsl_high_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b)
33823 {
33824 return __builtin_aarch64_fmlsl_highv2sf (__r, __a, __b);
33825 }
33826
33827 __extension__ extern __inline float32x4_t
33828 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33829 vfmlalq_high_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
33830 {
33831 return __builtin_aarch64_fmlalq_highv4sf (__r, __a, __b);
33832 }
33833
33834 __extension__ extern __inline float32x4_t
33835 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33836 vfmlslq_high_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b)
33837 {
33838 return __builtin_aarch64_fmlslq_highv4sf (__r, __a, __b);
33839 }
33840
33841 __extension__ extern __inline float32x2_t
33842 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33843 vfmlal_lane_low_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
33844 const int __lane)
33845 {
33846 return __builtin_aarch64_fmlal_lane_lowv2sf (__r, __a, __b, __lane);
33847 }
33848
33849 __extension__ extern __inline float32x2_t
33850 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33851 vfmlsl_lane_low_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
33852 const int __lane)
33853 {
33854 return __builtin_aarch64_fmlsl_lane_lowv2sf (__r, __a, __b, __lane);
33855 }
33856
33857 __extension__ extern __inline float32x2_t
33858 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33859 vfmlal_laneq_low_f16 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
33860 const int __lane)
33861 {
33862 return __builtin_aarch64_fmlal_laneq_lowv2sf (__r, __a, __b, __lane);
33863 }
33864
33865 __extension__ extern __inline float32x2_t
33866 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33867 vfmlsl_laneq_low_f16 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
33868 const int __lane)
33869 {
33870 return __builtin_aarch64_fmlsl_laneq_lowv2sf (__r, __a, __b, __lane);
33871 }
33872
33873 __extension__ extern __inline float32x4_t
33874 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33875 vfmlalq_lane_low_f16 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
33876 const int __lane)
33877 {
33878 return __builtin_aarch64_fmlalq_lane_lowv4sf (__r, __a, __b, __lane);
33879 }
33880
33881 __extension__ extern __inline float32x4_t
33882 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33883 vfmlslq_lane_low_f16 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
33884 const int __lane)
33885 {
33886 return __builtin_aarch64_fmlslq_lane_lowv4sf (__r, __a, __b, __lane);
33887 }
33888
33889 __extension__ extern __inline float32x4_t
33890 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33891 vfmlalq_laneq_low_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
33892 const int __lane)
33893 {
33894 return __builtin_aarch64_fmlalq_laneq_lowv4sf (__r, __a, __b, __lane);
33895 }
33896
33897 __extension__ extern __inline float32x4_t
33898 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33899 vfmlslq_laneq_low_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
33900 const int __lane)
33901 {
33902 return __builtin_aarch64_fmlslq_laneq_lowv4sf (__r, __a, __b, __lane);
33903 }
33904
33905 __extension__ extern __inline float32x2_t
33906 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33907 vfmlal_lane_high_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
33908 const int __lane)
33909 {
33910 return __builtin_aarch64_fmlal_lane_highv2sf (__r, __a, __b, __lane);
33911 }
33912
33913 __extension__ extern __inline float32x2_t
33914 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33915 vfmlsl_lane_high_f16 (float32x2_t __r, float16x4_t __a, float16x4_t __b,
33916 const int __lane)
33917 {
33918 return __builtin_aarch64_fmlsl_lane_highv2sf (__r, __a, __b, __lane);
33919 }
33920
33921 __extension__ extern __inline float32x2_t
33922 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33923 vfmlal_laneq_high_f16 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
33924 const int __lane)
33925 {
33926 return __builtin_aarch64_fmlal_laneq_highv2sf (__r, __a, __b, __lane);
33927 }
33928
33929 __extension__ extern __inline float32x2_t
33930 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33931 vfmlsl_laneq_high_f16 (float32x2_t __r, float16x4_t __a, float16x8_t __b,
33932 const int __lane)
33933 {
33934 return __builtin_aarch64_fmlsl_laneq_highv2sf (__r, __a, __b, __lane);
33935 }
33936
33937 __extension__ extern __inline float32x4_t
33938 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33939 vfmlalq_lane_high_f16 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
33940 const int __lane)
33941 {
33942 return __builtin_aarch64_fmlalq_lane_highv4sf (__r, __a, __b, __lane);
33943 }
33944
33945 __extension__ extern __inline float32x4_t
33946 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33947 vfmlslq_lane_high_f16 (float32x4_t __r, float16x8_t __a, float16x4_t __b,
33948 const int __lane)
33949 {
33950 return __builtin_aarch64_fmlslq_lane_highv4sf (__r, __a, __b, __lane);
33951 }
33952
33953 __extension__ extern __inline float32x4_t
33954 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33955 vfmlalq_laneq_high_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
33956 const int __lane)
33957 {
33958 return __builtin_aarch64_fmlalq_laneq_highv4sf (__r, __a, __b, __lane);
33959 }
33960
33961 __extension__ extern __inline float32x4_t
33962 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33963 vfmlslq_laneq_high_f16 (float32x4_t __r, float16x8_t __a, float16x8_t __b,
33964 const int __lane)
33965 {
33966 return __builtin_aarch64_fmlslq_laneq_highv4sf (__r, __a, __b, __lane);
33967 }
33968
33969 #pragma GCC pop_options
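/* Editorial usage sketch (illustrative, not part of the original header):
   the FMLAL/FMLSL intrinsics widen the low or high half of their
   half-precision operands and accumulate into single precision, so all
   eight products of two float16x8_t vectors can be folded into four
   float32 accumulators.  Assuming __px and __py each point at eight
   float16_t values:

     float32x4_t __acc = vdupq_n_f32 (0.0f);
     float16x8_t __x = vld1q_f16 (__px);
     float16x8_t __y = vld1q_f16 (__py);
     __acc = vfmlalq_low_f16 (__acc, __x, __y);
     __acc = vfmlalq_high_f16 (__acc, __x, __y);  */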
33970
33971 #pragma GCC push_options
33972 #pragma GCC target ("arch=armv8.5-a")
33973
33974 __extension__ extern __inline float32x2_t
33975 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33976 vrnd32z_f32 (float32x2_t __a)
33977 {
33978 return __builtin_aarch64_frint32zv2sf (__a);
33979 }
33980
33981 __extension__ extern __inline float32x4_t
33982 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33983 vrnd32zq_f32 (float32x4_t __a)
33984 {
33985 return __builtin_aarch64_frint32zv4sf (__a);
33986 }
33987
33988 __extension__ extern __inline float64x1_t
33989 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33990 vrnd32z_f64 (float64x1_t __a)
33991 {
33992 return (float64x1_t)
33993 {__builtin_aarch64_frint32zdf (vget_lane_f64 (__a, 0))};
33994 }
33995
33996 __extension__ extern __inline float64x2_t
33997 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
33998 vrnd32zq_f64 (float64x2_t __a)
33999 {
34000 return __builtin_aarch64_frint32zv2df (__a);
34001 }
34002
34003 __extension__ extern __inline float32x2_t
34004 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34005 vrnd32x_f32 (float32x2_t __a)
34006 {
34007 return __builtin_aarch64_frint32xv2sf (__a);
34008 }
34009
34010 __extension__ extern __inline float32x4_t
34011 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34012 vrnd32xq_f32 (float32x4_t __a)
34013 {
34014 return __builtin_aarch64_frint32xv4sf (__a);
34015 }
34016
34017 __extension__ extern __inline float64x1_t
34018 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34019 vrnd32x_f64 (float64x1_t __a)
34020 {
34021 return (float64x1_t) {__builtin_aarch64_frint32xdf (vget_lane_f64 (__a, 0))};
34022 }
34023
34024 __extension__ extern __inline float64x2_t
34025 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34026 vrnd32xq_f64 (float64x2_t __a)
34027 {
34028 return __builtin_aarch64_frint32xv2df (__a);
34029 }
34030
34031 __extension__ extern __inline float32x2_t
34032 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34033 vrnd64z_f32 (float32x2_t __a)
34034 {
34035 return __builtin_aarch64_frint64zv2sf (__a);
34036 }
34037
34038 __extension__ extern __inline float32x4_t
34039 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34040 vrnd64zq_f32 (float32x4_t __a)
34041 {
34042 return __builtin_aarch64_frint64zv4sf (__a);
34043 }
34044
34045 __extension__ extern __inline float64x1_t
34046 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34047 vrnd64z_f64 (float64x1_t __a)
34048 {
34049 return (float64x1_t) {__builtin_aarch64_frint64zdf (vget_lane_f64 (__a, 0))};
34050 }
34051
34052 __extension__ extern __inline float64x2_t
34053 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34054 vrnd64zq_f64 (float64x2_t __a)
34055 {
34056 return __builtin_aarch64_frint64zv2df (__a);
34057 }
34058
34059 __extension__ extern __inline float32x2_t
34060 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34061 vrnd64x_f32 (float32x2_t __a)
34062 {
34063 return __builtin_aarch64_frint64xv2sf (__a);
34064 }
34065
34066 __extension__ extern __inline float32x4_t
34067 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34068 vrnd64xq_f32 (float32x4_t __a)
34069 {
34070 return __builtin_aarch64_frint64xv4sf (__a);
34071 }
34072
34073 __extension__ extern __inline float64x1_t
34074 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34075 vrnd64x_f64 (float64x1_t __a)
34076 {
34077 return (float64x1_t) {__builtin_aarch64_frint64xdf (vget_lane_f64 (__a, 0))};
34078 }
34079
34080 __extension__ extern __inline float64x2_t
34081 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34082 vrnd64xq_f64 (float64x2_t __a)
34083 {
34084 return __builtin_aarch64_frint64xv2df (__a);
34085 }
34086
34087 #pragma GCC pop_options
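/* Editorial usage sketch (illustrative, not part of the original header):
   the FRINT32 and FRINT64 intrinsics above round each element to an
   integral floating-point value that fits a signed 32-bit or 64-bit
   integer; the "z" forms round towards zero, the "x" forms use the
   current rounding mode, and out-of-range elements become the most
   negative representable integer.  Assuming __p points at two doubles:

     float64x2_t __v = vld1q_f64 (__p);
     float64x2_t __r = vrnd64zq_f64 (__v);

   Every element of __r is then exactly representable as an int64_t.  */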
34088
34089 #include "arm_bf16.h"
34090
34091 #pragma GCC push_options
34092 #pragma GCC target ("arch=armv8.2-a+bf16")
34093
34094 __extension__ extern __inline bfloat16x4_t
34095 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34096 vset_lane_bf16 (bfloat16_t __elem, bfloat16x4_t __vec, const int __index)
34097 {
34098 return __aarch64_vset_lane_any (__elem, __vec, __index);
34099 }
34100
34101 __extension__ extern __inline bfloat16x8_t
34102 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34103 vsetq_lane_bf16 (bfloat16_t __elem, bfloat16x8_t __vec, const int __index)
34104 {
34105 return __aarch64_vset_lane_any (__elem, __vec, __index);
34106 }
34107
34108 __extension__ extern __inline bfloat16_t
34109 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34110 vget_lane_bf16 (bfloat16x4_t __a, const int __b)
34111 {
34112 return __aarch64_vget_lane_any (__a, __b);
34113 }
34114
34115 __extension__ extern __inline bfloat16_t
34116 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34117 vgetq_lane_bf16 (bfloat16x8_t __a, const int __b)
34118 {
34119 return __aarch64_vget_lane_any (__a, __b);
34120 }
34121
34122 __extension__ extern __inline bfloat16x4_t
34123 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34124 vcreate_bf16 (uint64_t __a)
34125 {
34126 return (bfloat16x4_t) __a;
34127 }
34128
34129 __extension__ extern __inline bfloat16x8_t
34130 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34131 vcombine_bf16 (bfloat16x4_t __a, bfloat16x4_t __b)
34132 {
34133 return (bfloat16x8_t)__builtin_aarch64_combinev4bf (__a, __b);
34134 }
34135
34136 /* vdup */
34137
34138 __extension__ extern __inline bfloat16x4_t
34139 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34140 vdup_n_bf16 (bfloat16_t __a)
34141 {
34142 return (bfloat16x4_t) {__a, __a, __a, __a};
34143 }
34144
34145 __extension__ extern __inline bfloat16x8_t
34146 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34147 vdupq_n_bf16 (bfloat16_t __a)
34148 {
34149 return (bfloat16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
34150 }
34151
34152 __extension__ extern __inline bfloat16x4_t
34153 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34154 vdup_lane_bf16 (bfloat16x4_t __a, const int __b)
34155 {
34156 return vdup_n_bf16 (__aarch64_vget_lane_any (__a, __b));
34157 }
34158
34159 __extension__ extern __inline bfloat16x4_t
34160 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34161 vdup_laneq_bf16 (bfloat16x8_t __a, const int __b)
34162 {
34163 return vdup_n_bf16 (__aarch64_vget_lane_any (__a, __b));
34164 }
34165
34166 __extension__ extern __inline bfloat16x8_t
34167 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34168 vdupq_lane_bf16 (bfloat16x4_t __a, const int __b)
34169 {
34170 return vdupq_n_bf16 (__aarch64_vget_lane_any (__a, __b));
34171 }
34172
34173 __extension__ extern __inline bfloat16x8_t
34174 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34175 vdupq_laneq_bf16 (bfloat16x8_t __a, const int __b)
34176 {
34177 return vdupq_n_bf16 (__aarch64_vget_lane_any (__a, __b));
34178 }
34179
34180 __extension__ extern __inline bfloat16_t
34181 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34182 vduph_lane_bf16 (bfloat16x4_t __a, const int __b)
34183 {
34184 return __aarch64_vget_lane_any (__a, __b);
34185 }
34186
34187 __extension__ extern __inline bfloat16_t
34188 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34189 vduph_laneq_bf16 (bfloat16x8_t __a, const int __b)
34190 {
34191 return __aarch64_vget_lane_any (__a, __b);
34192 }
34193
34194 /* vld */
34195
34196 __extension__ extern __inline bfloat16x4_t
34197 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34198 vld1_bf16 (const bfloat16_t *__a)
34199 {
34200 return (bfloat16x4_t) __builtin_aarch64_ld1v4bf (__a);
34201 }
34202
34203 __extension__ extern __inline bfloat16x8_t
34204 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34205 vld1q_bf16 (const bfloat16_t *__a)
34206 {
34207 return __builtin_aarch64_ld1v8bf (__a);
34208 }
34209
34210 __extension__ extern __inline bfloat16x4x2_t
34211 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34212 vld1_bf16_x2 (const bfloat16_t *__a)
34213 {
34214 bfloat16x4x2_t ret;
34215 __builtin_aarch64_simd_oi __o;
34216 __o = __builtin_aarch64_ld1x2v4bf ((const __builtin_aarch64_simd_bf *) __a);
34217 ret.val[0] = (bfloat16x4_t) __builtin_aarch64_get_dregoiv4bf (__o, 0);
34218 ret.val[1] = (bfloat16x4_t) __builtin_aarch64_get_dregoiv4bf (__o, 1);
34219 return ret;
34220 }
34221
34222 __extension__ extern __inline bfloat16x8x2_t
34223 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34224 vld1q_bf16_x2 (const bfloat16_t *__a)
34225 {
34226 bfloat16x8x2_t ret;
34227 __builtin_aarch64_simd_oi __o;
34228 __o = __builtin_aarch64_ld1x2v8bf ((const __builtin_aarch64_simd_bf *) __a);
34229 ret.val[0] = (bfloat16x8_t) __builtin_aarch64_get_qregoiv8bf (__o, 0);
34230 ret.val[1] = (bfloat16x8_t) __builtin_aarch64_get_qregoiv8bf (__o, 1);
34231 return ret;
34232 }
34233
34234 __extension__ extern __inline bfloat16x4x3_t
34235 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34236 vld1_bf16_x3 (const bfloat16_t *__a)
34237 {
34238 bfloat16x4x3_t __i;
34239 __builtin_aarch64_simd_ci __o;
34240 __o = __builtin_aarch64_ld1x3v4bf ((const __builtin_aarch64_simd_bf *) __a);
34241 __i.val[0] = (bfloat16x4_t) __builtin_aarch64_get_dregciv4bf (__o, 0);
34242 __i.val[1] = (bfloat16x4_t) __builtin_aarch64_get_dregciv4bf (__o, 1);
34243 __i.val[2] = (bfloat16x4_t) __builtin_aarch64_get_dregciv4bf (__o, 2);
34244 return __i;
34245 }
34246
34247 __extension__ extern __inline bfloat16x8x3_t
34248 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34249 vld1q_bf16_x3 (const bfloat16_t *__a)
34250 {
34251 bfloat16x8x3_t __i;
34252 __builtin_aarch64_simd_ci __o;
34253 __o = __builtin_aarch64_ld1x3v8bf ((const __builtin_aarch64_simd_bf *) __a);
34254 __i.val[0] = (bfloat16x8_t) __builtin_aarch64_get_qregciv8bf (__o, 0);
34255 __i.val[1] = (bfloat16x8_t) __builtin_aarch64_get_qregciv8bf (__o, 1);
34256 __i.val[2] = (bfloat16x8_t) __builtin_aarch64_get_qregciv8bf (__o, 2);
34257 return __i;
34258 }

34259 __extension__ extern __inline bfloat16x4x4_t
34260 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34261 vld1_bf16_x4 (const bfloat16_t *__a)
34262 {
34263 union { bfloat16x4x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
34264 __au.__o
34265 = __builtin_aarch64_ld1x4v4bf ((const __builtin_aarch64_simd_bf *) __a);
34266 return __au.__i;
34267 }
34268
34269 __extension__ extern __inline bfloat16x8x4_t
34270 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34271 vld1q_bf16_x4 (const bfloat16_t *__a)
34272 {
34273 union { bfloat16x8x4_t __i; __builtin_aarch64_simd_xi __o; } __au;
34274 __au.__o
34275 = __builtin_aarch64_ld1x4v8bf ((const __builtin_aarch64_simd_bf *) __a);
34276 return __au.__i;
34277 }
34278
34279 __extension__ extern __inline bfloat16x4_t
34280 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34281 vld1_lane_bf16 (const bfloat16_t *__src, bfloat16x4_t __vec, const int __lane)
34282 {
34283 return __aarch64_vset_lane_any (*__src, __vec, __lane);
34284 }
34285
34286 __extension__ extern __inline bfloat16x8_t
34287 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34288 vld1q_lane_bf16 (const bfloat16_t *__src, bfloat16x8_t __vec, const int __lane)
34289 {
34290 return __aarch64_vset_lane_any (*__src, __vec, __lane);
34291 }
34292
34293 __extension__ extern __inline bfloat16x4_t
34294 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34295 vld1_dup_bf16 (const bfloat16_t* __a)
34296 {
34297 return vdup_n_bf16 (*__a);
34298 }
34299
34300 __extension__ extern __inline bfloat16x8_t
34301 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34302 vld1q_dup_bf16 (const bfloat16_t* __a)
34303 {
34304 return vdupq_n_bf16 (*__a);
34305 }
34306
34307 __extension__ extern __inline bfloat16x4x2_t
34308 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34309 vld2_bf16 (const bfloat16_t * __a)
34310 {
34311 bfloat16x4x2_t ret;
34312 __builtin_aarch64_simd_oi __o;
34313 __o = __builtin_aarch64_ld2v4bf (__a);
34314 ret.val[0] = (bfloat16x4_t) __builtin_aarch64_get_dregoiv4bf (__o, 0);
34315 ret.val[1] = (bfloat16x4_t) __builtin_aarch64_get_dregoiv4bf (__o, 1);
34316 return ret;
34317 }
34318
34319 __extension__ extern __inline bfloat16x8x2_t
34320 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34321 vld2q_bf16 (const bfloat16_t * __a)
34322 {
34323 bfloat16x8x2_t ret;
34324 __builtin_aarch64_simd_oi __o;
34325 __o = __builtin_aarch64_ld2v8bf ((const __builtin_aarch64_simd_bf *) __a);
34326 ret.val[0] = (bfloat16x8_t) __builtin_aarch64_get_qregoiv8bf (__o, 0);
34327 ret.val[1] = (bfloat16x8_t) __builtin_aarch64_get_qregoiv8bf (__o, 1);
34328 return ret;
34329 }
34330
34331 __extension__ extern __inline bfloat16x4x2_t
34332 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34333 vld2_dup_bf16 (const bfloat16_t * __a)
34334 {
34335 bfloat16x4x2_t ret;
34336 __builtin_aarch64_simd_oi __o;
34337 __o = __builtin_aarch64_ld2rv4bf ((const __builtin_aarch64_simd_bf *) __a);
34338 ret.val[0] = (bfloat16x4_t) __builtin_aarch64_get_dregoiv4bf (__o, 0);
34339 ret.val[1] = (bfloat16x4_t) __builtin_aarch64_get_dregoiv4bf (__o, 1);
34340 return ret;
34341 }
34342
34343 __extension__ extern __inline bfloat16x8x2_t
34344 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34345 vld2q_dup_bf16 (const bfloat16_t * __a)
34346 {
34347 bfloat16x8x2_t ret;
34348 __builtin_aarch64_simd_oi __o;
34349 __o = __builtin_aarch64_ld2rv8bf ((const __builtin_aarch64_simd_bf *) __a);
34350 ret.val[0] = (bfloat16x8_t) __builtin_aarch64_get_qregoiv8bf (__o, 0);
34351 ret.val[1] = (bfloat16x8_t) __builtin_aarch64_get_qregoiv8bf (__o, 1);
34352 return ret;
34353 }
34354
34355 __extension__ extern __inline bfloat16x4x3_t
34356 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34357 vld3_bf16 (const bfloat16_t * __a)
34358 {
34359 bfloat16x4x3_t ret;
34360 __builtin_aarch64_simd_ci __o;
34361 __o = __builtin_aarch64_ld3v4bf ((const __builtin_aarch64_simd_bf *) __a);
34362 ret.val[0] = (bfloat16x4_t) __builtin_aarch64_get_dregciv4bf (__o, 0);
34363 ret.val[1] = (bfloat16x4_t) __builtin_aarch64_get_dregciv4bf (__o, 1);
34364 ret.val[2] = (bfloat16x4_t) __builtin_aarch64_get_dregciv4bf (__o, 2);
34365 return ret;
34366 }
34367
34368 __extension__ extern __inline bfloat16x8x3_t
34369 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34370 vld3q_bf16 (const bfloat16_t * __a)
34371 {
34372 bfloat16x8x3_t ret;
34373 __builtin_aarch64_simd_ci __o;
34374 __o = __builtin_aarch64_ld3v8bf ((const __builtin_aarch64_simd_bf *) __a);
34375 ret.val[0] = (bfloat16x8_t) __builtin_aarch64_get_qregciv8bf (__o, 0);
34376 ret.val[1] = (bfloat16x8_t) __builtin_aarch64_get_qregciv8bf (__o, 1);
34377 ret.val[2] = (bfloat16x8_t) __builtin_aarch64_get_qregciv8bf (__o, 2);
34378 return ret;
34379 }
34380
34381 __extension__ extern __inline bfloat16x4x3_t
34382 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34383 vld3_dup_bf16 (const bfloat16_t * __a)
34384 {
34385 bfloat16x4x3_t ret;
34386 __builtin_aarch64_simd_ci __o;
34387 __o = __builtin_aarch64_ld3rv4bf ((const __builtin_aarch64_simd_bf *) __a);
34388 ret.val[0] = (bfloat16x4_t) __builtin_aarch64_get_dregciv4bf (__o, 0);
34389 ret.val[1] = (bfloat16x4_t) __builtin_aarch64_get_dregciv4bf (__o, 1);
34390 ret.val[2] = (bfloat16x4_t) __builtin_aarch64_get_dregciv4bf (__o, 2);
34391 return ret;
34392 }
34393
34394 __extension__ extern __inline bfloat16x8x3_t
34395 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34396 vld3q_dup_bf16 (const bfloat16_t * __a)
34397 {
34398 bfloat16x8x3_t ret;
34399 __builtin_aarch64_simd_ci __o;
34400 __o = __builtin_aarch64_ld3rv8bf ((const __builtin_aarch64_simd_bf *) __a);
34401 ret.val[0] = (bfloat16x8_t) __builtin_aarch64_get_qregciv8bf (__o, 0);
34402 ret.val[1] = (bfloat16x8_t) __builtin_aarch64_get_qregciv8bf (__o, 1);
34403 ret.val[2] = (bfloat16x8_t) __builtin_aarch64_get_qregciv8bf (__o, 2);
34404 return ret;
34405 }
34406
34407 __extension__ extern __inline bfloat16x4x4_t
34408 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34409 vld4_bf16 (const bfloat16_t * __a)
34410 {
34411 bfloat16x4x4_t ret;
34412 __builtin_aarch64_simd_xi __o;
34413 __o = __builtin_aarch64_ld4v4bf ((const __builtin_aarch64_simd_bf *) __a);
34414 ret.val[0] = (bfloat16x4_t) __builtin_aarch64_get_dregxiv4bf (__o, 0);
34415 ret.val[1] = (bfloat16x4_t) __builtin_aarch64_get_dregxiv4bf (__o, 1);
34416 ret.val[2] = (bfloat16x4_t) __builtin_aarch64_get_dregxiv4bf (__o, 2);
34417 ret.val[3] = (bfloat16x4_t) __builtin_aarch64_get_dregxiv4bf (__o, 3);
34418 return ret;
34419 }
34420
34421 __extension__ extern __inline bfloat16x8x4_t
34422 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34423 vld4q_bf16 (const bfloat16_t * __a)
34424 {
34425 bfloat16x8x4_t ret;
34426 __builtin_aarch64_simd_xi __o;
34427 __o = __builtin_aarch64_ld4v8bf ((const __builtin_aarch64_simd_bf *) __a);
34428 ret.val[0] = (bfloat16x8_t) __builtin_aarch64_get_qregxiv8bf (__o, 0);
34429 ret.val[1] = (bfloat16x8_t) __builtin_aarch64_get_qregxiv8bf (__o, 1);
34430 ret.val[2] = (bfloat16x8_t) __builtin_aarch64_get_qregxiv8bf (__o, 2);
34431 ret.val[3] = (bfloat16x8_t) __builtin_aarch64_get_qregxiv8bf (__o, 3);
34432 return ret;
34433 }
34434
34435 __extension__ extern __inline bfloat16x4x4_t
34436 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34437 vld4_dup_bf16 (const bfloat16_t * __a)
34438 {
34439 bfloat16x4x4_t ret;
34440 __builtin_aarch64_simd_xi __o;
34441 __o = __builtin_aarch64_ld4rv4bf ((const __builtin_aarch64_simd_bf *) __a);
34442 ret.val[0] = (bfloat16x4_t) __builtin_aarch64_get_dregxiv4bf (__o, 0);
34443 ret.val[1] = (bfloat16x4_t) __builtin_aarch64_get_dregxiv4bf (__o, 1);
34444 ret.val[2] = (bfloat16x4_t) __builtin_aarch64_get_dregxiv4bf (__o, 2);
34445 ret.val[3] = (bfloat16x4_t) __builtin_aarch64_get_dregxiv4bf (__o, 3);
34446 return ret;
34447 }
34448
34449 __extension__ extern __inline bfloat16x8x4_t
34450 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34451 vld4q_dup_bf16 (const bfloat16_t * __a)
34452 {
34453 bfloat16x8x4_t ret;
34454 __builtin_aarch64_simd_xi __o;
34455 __o = __builtin_aarch64_ld4rv8bf ((const __builtin_aarch64_simd_bf *) __a);
34456 ret.val[0] = (bfloat16x8_t) __builtin_aarch64_get_qregxiv8bf (__o, 0);
34457 ret.val[1] = (bfloat16x8_t) __builtin_aarch64_get_qregxiv8bf (__o, 1);
34458 ret.val[2] = (bfloat16x8_t) __builtin_aarch64_get_qregxiv8bf (__o, 2);
34459 ret.val[3] = (bfloat16x8_t) __builtin_aarch64_get_qregxiv8bf (__o, 3);
34460 return ret;
34461 }
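/* Editorial usage sketch (illustrative, not part of the original header):
   the vld2/vld3/vld4 forms above de-interleave bfloat16 data, whereas the
   vld1 _x2/_x3/_x4 forms load consecutive vectors without re-ordering.
   Assuming __p points at sixteen interleaved bfloat16_t values:

     bfloat16x8x2_t __de = vld2q_bf16 (__p);
     bfloat16x8_t __even = __de.val[0];
     bfloat16x8_t __odd  = __de.val[1];

   __even then holds elements 0, 2, 4, ... and __odd elements 1, 3, 5, ...  */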
34462
34463 /* vst */
34464
34465 __extension__ extern __inline void
34466 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34467 vst1_bf16 (bfloat16_t *__a, bfloat16x4_t __b)
34468 {
34469 __builtin_aarch64_st1v4bf (__a, __b);
34470 }
34471
34472 __extension__ extern __inline void
34473 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34474 vst1_bf16_x2 (bfloat16_t * __a, bfloat16x4x2_t __val)
34475 {
34476 __builtin_aarch64_simd_oi __o;
34477 bfloat16x8x2_t __temp;
34478 __temp.val[0] = vcombine_bf16 (__val.val[0], vcreate_bf16 (__AARCH64_UINT64_C (0)));
34479 __temp.val[1] = vcombine_bf16 (__val.val[1], vcreate_bf16 (__AARCH64_UINT64_C (0)));
34480 __o = __builtin_aarch64_set_qregoiv8bf (__o, __temp.val[0], 0);
34481 __o = __builtin_aarch64_set_qregoiv8bf (__o, __temp.val[1], 1);
34482 __builtin_aarch64_st1x2v4bf (__a, __o);
34483 }
34484
34485 __extension__ extern __inline void
34486 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34487 vst1q_bf16_x2 (bfloat16_t * __a, bfloat16x8x2_t __val)
34488 {
34489 __builtin_aarch64_simd_oi __o;
34490 __o = __builtin_aarch64_set_qregoiv8bf (__o, __val.val[0], 0);
34491 __o = __builtin_aarch64_set_qregoiv8bf (__o, __val.val[1], 1);
34492 __builtin_aarch64_st1x2v8bf (__a, __o);
34493 }
34494
34495 __extension__ extern __inline void
34496 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34497 vst1_bf16_x3 (bfloat16_t * __a, bfloat16x4x3_t __val)
34498 {
34499 __builtin_aarch64_simd_ci __o;
34500 bfloat16x8x3_t __temp;
34501 __temp.val[0] = vcombine_bf16 (__val.val[0], vcreate_bf16 (__AARCH64_UINT64_C (0)));
34502 __temp.val[1] = vcombine_bf16 (__val.val[1], vcreate_bf16 (__AARCH64_UINT64_C (0)));
34503 __temp.val[2] = vcombine_bf16 (__val.val[2], vcreate_bf16 (__AARCH64_UINT64_C (0)));
34504 __o = __builtin_aarch64_set_qregciv8bf (__o, (bfloat16x8_t) __temp.val[0], 0);
34505 __o = __builtin_aarch64_set_qregciv8bf (__o, (bfloat16x8_t) __temp.val[1], 1);
34506 __o = __builtin_aarch64_set_qregciv8bf (__o, (bfloat16x8_t) __temp.val[2], 2);
34507 __builtin_aarch64_st1x3v4bf ((__builtin_aarch64_simd_bf *) __a, __o);
34508 }
34509
34510 __extension__ extern __inline void
34511 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34512 vst1q_bf16_x3 (bfloat16_t * __a, bfloat16x8x3_t __val)
34513 {
34514 __builtin_aarch64_simd_ci __o;
34515 __o = __builtin_aarch64_set_qregciv8bf (__o, (bfloat16x8_t) __val.val[0], 0);
34516 __o = __builtin_aarch64_set_qregciv8bf (__o, (bfloat16x8_t) __val.val[1], 1);
34517 __o = __builtin_aarch64_set_qregciv8bf (__o, (bfloat16x8_t) __val.val[2], 2);
34518 __builtin_aarch64_st1x3v8bf ((__builtin_aarch64_simd_bf *) __a, __o);
34519 }
34520
34521 __extension__ extern __inline void
34522 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34523 vst1_bf16_x4 (bfloat16_t * __a, bfloat16x4x4_t val)
34524 {
34525 union { bfloat16x4x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
34526 __builtin_aarch64_st1x4v4bf ((__builtin_aarch64_simd_bf *) __a, __u.__o);
34527 }
34528
34529 __extension__ extern __inline void
34530 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34531 vst1q_bf16_x4 (bfloat16_t * __a, bfloat16x8x4_t val)
34532 {
34533 union { bfloat16x8x4_t __i; __builtin_aarch64_simd_xi __o; } __u = { val };
34534 __builtin_aarch64_st1x4v8bf ((__builtin_aarch64_simd_bf *) __a, __u.__o);
34535 }
34536
34537 __extension__ extern __inline void
34538 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34539 vst1q_bf16 (bfloat16_t *__a, bfloat16x8_t __b)
34540 {
34541 __builtin_aarch64_st1v8bf (__a, __b);
34542 }
34543
34544 __extension__ extern __inline void
34545 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34546 vst1_lane_bf16 (bfloat16_t *__a, bfloat16x4_t __b, const int __lane)
34547 {
34548 *__a = __aarch64_vget_lane_any (__b, __lane);
34549 }
34550
34551 __extension__ extern __inline void
34552 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34553 vst1q_lane_bf16 (bfloat16_t *__a, bfloat16x8_t __b, const int __lane)
34554 {
34555 *__a = __aarch64_vget_lane_any (__b, __lane);
34556 }
34557
34558 __extension__ extern __inline void
34559 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34560 vst2_bf16 (bfloat16_t * __a, bfloat16x4x2_t __val)
34561 {
34562 __builtin_aarch64_simd_oi __o;
34563 bfloat16x8x2_t __temp;
34564 __temp.val[0] = vcombine_bf16 (__val.val[0], vcreate_bf16 (__AARCH64_UINT64_C (0)));
34565 __temp.val[1] = vcombine_bf16 (__val.val[1], vcreate_bf16 (__AARCH64_UINT64_C (0)));
34566 __o = __builtin_aarch64_set_qregoiv8bf (__o, __temp.val[0], 0);
34567 __o = __builtin_aarch64_set_qregoiv8bf (__o, __temp.val[1], 1);
34568 __builtin_aarch64_st2v4bf (__a, __o);
34569 }
34570
34571 __extension__ extern __inline void
34572 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34573 vst2q_bf16 (bfloat16_t * __a, bfloat16x8x2_t __val)
34574 {
34575 __builtin_aarch64_simd_oi __o;
34576 __o = __builtin_aarch64_set_qregoiv8bf (__o, __val.val[0], 0);
34577 __o = __builtin_aarch64_set_qregoiv8bf (__o, __val.val[1], 1);
34578 __builtin_aarch64_st2v8bf (__a, __o);
34579 }
34580
34581 __extension__ extern __inline void
34582 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34583 vst3_bf16 (bfloat16_t * __a, bfloat16x4x3_t __val)
34584 {
34585 __builtin_aarch64_simd_ci __o;
34586 bfloat16x8x3_t __temp;
34587 __temp.val[0] = vcombine_bf16 (__val.val[0], vcreate_bf16 (__AARCH64_UINT64_C (0)));
34588 __temp.val[1] = vcombine_bf16 (__val.val[1], vcreate_bf16 (__AARCH64_UINT64_C (0)));
34589 __temp.val[2] = vcombine_bf16 (__val.val[2], vcreate_bf16 (__AARCH64_UINT64_C (0)));
34590 __o = __builtin_aarch64_set_qregciv8bf (__o, (bfloat16x8_t) __temp.val[0], 0);
34591 __o = __builtin_aarch64_set_qregciv8bf (__o, (bfloat16x8_t) __temp.val[1], 1);
34592 __o = __builtin_aarch64_set_qregciv8bf (__o, (bfloat16x8_t) __temp.val[2], 2);
34593 __builtin_aarch64_st3v4bf ((__builtin_aarch64_simd_bf *) __a, __o);
34594 }
34595
34596 __extension__ extern __inline void
34597 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34598 vst3q_bf16 (bfloat16_t * __a, bfloat16x8x3_t __val)
34599 {
34600 __builtin_aarch64_simd_ci __o;
34601 __o = __builtin_aarch64_set_qregciv8bf (__o, (bfloat16x8_t) __val.val[0], 0);
34602 __o = __builtin_aarch64_set_qregciv8bf (__o, (bfloat16x8_t) __val.val[1], 1);
34603 __o = __builtin_aarch64_set_qregciv8bf (__o, (bfloat16x8_t) __val.val[2], 2);
34604 __builtin_aarch64_st3v8bf ((__builtin_aarch64_simd_bf *) __a, __o);
34605 }
34606
34607 __extension__ extern __inline void
34608 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34609 vst4_bf16 (bfloat16_t * __a, bfloat16x4x4_t __val)
34610 {
34611 __builtin_aarch64_simd_xi __o;
34612 bfloat16x8x4_t __temp;
34613 __temp.val[0] = vcombine_bf16 (__val.val[0], vcreate_bf16 (__AARCH64_UINT64_C (0)));
34614 __temp.val[1] = vcombine_bf16 (__val.val[1], vcreate_bf16 (__AARCH64_UINT64_C (0)));
34615 __temp.val[2] = vcombine_bf16 (__val.val[2], vcreate_bf16 (__AARCH64_UINT64_C (0)));
34616 __temp.val[3] = vcombine_bf16 (__val.val[3], vcreate_bf16 (__AARCH64_UINT64_C (0)));
34617 __o = __builtin_aarch64_set_qregxiv8bf (__o, (bfloat16x8_t) __temp.val[0], 0);
34618 __o = __builtin_aarch64_set_qregxiv8bf (__o, (bfloat16x8_t) __temp.val[1], 1);
34619 __o = __builtin_aarch64_set_qregxiv8bf (__o, (bfloat16x8_t) __temp.val[2], 2);
34620 __o = __builtin_aarch64_set_qregxiv8bf (__o, (bfloat16x8_t) __temp.val[3], 3);
34621 __builtin_aarch64_st4v4bf ((__builtin_aarch64_simd_bf *) __a, __o);
34622 }
34623
34624 __extension__ extern __inline void
34625 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34626 vst4q_bf16 (bfloat16_t * __a, bfloat16x8x4_t __val)
34627 {
34628 __builtin_aarch64_simd_xi __o;
34629 __o = __builtin_aarch64_set_qregxiv8bf (__o, (bfloat16x8_t) __val.val[0], 0);
34630 __o = __builtin_aarch64_set_qregxiv8bf (__o, (bfloat16x8_t) __val.val[1], 1);
34631 __o = __builtin_aarch64_set_qregxiv8bf (__o, (bfloat16x8_t) __val.val[2], 2);
34632 __o = __builtin_aarch64_set_qregxiv8bf (__o, (bfloat16x8_t) __val.val[3], 3);
34633 __builtin_aarch64_st4v8bf ((__builtin_aarch64_simd_bf *) __a, __o);
34634 }
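/* Editorial usage sketch (illustrative, not part of the original header):
   the stores mirror the loads, so writing the two de-interleaved vectors
   from the sketch above back in interleaved order, assuming __p is
   writable and holds sixteen bfloat16_t elements, is simply

     bfloat16x8x2_t __de = { { __even, __odd } };
     vst2q_bf16 (__p, __de);

   while vst1q_bf16_x2 would store val[0] followed by val[1] without
   interleaving.  */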
34635
34636 /* vreinterpret */
34637
34638 __extension__ extern __inline bfloat16x4_t
34639 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34640 vreinterpret_bf16_u8 (uint8x8_t __a)
34641 {
34642 return (bfloat16x4_t)__a;
34643 }
34644
34645 __extension__ extern __inline bfloat16x4_t
34646 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34647 vreinterpret_bf16_u16 (uint16x4_t __a)
34648 {
34649 return (bfloat16x4_t)__a;
34650 }
34651
34652 __extension__ extern __inline bfloat16x4_t
34653 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34654 vreinterpret_bf16_u32 (uint32x2_t __a)
34655 {
34656 return (bfloat16x4_t)__a;
34657 }
34658
34659 __extension__ extern __inline bfloat16x4_t
34660 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34661 vreinterpret_bf16_u64 (uint64x1_t __a)
34662 {
34663 return (bfloat16x4_t)__a;
34664 }
34665
34666 __extension__ extern __inline bfloat16x4_t
34667 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34668 vreinterpret_bf16_s8 (int8x8_t __a)
34669 {
34670 return (bfloat16x4_t)__a;
34671 }
34672
34673 __extension__ extern __inline bfloat16x4_t
34674 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34675 vreinterpret_bf16_s16 (int16x4_t __a)
34676 {
34677 return (bfloat16x4_t)__a;
34678 }
34679
34680 __extension__ extern __inline bfloat16x4_t
34681 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34682 vreinterpret_bf16_s32 (int32x2_t __a)
34683 {
34684 return (bfloat16x4_t)__a;
34685 }
34686
34687 __extension__ extern __inline bfloat16x4_t
34688 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34689 vreinterpret_bf16_s64 (int64x1_t __a)
34690 {
34691 return (bfloat16x4_t)__a;
34692 }
34693
34694 __extension__ extern __inline bfloat16x4_t
34695 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34696 vreinterpret_bf16_p8 (poly8x8_t __a)
34697 {
34698 return (bfloat16x4_t)__a;
34699 }
34700
34701 __extension__ extern __inline bfloat16x4_t
34702 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34703 vreinterpret_bf16_p16 (poly16x4_t __a)
34704 {
34705 return (bfloat16x4_t)__a;
34706 }
34707
34708 __extension__ extern __inline bfloat16x4_t
34709 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34710 vreinterpret_bf16_p64 (poly64x1_t __a)
34711 {
34712 return (bfloat16x4_t)__a;
34713 }
34714
34715 __extension__ extern __inline bfloat16x4_t
34716 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34717 vreinterpret_bf16_f16 (float16x4_t __a)
34718 {
34719 return (bfloat16x4_t)__a;
34720 }
34721
34722 __extension__ extern __inline bfloat16x4_t
34723 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34724 vreinterpret_bf16_f32 (float32x2_t __a)
34725 {
34726 return (bfloat16x4_t)__a;
34727 }
34728
34729 __extension__ extern __inline bfloat16x4_t
34730 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34731 vreinterpret_bf16_f64 (float64x1_t __a)
34732 {
34733 return (bfloat16x4_t)__a;
34734 }
34735
34736 __extension__ extern __inline bfloat16x8_t
34737 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34738 vreinterpretq_bf16_u8 (uint8x16_t __a)
34739 {
34740 return (bfloat16x8_t)__a;
34741 }
34742
34743 __extension__ extern __inline bfloat16x8_t
34744 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34745 vreinterpretq_bf16_u16 (uint16x8_t __a)
34746 {
34747 return (bfloat16x8_t)__a;
34748 }
34749
34750 __extension__ extern __inline bfloat16x8_t
34751 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34752 vreinterpretq_bf16_u32 (uint32x4_t __a)
34753 {
34754 return (bfloat16x8_t)__a;
34755 }
34756
34757 __extension__ extern __inline bfloat16x8_t
34758 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34759 vreinterpretq_bf16_u64 (uint64x2_t __a)
34760 {
34761 return (bfloat16x8_t)__a;
34762 }
34763
34764 __extension__ extern __inline bfloat16x8_t
34765 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34766 vreinterpretq_bf16_s8 (int8x16_t __a)
34767 {
34768 return (bfloat16x8_t)__a;
34769 }
34770
34771 __extension__ extern __inline bfloat16x8_t
34772 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34773 vreinterpretq_bf16_s16 (int16x8_t __a)
34774 {
34775 return (bfloat16x8_t)__a;
34776 }
34777
34778 __extension__ extern __inline bfloat16x8_t
34779 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34780 vreinterpretq_bf16_s32 (int32x4_t __a)
34781 {
34782 return (bfloat16x8_t)__a;
34783 }
34784
34785 __extension__ extern __inline bfloat16x8_t
34786 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34787 vreinterpretq_bf16_s64 (int64x2_t __a)
34788 {
34789 return (bfloat16x8_t)__a;
34790 }
34791
34792 __extension__ extern __inline bfloat16x8_t
34793 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34794 vreinterpretq_bf16_p8 (poly8x16_t __a)
34795 {
34796 return (bfloat16x8_t)__a;
34797 }
34798
34799 __extension__ extern __inline bfloat16x8_t
34800 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34801 vreinterpretq_bf16_p16 (poly16x8_t __a)
34802 {
34803 return (bfloat16x8_t)__a;
34804 }
34805
34806 __extension__ extern __inline bfloat16x8_t
34807 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34808 vreinterpretq_bf16_p64 (poly64x2_t __a)
34809 {
34810 return (bfloat16x8_t)__a;
34811 }
34812
34813 __extension__ extern __inline bfloat16x8_t
34814 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34815 vreinterpretq_bf16_p128 (poly128_t __a)
34816 {
34817 return (bfloat16x8_t)__a;
34818 }
34819
34820 __extension__ extern __inline bfloat16x8_t
34821 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34822 vreinterpretq_bf16_f16 (float16x8_t __a)
34823 {
34824 return (bfloat16x8_t)__a;
34825 }
34826
34827 __extension__ extern __inline bfloat16x8_t
34828 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34829 vreinterpretq_bf16_f32 (float32x4_t __a)
34830 {
34831 return (bfloat16x8_t)__a;
34832 }
34833
34834 __extension__ extern __inline bfloat16x8_t
34835 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34836 vreinterpretq_bf16_f64 (float64x2_t __a)
34837 {
34838 return (bfloat16x8_t)__a;
34839 }
34840
34841 __extension__ extern __inline int8x8_t
34842 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34843 vreinterpret_s8_bf16 (bfloat16x4_t __a)
34844 {
34845 return (int8x8_t)__a;
34846 }
34847
34848 __extension__ extern __inline int16x4_t
34849 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34850 vreinterpret_s16_bf16 (bfloat16x4_t __a)
34851 {
34852 return (int16x4_t)__a;
34853 }
34854
34855 __extension__ extern __inline int32x2_t
34856 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34857 vreinterpret_s32_bf16 (bfloat16x4_t __a)
34858 {
34859 return (int32x2_t)__a;
34860 }
34861
34862 __extension__ extern __inline int64x1_t
34863 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34864 vreinterpret_s64_bf16 (bfloat16x4_t __a)
34865 {
34866 return (int64x1_t)__a;
34867 }
34868
34869 __extension__ extern __inline uint8x8_t
34870 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34871 vreinterpret_u8_bf16 (bfloat16x4_t __a)
34872 {
34873 return (uint8x8_t)__a;
34874 }
34875
34876 __extension__ extern __inline uint16x4_t
34877 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34878 vreinterpret_u16_bf16 (bfloat16x4_t __a)
34879 {
34880 return (uint16x4_t)__a;
34881 }
34882
34883 __extension__ extern __inline uint32x2_t
34884 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34885 vreinterpret_u32_bf16 (bfloat16x4_t __a)
34886 {
34887 return (uint32x2_t)__a;
34888 }
34889
34890 __extension__ extern __inline uint64x1_t
34891 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34892 vreinterpret_u64_bf16 (bfloat16x4_t __a)
34893 {
34894 return (uint64x1_t)__a;
34895 }
34896
34897 __extension__ extern __inline float16x4_t
34898 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34899 vreinterpret_f16_bf16 (bfloat16x4_t __a)
34900 {
34901 return (float16x4_t)__a;
34902 }
34903
34904 __extension__ extern __inline float32x2_t
34905 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34906 vreinterpret_f32_bf16 (bfloat16x4_t __a)
34907 {
34908 return (float32x2_t)__a;
34909 }
34910
34911 __extension__ extern __inline float64x1_t
34912 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34913 vreinterpret_f64_bf16 (bfloat16x4_t __a)
34914 {
34915 return (float64x1_t)__a;
34916 }
34917
34918 __extension__ extern __inline poly8x8_t
34919 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34920 vreinterpret_p8_bf16 (bfloat16x4_t __a)
34921 {
34922 return (poly8x8_t)__a;
34923 }
34924
34925 __extension__ extern __inline poly16x4_t
34926 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34927 vreinterpret_p16_bf16 (bfloat16x4_t __a)
34928 {
34929 return (poly16x4_t)__a;
34930 }
34931
34932 __extension__ extern __inline poly64x1_t
34933 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34934 vreinterpret_p64_bf16 (bfloat16x4_t __a)
34935 {
34936 return (poly64x1_t)__a;
34937 }
34938
34939 __extension__ extern __inline int8x16_t
34940 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34941 vreinterpretq_s8_bf16 (bfloat16x8_t __a)
34942 {
34943 return (int8x16_t)__a;
34944 }
34945
34946 __extension__ extern __inline int16x8_t
34947 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34948 vreinterpretq_s16_bf16 (bfloat16x8_t __a)
34949 {
34950 return (int16x8_t)__a;
34951 }
34952
34953 __extension__ extern __inline int32x4_t
34954 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34955 vreinterpretq_s32_bf16 (bfloat16x8_t __a)
34956 {
34957 return (int32x4_t)__a;
34958 }
34959
34960 __extension__ extern __inline int64x2_t
34961 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34962 vreinterpretq_s64_bf16 (bfloat16x8_t __a)
34963 {
34964 return (int64x2_t)__a;
34965 }
34966
34967 __extension__ extern __inline uint8x16_t
34968 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34969 vreinterpretq_u8_bf16 (bfloat16x8_t __a)
34970 {
34971 return (uint8x16_t)__a;
34972 }
34973
34974 __extension__ extern __inline uint16x8_t
34975 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34976 vreinterpretq_u16_bf16 (bfloat16x8_t __a)
34977 {
34978 return (uint16x8_t)__a;
34979 }
34980
34981 __extension__ extern __inline uint32x4_t
34982 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34983 vreinterpretq_u32_bf16 (bfloat16x8_t __a)
34984 {
34985 return (uint32x4_t)__a;
34986 }
34987
34988 __extension__ extern __inline uint64x2_t
34989 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34990 vreinterpretq_u64_bf16 (bfloat16x8_t __a)
34991 {
34992 return (uint64x2_t)__a;
34993 }
34994
34995 __extension__ extern __inline float16x8_t
34996 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
34997 vreinterpretq_f16_bf16 (bfloat16x8_t __a)
34998 {
34999 return (float16x8_t)__a;
35000 }
35001
35002 __extension__ extern __inline float32x4_t
35003 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35004 vreinterpretq_f32_bf16 (bfloat16x8_t __a)
35005 {
35006 return (float32x4_t)__a;
35007 }
35008
35009 __extension__ extern __inline float64x2_t
35010 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35011 vreinterpretq_f64_bf16 (bfloat16x8_t __a)
35012 {
35013 return (float64x2_t)__a;
35014 }
35015
35016 __extension__ extern __inline poly8x16_t
35017 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35018 vreinterpretq_p8_bf16 (bfloat16x8_t __a)
35019 {
35020 return (poly8x16_t)__a;
35021 }
35022
35023 __extension__ extern __inline poly16x8_t
35024 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35025 vreinterpretq_p16_bf16 (bfloat16x8_t __a)
35026 {
35027 return (poly16x8_t)__a;
35028 }
35029
35030 __extension__ extern __inline poly64x2_t
35031 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35032 vreinterpretq_p64_bf16 (bfloat16x8_t __a)
35033 {
35034 return (poly64x2_t)__a;
35035 }
35036
35037 __extension__ extern __inline poly128_t
35038 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35039 vreinterpretq_p128_bf16 (bfloat16x8_t __a)
35040 {
35041 return (poly128_t)__a;
35042 }
35043
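/* Usage sketch (illustrative only; __example_bf16_bits_roundtrip is a
   hypothetical helper, not an ACLE intrinsic): the vreinterpret* functions
   above are pure bit-casts, so round-tripping a bf16 vector through another
   element type leaves every lane unchanged.  */
__extension__ extern __inline bfloat16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_bf16_bits_roundtrip (bfloat16x8_t __a)
{
  /* View the eight bf16 lanes as four 32-bit words, then view them back.  */
  uint32x4_t __bits = vreinterpretq_u32_bf16 (__a);
  return vreinterpretq_bf16_u32 (__bits);
}
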
35044 __extension__ extern __inline float32x2_t
35045 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35046 vbfdot_f32 (float32x2_t __r, bfloat16x4_t __a, bfloat16x4_t __b)
35047 {
35048 return __builtin_aarch64_bfdotv2sf (__r, __a, __b);
35049 }
35050
35051 __extension__ extern __inline float32x4_t
35052 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35053 vbfdotq_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x8_t __b)
35054 {
35055 return __builtin_aarch64_bfdotv4sf (__r, __a, __b);
35056 }
35057
35058 __extension__ extern __inline float32x2_t
35059 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35060 vbfdot_lane_f32 (float32x2_t __r, bfloat16x4_t __a, bfloat16x4_t __b,
35061 const int __index)
35062 {
35063 return __builtin_aarch64_bfdot_lanev2sf (__r, __a, __b, __index);
35064 }
35065
35066 __extension__ extern __inline float32x4_t
35067 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35068 vbfdotq_lane_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x4_t __b,
35069 const int __index)
35070 {
35071 return __builtin_aarch64_bfdot_lanev4sf (__r, __a, __b, __index);
35072 }
35073
35074 __extension__ extern __inline float32x2_t
35075 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35076 vbfdot_laneq_f32 (float32x2_t __r, bfloat16x4_t __a, bfloat16x8_t __b,
35077 const int __index)
35078 {
35079 return __builtin_aarch64_bfdot_laneqv2sf (__r, __a, __b, __index);
35080 }
35081
35082 __extension__ extern __inline float32x4_t
35083 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35084 vbfdotq_laneq_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x8_t __b,
35085 const int __index)
35086 {
35087 return __builtin_aarch64_bfdot_laneqv4sf (__r, __a, __b, __index);
35088 }
35089
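/* Usage sketch (illustrative only; __example_bfdot_accumulate is a
   hypothetical helper): BFDOT accumulates, into each float32 lane of the
   accumulator, the sum of products of the corresponding pair of adjacent
   bf16 elements of the other two operands.  */
__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_bfdot_accumulate (float32x2_t __acc, bfloat16x4_t __x,
                            bfloat16x4_t __y)
{
  /* __acc[i] += __x[2*i] * __y[2*i] + __x[2*i+1] * __y[2*i+1], i = 0, 1.  */
  return vbfdot_f32 (__acc, __x, __y);
}
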
35090 __extension__ extern __inline float32x4_t
35091 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35092 vbfmmlaq_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x8_t __b)
35094 {
35095 return __builtin_aarch64_bfmmlaqv4sf (__r, __a, __b);
35096 }
35097
35098 __extension__ extern __inline float32x4_t
35099 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35100 vbfmlalbq_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x8_t __b)
35101 {
35102 return __builtin_aarch64_bfmlalbv4sf (__r, __a, __b);
35103 }
35104
35105 __extension__ extern __inline float32x4_t
35106 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35107 vbfmlaltq_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x8_t __b)
35108 {
35109 return __builtin_aarch64_bfmlaltv4sf (__r, __a, __b);
35110 }
35111
35112 __extension__ extern __inline float32x4_t
35113 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35114 vbfmlalbq_lane_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x4_t __b,
35115 const int __index)
35116 {
35117 return __builtin_aarch64_bfmlalb_lanev4sf (__r, __a, __b, __index);
35118 }
35119
35120 __extension__ extern __inline float32x4_t
35121 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35122 vbfmlaltq_lane_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x4_t __b,
35123 const int __index)
35124 {
35125 return __builtin_aarch64_bfmlalt_lanev4sf (__r, __a, __b, __index);
35126 }
35127
35128 __extension__ extern __inline float32x4_t
35129 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35130 vbfmlalbq_laneq_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x8_t __b,
35131 const int __index)
35132 {
35133 return __builtin_aarch64_bfmlalb_lane_qv4sf (__r, __a, __b, __index);
35134 }
35135
35136 __extension__ extern __inline float32x4_t
35137 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35138 vbfmlaltq_laneq_f32 (float32x4_t __r, bfloat16x8_t __a, bfloat16x8_t __b,
35139 const int __index)
35140 {
35141 return __builtin_aarch64_bfmlalt_lane_qv4sf (__r, __a, __b, __index);
35142 }
35143
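/* Usage sketch (illustrative only; __example_bfmlal_both is a hypothetical
   helper): BFMLALB widens and multiplies the even-numbered bf16 elements,
   BFMLALT the odd-numbered ones, so chaining the two accumulates all eight
   products into the float32 accumulator.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_bfmlal_both (float32x4_t __acc, bfloat16x8_t __x, bfloat16x8_t __y)
{
  __acc = vbfmlalbq_f32 (__acc, __x, __y);  /* elements 0, 2, 4, 6.  */
  return vbfmlaltq_f32 (__acc, __x, __y);   /* elements 1, 3, 5, 7.  */
}
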
35144 __extension__ extern __inline bfloat16x4_t
35145 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35146 vget_low_bf16 (bfloat16x8_t __a)
35147 {
35148 return __builtin_aarch64_vget_lo_halfv8bf (__a);
35149 }
35150
35151 __extension__ extern __inline bfloat16x4_t
35152 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35153 vget_high_bf16 (bfloat16x8_t __a)
35154 {
35155 return __builtin_aarch64_vget_hi_halfv8bf (__a);
35156 }
35157
35158 __extension__ extern __inline float32x4_t
35159 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35160 vcvt_f32_bf16 (bfloat16x4_t __a)
35161 {
35162 return __builtin_aarch64_vbfcvtv4bf (__a);
35163 }
35164
35165 __extension__ extern __inline float32x4_t
35166 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35167 vcvtq_low_f32_bf16 (bfloat16x8_t __a)
35168 {
35169 return __builtin_aarch64_vbfcvtv8bf (__a);
35170 }
35171
35172 __extension__ extern __inline float32x4_t
35173 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35174 vcvtq_high_f32_bf16 (bfloat16x8_t __a)
35175 {
35176 return __builtin_aarch64_vbfcvt_highv8bf (__a);
35177 }
35178
35179 __extension__ extern __inline bfloat16x4_t
35180 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35181 vcvt_bf16_f32 (float32x4_t __a)
35182 {
35183 return __builtin_aarch64_bfcvtnv4bf (__a);
35184 }
35185
35186 __extension__ extern __inline bfloat16x8_t
35187 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35188 vcvtq_low_bf16_f32 (float32x4_t __a)
35189 {
35190 return __builtin_aarch64_bfcvtn_qv8bf (__a);
35191 }
35192
35193 __extension__ extern __inline bfloat16x8_t
35194 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35195 vcvtq_high_bf16_f32 (bfloat16x8_t __inactive, float32x4_t __a)
35196 {
35197 return __builtin_aarch64_bfcvtn2v8bf (__inactive, __a);
35198 }
35199
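/* Usage sketch (illustrative only; __example_pack_bf16 is a hypothetical
   helper): narrow two float32x4_t vectors into one bfloat16x8_t, filling
   the low half of the result first and then the high half.  */
__extension__ extern __inline bfloat16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_pack_bf16 (float32x4_t __lo, float32x4_t __hi)
{
  bfloat16x8_t __v = vcvtq_low_bf16_f32 (__lo);  /* result lanes 0-3.  */
  return vcvtq_high_bf16_f32 (__v, __hi);        /* result lanes 4-7.  */
}
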
35200 __extension__ extern __inline bfloat16x4_t
35201 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35202 vcopy_lane_bf16 (bfloat16x4_t __a, const int __lane1,
35203 bfloat16x4_t __b, const int __lane2)
35204 {
35205 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
35206 __a, __lane1);
35207 }
35208
35209 __extension__ extern __inline bfloat16x8_t
35210 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35211 vcopyq_lane_bf16 (bfloat16x8_t __a, const int __lane1,
35212 bfloat16x4_t __b, const int __lane2)
35213 {
35214 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
35215 __a, __lane1);
35216 }
35217
35218 __extension__ extern __inline bfloat16x4_t
35219 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35220 vcopy_laneq_bf16 (bfloat16x4_t __a, const int __lane1,
35221 bfloat16x8_t __b, const int __lane2)
35222 {
35223 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
35224 __a, __lane1);
35225 }
35226
35227 __extension__ extern __inline bfloat16x8_t
35228 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35229 vcopyq_laneq_bf16 (bfloat16x8_t __a, const int __lane1,
35230 bfloat16x8_t __b, const int __lane2)
35231 {
35232 return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
35233 __a, __lane1);
35234 }
35235
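/* Usage sketch (illustrative only; __example_copy_one_lane is a
   hypothetical helper): copy lane 2 of the second vector into lane 0 of
   the first, leaving the remaining lanes untouched.  Both lane arguments
   must be integer constant expressions.  */
__extension__ extern __inline bfloat16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_copy_one_lane (bfloat16x4_t __a, bfloat16x4_t __b)
{
  return vcopy_lane_bf16 (__a, 0, __b, 2);
}
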
35236 __LD2_LANE_FUNC (bfloat16x4x2_t, bfloat16x4_t, bfloat16x8x2_t, bfloat16_t, v4bf,
35237 v8bf, bf, bf16, bfloat16x8_t)
35238 __LD2Q_LANE_FUNC (bfloat16x8x2_t, bfloat16x8_t, bfloat16_t, v8bf, bf, bf16)
35239 __LD3_LANE_FUNC (bfloat16x4x3_t, bfloat16x4_t, bfloat16x8x3_t, bfloat16_t, v4bf,
35240 v8bf, bf, bf16, bfloat16x8_t)
35241 __LD3Q_LANE_FUNC (bfloat16x8x3_t, bfloat16x8_t, bfloat16_t, v8bf, bf, bf16)
35242 __LD4_LANE_FUNC (bfloat16x4x4_t, bfloat16x4_t, bfloat16x8x4_t, bfloat16_t, v4bf,
35243 v8bf, bf, bf16, bfloat16x8_t)
35244 __LD4Q_LANE_FUNC (bfloat16x8x4_t, bfloat16x8_t, bfloat16_t, v8bf, bf, bf16)
35245
35246 __ST2_LANE_FUNC (bfloat16x4x2_t, bfloat16x8x2_t, bfloat16_t, v4bf, v8bf, bf,
35247 bf16, bfloat16x8_t)
35248 __ST2Q_LANE_FUNC (bfloat16x8x2_t, bfloat16_t, v8bf, bf, bf16)
35249 __ST3_LANE_FUNC (bfloat16x4x3_t, bfloat16x8x3_t, bfloat16_t, v4bf, v8bf, bf,
35250 bf16, bfloat16x8_t)
35251 __ST3Q_LANE_FUNC (bfloat16x8x3_t, bfloat16_t, v8bf, bf, bf16)
35252 __ST4_LANE_FUNC (bfloat16x4x4_t, bfloat16x8x4_t, bfloat16_t, v4bf, v8bf, bf,
35253 bf16, bfloat16x8_t)
35254 __ST4Q_LANE_FUNC (bfloat16x8x4_t, bfloat16_t, v8bf, bf, bf16)
35255
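/* Usage sketch (illustrative only; __example_ld2_lane is a hypothetical
   helper, and the intrinsic name and signature used below are assumed to
   be what the macros above expand to): reload lane 1 of a two-vector bf16
   tuple from interleaved memory.  */
__extension__ extern __inline bfloat16x4x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_ld2_lane (const bfloat16_t *__ptr, bfloat16x4x2_t __tuple)
{
  /* Reads two bf16 values from __ptr and inserts them into lane 1 of
     __tuple.val[0] and __tuple.val[1] respectively.  */
  return vld2_lane_bf16 (__ptr, __tuple, 1);
}
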
35256 #pragma GCC pop_options
35257
35258 /* AdvSIMD 8-bit Integer Matrix Multiply (I8MM) intrinsics. */
35259
35260 #pragma GCC push_options
35261 #pragma GCC target ("arch=armv8.2-a+i8mm")
35262
35263 __extension__ extern __inline int32x2_t
35264 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35265 vusdot_s32 (int32x2_t __r, uint8x8_t __a, int8x8_t __b)
35266 {
35267 return __builtin_aarch64_usdotv8qi_ssus (__r, __a, __b);
35268 }
35269
35270 __extension__ extern __inline int32x4_t
35271 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35272 vusdotq_s32 (int32x4_t __r, uint8x16_t __a, int8x16_t __b)
35273 {
35274 return __builtin_aarch64_usdotv16qi_ssus (__r, __a, __b);
35275 }
35276
35277 __extension__ extern __inline int32x2_t
35278 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35279 vusdot_lane_s32 (int32x2_t __r, uint8x8_t __a, int8x8_t __b, const int __index)
35280 {
35281 return __builtin_aarch64_usdot_lanev8qi_ssuss (__r, __a, __b, __index);
35282 }
35283
35284 __extension__ extern __inline int32x2_t
35285 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35286 vusdot_laneq_s32 (int32x2_t __r, uint8x8_t __a, int8x16_t __b,
35287 const int __index)
35288 {
35289 return __builtin_aarch64_usdot_laneqv8qi_ssuss (__r, __a, __b, __index);
35290 }
35291
35292 __extension__ extern __inline int32x4_t
35293 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35294 vusdotq_lane_s32 (int32x4_t __r, uint8x16_t __a, int8x8_t __b,
35295 const int __index)
35296 {
35297 return __builtin_aarch64_usdot_lanev16qi_ssuss (__r, __a, __b, __index);
35298 }
35299
35300 __extension__ extern __inline int32x4_t
35301 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35302 vusdotq_laneq_s32 (int32x4_t __r, uint8x16_t __a, int8x16_t __b,
35303 const int __index)
35304 {
35305 return __builtin_aarch64_usdot_laneqv16qi_ssuss (__r, __a, __b, __index);
35306 }
35307
35308 __extension__ extern __inline int32x2_t
35309 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35310 vsudot_lane_s32 (int32x2_t __r, int8x8_t __a, uint8x8_t __b, const int __index)
35311 {
35312 return __builtin_aarch64_sudot_lanev8qi_sssus (__r, __a, __b, __index);
35313 }
35314
35315 __extension__ extern __inline int32x2_t
35316 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35317 vsudot_laneq_s32 (int32x2_t __r, int8x8_t __a, uint8x16_t __b,
35318 const int __index)
35319 {
35320 return __builtin_aarch64_sudot_laneqv8qi_sssus (__r, __a, __b, __index);
35321 }
35322
35323 __extension__ extern __inline int32x4_t
35324 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35325 vsudotq_lane_s32 (int32x4_t __r, int8x16_t __a, uint8x8_t __b,
35326 const int __index)
35327 {
35328 return __builtin_aarch64_sudot_lanev16qi_sssus (__r, __a, __b, __index);
35329 }
35330
35331 __extension__ extern __inline int32x4_t
35332 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35333 vsudotq_laneq_s32 (int32x4_t __r, int8x16_t __a, uint8x16_t __b,
35334 const int __index)
35335 {
35336 return __builtin_aarch64_sudot_laneqv16qi_sssus (__r, __a, __b, __index);
35337 }
35338
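/* Usage sketch (illustrative only; __example_usdot is a hypothetical
   helper): USDOT accumulates, into each 32-bit lane of the accumulator,
   the dot product of four unsigned bytes with the corresponding four
   signed bytes.  */
__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_usdot (int32x2_t __acc, uint8x8_t __u, int8x8_t __s)
{
  /* __acc[i] += __u[4*i] * __s[4*i] + ... + __u[4*i+3] * __s[4*i+3].  */
  return vusdot_s32 (__acc, __u, __s);
}
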
35339 /* Matrix Multiply-Accumulate. */
35340
35341 __extension__ extern __inline int32x4_t
35342 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35343 vmmlaq_s32 (int32x4_t __r, int8x16_t __a, int8x16_t __b)
35344 {
35345 return __builtin_aarch64_simd_smmlav16qi (__r, __a, __b);
35346 }
35347
35348 __extension__ extern __inline uint32x4_t
35349 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35350 vmmlaq_u32 (uint32x4_t __r, uint8x16_t __a, uint8x16_t __b)
35351 {
35352 return __builtin_aarch64_simd_ummlav16qi_uuuu (__r, __a, __b);
35353 }
35354
35355 __extension__ extern __inline int32x4_t
35356 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35357 vusmmlaq_s32 (int32x4_t __r, uint8x16_t __a, int8x16_t __b)
35358 {
35359 return __builtin_aarch64_simd_usmmlav16qi_ssus (__r, __a, __b);
35360 }
35361
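/* Usage sketch (illustrative only; __example_smmla is a hypothetical
   helper): SMMLA treats its byte operands as row-major 2x8 matrices and
   accumulates the 2x2 product of the first matrix with the transpose of
   the second into the row-major 2x2 int32 accumulator.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_smmla (int32x4_t __acc, int8x16_t __a, int8x16_t __b)
{
  /* __acc[2*i + j] += dot (row i of __a, row j of __b).  */
  return vmmlaq_s32 (__acc, __a, __b);
}
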
35362 #pragma GCC pop_options
35363
35364 __extension__ extern __inline poly8x8_t
35365 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35366 vadd_p8 (poly8x8_t __a, poly8x8_t __b)
35367 {
35368 return __a ^ __b;
35369 }
35370
35371 __extension__ extern __inline poly16x4_t
35372 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35373 vadd_p16 (poly16x4_t __a, poly16x4_t __b)
35374 {
35375 return __a ^ __b;
35376 }
35377
35378 __extension__ extern __inline poly64x1_t
35379 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35380 vadd_p64 (poly64x1_t __a, poly64x1_t __b)
35381 {
35382 return __a ^ __b;
35383 }
35384
35385 __extension__ extern __inline poly8x16_t
35386 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35387 vaddq_p8 (poly8x16_t __a, poly8x16_t __b)
35388 {
35389 return __a ^ __b;
35390 }
35391
35392 __extension__ extern __inline poly16x8_t
35393 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35394 vaddq_p16 (poly16x8_t __a, poly16x8_t __b)
35395 {
35396 return __a ^ __b;
35397 }
35398
35399 __extension__ extern __inline poly64x2_t
35400 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35401 vaddq_p64 (poly64x2_t __a, poly64x2_t __b)
35402 {
35403 return __a ^ __b;
35404 }
35405
35406 __extension__ extern __inline poly128_t
35407 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
35408 vaddq_p128 (poly128_t __a, poly128_t __b)
35409 {
35410 return __a ^ __b;
35411 }
35412
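/* Usage sketch (illustrative only; __example_poly_add is a hypothetical
   helper): polynomial addition over GF(2) is carry-less, so the vadd_p*
   intrinsics above reduce to a lane-wise exclusive OR, unlike their
   integer counterparts.  */
__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
__example_poly_add (poly8x8_t __a, poly8x8_t __b)
{
  return vadd_p8 (__a, __b);  /* identical to __a ^ __b.  */
}
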
35413 #undef __aarch64_vget_lane_any
35414
35415 #undef __aarch64_vdup_lane_any
35416 #undef __aarch64_vdup_lane_f16
35417 #undef __aarch64_vdup_lane_f32
35418 #undef __aarch64_vdup_lane_f64
35419 #undef __aarch64_vdup_lane_p8
35420 #undef __aarch64_vdup_lane_p16
35421 #undef __aarch64_vdup_lane_s8
35422 #undef __aarch64_vdup_lane_s16
35423 #undef __aarch64_vdup_lane_s32
35424 #undef __aarch64_vdup_lane_s64
35425 #undef __aarch64_vdup_lane_u8
35426 #undef __aarch64_vdup_lane_u16
35427 #undef __aarch64_vdup_lane_u32
35428 #undef __aarch64_vdup_lane_u64
35429 #undef __aarch64_vdup_laneq_f16
35430 #undef __aarch64_vdup_laneq_f32
35431 #undef __aarch64_vdup_laneq_f64
35432 #undef __aarch64_vdup_laneq_p8
35433 #undef __aarch64_vdup_laneq_p16
35434 #undef __aarch64_vdup_laneq_s8
35435 #undef __aarch64_vdup_laneq_s16
35436 #undef __aarch64_vdup_laneq_s32
35437 #undef __aarch64_vdup_laneq_s64
35438 #undef __aarch64_vdup_laneq_u8
35439 #undef __aarch64_vdup_laneq_u16
35440 #undef __aarch64_vdup_laneq_u32
35441 #undef __aarch64_vdup_laneq_u64
35442 #undef __aarch64_vdupq_lane_f16
35443 #undef __aarch64_vdupq_lane_f32
35444 #undef __aarch64_vdupq_lane_f64
35445 #undef __aarch64_vdupq_lane_p8
35446 #undef __aarch64_vdupq_lane_p16
35447 #undef __aarch64_vdupq_lane_s8
35448 #undef __aarch64_vdupq_lane_s16
35449 #undef __aarch64_vdupq_lane_s32
35450 #undef __aarch64_vdupq_lane_s64
35451 #undef __aarch64_vdupq_lane_u8
35452 #undef __aarch64_vdupq_lane_u16
35453 #undef __aarch64_vdupq_lane_u32
35454 #undef __aarch64_vdupq_lane_u64
35455 #undef __aarch64_vdupq_laneq_f16
35456 #undef __aarch64_vdupq_laneq_f32
35457 #undef __aarch64_vdupq_laneq_f64
35458 #undef __aarch64_vdupq_laneq_p8
35459 #undef __aarch64_vdupq_laneq_p16
35460 #undef __aarch64_vdupq_laneq_s8
35461 #undef __aarch64_vdupq_laneq_s16
35462 #undef __aarch64_vdupq_laneq_s32
35463 #undef __aarch64_vdupq_laneq_s64
35464 #undef __aarch64_vdupq_laneq_u8
35465 #undef __aarch64_vdupq_laneq_u16
35466 #undef __aarch64_vdupq_laneq_u32
35467 #undef __aarch64_vdupq_laneq_u64
35468
35469 #undef __LD2_LANE_FUNC
35470 #undef __LD2Q_LANE_FUNC
35471 #undef __LD3_LANE_FUNC
35472 #undef __LD3Q_LANE_FUNC
35473 #undef __LD4_LANE_FUNC
35474 #undef __LD4Q_LANE_FUNC
35475 #undef __ST2_LANE_FUNC
35476 #undef __ST2Q_LANE_FUNC
35477 #undef __ST3_LANE_FUNC
35478 #undef __ST3Q_LANE_FUNC
35479 #undef __ST4_LANE_FUNC
35480 #undef __ST4Q_LANE_FUNC
35481
35482 #endif