From: Christophe Lyon
Date: Mon, 23 May 2016 09:02:51 +0000 (+0000)
Subject: [ARM, AArch64] Add missing vstX_lane fp16 tests.
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=4b1f57fedad042fcf2bf2cc78d0e62172b4d7aed;p=gcc.git

[ARM, AArch64] Add missing vstX_lane fp16 tests.

2016-05-23  Christophe Lyon

	* gcc.target/aarch64/advsimd-intrinsics/vstX_lane.c: Add fp16 tests.

From-SVN: r236577
---

diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index cc1c39fa559..e6490135031 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,7 @@
+2016-05-23  Christophe Lyon
+
+	* gcc.target/aarch64/advsimd-intrinsics/vstX_lane.c: Add fp16 tests.
+
 2016-05-23  Christophe Lyon
 
 	* gcc.target/aarch64/advsimd-intrinsics/vtst.c: Add tests for
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vstX_lane.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vstX_lane.c
index b923b644124..282edd591a0 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vstX_lane.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vstX_lane.c
@@ -14,6 +14,7 @@ VECT_VAR_DECL(expected_st2_0,uint,32,2) [] = { 0xfffffff0, 0xfffffff1 };
 VECT_VAR_DECL(expected_st2_0,poly,8,8) [] = { 0xf0, 0xf1, 0x0, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st2_0,poly,16,4) [] = { 0xfff0, 0xfff1, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,hfloat,16,4) [] = { 0xcc00, 0xcb80, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st2_0,hfloat,32,2) [] = { 0xc1800000, 0xc1700000 };
 VECT_VAR_DECL(expected_st2_0,int,16,8) [] = { 0xfff0, 0xfff1, 0x0, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
@@ -24,6 +25,8 @@ VECT_VAR_DECL(expected_st2_0,uint,32,4) [] = { 0xfffffff0, 0xfffffff1,
                                                0x0, 0x0 };
 VECT_VAR_DECL(expected_st2_0,poly,16,8) [] = { 0xfff0, 0xfff1, 0x0, 0x0,
                                                0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,hfloat,16,8) [] = { 0xcc00, 0xcb80, 0x0, 0x0,
+                                                 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st2_0,hfloat,32,4) [] = { 0xc1800000, 0xc1700000,
                                                  0x0, 0x0 };
 
@@ -39,6 +42,7 @@ VECT_VAR_DECL(expected_st2_1,uint,32,2) [] = { 0x0, 0x0 };
 VECT_VAR_DECL(expected_st2_1,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st2_1,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,hfloat,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st2_1,hfloat,32,2) [] = { 0x0, 0x0 };
 VECT_VAR_DECL(expected_st2_1,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
@@ -48,6 +52,8 @@ VECT_VAR_DECL(expected_st2_1,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
 VECT_VAR_DECL(expected_st2_1,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st2_1,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                                0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,hfloat,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+                                                 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st2_1,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 
 /* Expected results for vst3, chunk 0.  */
@@ -62,6 +68,7 @@ VECT_VAR_DECL(expected_st3_0,uint,32,2) [] = { 0xfffffff0, 0xfffffff1 };
 VECT_VAR_DECL(expected_st3_0,poly,8,8) [] = { 0xf0, 0xf1, 0xf2, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st3_0,poly,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0x0 };
+VECT_VAR_DECL(expected_st3_0,hfloat,16,4) [] = { 0xcc00, 0xcb80, 0xcb00, 0x0 };
 VECT_VAR_DECL(expected_st3_0,hfloat,32,2) [] = { 0xc1800000, 0xc1700000 };
 VECT_VAR_DECL(expected_st3_0,int,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
@@ -73,6 +80,8 @@ VECT_VAR_DECL(expected_st3_0,uint,32,4) [] = { 0xfffffff0, 0xfffffff1,
                                                0xfffffff2, 0x0 };
 VECT_VAR_DECL(expected_st3_0,poly,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0x0,
                                                0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_0,hfloat,16,8) [] = { 0xcc00, 0xcb80, 0xcb00, 0x0,
+                                                 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st3_0,hfloat,32,4) [] = { 0xc1800000, 0xc1700000,
                                                  0xc1600000, 0x0 };
 
@@ -88,6 +97,7 @@ VECT_VAR_DECL(expected_st3_1,uint,32,2) [] = { 0xfffffff2, 0x0 };
 VECT_VAR_DECL(expected_st3_1,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st3_1,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,hfloat,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st3_1,hfloat,32,2) [] = { 0xc1600000, 0x0 };
 VECT_VAR_DECL(expected_st3_1,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
@@ -97,6 +107,8 @@ VECT_VAR_DECL(expected_st3_1,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
 VECT_VAR_DECL(expected_st3_1,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st3_1,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                                0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,hfloat,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+                                                 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st3_1,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 
 /* Expected results for vst3, chunk 2.  */
@@ -111,6 +123,7 @@ VECT_VAR_DECL(expected_st3_2,uint,32,2) [] = { 0x0, 0x0 };
 VECT_VAR_DECL(expected_st3_2,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st3_2,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,hfloat,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st3_2,hfloat,32,2) [] = { 0x0, 0x0 };
 VECT_VAR_DECL(expected_st3_2,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
@@ -120,6 +133,8 @@ VECT_VAR_DECL(expected_st3_2,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
 VECT_VAR_DECL(expected_st3_2,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st3_2,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                                0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,hfloat,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+                                                 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st3_2,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 
 /* Expected results for vst4, chunk 0.  */
@@ -134,6 +149,7 @@ VECT_VAR_DECL(expected_st4_0,uint,32,2) [] = { 0xfffffff0, 0xfffffff1 };
 VECT_VAR_DECL(expected_st4_0,poly,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
                                               0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_0,poly,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+VECT_VAR_DECL(expected_st4_0,hfloat,16,4) [] = { 0xcc00, 0xcb80, 0xcb00, 0xca80 };
 VECT_VAR_DECL(expected_st4_0,hfloat,32,2) [] = { 0xc1800000, 0xc1700000 };
 VECT_VAR_DECL(expected_st4_0,int,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
                                               0x0, 0x0, 0x0, 0x0 };
@@ -145,6 +161,8 @@ VECT_VAR_DECL(expected_st4_0,uint,32,4) [] = { 0xfffffff0, 0xfffffff1,
                                                0xfffffff2, 0xfffffff3 };
 VECT_VAR_DECL(expected_st4_0,poly,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
                                                0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_0,hfloat,16,8) [] = { 0xcc00, 0xcb80, 0xcb00, 0xca80,
+                                                 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_0,hfloat,32,4) [] = { 0xc1800000, 0xc1700000,
                                                  0xc1600000, 0xc1500000 };
 
@@ -160,6 +178,7 @@ VECT_VAR_DECL(expected_st4_1,uint,32,2) [] = { 0xfffffff2, 0xfffffff3 };
 VECT_VAR_DECL(expected_st4_1,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_1,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,hfloat,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_1,hfloat,32,2) [] = { 0xc1600000, 0xc1500000 };
 VECT_VAR_DECL(expected_st4_1,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
@@ -169,6 +188,8 @@ VECT_VAR_DECL(expected_st4_1,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
 VECT_VAR_DECL(expected_st4_1,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_1,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                                0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,hfloat,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+                                                 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_1,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 
 /* Expected results for vst4, chunk 2.  */
@@ -183,6 +204,7 @@ VECT_VAR_DECL(expected_st4_2,uint,32,2) [] = { 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_2,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_2,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,hfloat,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_2,hfloat,32,2) [] = { 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_2,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
@@ -192,6 +214,8 @@ VECT_VAR_DECL(expected_st4_2,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
 VECT_VAR_DECL(expected_st4_2,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_2,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                                0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,hfloat,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+                                                 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_2,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 
 /* Expected results for vst4, chunk 3.  */
@@ -206,6 +230,7 @@ VECT_VAR_DECL(expected_st4_3,uint,32,2) [] = { 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_3,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_3,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,hfloat,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_3,hfloat,32,2) [] = { 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_3,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                               0x0, 0x0, 0x0, 0x0 };
@@ -215,6 +240,8 @@ VECT_VAR_DECL(expected_st4_3,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
 VECT_VAR_DECL(expected_st4_3,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_3,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
                                                0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,hfloat,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+                                                 0x0, 0x0, 0x0, 0x0 };
 VECT_VAR_DECL(expected_st4_3,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
 
 /* Declare additional input buffers as needed.  */
@@ -229,6 +256,7 @@ VECT_VAR_DECL_INIT(buffer_vld2_lane, uint, 32, 2);
 VECT_VAR_DECL_INIT(buffer_vld2_lane, uint, 64, 2);
 VECT_VAR_DECL_INIT(buffer_vld2_lane, poly, 8, 2);
 VECT_VAR_DECL_INIT(buffer_vld2_lane, poly, 16, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, float, 16, 2);
 VECT_VAR_DECL_INIT(buffer_vld2_lane, float, 32, 2);
 
 /* Input buffers for vld3_lane.  */
@@ -242,6 +270,7 @@ VECT_VAR_DECL_INIT(buffer_vld3_lane, uint, 32, 3);
 VECT_VAR_DECL_INIT(buffer_vld3_lane, uint, 64, 3);
 VECT_VAR_DECL_INIT(buffer_vld3_lane, poly, 8, 3);
 VECT_VAR_DECL_INIT(buffer_vld3_lane, poly, 16, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, float, 16, 3);
 VECT_VAR_DECL_INIT(buffer_vld3_lane, float, 32, 3);
 
 /* Input buffers for vld4_lane.  */
@@ -255,6 +284,7 @@ VECT_VAR_DECL_INIT(buffer_vld4_lane, uint, 32, 4);
 VECT_VAR_DECL_INIT(buffer_vld4_lane, uint, 64, 4);
 VECT_VAR_DECL_INIT(buffer_vld4_lane, poly, 8, 4);
 VECT_VAR_DECL_INIT(buffer_vld4_lane, poly, 16, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, float, 16, 4);
 VECT_VAR_DECL_INIT(buffer_vld4_lane, float, 32, 4);
 
 void exec_vstX_lane (void)
@@ -302,7 +332,7 @@ void exec_vstX_lane (void)
 
 /* We need all variants in 64 bits, but there is no 64x2 variant,
    nor 128 bits vectors of int8/uint8/poly8.  */
-#define DECL_ALL_VSTX_LANE(X) \
+#define DECL_ALL_VSTX_LANE_NO_FP16(X) \
 DECL_VSTX_LANE(int, 8, 8, X); \
 DECL_VSTX_LANE(int, 16, 4, X); \
 DECL_VSTX_LANE(int, 32, 2, X); \
@@ -319,11 +349,20 @@ void exec_vstX_lane (void)
 DECL_VSTX_LANE(poly, 16, 8, X); \
 DECL_VSTX_LANE(float, 32, 4, X)
 
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#define DECL_ALL_VSTX_LANE(X) \
+  DECL_ALL_VSTX_LANE_NO_FP16(X); \
+  DECL_VSTX_LANE(float, 16, 4, X); \
+  DECL_VSTX_LANE(float, 16, 8, X)
+#else
+#define DECL_ALL_VSTX_LANE(X) DECL_ALL_VSTX_LANE_NO_FP16(X)
+#endif
+
 #define DUMMY_ARRAY(V, T, W, N, L) VECT_VAR_DECL(V,T,W,N)[N*L]
 
 /* Use the same lanes regardless of the size of the array (X), for
    simplicity.  */
-#define TEST_ALL_VSTX_LANE(X) \
+#define TEST_ALL_VSTX_LANE_NO_FP16(X) \
 TEST_VSTX_LANE(, int, s, 8, 8, X, 7); \
 TEST_VSTX_LANE(, int, s, 16, 4, X, 2); \
 TEST_VSTX_LANE(, int, s, 32, 2, X, 0); \
@@ -340,7 +379,16 @@ void exec_vstX_lane (void)
 TEST_VSTX_LANE(q, poly, p, 16, 8, X, 5); \
 TEST_VSTX_LANE(q, float, f, 32, 4, X, 2)
 
-#define TEST_ALL_EXTRA_CHUNKS(X, Y) \
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#define TEST_ALL_VSTX_LANE(X) \
+  TEST_ALL_VSTX_LANE_NO_FP16(X); \
+  TEST_VSTX_LANE(, float, f, 16, 4, X, 2); \
+  TEST_VSTX_LANE(q, float, f, 16, 8, X, 6)
+#else
+#define TEST_ALL_VSTX_LANE(X) TEST_ALL_VSTX_LANE_NO_FP16(X)
+#endif
+
+#define TEST_ALL_EXTRA_CHUNKS_NO_FP16(X, Y) \
 TEST_EXTRA_CHUNK(int, 8, 8, X, Y); \
 TEST_EXTRA_CHUNK(int, 16, 4, X, Y); \
 TEST_EXTRA_CHUNK(int, 32, 2, X, Y); \
@@ -357,6 +405,15 @@ void exec_vstX_lane (void)
 TEST_EXTRA_CHUNK(poly, 16, 8, X, Y); \
 TEST_EXTRA_CHUNK(float, 32, 4, X, Y)
 
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+#define TEST_ALL_EXTRA_CHUNKS(X,Y) \
+  TEST_ALL_EXTRA_CHUNKS_NO_FP16(X, Y); \
+  TEST_EXTRA_CHUNK(float, 16, 4, X, Y); \
+  TEST_EXTRA_CHUNK(float, 16, 8, X, Y)
+#else
+#define TEST_ALL_EXTRA_CHUNKS(X,Y) TEST_ALL_EXTRA_CHUNKS_NO_FP16(X, Y)
+#endif
+
   /* Declare the temporary buffers / variables.  */
   DECL_ALL_VSTX_LANE(2);
   DECL_ALL_VSTX_LANE(3);
@@ -371,12 +428,18 @@ void exec_vstX_lane (void)
   DUMMY_ARRAY(buffer_src, uint, 32, 2, 4);
   DUMMY_ARRAY(buffer_src, poly, 8, 8, 4);
   DUMMY_ARRAY(buffer_src, poly, 16, 4, 4);
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+  DUMMY_ARRAY(buffer_src, float, 16, 4, 4);
+#endif
   DUMMY_ARRAY(buffer_src, float, 32, 2, 4);
   DUMMY_ARRAY(buffer_src, int, 16, 8, 4);
   DUMMY_ARRAY(buffer_src, int, 32, 4, 4);
   DUMMY_ARRAY(buffer_src, uint, 16, 8, 4);
   DUMMY_ARRAY(buffer_src, uint, 32, 4, 4);
   DUMMY_ARRAY(buffer_src, poly, 16, 8, 4);
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+  DUMMY_ARRAY(buffer_src, float, 16, 8, 4);
+#endif
   DUMMY_ARRAY(buffer_src, float, 32, 4, 4);
 
   /* Check vst2_lane/vst2q_lane.  */
@@ -400,6 +463,10 @@ void exec_vstX_lane (void)
   CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st2_0, CMT);
   CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st2_0, CMT);
   CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st2_0, CMT);
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+  CHECK_FP(TEST_MSG, float, 16, 4, PRIx16, expected_st2_0, CMT);
+  CHECK_FP(TEST_MSG, float, 16, 8, PRIx16, expected_st2_0, CMT);
+#endif
 
   TEST_ALL_EXTRA_CHUNKS(2, 1);
 #undef CMT
@@ -419,6 +486,10 @@ void exec_vstX_lane (void)
   CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st2_1, CMT);
   CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st2_1, CMT);
   CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st2_1, CMT);
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+  CHECK_FP(TEST_MSG, float, 16, 4, PRIx16, expected_st2_1, CMT);
+  CHECK_FP(TEST_MSG, float, 16, 8, PRIx16, expected_st2_1, CMT);
+#endif
 
 
   /* Check vst3_lane/vst3q_lane.  */
@@ -444,6 +515,10 @@ void exec_vstX_lane (void)
   CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st3_0, CMT);
   CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st3_0, CMT);
   CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st3_0, CMT);
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+  CHECK_FP(TEST_MSG, float, 16, 4, PRIx16, expected_st3_0, CMT);
+  CHECK_FP(TEST_MSG, float, 16, 8, PRIx16, expected_st3_0, CMT);
+#endif
 
 
   TEST_ALL_EXTRA_CHUNKS(3, 1);
@@ -464,6 +539,10 @@ void exec_vstX_lane (void)
   CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st3_1, CMT);
   CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st3_1, CMT);
   CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st3_1, CMT);
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+  CHECK_FP(TEST_MSG, float, 16, 4, PRIx16, expected_st3_1, CMT);
+  CHECK_FP(TEST_MSG, float, 16, 8, PRIx16, expected_st3_1, CMT);
+#endif
 
 
   TEST_ALL_EXTRA_CHUNKS(3, 2);
@@ -484,6 +563,10 @@ void exec_vstX_lane (void)
   CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st3_2, CMT);
   CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st3_2, CMT);
   CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st3_2, CMT);
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+  CHECK_FP(TEST_MSG, float, 16, 4, PRIx16, expected_st3_2, CMT);
+  CHECK_FP(TEST_MSG, float, 16, 8, PRIx16, expected_st3_2, CMT);
+#endif
 
 
   /* Check vst4_lane/vst4q_lane.  */
@@ -509,6 +592,10 @@ void exec_vstX_lane (void)
   CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st4_0, CMT);
   CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st4_0, CMT);
   CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st4_0, CMT);
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+  CHECK_FP(TEST_MSG, float, 16, 4, PRIx16, expected_st4_0, CMT);
+  CHECK_FP(TEST_MSG, float, 16, 8, PRIx16, expected_st4_0, CMT);
+#endif
 
 
   TEST_ALL_EXTRA_CHUNKS(4, 1);
@@ -529,6 +616,10 @@ void exec_vstX_lane (void)
   CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st4_1, CMT);
   CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st4_1, CMT);
   CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st4_1, CMT);
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+  CHECK_FP(TEST_MSG, float, 16, 4, PRIx16, expected_st4_1, CMT);
+  CHECK_FP(TEST_MSG, float, 16, 8, PRIx16, expected_st4_1, CMT);
+#endif
 
 
   TEST_ALL_EXTRA_CHUNKS(4, 2);
@@ -549,6 +640,10 @@ void exec_vstX_lane (void)
   CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st4_2, CMT);
   CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st4_2, CMT);
   CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st4_2, CMT);
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+  CHECK_FP(TEST_MSG, float, 16, 4, PRIx16, expected_st4_2, CMT);
+  CHECK_FP(TEST_MSG, float, 16, 8, PRIx16, expected_st4_2, CMT);
+#endif
 
 
   TEST_ALL_EXTRA_CHUNKS(4, 3);
@@ -569,6 +664,10 @@ void exec_vstX_lane (void)
   CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st4_3, CMT);
   CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st4_3, CMT);
   CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st4_3, CMT);
+#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
+  CHECK_FP(TEST_MSG, float, 16, 4, PRIx16, expected_st4_3, CMT);
+  CHECK_FP(TEST_MSG, float, 16, 8, PRIx16, expected_st4_3, CMT);
+#endif
 }
 
 int main (void)
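
Note (not part of the patch): for readers unfamiliar with the intrinsics being exercised, the new fp16 cases end up calling the lane-wise structure stores vst2_lane_f16 through vst4q_lane_f16 behind the TEST_VSTX_LANE machinery, guarded by the same __ARM_FP16_FORMAT_IEEE / __ARM_FP16_FORMAT_ALTERNATIVE test. The sketch below shows the shape of such a call; the function name, output buffer and lane index are invented for illustration and assume an fp16-capable target with arm_neon.h.

#include <arm_neon.h>

#if defined (__ARM_FP16_FORMAT_IEEE) || defined (__ARM_FP16_FORMAT_ALTERNATIVE)
/* Store lane 2 of each of the two float16x4_t vectors to OUT, writing
   two consecutive float16_t values (one per vector in the pair).  */
void
store_f16_lane2 (float16_t *out, float16x4_t a, float16x4_t b)
{
  float16x4x2_t pair = { { a, b } };
  vst2_lane_f16 (out, pair, 2);  /* Lane index must be a constant in [0,3].  */
}
#endif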