-#define SV_REG_CSR( type, regkey, elwidth, regidx, isvec, packed ) \
- (regkey | (elwidth<<5) | (type<<7) | (regidx<<8) | (isvec<<14) | (packed<<15))
+#define SV_REG_CSR(type, regkey, elwidth, regidx, isvec) \
+ (regkey | (elwidth<<5) | (type<<7) | (regidx<<8) | (isvec<<15))
-#define SV_PRED_CSR( type, regkey, zero, inv, regidx, active ) \
- (regkey | (zero<<5) | (inv<<6) | (type<<7) | (regidx<<8) | (active<<14))
-
+#define SV_PRED_CSR(type, regkey, zero, inv, regidx, packed) \
+ (regkey | (zero<<5) | (inv<<6) | (type<<7) | (regidx<<8) | (packed<<15))
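# Net effect of the two hunks above: isvec moves from bit 14 to bit 15 of a
# register-table entry (dropping that entry's packed field), while the
# predication-table entry gains packed at bit 15 in place of active at bit 14.
# Worked example of the new register-entry encoding (illustration only):
#   SV_REG_CSR(1, 12, 0, 12, 1)
#     = 12 | (0<<5) | (1<<7) | (12<<8) | (1<<15)
#     = 0x000c | 0x0080 | 0x0c00 | 0x8000
#     = 0x8c8c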
-#define SET_SV_CSR( type, regkey, elwidth, regidx, isvec, packed ) \
- li x1, SV_REG_CSR( type, regkey, elwidth, regidx, isvec, packed ); \
+#define SET_SV_CSR( type, regkey, elwidth, regidx, isvec) \
+ li x1, SV_REG_CSR( type, regkey, elwidth, regidx, isvec); \
csrrw x0, 0x4c0, x1
-#define SET_SV_PRED_CSR( type, regkey, zero, inv, regidx, active ) \
- li x1, SV_PRED_CSR( type, regkey, zero, inv, regidx, active ); \
+#define SET_SV_PRED_CSR( type, regkey, zero, inv, regidx, packed ) \
+ li x1, SV_PRED_CSR( type, regkey, zero, inv, regidx, packed ); \
csrrw x0, 0x4c8, x1
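# Usage sketch (mirrors the test fragments below; note x1 is clobbered by the
# li inside each macro). Declare key 3 (type=1, the x-register table in these
# tests) as a 2-long vector at x3, predicated by a mask held in x6:
#   SET_SV_MVL(2)
#   SET_SV_CSR(1, 3, 0, 3, 1)          # register entry -> CSR 0x4c0
#   SET_SV_PRED_CSR(1, 3, 0, 0, 6, 0)  # predicate entry -> CSR 0x4c8
#   SET_SV_VL(2)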
#define SET_SV_2CSRS( c1, c2 ) \
li a4, 0
SET_SV_MVL(3)
- SET_SV_2CSRS( SV_REG_CSR(1, 12, 0, 12, 1, 0),
- SV_REG_CSR(1, 2, 0, 2, 1, 0) )
+ SET_SV_2CSRS( SV_REG_CSR(1, 12, 0, 12, 1),
+ SV_REG_CSR(1, 2, 0, 2, 1) )
SET_SV_VL(3)
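 # keys 2 and 12 should now name 3-long vectors, x2..x4 and x12..x14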
mv a1, sp
la sp, data;
SET_SV_MVL(3)
- SET_SV_2CSRS( SV_REG_CSR(1, 12, 0, 12, 1, 0),
- SV_REG_CSR(1, 2, 0, 2, 1, 0) )
+ SET_SV_2CSRS( SV_REG_CSR(1, 12, 0, 12, 1),
+ SV_REG_CSR(1, 2, 0, 2, 1) )
SET_SV_2PREDCSRS(
- SV_PRED_CSR(1, 2, 0, 0, 10, 1),
- SV_PRED_CSR(1, 12, 0, 0, 11, 1) );
+ SV_PRED_CSR(1, 2, 0, 0, 10, 0),
+ SV_PRED_CSR(1, 12, 0, 0, 11, 0) );
SET_SV_VL(3)
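 # x10 gates the x2 vector and x11 gates the x12 vector (zero=0, inv=0,
 # packed=0); e.g. SV_PRED_CSR(1, 2, 0, 0, 10, 0) evaluates to 0xa82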
 li x5, 0 # deliberately set x5 to 0
SET_SV_MVL(3)
- SET_SV_2CSRS( SV_REG_CSR(1, 3, 0, 3, 1, 0),
- SV_REG_CSR(1, 6, 0, 6, 1, 0) )
+ SET_SV_2CSRS( SV_REG_CSR(1, 3, 0, 3, 1),
+ SV_REG_CSR(1, 6, 0, 6, 1) )
SET_SV_VL(3)
.option rvc
li a4, pred2; \
\
SET_SV_MVL(3); \
- SET_SV_2CSRS( SV_REG_CSR(1, 3, 0, 3, 1, 0), \
- SV_REG_CSR(1, 6, 0, 6, 1, 0) ); \
+ SET_SV_2CSRS( SV_REG_CSR(1, 3, 0, 3, 1), \
+ SV_REG_CSR(1, 6, 0, 6, 1) ); \
SET_SV_2PREDCSRS( \
- SV_PRED_CSR(1, 3, 0, 0, 13, 1), \
- SV_PRED_CSR(1, 6, 0, 0, 14, 1) );\
+ SV_PRED_CSR(1, 3, 0, 0, 13, 0), \
+ SV_PRED_CSR(1, 6, 0, 0, 14, 0) );\
SET_SV_VL(3); \
\
.option rvc; \
li a4, 1004;
SET_SV_MVL(3)
- SET_SV_2CSRS( SV_REG_CSR(1, 12, 0, 12, 1, 0),
- SV_REG_CSR(1, 2, 0, 2, 1, 0) )
+ SET_SV_2CSRS( SV_REG_CSR(1, 12, 0, 12, 1),
+ SV_REG_CSR(1, 2, 0, 2, 1) )
SET_SV_VL(3)
mv a1, sp
SV_FLD_DATA( f8, testdata+56, 0)
SET_SV_MVL(2)
- SET_SV_2CSRS( SV_REG_CSR(0, 2, 0, 2, 1, 0),
- SV_REG_CSR(0, 6, 0, 6, 1, 0) )
+ SET_SV_2CSRS( SV_REG_CSR(0, 2, 0, 2, 1),
+ SV_REG_CSR(0, 6, 0, 6, 1) )
SET_SV_VL(2)
fadd.d f2, f2, f6;
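# type=0 selects the floating-point table, so at VL=2 the fadd.d above should
# behave like the scalar pair (illustrative expansion, not emitted code):
#   fadd.d f2, f2, f6
#   fadd.d f3, f3, f7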
sv_addi_scalar_src \
sv_addi_vector_vector \
sv_addi_predicated \
+ sv_beq \
rv64ui_p_tests = $(addprefix rv64ui-p-, $(rv64ui_sv_tests))
rv64ui_v_tests = $(addprefix rv64ui-v-, $(rv64ui_sv_tests))
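# adding sv_beq to the list generates both rv64ui-p-sv_beq and rv64ui-v-sv_beq
# targets via these addprefix calls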
SV_LD_DATA( x5, testdata+24, 0)
SET_SV_MVL(2)
- SET_SV_CSR(1, 3, 0, 3, 1, 0)
+ SET_SV_CSR(1, 3, 0, 3, 1)
SET_SV_VL(2)
addi x3, x3, 1
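 # with key 3 a 2-long vector, the addi above should expand to:
 #   addi x3, x3, 1
 #   addi x4, x4, 1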
li x6, pred; \
\
SET_SV_MVL( 2); \
- SET_SV_CSR( 1, 3, 0, 3, 1, 0); \
- SET_SV_PRED_CSR( 1, 3, zero, inv, 6, 1); \
+ SET_SV_CSR( 1, 3, 0, 3, 1); \
+ SET_SV_PRED_CSR( 1, 3, zero, inv, 6, 0); \
SET_SV_VL( 2); \
\
addi x3, x3, 1; \
SV_LD_DATA( x5, testdata+24, 0)
SET_SV_MVL(2)
- SET_SV_CSR(1, 16, 0, 3, 1, 0)
+ SET_SV_CSR(1, 16, 0, 3, 1)
SET_SV_VL(2)
addi x16, x16, 1
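 # key 16 redirects to regidx 3: this addi effectively updates x3 and x4,
 # i.e. the lookup key and the target register file index are decoupled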
li x6, 41 # going to be stored in x3 *and* x4 (plus one, on each)
SET_SV_MVL(2)
- SET_SV_CSR(1, 3, 0, 3, 1, 0)
+ SET_SV_CSR(1, 3, 0, 3, 1)
SET_SV_VL(2)
addi x3, x6, 1 # x3 = x6+1 *AND* x4 = x6+1
li x4, 0 # deliberately set x4 to 0
SET_SV_MVL(2)
- SET_SV_2CSRS( SV_REG_CSR(1, 3, 0, 3, 1, 0),
- SV_REG_CSR(1, 6, 0, 6, 1, 0) )
+ SET_SV_2CSRS( SV_REG_CSR(1, 3, 0, 3, 1),
+ SV_REG_CSR(1, 6, 0, 6, 1) )
SET_SV_VL(2)
addi x3, x6, 1