From 8aa6ada8384a961b37dfefec7f9e40e5a4e27ce7 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Samuel=20Iglesias=20Gons=C3=A1lvez?=
Date: Wed, 19 Apr 2017 10:35:07 +0200
Subject: [PATCH] i965/vec4: fix swizzle and writemask when loading a uniform
 with constant offset
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

It was setting the XYZW swizzle and writemask on all uniforms, regardless
of whether they were vectors or scalars, which can lead to problems when
loading them into the push constant buffer.

Moreover, the 'shift' calculation was designed to compute the offset in
DWORDS, but it did not take DFs into account, so the swizzle calculated
for the latter was wrong.

The indirect case is not changed because MOV INDIRECT will write to all
components. An assert is added to verify that these uniforms are
vec4-aligned.

v2:
- Fix 'shift' calculation (Curro)
- Set both swizzle and writemask.
- Add assert(shift == 0) for the indirect case.

Signed-off-by: Samuel Iglesias Gonsálvez
Cc: "17.1"
Reviewed-by: Francisco Jerez
---
 src/intel/compiler/brw_vec4_nir.cpp | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/src/intel/compiler/brw_vec4_nir.cpp b/src/intel/compiler/brw_vec4_nir.cpp
index 66324e3bcd7..0e8807c5b42 100644
--- a/src/intel/compiler/brw_vec4_nir.cpp
+++ b/src/intel/compiler/brw_vec4_nir.cpp
@@ -769,7 +769,8 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
        * The swizzle also works in the indirect case as the generator adds
        * the swizzle to the offset for us.
        */
-      unsigned shift = (nir_intrinsic_base(instr) % 16) / 4;
+      const int type_size = type_sz(src.type);
+      unsigned shift = (nir_intrinsic_base(instr) % 16) / type_size;
       assert(shift + instr->num_components <= 4);
 
       nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
@@ -777,14 +778,20 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
          /* Offsets are in bytes but they should always be multiples of 4 */
          assert(const_offset->u32[0] % 4 == 0);
 
-         unsigned offset = const_offset->u32[0] + shift * 4;
+         src.swizzle = brw_swizzle_for_size(instr->num_components);
+         dest.writemask = brw_writemask_for_size(instr->num_components);
+         unsigned offset = const_offset->u32[0] + shift * type_size;
          src.offset = ROUND_DOWN_TO(offset, 16);
-         shift = (offset % 16) / 4;
+         shift = (offset % 16) / type_size;
+         assert(shift + instr->num_components <= 4);
          src.swizzle += BRW_SWIZZLE4(shift, shift, shift, shift);
 
          emit(MOV(dest, src));
       } else {
-         src.swizzle += BRW_SWIZZLE4(shift, shift, shift, shift);
+         /* Uniform arrays are vec4 aligned, because of std140 alignment
+          * rules.
+          */
+         assert(shift == 0);
          src_reg indirect = get_nir_src(instr->src[0],
                                         BRW_REGISTER_TYPE_UD,
                                         1);
-- 
2.30.2
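
For reference, below is a minimal standalone sketch of the component-shift
arithmetic this patch corrects. It assumes the patch's 16-byte vec4 slots
with 4-byte (F) and 8-byte (DF) components; compute_shift is a hypothetical
stand-in for the in-tree expression, not a Mesa function.

   #include <cassert>

   /* Starting component of a uniform within its 16-byte vec4 slot.
    * type_size is the size in bytes of one component: 4 for F, 8 for DF.
    */
   static unsigned
   compute_shift(unsigned base_byte_offset, unsigned type_size)
   {
      return (base_byte_offset % 16) / type_size;
   }

   int
   main()
   {
      /* A float at byte offset 8 starts at component 2 (Z)... */
      assert(compute_shift(8, 4) == 2);

      /* ...but a double at byte offset 8 starts at component 1.
       * Dividing by 4 unconditionally, as the old code did, would
       * have yielded 2 and produced a wrong swizzle for DF uniforms.
       */
      assert(compute_shift(8, 8) == 1);
      return 0;
   }

With shift expressed in components of the actual type, the existing
BRW_SWIZZLE4(shift, shift, shift, shift) adjustment selects the correct
starting channel for both F and DF uniforms.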