case nir_intrinsic_load_input:
case nir_intrinsic_load_per_vertex_input: {
src_reg indirect_offset = get_indirect_offset(instr);
+ dst_reg dst = get_nir_dest(instr->dest, BRW_REGISTER_TYPE_D);
unsigned imm_offset = instr->const_index[0];
+ unsigned first_component = nir_intrinsic_component(instr);
src_reg header = input_read_header;
if (indirect_offset.file != BAD_FILE) {
*/
const unsigned max_push_slots = 24;
if (imm_offset < max_push_slots) {
- emit(MOV(get_nir_dest(instr->dest, BRW_REGISTER_TYPE_D),
- src_reg(ATTR, imm_offset, glsl_type::ivec4_type)));
+ src_reg src = src_reg(ATTR, imm_offset, glsl_type::ivec4_type);
+ src.swizzle = BRW_SWZ_COMP_INPUT(first_component);
+
+ emit(MOV(dst, src));
prog_data->urb_read_length =
MAX2(prog_data->urb_read_length,
DIV_ROUND_UP(imm_offset + 1, 2));
read->offset = imm_offset;
read->urb_write_flags = BRW_URB_WRITE_PER_SLOT_OFFSET;
+ src_reg src = src_reg(temp);
+ src.swizzle = BRW_SWZ_COMP_INPUT(first_component);
+
/* Copy to target. We might end up with some funky writemasks landing
* in here, but we really don't want them in the above pseudo-ops.
*/
- dst_reg dst = get_nir_dest(instr->dest, BRW_REGISTER_TYPE_D);
dst.writemask = brw_writemask_for_size(instr->num_components);
- emit(MOV(dst, src_reg(temp)));
+ emit(MOV(dst, src));
break;
}
default: