nir_ssa_def *ssa = nir_imul(&b, intr->src[0].ssa, nir_imm_int(&b, 16));
nir_instr_rewrite_src(instr, &intr->src[0], nir_src_for_ssa(ssa));
} break;
+ case nir_intrinsic_load_vertex_id:
+ case nir_intrinsic_load_instance_id:
+ /* vertex_id/instance_id are delivered through an extra input register
+  * appended after the regular VS inputs; remember which register that is */
+ v->vs_id_in_reg = v->infile.num_reg;
+ break;
default:
break;
}
etna_coalesce_start(stream, &coalesce);
if (unlikely(dirty & (ETNA_DIRTY_SHADER))) {
/* Magic states (load balancing, inter-unit sync, buffers) */
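+ /* vertex/instance id configuration, filled in when the shaders are linked */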
+ /*007C4*/ EMIT_STATE(FE_HALTI5_ID_CONFIG, ctx->shader_state.FE_HALTI5_ID_CONFIG);
/*00870*/ EMIT_STATE(VS_HALTI5_OUTPUT_COUNT, vs_output_count | ((vs_output_count * 0x10) << 8));
/*008A0*/ EMIT_STATE(VS_HALTI5_UNK008A0, 0x0001000e | ((0x110/vs_output_count) << 20));
for (int x = 0; x < 4; ++x) {
uint32_t GL_VARYING_NUM_COMPONENTS;
uint32_t GL_VARYING_COMPONENT_USE[2];
uint32_t GL_HALTI5_SH_SPECIALS;
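+ /* vertex/instance id fetch configuration for the front end */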
+ uint32_t FE_HALTI5_ID_CONFIG;
unsigned vs_inst_mem_size;
unsigned ps_inst_mem_size;
uint32_t *VS_INST_MEM;
etna_bitarray_set(vs_input, 8, idx, cur_temp++);
}
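+ /* When the VS reads vertex_id/instance_id, enable one extra input slot that
+  * maps to vs_id_in_reg and point the FE at that register: the ids land at
+  * component offsets vs_id_in_reg * 4 and vs_id_in_reg * 4 + 1 (presumably
+  * the .x and .y components of that register). */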
+ if (vs->vs_id_in_reg >= 0) {
+ cs->VS_INPUT_COUNT = VIVS_VS_INPUT_COUNT_COUNT(num_vs_inputs + 1) |
+ VIVS_VS_INPUT_COUNT_UNK8(vs->input_count_unk8) |
+ VIVS_VS_INPUT_COUNT_ID_ENABLE;
+
+ etna_bitarray_set(vs_input, 8, num_vs_inputs, vs->vs_id_in_reg);
+
+ cs->FE_HALTI5_ID_CONFIG =
+ VIVS_FE_HALTI5_ID_CONFIG_VERTEX_ID_ENABLE |
+ VIVS_FE_HALTI5_ID_CONFIG_INSTANCE_ID_ENABLE |
+ VIVS_FE_HALTI5_ID_CONFIG_VERTEX_ID_REG(vs->vs_id_in_reg * 4) |
+ VIVS_FE_HALTI5_ID_CONFIG_INSTANCE_ID_REG(vs->vs_id_in_reg * 4 + 1);
+ }
+
for (int idx = 0; idx < ARRAY_SIZE(cs->VS_INPUT); ++idx)
cs->VS_INPUT[idx] = vs_input[idx];