/* Handler for the SPV_AMD_gcn_shader extended-instruction set.
 * NOTE(review): this excerpt is diff-formatted ('-' = removed, '+' = added),
 * not compilable C, and some lines are elided — confirm against the full
 * patch. The refactor replaces the up-front vtn_push_value/
 * vtn_create_ssa_value prologue with a single vtn_push_nir_ssa at the end. */
vtn_handle_amd_gcn_shader_instruction(struct vtn_builder *b, SpvOp ext_opcode,
const uint32_t *w, unsigned count)
{
/* Old style: destination value created before the result was computed. */
- const struct glsl_type *dest_type =
- vtn_value(b, w[1], vtn_value_type_type)->type->type;
- struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
- val->ssa = vtn_create_ssa_value(b, dest_type);
-
/* New style: build the result def first, push it once after the switch. */
+ nir_ssa_def *def;
switch ((enum GcnShaderAMD)ext_opcode) {
case CubeFaceIndexAMD:
- val->ssa->def = nir_cube_face_index(&b->nb, vtn_ssa_value(b, w[5])->def);
- break;
+ def = nir_cube_face_index(&b->nb, vtn_get_nir_ssa(b, w[5]));
+ break;
case CubeFaceCoordAMD:
- val->ssa->def = nir_cube_face_coord(&b->nb, vtn_ssa_value(b, w[5])->def);
+ def = nir_cube_face_coord(&b->nb, vtn_get_nir_ssa(b, w[5]));
break;
case TimeAMD: {
/* NOTE(review): the opcode argument to nir_intrinsic_instr_create is
 * elided in this excerpt (presumably a shader-clock intrinsic, given the
 * SUBGROUP memory scope set below — confirm against the full file). */
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->nb.shader,
nir_ssa_dest_init(&intrin->instr, &intrin->dest, 2, 32, NULL);
nir_intrinsic_set_memory_scope(intrin, NIR_SCOPE_SUBGROUP);
nir_builder_instr_insert(&b->nb, &intrin->instr);
/* Pack the 2x32-bit intrinsic result into one 64-bit value. */
- val->ssa->def = nir_pack_64_2x32(&b->nb, &intrin->dest.ssa);
+ def = nir_pack_64_2x32(&b->nb, &intrin->dest.ssa);
break;
}
default:
unreachable("Invalid opcode");
}
+
+ vtn_push_nir_ssa(b, w[2], def);
+
return true;
}
/* Handler for the SPV_AMD_shader_ballot extended-instruction set.
 * NOTE(review): diff-formatted excerpt; the switch body that maps each
 * ShaderBallotAMD opcode to (op, num_args), and the quad-swizzle constant
 * handling, are elided here — confirm against the full patch. */
vtn_handle_amd_shader_ballot_instruction(struct vtn_builder *b, SpvOp ext_opcode,
const uint32_t *w, unsigned count)
{
/* Removed: destination value no longer pre-created before the intrinsic. */
- const struct glsl_type *dest_type =
- vtn_value(b, w[1], vtn_value_type_type)->type->type;
- struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
- val->ssa = vtn_create_ssa_value(b, dest_type);
-
unsigned num_args;
nir_intrinsic_op op;
switch ((enum ShaderBallotAMD)ext_opcode) {
unreachable("Invalid opcode");
}
/* Destination type now fetched via the vtn_get_type helper instead of the
 * raw vtn_value(...)->type->type chain. */
+ const struct glsl_type *dest_type = vtn_get_type(b, w[1])->type;
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->nb.shader, op);
nir_ssa_dest_init_for_type(&intrin->instr, &intrin->dest, dest_type, NULL);
/* Only set num_components when the intrinsic declares variable-width
 * sources (src_components[0] == 0); presumably fixed-width intrinsics
 * must not have it set — TODO confirm against nir_intrinsic_infos. */
- intrin->num_components = intrin->dest.ssa.num_components;
+ if (nir_intrinsic_infos[op].src_components[0] == 0)
+ intrin->num_components = intrin->dest.ssa.num_components;
/* SPIR-V operands start at w[5] for extended-instruction arguments. */
for (unsigned i = 0; i < num_args; i++)
- intrin->src[i] = nir_src_for_ssa(vtn_ssa_value(b, w[i + 5])->def);
+ intrin->src[i] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[i + 5]));
/* quad_swizzle_amd requires w[6] to be a SPIR-V constant; the code that
 * consumes it is elided in this excerpt. */
if (intrin->intrinsic == nir_intrinsic_quad_swizzle_amd) {
struct vtn_value *val = vtn_value(b, w[6], vtn_value_type_constant);
}
nir_builder_instr_insert(&b->nb, &intrin->instr);
- val->ssa->def = &intrin->dest.ssa;
+ vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa);
return true;
}
/* NOTE(review): the line naming this function is elided from the excerpt;
 * from the ShaderTrinaryMinMaxAMD switch below it is presumably the
 * SPV_AMD_shader_trinary_minmax handler — confirm against the full file.
 * The refactor expands each min3/max3/med3 into two-operand min/max ops. */
const uint32_t *w, unsigned count)
{
struct nir_builder *nb = &b->nb;
/* Removed: old pre-created destination value. */
- const struct glsl_type *dest_type =
- vtn_value(b, w[1], vtn_value_type_type)->type->type;
- struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
- val->ssa = vtn_create_ssa_value(b, dest_type);
/* Extended instructions carry their operands starting at w[5]. */
unsigned num_inputs = count - 5;
assert(num_inputs == 3);
nir_ssa_def *src[3] = { NULL, };
for (unsigned i = 0; i < num_inputs; i++)
- src[i] = vtn_ssa_value(b, w[i + 5])->def;
-
+ src[i] = vtn_get_nir_ssa(b, w[i + 5]);
+
+ /* place constants at src[1-2] for easier constant-folding */
+ for (unsigned i = 1; i <= 2; i++) {
+ if (nir_src_as_const_value(nir_src_for_ssa(src[0]))) {
+ nir_ssa_def* tmp = src[i];
+ src[i] = src[0];
+ src[0] = tmp;
+ }
+ }
+ nir_ssa_def *def;
/* The operand swap above is only safe because min3/max3/med3 are fully
 * commutative in all three operands. */
switch ((enum ShaderTrinaryMinMaxAMD)ext_opcode) {
case FMin3AMD:
- val->ssa->def = nir_fmin3(nb, src[0], src[1], src[2]);
+ def = nir_fmin(nb, src[0], nir_fmin(nb, src[1], src[2]));
break;
case UMin3AMD:
- val->ssa->def = nir_umin3(nb, src[0], src[1], src[2]);
+ def = nir_umin(nb, src[0], nir_umin(nb, src[1], src[2]));
break;
case SMin3AMD:
- val->ssa->def = nir_imin3(nb, src[0], src[1], src[2]);
+ def = nir_imin(nb, src[0], nir_imin(nb, src[1], src[2]));
break;
case FMax3AMD:
- val->ssa->def = nir_fmax3(nb, src[0], src[1], src[2]);
+ def = nir_fmax(nb, src[0], nir_fmax(nb, src[1], src[2]));
break;
case UMax3AMD:
- val->ssa->def = nir_umax3(nb, src[0], src[1], src[2]);
+ def = nir_umax(nb, src[0], nir_umax(nb, src[1], src[2]));
break;
case SMax3AMD:
- val->ssa->def = nir_imax3(nb, src[0], src[1], src[2]);
+ def = nir_imax(nb, src[0], nir_imax(nb, src[1], src[2]));
break;
/* med3(a, b, c) == min(max(a, min(b, c)), max(b, c)) — the middle of
 * the three values, expanded per signedness/float variant below. */
case FMid3AMD:
- val->ssa->def = nir_fmed3(nb, src[0], src[1], src[2]);
+ def = nir_fmin(nb, nir_fmax(nb, src[0], nir_fmin(nb, src[1], src[2])),
+ nir_fmax(nb, src[1], src[2]));
break;
case UMid3AMD:
- val->ssa->def = nir_umed3(nb, src[0], src[1], src[2]);
+ def = nir_umin(nb, nir_umax(nb, src[0], nir_umin(nb, src[1], src[2])),
+ nir_umax(nb, src[1], src[2]));
break;
case SMid3AMD:
- val->ssa->def = nir_imed3(nb, src[0], src[1], src[2]);
+ def = nir_imin(nb, nir_imax(nb, src[0], nir_imin(nb, src[1], src[2])),
+ nir_imax(nb, src[1], src[2]));
break;
default:
unreachable("unknown opcode\n");
break;
}
+ vtn_push_nir_ssa(b, w[2], def);
+
return true;
}
/* Handler for the SPV_AMD_shader_explicit_vertex_parameter extended set
 * (InterpolateAtVertexAMD). NOTE(review): diff-formatted excerpt with
 * several elisions — the intrinsic op selection, the deref-chain walk that
 * produces 'deref'/'vec_array_deref'/'vec_deref', and the trailing
 * arguments of nir_ssa_dest_init are all missing here. */
vtn_handle_amd_shader_explicit_vertex_parameter_instruction(struct vtn_builder *b, SpvOp ext_opcode,
const uint32_t *w, unsigned count)
{
/* Removed: old pre-created destination value. */
- const struct glsl_type *dest_type =
- vtn_value(b, w[1], vtn_value_type_type)->type->type;
-
- struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
- val->ssa = vtn_create_ssa_value(b, dest_type);
-
nir_intrinsic_op op;
switch ((enum ShaderExplicitVertexParameterAMD)ext_opcode) {
case InterpolateAtVertexAMD:
deref = nir_deref_instr_parent(deref);
}
/* src[0]: the interpolant deref; src[1]: the vertex index operand w[6]. */
intrin->src[0] = nir_src_for_ssa(&deref->dest.ssa);
- intrin->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
+ intrin->src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[6]));
intrin->num_components = glsl_get_vector_elements(deref->type);
nir_ssa_dest_init(&intrin->instr, &intrin->dest,
nir_builder_instr_insert(&b->nb, &intrin->instr);
+ nir_ssa_def *def;
/* If the SPIR-V deref addressed one component of a vector, extract that
 * component from the whole-vector interpolation result. */
if (vec_array_deref) {
assert(vec_deref);
- val->ssa->def = nir_vector_extract(&b->nb, &intrin->dest.ssa,
- vec_deref->arr.index.ssa);
+ def = nir_vector_extract(&b->nb, &intrin->dest.ssa,
+ vec_deref->arr.index.ssa);
} else {
- val->ssa->def = &intrin->dest.ssa;
+ def = &intrin->dest.ssa;
}
+ vtn_push_nir_ssa(b, w[2], def);
return true;
}