The only thing still using old-school derefs is function calls.
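
Everything else in the pass now builds derefs with nir_builder and passes
them around as SSA values rather than chaining nir_deref structs off the
consuming instruction; vtn_pointer_to_deref_var and nir_build_deref_for_chain
are kept to bridge the gap for call parameters and the return deref.
Roughly, the shape of the conversion is (illustrative sketch only; `intrin`
and `var` stand in for an intrinsic and the variable it accesses):

   /* before: a deref chain owned by the instruction */
   intrin->variables[0] = nir_deref_var_create(intrin, var);

   /* after: the deref is an instruction that produces an SSA value */
   nir_deref_instr *deref = nir_build_deref_var(&b->nb, var);
   intrin->src[0] = nir_src_for_ssa(&deref->dest.ssa);
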
Acked-by: Rob Clark <robdclark@gmail.com>
Acked-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Acked-by: Dave Airlie <airlied@redhat.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
+#include "nir/nir_deref.h"
#include "spirv_info.h"
#include <stdio.h>
struct vtn_value *arg = vtn_untyped_value(b, arg_id);
if (arg->value_type == vtn_value_type_pointer &&
arg->pointer->ptr_type->type == NULL) {
- nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer);
+ nir_deref_var *d = vtn_pointer_to_deref_var(b, arg->pointer);
call->params[i] = nir_deref_var_clone(d, call);
} else {
struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);
nir_variable *tmp =
   nir_local_variable_create(b->nb.impl, arg_ssa->type, "arg_tmp");
call->params[i] = nir_deref_var_create(call, tmp);
- vtn_local_store(b, arg_ssa, call->params[i]);
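+         /* vtn_local_store now takes a deref instruction, so build one on
+          * the fly from the old-style param deref chain.
+          */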
+ vtn_local_store(b, arg_ssa,
+ nir_build_deref_for_chain(&b->nb, call->params[i]));
}
}
if (glsl_type_is_void(callee->return_type)) {
vtn_push_value(b, w[2], vtn_value_type_undef);
} else {
- vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, call->return_deref));
+ nir_deref_instr *return_deref =
+ nir_build_deref_for_chain(&b->nb, call->return_deref);
+ vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, return_deref));
}
}
vtn_fail("Unhandled opcode");
}
- nir_tex_src srcs[8]; /* 8 should be enough */
+ nir_tex_src srcs[10]; /* 10 should be enough */
nir_tex_src *p = srcs;
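+   /* The texture (and, for ops that need one, the sampler) are now passed
+    * to the tex instruction as deref sources rather than attached to it as
+    * nir_deref_var pointers.
+    */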
+ nir_deref_instr *sampler = vtn_pointer_to_deref(b, sampled.sampler);
+ nir_deref_instr *texture =
+ sampled.image ? vtn_pointer_to_deref(b, sampled.image) : sampler;
+
+ p->src = nir_src_for_ssa(&texture->dest.ssa);
+ p->src_type = nir_tex_src_texture_deref;
+ p++;
+
+ switch (texop) {
+ case nir_texop_tex:
+ case nir_texop_txb:
+ case nir_texop_txl:
+ case nir_texop_txd:
+ case nir_texop_tg4:
+ /* These operations require a sampler */
+ p->src = nir_src_for_ssa(&sampler->dest.ssa);
+ p->src_type = nir_tex_src_sampler_deref;
+ p++;
+ break;
+ case nir_texop_txf:
+ case nir_texop_txf_ms:
+ case nir_texop_txs:
+ case nir_texop_lod:
+ case nir_texop_query_levels:
+ case nir_texop_texture_samples:
+ case nir_texop_samples_identical:
+ /* These don't */
+ break;
+ case nir_texop_txf_ms_mcs:
+ vtn_fail("unexpected nir_texop_txf_ms_mcs");
+ }
+
unsigned idx = 4;
struct nir_ssa_def *coord;
vtn_fail("Invalid base type for sampler result");
}
- nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler);
- nir_deref_var *texture;
- if (sampled.image) {
- nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image);
- texture = image;
- } else {
- texture = sampler;
- }
-
- instr->texture = nir_deref_var_clone(texture, instr);
-
- switch (instr->op) {
- case nir_texop_tex:
- case nir_texop_txb:
- case nir_texop_txl:
- case nir_texop_txd:
- case nir_texop_tg4:
- /* These operations require a sampler */
- instr->sampler = nir_deref_var_clone(sampler, instr);
- break;
- case nir_texop_txf:
- case nir_texop_txf_ms:
- case nir_texop_txs:
- case nir_texop_lod:
- case nir_texop_query_levels:
- case nir_texop_texture_samples:
- case nir_texop_samples_identical:
- /* These don't */
- instr->sampler = NULL;
- break;
- case nir_texop_txf_ms_mcs:
- vtn_fail("unexpected nir_texop_txf_ms_mcs");
- }
-
nir_ssa_dest_init(&instr->instr, &instr->dest,
nir_tex_instr_dest_size(instr), 32, NULL);
instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
instrs[i]->component = instr->component;
instrs[i]->dest_type = instr->dest_type;
- instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
- instrs[i]->sampler = NULL;
memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));
nir_intrinsic_op op;
switch (opcode) {
-#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_var_##N; break;
+#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
OP(ImageQuerySize, size)
OP(ImageRead, load)
OP(ImageWrite, store)
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
- nir_deref_var *image_deref = vtn_pointer_to_deref(b, image.image);
- intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);
+ nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image.image);
+ intrin->src[0] = nir_src_for_ssa(&image_deref->dest.ssa);
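+   /* With the image deref in src[0], the coordinate, sample index, and data
+    * operands below all move up by one source slot.
+    */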
/* ImageQuerySize doesn't take any extra parameters */
if (opcode != SpvOpImageQuerySize) {
unsigned swiz[4];
for (unsigned i = 0; i < 4; i++)
swiz[i] = i < image.coord->num_components ? i : 0;
- intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
+ intrin->src[1] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
swiz, 4, false));
- intrin->src[1] = nir_src_for_ssa(image.sample);
+ intrin->src[2] = nir_src_for_ssa(image.sample);
}
switch (opcode) {
case SpvOpImageRead:
break;
case SpvOpAtomicStore:
- intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
+ intrin->src[3] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
break;
case SpvOpImageWrite:
- intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
+ intrin->src[3] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
break;
case SpvOpAtomicCompareExchange:
case SpvOpAtomicAnd:
case SpvOpAtomicOr:
case SpvOpAtomicXor:
- fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
+ fill_common_atomic_sources(b, opcode, w, &intrin->src[3]);
break;
default:
struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
unsigned dest_components = nir_intrinsic_dest_components(intrin);
- if (intrin->intrinsic == nir_intrinsic_image_var_size) {
+ if (intrin->intrinsic == nir_intrinsic_image_deref_size) {
dest_components = intrin->num_components =
glsl_get_vector_elements(type->type);
}
}
static nir_intrinsic_op
-get_var_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
+get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
switch (opcode) {
- case SpvOpAtomicLoad: return nir_intrinsic_load_var;
- case SpvOpAtomicStore: return nir_intrinsic_store_var;
-#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
+ case SpvOpAtomicLoad: return nir_intrinsic_load_deref;
+ case SpvOpAtomicStore: return nir_intrinsic_store_deref;
+#define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
OP(AtomicExchange, atomic_exchange)
OP(AtomicCompareExchange, atomic_comp_swap)
OP(AtomicIIncrement, atomic_add)
if (ptr->mode == vtn_variable_mode_workgroup &&
!b->options->lower_workgroup_access_to_offsets) {
- nir_deref_var *deref = vtn_pointer_to_deref(b, ptr);
- const struct glsl_type *deref_type = nir_deref_tail(&deref->deref)->type;
- nir_intrinsic_op op = get_var_nir_atomic_op(b, opcode);
+ nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
+ const struct glsl_type *deref_type = deref->type;
+ nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
- atomic->variables[0] = nir_deref_var_clone(deref, atomic);
+ atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
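+   /* src[0] now holds the deref, so the store value and atomic data sources
+    * shift to src[1] and up.
+    */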
switch (opcode) {
case SpvOpAtomicLoad:
case SpvOpAtomicStore:
atomic->num_components = glsl_get_vector_elements(deref_type);
nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
- atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
+ atomic->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
break;
case SpvOpAtomicExchange:
case SpvOpAtomicAnd:
case SpvOpAtomicOr:
case SpvOpAtomicXor:
- fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
+ fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
break;
default:
}
} while (progress);
+ /* We sometimes generate bogus derefs that, while never used, give the
+    * validator a bit of heartburn.  Run dead-code elimination to get rid
+    * of them.
+    */
+ nir_opt_dce(b->shader);
+
vtn_assert(b->entry_point->value_type == vtn_value_type_function);
nir_function *entry_point = b->entry_point->func->impl->function;
vtn_assert(entry_point);
} else {
/* We're a regular SSA value. */
struct vtn_ssa_value *param_ssa =
- vtn_local_load(b, nir_deref_var_create(b, param));
+ vtn_local_load(b, nir_build_deref_var(&b->nb, param));
struct vtn_value *val = vtn_push_ssa(b, w[2], type, param_ssa);
/* Name the parameter so it shows up nicely in NIR */
_mesa_hash_table_insert(b->phi_table, w, phi_var);
vtn_push_ssa(b, w[2], type,
- vtn_local_load(b, nir_deref_var_create(b, phi_var)));
+ vtn_local_load(b, nir_build_deref_var(&b->nb, phi_var)));
return true;
}
struct vtn_ssa_value *src = vtn_ssa_value(b, w[i]);
- vtn_local_store(b, src, nir_deref_var_create(b, phi_var));
+ vtn_local_store(b, src, nir_build_deref_var(&b->nb, phi_var));
}
return true;
if ((*block->branch & SpvOpCodeMask) == SpvOpReturnValue) {
struct vtn_ssa_value *src = vtn_ssa_value(b, block->branch[1]);
vtn_local_store(b, src,
- nir_deref_var_create(b, b->nb.impl->return_var));
+ nir_build_deref_var(&b->nb, b->nb.impl->return_var));
}
if (block->branch_type != vtn_branch_type_none) {
nir_ssa_def *sign = nir_fsign(nb, src[0]);
nir_ssa_def *abs = nir_fabs(nb, src[0]);
val->ssa->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
- nir_store_deref_var(nb, vtn_nir_deref(b, w[6]),
- nir_fmul(nb, sign, nir_ffloor(nb, abs)), 0xf);
+ nir_store_deref(nb, vtn_nir_deref(b, w[6]),
+ nir_fmul(nb, sign, nir_ffloor(nb, abs)), 0xf);
return;
}
val->ssa->def = build_frexp64(nb, src[0], &exponent);
else
val->ssa->def = build_frexp32(nb, src[0], &exponent);
- nir_store_deref_var(nb, vtn_nir_deref(b, w[6]), exponent, 0xf);
+ nir_store_deref(nb, vtn_nir_deref(b, w[6]), exponent, 0xf);
return;
}
nir_intrinsic_op op;
switch (opcode) {
case GLSLstd450InterpolateAtCentroid:
- op = nir_intrinsic_interp_var_at_centroid;
+ op = nir_intrinsic_interp_deref_at_centroid;
break;
case GLSLstd450InterpolateAtSample:
- op = nir_intrinsic_interp_var_at_sample;
+ op = nir_intrinsic_interp_deref_at_sample;
break;
case GLSLstd450InterpolateAtOffset:
- op = nir_intrinsic_interp_var_at_offset;
+ op = nir_intrinsic_interp_deref_at_offset;
break;
default:
vtn_fail("Invalid opcode");
nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->nb.shader, op);
- nir_deref_var *deref = vtn_nir_deref(b, w[5]);
- intrin->variables[0] = nir_deref_var_clone(deref, intrin);
+ struct vtn_pointer *ptr =
+ vtn_value(b, w[5], vtn_value_type_pointer)->pointer;
+ intrin->src[0] = nir_src_for_ssa(&vtn_pointer_to_deref(b, ptr)->dest.ssa);
switch (opcode) {
case GLSLstd450InterpolateAtCentroid:
break;
case GLSLstd450InterpolateAtSample:
case GLSLstd450InterpolateAtOffset:
- intrin->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
+ intrin->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
break;
default:
vtn_fail("Invalid opcode");
nir_ssa_def *vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
nir_ssa_def *insert, nir_ssa_def *index);
-nir_deref_var *vtn_nir_deref(struct vtn_builder *b, uint32_t id);
+nir_deref_instr *vtn_nir_deref(struct vtn_builder *b, uint32_t id);
struct vtn_pointer *vtn_pointer_for_variable(struct vtn_builder *b,
struct vtn_variable *var,
struct vtn_type *ptr_type);
-nir_deref_var *vtn_pointer_to_deref(struct vtn_builder *b,
- struct vtn_pointer *ptr);
+nir_deref_var *vtn_pointer_to_deref_var(struct vtn_builder *b,
+ struct vtn_pointer *ptr);
+nir_deref_instr *vtn_pointer_to_deref(struct vtn_builder *b,
+ struct vtn_pointer *ptr);
nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
nir_ssa_def **index_out, unsigned *end_idx_out);
-struct vtn_ssa_value *vtn_local_load(struct vtn_builder *b, nir_deref_var *src);
+struct vtn_ssa_value *
+vtn_local_load(struct vtn_builder *b, nir_deref_instr *src);
void vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
- nir_deref_var *dest);
+ nir_deref_instr *dest);
struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src);
#include "vtn_private.h"
#include "spirv_info.h"
+#include "nir_deref.h"
static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
* lengths stay the same but the terminal type is the one given by
* tail_type. This is useful for split structures.
*/
-static void
-rewrite_deref_types(struct vtn_builder *b, nir_deref *deref,
- const struct glsl_type *type)
+static const struct glsl_type *
+rewrite_deref_var(struct vtn_builder *b, nir_deref_instr *deref,
+ struct nir_variable *var)
{
- deref->type = type;
- if (deref->child) {
- vtn_assert(deref->child->deref_type == nir_deref_type_array);
- vtn_assert(glsl_type_is_array(deref->type));
- rewrite_deref_types(b, deref->child, glsl_get_array_element(type));
+ /* Always set the mode */
+ deref->mode = var->data.mode;
+
+ if (deref->deref_type == nir_deref_type_var) {
+ assert(deref->var == NULL);
+ deref->var = var;
+ deref->type = var->type;
+ } else {
+ assert(deref->deref_type == nir_deref_type_array);
+ assert(deref->parent.is_ssa);
+ nir_deref_instr *parent =
+ nir_instr_as_deref(deref->parent.ssa->parent_instr);
+ deref->type = rewrite_deref_var(b, parent, var);
+ assert(deref->type);
}
+
+   /* Return the child type of this deref */
+ if (glsl_type_is_array(deref->type))
+ return glsl_get_array_element(deref->type);
+ else
+ return NULL;
}
struct vtn_pointer *
return pointer;
}
-nir_deref_var *
+nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
/* Do on-the-fly copy propagation for samplers. */
if (ptr->var->copy_prop_sampler)
return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
- nir_deref_var *deref_var;
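+   /* Build the base variable deref by hand rather than with
+    * nir_build_deref_var: for split structures the nir_variable isn't known
+    * yet and gets filled in by rewrite_deref_var below.
+    */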
+ nir_deref_instr *deref_var =
+ nir_deref_instr_create(b->nb.shader, nir_deref_type_var);
+ nir_ssa_dest_init(&deref_var->instr, &deref_var->dest, 1, 32, NULL);
+ nir_builder_instr_insert(&b->nb, &deref_var->instr);
+
if (ptr->var->var) {
- deref_var = nir_deref_var_create(b, ptr->var->var);
+ deref_var->mode = ptr->var->var->data.mode;
+ deref_var->type = ptr->var->var->type;
+ deref_var->var = ptr->var->var;
/* Raw variable access */
if (!ptr->chain)
return deref_var;
} else {
vtn_assert(ptr->var->members);
- /* Create the deref_var manually. It will get filled out later. */
- deref_var = rzalloc(b, nir_deref_var);
- deref_var->deref.deref_type = nir_deref_type_var;
+ /* We'll fill out the rest of the deref_var later */
+ deref_var->type = ptr->var->type->type;
}
struct vtn_access_chain *chain = ptr->chain;
vtn_assert(chain);
struct vtn_type *deref_type = ptr->var->type;
- nir_deref *tail = &deref_var->deref;
+ nir_deref_instr *tail = deref_var;
nir_variable **members = ptr->var->members;
for (unsigned i = 0; i < chain->length; i++) {
case GLSL_TYPE_ARRAY: {
deref_type = deref_type->array_element;
- nir_deref_array *deref_arr = nir_deref_array_create(b);
- deref_arr->deref.type = deref_type->type;
-
+ nir_ssa_def *index;
if (chain->link[i].mode == vtn_access_mode_literal) {
- deref_arr->deref_array_type = nir_deref_array_type_direct;
- deref_arr->base_offset = chain->link[i].id;
+ index = nir_imm_int(&b->nb, chain->link[i].id);
} else {
vtn_assert(chain->link[i].mode == vtn_access_mode_id);
- deref_arr->deref_array_type = nir_deref_array_type_indirect;
- deref_arr->base_offset = 0;
- deref_arr->indirect =
- nir_src_for_ssa(vtn_ssa_value(b, chain->link[i].id)->def);
+ index = vtn_ssa_value(b, chain->link[i].id)->def;
}
- tail->child = &deref_arr->deref;
- tail = tail->child;
+ tail = nir_build_deref_array(&b->nb, tail, index);
break;
}
unsigned idx = chain->link[i].id;
deref_type = deref_type->members[idx];
if (members) {
- /* This is a pre-split structure. */
- deref_var->var = members[idx];
- rewrite_deref_types(b, &deref_var->deref, members[idx]->type);
- vtn_assert(tail->type == deref_type->type);
+ rewrite_deref_var(b, tail, members[idx]);
+ assert(tail->type == deref_type->type);
members = NULL;
} else {
- nir_deref_struct *deref_struct = nir_deref_struct_create(b, idx);
- deref_struct->deref.type = deref_type->type;
- tail->child = &deref_struct->deref;
- tail = tail->child;
+ tail = nir_build_deref_struct(&b->nb, tail, idx);
}
break;
}
}
vtn_assert(members == NULL);
- return deref_var;
+ return tail;
}
-static void
-_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_var *deref,
- nir_deref *tail, struct vtn_ssa_value *inout)
+nir_deref_var *
+vtn_pointer_to_deref_var(struct vtn_builder *b, struct vtn_pointer *ptr)
{
- /* The deref tail may contain a deref to select a component of a vector (in
- * other words, it might not be an actual tail) so we have to save it away
- * here since we overwrite it later.
- */
- nir_deref *old_child = tail->child;
-
- if (glsl_type_is_vector_or_scalar(tail->type)) {
- /* Terminate the deref chain in case there is one more link to pick
- * off a component of the vector.
- */
- tail->child = NULL;
-
- nir_intrinsic_op op = load ? nir_intrinsic_load_var :
- nir_intrinsic_store_var;
-
- nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
- intrin->variables[0] = nir_deref_var_clone(deref, intrin);
- intrin->num_components = glsl_get_vector_elements(tail->type);
+ return nir_deref_instr_to_deref(vtn_pointer_to_deref(b, ptr), b);
+}
+static void
+_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
+ struct vtn_ssa_value *inout)
+{
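+   /* Scalars and vectors load/store directly through the deref; arrays,
+    * matrices, and structs recurse one element at a time.
+    */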
+ if (glsl_type_is_vector_or_scalar(deref->type)) {
if (load) {
- nir_ssa_dest_init(&intrin->instr, &intrin->dest,
- intrin->num_components,
- glsl_get_bit_size(tail->type),
- NULL);
- inout->def = &intrin->dest.ssa;
+ inout->def = nir_load_deref(&b->nb, deref);
} else {
- nir_intrinsic_set_write_mask(intrin, (1 << intrin->num_components) - 1);
- intrin->src[0] = nir_src_for_ssa(inout->def);
+ nir_store_deref(&b->nb, deref, inout->def, ~0);
}
-
- nir_builder_instr_insert(&b->nb, &intrin->instr);
- } else if (glsl_get_base_type(tail->type) == GLSL_TYPE_ARRAY ||
- glsl_type_is_matrix(tail->type)) {
- unsigned elems = glsl_get_length(tail->type);
- nir_deref_array *deref_arr = nir_deref_array_create(b);
- deref_arr->deref_array_type = nir_deref_array_type_direct;
- deref_arr->deref.type = glsl_get_array_element(tail->type);
- tail->child = &deref_arr->deref;
+ } else if (glsl_type_is_array(deref->type) ||
+ glsl_type_is_matrix(deref->type)) {
+ unsigned elems = glsl_get_length(deref->type);
for (unsigned i = 0; i < elems; i++) {
- deref_arr->base_offset = i;
- _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
+ nir_deref_instr *child =
+ nir_build_deref_array(&b->nb, deref, nir_imm_int(&b->nb, i));
+ _vtn_local_load_store(b, load, child, inout->elems[i]);
}
} else {
- vtn_assert(glsl_get_base_type(tail->type) == GLSL_TYPE_STRUCT);
- unsigned elems = glsl_get_length(tail->type);
- nir_deref_struct *deref_struct = nir_deref_struct_create(b, 0);
- tail->child = &deref_struct->deref;
+ vtn_assert(glsl_type_is_struct(deref->type));
+ unsigned elems = glsl_get_length(deref->type);
for (unsigned i = 0; i < elems; i++) {
- deref_struct->index = i;
- deref_struct->deref.type = glsl_get_struct_field(tail->type, i);
- _vtn_local_load_store(b, load, deref, tail->child, inout->elems[i]);
+ nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
+ _vtn_local_load_store(b, load, child, inout->elems[i]);
}
}
-
- tail->child = old_child;
}
-nir_deref_var *
+nir_deref_instr *
vtn_nir_deref(struct vtn_builder *b, uint32_t id)
{
struct vtn_pointer *ptr = vtn_value(b, id, vtn_value_type_pointer)->pointer;
* selecting which component due to OpAccessChain supporting per-component
* indexing in SPIR-V.
*/
-static nir_deref *
-get_deref_tail(nir_deref_var *deref)
+static nir_deref_instr *
+get_deref_tail(nir_deref_instr *deref)
{
- nir_deref *cur = &deref->deref;
- while (!glsl_type_is_vector_or_scalar(cur->type) && cur->child)
- cur = cur->child;
+ if (deref->deref_type != nir_deref_type_array)
+ return deref;
+
+ nir_deref_instr *parent =
+ nir_instr_as_deref(deref->parent.ssa->parent_instr);
- return cur;
+ if (glsl_type_is_vector(parent->type))
+ return parent;
+ else
+ return deref;
}
struct vtn_ssa_value *
-vtn_local_load(struct vtn_builder *b, nir_deref_var *src)
+vtn_local_load(struct vtn_builder *b, nir_deref_instr *src)
{
- nir_deref *src_tail = get_deref_tail(src);
+ nir_deref_instr *src_tail = get_deref_tail(src);
struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
- _vtn_local_load_store(b, true, src, src_tail, val);
-
- if (src_tail->child) {
- nir_deref_array *vec_deref = nir_deref_as_array(src_tail->child);
- vtn_assert(vec_deref->deref.child == NULL);
- val->type = vec_deref->deref.type;
- if (vec_deref->deref_array_type == nir_deref_array_type_direct)
- val->def = vtn_vector_extract(b, val->def, vec_deref->base_offset);
+ _vtn_local_load_store(b, true, src_tail, val);
+
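+   /* If the tail stopped at a vector, the final array deref selects a
+    * single component; extract it from the loaded vector here.
+    */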
+ if (src_tail != src) {
+ val->type = src->type;
+ nir_const_value *const_index = nir_src_as_const_value(src->arr.index);
+ if (const_index)
+ val->def = vtn_vector_extract(b, val->def, const_index->u32[0]);
else
- val->def = vtn_vector_extract_dynamic(b, val->def,
- vec_deref->indirect.ssa);
+ val->def = vtn_vector_extract_dynamic(b, val->def, src->arr.index.ssa);
}
return val;
void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
- nir_deref_var *dest)
+ nir_deref_instr *dest)
{
- nir_deref *dest_tail = get_deref_tail(dest);
+ nir_deref_instr *dest_tail = get_deref_tail(dest);
- if (dest_tail->child) {
+ if (dest_tail != dest) {
struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
- _vtn_local_load_store(b, true, dest, dest_tail, val);
- nir_deref_array *deref = nir_deref_as_array(dest_tail->child);
- vtn_assert(deref->deref.child == NULL);
- if (deref->deref_array_type == nir_deref_array_type_direct)
+ _vtn_local_load_store(b, true, dest_tail, val);
+
+ nir_const_value *const_index = nir_src_as_const_value(dest->arr.index);
+ if (const_index)
val->def = vtn_vector_insert(b, val->def, src->def,
- deref->base_offset);
+ const_index->u32[0]);
else
val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
- deref->indirect.ssa);
- _vtn_local_load_store(b, false, dest, dest_tail, val);
+ dest->arr.index.ssa);
+ _vtn_local_load_store(b, false, dest_tail, val);
} else {
- _vtn_local_load_store(b, false, dest, dest_tail, src);
+ _vtn_local_load_store(b, false, dest_tail, src);
}
}