X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fvc4%2Fvc4_nir_lower_io.c;h=2b4ad9c5ea6dddd08e0caa02a37a04981fe1009c;hb=89918c1e74e454af119e7ae23f3ed66fc26abc4b;hp=1afe52a63f4c805ac9053bfd74dfa90ced66d5b6;hpb=4cff16bc3a84569da05e672c8226931678aa62c0;p=mesa.git

diff --git a/src/gallium/drivers/vc4/vc4_nir_lower_io.c b/src/gallium/drivers/vc4/vc4_nir_lower_io.c
index 1afe52a63f4..2b4ad9c5ea6 100644
--- a/src/gallium/drivers/vc4/vc4_nir_lower_io.c
+++ b/src/gallium/drivers/vc4/vc4_nir_lower_io.c
@@ -22,7 +22,7 @@
  */
 
 #include "vc4_qir.h"
-#include "glsl/nir/nir_builder.h"
+#include "compiler/nir/nir_builder.h"
 #include "util/u_format.h"
 
 /**
@@ -98,7 +98,7 @@ vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                 &desc->channel[swiz];
         nir_ssa_def *temp;
 
-        if (swiz > UTIL_FORMAT_SWIZZLE_W) {
+        if (swiz > PIPE_SWIZZLE_W) {
                 return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
         } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_FLOAT) {
                 return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
@@ -179,6 +179,12 @@ vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
         /* All TGSI-to-NIR inputs are vec4. */
         assert(intr->num_components == 4);
 
+        /* We only accept direct outputs and TGSI only ever gives them to us
+         * with an offset value of 0.
+         */
+        assert(nir_src_as_const_value(intr->src[0]) &&
+               nir_src_as_const_value(intr->src[0])->u32[0] == 0);
+
         /* Generate dword loads for the VPM values (Since these intrinsics may
          * be reordered, the actual reads will be generated at the top of the
          * shader by ntq_setup_inputs().
@@ -190,7 +196,8 @@ vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
                         nir_intrinsic_load_input);
                 intr_comp->num_components = 1;
                 intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
-                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
+                intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
+                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
                 nir_builder_instr_insert(b, &intr_comp->instr);
 
                 vpm_reads[i] = &intr_comp->dest.ssa;
@@ -226,7 +233,9 @@ vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
 {
         b->cursor = nir_before_instr(&intr->instr);
 
-        if (intr->const_index[0] == VC4_NIR_TLB_COLOR_READ_INPUT) {
+        if (intr->const_index[0] >= VC4_NIR_TLB_COLOR_READ_INPUT &&
+            intr->const_index[0] < (VC4_NIR_TLB_COLOR_READ_INPUT +
+                                    VC4_MAX_SAMPLES)) {
                 /* This doesn't need any lowering. */
                 return;
         }
@@ -243,6 +252,12 @@ vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
         /* All TGSI-to-NIR inputs are vec4. */
         assert(intr->num_components == 4);
 
+        /* We only accept direct inputs and TGSI only ever gives them to us
+         * with an offset value of 0.
+         */
+        assert(nir_src_as_const_value(intr->src[0]) &&
+               nir_src_as_const_value(intr->src[0])->u32[0] == 0);
+
         /* Generate scalar loads equivalent to the original VEC4. */
         nir_ssa_def *dests[4];
         for (unsigned i = 0; i < intr->num_components; i++) {
@@ -250,18 +265,24 @@ vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
                         nir_intrinsic_instr_create(c->s, nir_intrinsic_load_input);
                 intr_comp->num_components = 1;
                 intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
-                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
+                intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
+
+                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
                 nir_builder_instr_insert(b, &intr_comp->instr);
 
                 dests[i] = &intr_comp->dest.ssa;
         }
 
         if (input_var->data.location == VARYING_SLOT_FACE) {
-                dests[0] = nir_fsub(b,
-                                    nir_imm_float(b, 1.0),
-                                    nir_fmul(b,
-                                             nir_i2f(b, dests[0]),
-                                             nir_imm_float(b, 2.0)));
+                /* TGSI-to-NIR's front face.  Convert to using the system
+                 * value boolean instead.
+                 */
+                nir_ssa_def *face =
+                        nir_load_system_value(b,
+                                              nir_intrinsic_load_front_face,
+                                              0);
+                dests[0] = nir_bcsel(b, face, nir_imm_float(b, 1.0),
+                                     nir_imm_float(b, -1.0));
                 dests[1] = nir_imm_float(b, 0.0);
                 dests[2] = nir_imm_float(b, 0.0);
                 dests[3] = nir_imm_float(b, 1.0);
@@ -309,7 +330,8 @@ vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
         /* Color output is lowered by vc4_nir_lower_blend(). */
         if (c->stage == QSTAGE_FRAG &&
             (output_var->data.location == FRAG_RESULT_COLOR ||
-             output_var->data.location == FRAG_RESULT_DATA0)) {
+             output_var->data.location == FRAG_RESULT_DATA0 ||
+             output_var->data.location == FRAG_RESULT_SAMPLE_MASK)) {
                 intr->const_index[0] *= 4;
                 return;
         }
@@ -317,6 +339,12 @@ vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
         /* All TGSI-to-NIR outputs are VEC4. */
         assert(intr->num_components == 4);
 
+        /* We only accept direct outputs and TGSI only ever gives them to us
+         * with an offset value of 0.
+         */
+        assert(nir_src_as_const_value(intr->src[1]) &&
+               nir_src_as_const_value(intr->src[1])->u32[0] == 0);
+
         b->cursor = nir_before_instr(&intr->instr);
 
         for (unsigned i = 0; i < intr->num_components; i++) {
@@ -328,6 +356,7 @@ vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                 assert(intr->src[0].is_ssa);
                 intr_comp->src[0] =
                         nir_src_for_ssa(nir_channel(b, intr->src[0].ssa, i));
+                intr_comp->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
                 nir_builder_instr_insert(b, &intr_comp->instr);
         }
 
@@ -338,8 +367,8 @@ static void
 vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                       nir_intrinsic_instr *intr)
 {
-        /* All TGSI-to-NIR uniform loads are vec4, but we may create dword
-         * loads in our lowering passes.
+        /* All TGSI-to-NIR uniform loads are vec4, but we need byte offsets
+         * in the backend.
          */
         if (intr->num_components == 1)
                 return;
@@ -353,27 +382,16 @@ vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                 nir_intrinsic_instr *intr_comp =
                         nir_intrinsic_instr_create(c->s, intr->intrinsic);
                 intr_comp->num_components = 1;
-                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
-
-                if (intr->intrinsic == nir_intrinsic_load_uniform_indirect) {
-                        /* Convert the variable TGSI register index to a byte
-                         * offset.
-                         */
-                        intr_comp->src[0] =
-                                nir_src_for_ssa(nir_ishl(b,
-                                                         intr->src[0].ssa,
-                                                         nir_imm_int(b, 4)));
-
-                        /* Convert the offset to be a byte index, too. */
-                        intr_comp->const_index[0] = (intr->const_index[0] * 16 +
-                                                     i * 4);
-                } else {
-                        /* We want a dword index for non-indirect uniform
-                         * loads.
-                         */
-                        intr_comp->const_index[0] = (intr->const_index[0] * 4 +
-                                                     i);
-                }
+                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
+
+                /* Convert the uniform offset to bytes.  If it happens to be a
+                 * constant, constant-folding will clean up the shift for us.
+                 */
+                intr_comp->const_index[0] = (intr->const_index[0] * 16 + i * 4);
+
+                intr_comp->src[0] =
+                        nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
+                                                 nir_imm_int(b, 4)));
 
                 dests[i] = &intr_comp->dest.ssa;
 
@@ -404,36 +422,25 @@ vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                 break;
 
         case nir_intrinsic_load_uniform:
-        case nir_intrinsic_load_uniform_indirect:
-        case nir_intrinsic_load_user_clip_plane:
                 vc4_nir_lower_uniform(c, b, intr);
                 break;
 
+        case nir_intrinsic_load_user_clip_plane:
         default:
                 break;
         }
 }
 
 static bool
-vc4_nir_lower_io_block(nir_block *block, void *arg)
+vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
 {
-        struct vc4_compile *c = arg;
-        nir_function_impl *impl =
-                nir_cf_node_get_function(&block->cf_node);
-
         nir_builder b;
         nir_builder_init(&b, impl);
 
-        nir_foreach_instr_safe(block, instr)
-                vc4_nir_lower_io_instr(c, &b, instr);
-
-        return true;
-}
-
-static bool
-vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
-{
-        nir_foreach_block(impl, vc4_nir_lower_io_block, c);
+        nir_foreach_block(block, impl) {
+                nir_foreach_instr_safe(instr, block)
+                        vc4_nir_lower_io_instr(c, &b, instr);
+        }
 
         nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);
@@ -442,10 +449,10 @@ vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
 }
 
 void
-vc4_nir_lower_io(struct vc4_compile *c)
+vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c)
 {
-        nir_foreach_overload(c->s, overload) {
-                if (overload->impl)
-                        vc4_nir_lower_io_impl(c, overload->impl);
+        nir_foreach_function(function, s) {
+                if (function->impl)
+                        vc4_nir_lower_io_impl(c, function->impl);
         }
 }
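
Note on the last two hunks: they track NIR's move from callback-based block traversal (nir_foreach_block(impl, callback, data)) to iterator macros, plus the rename of nir_foreach_overload to nir_foreach_function. A minimal sketch of what a lowering pass looks like after this API change, using the same macros the diff adopts; lower_instr() is a hypothetical stand-in for the per-instruction work (vc4_nir_lower_io_instr() in this file):

    static bool
    lower_impl(nir_function_impl *impl)
    {
            nir_builder b;
            nir_builder_init(&b, impl);

            /* The iterator macros walk blocks and instructions inline, so
             * the pass no longer threads its state through a void *arg
             * closure.  The _safe variant permits removing or replacing
             * the current instruction while iterating.
             */
            nir_foreach_block(block, impl) {
                    nir_foreach_instr_safe(instr, block)
                            lower_instr(&b, instr);     /* hypothetical helper */
            }

            nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
            return true;
    }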