diff --git a/src/gallium/drivers/vc4/vc4_nir_lower_io.c b/src/gallium/drivers/vc4/vc4_nir_lower_io.c
index 9882b6b8a35..c82d9981225 100644
--- a/src/gallium/drivers/vc4/vc4_nir_lower_io.c
+++ b/src/gallium/drivers/vc4/vc4_nir_lower_io.c
@@ -22,51 +22,278 @@
  */
 
 #include "vc4_qir.h"
-#include "tgsi/tgsi_info.h"
-#include "glsl/nir/nir_builder.h"
+#include "compiler/nir/nir_builder.h"
+#include "util/format/u_format.h"
 
 /**
- * Walks the NIR generated by TGSI-to-NIR to lower its io intrinsics into
- * something amenable to the VC4 architecture.
+ * Walks the NIR generated by TGSI-to-NIR or GLSL-to-NIR to lower its io
+ * intrinsics into something amenable to the VC4 architecture.
  *
- * Currently, it split inputs and outputs into scalars, and drops any
- * non-position outputs in coordinate shaders.
+ * Currently, it splits VS inputs and uniforms into scalars, drops any
+ * non-position outputs in coordinate shaders, and fixes up the addressing on
+ * indirect uniform loads. FS input and VS output scalarization is handled by
+ * nir_lower_io_to_scalar().
  */
 
 static void
-vc4_nir_lower_input(struct vc4_compile *c, nir_builder *b,
-                    nir_intrinsic_instr *intr)
+replace_intrinsic_with_vec(nir_builder *b, nir_intrinsic_instr *intr,
+                           nir_ssa_def **comps)
 {
-        /* All TGSI-to-NIR inputs are vec4. */
-        assert(intr->num_components == 4);
-        nir_builder_insert_before_instr(b, &intr->instr);
+        /* Batch things back together into a vector. This will get split by
+         * the later ALU scalarization pass.
+         */
+        nir_ssa_def *vec = nir_vec(b, comps, intr->num_components);
 
-        /* Generate scalar loads equivalent to the original VEC4. */
-        nir_ssa_def *dests[4];
-        for (unsigned i = 0; i < intr->num_components; i++) {
+        /* Replace the old intrinsic with a reference to our reconstructed
+         * vector.
+         */
+        nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));
+        nir_instr_remove(&intr->instr);
+}
+
+static nir_ssa_def *
+vc4_nir_unpack_8i(nir_builder *b, nir_ssa_def *src, unsigned chan)
+{
+        return nir_ubitfield_extract(b,
+                                     src,
+                                     nir_imm_int(b, 8 * chan),
+                                     nir_imm_int(b, 8));
+}
+
+/** Returns the 16 bit field as a sign-extended 32-bit value. */
+static nir_ssa_def *
+vc4_nir_unpack_16i(nir_builder *b, nir_ssa_def *src, unsigned chan)
+{
+        return nir_ibitfield_extract(b,
+                                     src,
+                                     nir_imm_int(b, 16 * chan),
+                                     nir_imm_int(b, 16));
+}
+
+/** Returns the 16 bit field as an unsigned 32 bit value. */
+static nir_ssa_def *
+vc4_nir_unpack_16u(nir_builder *b, nir_ssa_def *src, unsigned chan)
+{
+        if (chan == 0) {
+                return nir_iand(b, src, nir_imm_int(b, 0xffff));
+        } else {
+                return nir_ushr(b, src, nir_imm_int(b, 16));
+        }
+}
+
+static nir_ssa_def *
+vc4_nir_unpack_8f(nir_builder *b, nir_ssa_def *src, unsigned chan)
+{
+        return nir_channel(b, nir_unpack_unorm_4x8(b, src), chan);
+}
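
For reference, the three integer unpack helpers above compile down to plain
shift-and-mask arithmetic on the packed dword. A standalone C model of the
same extractions (hypothetical names, not part of the patch):

#include <stdint.h>

/* Byte `chan` (0-3) as an unsigned value, mirroring
 * nir_ubitfield_extract(src, 8 * chan, 8) in vc4_nir_unpack_8i(). */
static uint32_t unpack_8i(uint32_t src, unsigned chan)
{
        return (src >> (8 * chan)) & 0xff;
}

/* 16-bit half `chan` (0-1), sign-extended, mirroring
 * nir_ibitfield_extract(src, 16 * chan, 16) in vc4_nir_unpack_16i().
 * Assumes the usual arithmetic right shift of signed values. */
static int32_t unpack_16i(uint32_t src, unsigned chan)
{
        return (int32_t)(src << (16 * (1 - chan))) >> 16;
}

/* 16-bit half `chan`, zero-extended, mirroring the iand/ushr pair in
 * vc4_nir_unpack_16u(). */
static uint32_t unpack_16u(uint32_t src, unsigned chan)
{
        return chan == 0 ? (src & 0xffff) : (src >> 16);
}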
+
+static nir_ssa_def *
+vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
+                              nir_builder *b,
+                              nir_ssa_def **vpm_reads,
+                              uint8_t swiz,
+                              const struct util_format_description *desc)
+{
+        const struct util_format_channel_description *chan =
+                &desc->channel[swiz];
+        nir_ssa_def *temp;
+
+        if (swiz > PIPE_SWIZZLE_W) {
+                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
+        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_FLOAT) {
+                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
+        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_SIGNED) {
+                if (chan->normalized) {
+                        return nir_fmul(b,
+                                        nir_i2f32(b, vpm_reads[swiz]),
+                                        nir_imm_float(b,
+                                                      1.0 / 0x7fffffff));
+                } else {
+                        return nir_i2f32(b, vpm_reads[swiz]);
+                }
+        } else if (chan->size == 8 &&
+                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
+                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
+                nir_ssa_def *vpm = vpm_reads[0];
+                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
+                        temp = nir_ixor(b, vpm, nir_imm_int(b, 0x80808080));
+                        if (chan->normalized) {
+                                return nir_fsub(b, nir_fmul(b,
+                                                            vc4_nir_unpack_8f(b, temp, swiz),
+                                                            nir_imm_float(b, 2.0)),
+                                                nir_imm_float(b, 1.0));
+                        } else {
+                                return nir_fadd(b,
+                                                nir_i2f32(b,
+                                                          vc4_nir_unpack_8i(b, temp,
+                                                                            swiz)),
+                                                nir_imm_float(b, -128.0));
+                        }
+                } else {
+                        if (chan->normalized) {
+                                return vc4_nir_unpack_8f(b, vpm, swiz);
+                        } else {
+                                return nir_i2f32(b, vc4_nir_unpack_8i(b, vpm, swiz));
+                        }
+                }
+        } else if (chan->size == 16 &&
+                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
+                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
+                nir_ssa_def *vpm = vpm_reads[swiz / 2];
+
+                /* Note that UNPACK_16F eats a half float, not ints, so we use
+                 * UNPACK_16_I for all of these.
+                 */
+                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
+                        temp = nir_i2f32(b, vc4_nir_unpack_16i(b, vpm, swiz & 1));
+                        if (chan->normalized) {
+                                return nir_fmul(b, temp,
+                                                nir_imm_float(b, 1/32768.0f));
+                        } else {
+                                return temp;
+                        }
+                } else {
+                        temp = nir_i2f32(b, vc4_nir_unpack_16u(b, vpm, swiz & 1));
+                        if (chan->normalized) {
+                                return nir_fmul(b, temp,
+                                                nir_imm_float(b, 1 / 65535.0));
+                        } else {
+                                return temp;
+                        }
+                }
+        } else {
+                return NULL;
+        }
+}
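
The signed 8-bit path above deserves a note: XORing with 0x80808080 rebases
two's-complement bytes into excess-128 form, so the hardware's unorm byte
unpack can be reused for both signed variants. A scalar sketch of the two
branches (hypothetical helper names, not part of the patch):

#include <stdint.h>

/* Normalized (snorm8) case: unorm-unpack the rebased byte, then map [0,1]
 * onto [-1,1] with the fsub(fmul(x, 2.0), 1.0) from the NIR above. This
 * hits -1.0 and 1.0 exactly but maps an input of 0 to 1/255 rather than
 * 0.0, a trade-off of reusing the unorm unpacker. */
static float snorm8_channel(int8_t v)
{
        uint8_t rebased = (uint8_t)v ^ 0x80;  /* nir_ixor(vpm, 0x80808080) */
        float unorm = rebased / 255.0f;       /* vc4_nir_unpack_8f() */
        return unorm * 2.0f - 1.0f;
}

/* Non-normalized (sint8) case: unpack the rebased byte as an integer and
 * undo the bias, matching fadd(i2f(unpack_8i(temp, chan)), -128.0). */
static float sint8_channel(int8_t v)
{
        uint8_t rebased = (uint8_t)v ^ 0x80;
        return (float)rebased - 128.0f;
}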
+
+static void
+vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
+                          nir_intrinsic_instr *intr)
+{
+        b->cursor = nir_before_instr(&intr->instr);
+
+        int attr = nir_intrinsic_base(intr);
+        enum pipe_format format = c->vs_key->attr_formats[attr];
+        uint32_t attr_size = util_format_get_blocksize(format);
+
+        /* We only accept direct outputs and TGSI only ever gives them to us
+         * with an offset value of 0.
+         */
+        assert(nir_src_as_uint(intr->src[0]) == 0);
+
+        /* Generate dword loads for the VPM values (Since these intrinsics may
+         * be reordered, the actual reads will be generated at the top of the
+         * shader by ntq_setup_inputs()).
+         */
+        nir_ssa_def *vpm_reads[4];
+        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                 nir_intrinsic_instr *intr_comp =
-                        nir_intrinsic_instr_create(c->s, nir_intrinsic_load_input);
+                        nir_intrinsic_instr_create(c->s,
+                                                   nir_intrinsic_load_input);
                 intr_comp->num_components = 1;
-                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
-                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
+                nir_intrinsic_set_base(intr_comp, nir_intrinsic_base(intr));
+                nir_intrinsic_set_component(intr_comp, i);
+                intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
+                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
                 nir_builder_instr_insert(b, &intr_comp->instr);
 
-                dests[i] = &intr_comp->dest.ssa;
+                vpm_reads[i] = &intr_comp->dest.ssa;
         }
 
-        /* Batch things back together into a vec4. This will get split by the
-         * later ALU scalarization pass.
-         */
-        nir_ssa_def *vec_instr = nir_vec4(b, dests[0], dests[1],
-                                          dests[2], dests[3]);
+        bool format_warned = false;
+        const struct util_format_description *desc =
+                util_format_description(format);
 
-        /* Replace the old intrinsic with a reference to our reconstructed
-         * vec4.
-         */
-        nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec_instr),
-                                 ralloc_parent(b->impl));
-        nir_instr_remove(&intr->instr);
+        nir_ssa_def *dests[4];
+        for (int i = 0; i < intr->num_components; i++) {
+                uint8_t swiz = desc->swizzle[i];
+                dests[i] = vc4_nir_get_vattr_channel_vpm(c, b, vpm_reads, swiz,
+                                                         desc);
+
+                if (!dests[i]) {
+                        if (!format_warned) {
+                                fprintf(stderr,
+                                        "vtx element %d unsupported type: %s\n",
+                                        attr, util_format_name(format));
+                                format_warned = true;
+                        }
+                        dests[i] = nir_imm_float(b, 0.0);
+                }
+        }
+
+        replace_intrinsic_with_vec(b, intr, dests);
+}
+
+static bool
+is_point_sprite(struct vc4_compile *c, nir_variable *var)
+{
+        if (var->data.location < VARYING_SLOT_VAR0 ||
+            var->data.location > VARYING_SLOT_VAR31)
+                return false;
+
+        return (c->fs_key->point_sprite_mask &
+                (1 << (var->data.location - VARYING_SLOT_VAR0)));
+}
+
+static void
+vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
+                       nir_intrinsic_instr *intr)
+{
+        b->cursor = nir_after_instr(&intr->instr);
+
+        if (nir_intrinsic_base(intr) >= VC4_NIR_TLB_COLOR_READ_INPUT &&
+            nir_intrinsic_base(intr) < (VC4_NIR_TLB_COLOR_READ_INPUT +
+                                        VC4_MAX_SAMPLES)) {
+                /* This doesn't need any lowering. */
+                return;
+        }
+
+        nir_variable *input_var = NULL;
+        nir_foreach_variable(var, &c->s->inputs) {
+                if (var->data.driver_location == nir_intrinsic_base(intr)) {
+                        input_var = var;
+                        break;
+                }
+        }
+        assert(input_var);
+
+        int comp = nir_intrinsic_component(intr);
+
+        /* Lower away point coordinates, and fix up PNTC. */
+        if (is_point_sprite(c, input_var) ||
+            input_var->data.location == VARYING_SLOT_PNTC) {
+                assert(intr->num_components == 1);
+
+                nir_ssa_def *result = &intr->dest.ssa;
+
+                switch (comp) {
+                case 0:
+                case 1:
+                        /* If we're not rendering points, we need to set a
+                         * defined value for the input that would come from
+                         * PNTC.
+                         */
+                        if (!c->fs_key->is_points)
+                                result = nir_imm_float(b, 0.0);
+                        break;
+                case 2:
+                        result = nir_imm_float(b, 0.0);
+                        break;
+                case 3:
+                        result = nir_imm_float(b, 1.0);
+                        break;
+                }
+
+                if (c->fs_key->point_coord_upper_left && comp == 1)
+                        result = nir_fsub(b, nir_imm_float(b, 1.0), result);
+
+                if (result != &intr->dest.ssa) {
+                        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
+                                                       nir_src_for_ssa(result),
+                                                       result->parent_instr);
+                }
+        }
 }
 
 static void
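
The point-sprite handling at the end of the hunk above boils down to
per-component selection plus an optional origin flip. A scalar model of what
each component of a PNTC-style varying ends up holding (a sketch; `interp`
stands for the interpolated value the hardware delivers):

#include <stdbool.h>

static float pntc_component(float interp, int comp,
                            bool is_points, bool upper_left)
{
        float result = interp;

        switch (comp) {
        case 0:
        case 1:
                /* Only point rendering produces real PNTC data; otherwise
                 * the component gets a defined constant. */
                if (!is_points)
                        result = 0.0f;
                break;
        case 2:
                result = 0.0f;          /* .z is forced to 0 */
                break;
        default:
                result = 1.0f;          /* .w is forced to 1 */
                break;
        }

        /* Flip t when the point coordinate origin is the upper left,
         * mirroring the nir_fsub(1.0, result) above. */
        if (upper_left && comp == 1)
                result = 1.0f - result;

        return result;
}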
@@ -74,41 +301,57 @@ vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
 {
         nir_variable *output_var = NULL;
-        foreach_list_typed(nir_variable, var, node, &c->s->outputs) {
-                if (var->data.driver_location == intr->const_index[0]) {
+        nir_foreach_variable(var, &c->s->outputs) {
+                if (var->data.driver_location == nir_intrinsic_base(intr)) {
                         output_var = var;
                         break;
                 }
         }
         assert(output_var);
 
-        unsigned semantic_name = output_var->data.location;
-
         if (c->stage == QSTAGE_COORD &&
-            (semantic_name != TGSI_SEMANTIC_POSITION &&
-             semantic_name != TGSI_SEMANTIC_PSIZE)) {
+            output_var->data.location != VARYING_SLOT_POS &&
+            output_var->data.location != VARYING_SLOT_PSIZ) {
                 nir_instr_remove(&intr->instr);
                 return;
         }
+}
 
-        /* All TGSI-to-NIR outputs are VEC4. */
-        assert(intr->num_components == 4);
-
-        nir_builder_insert_before_instr(b, &intr->instr);
+static void
+vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
+                      nir_intrinsic_instr *intr)
+{
+        b->cursor = nir_before_instr(&intr->instr);
 
+        /* Generate scalar loads equivalent to the original vector. */
+        nir_ssa_def *dests[4];
         for (unsigned i = 0; i < intr->num_components; i++) {
                 nir_intrinsic_instr *intr_comp =
-                        nir_intrinsic_instr_create(c->s, nir_intrinsic_store_output);
+                        nir_intrinsic_instr_create(c->s, intr->intrinsic);
                 intr_comp->num_components = 1;
-                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
+                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1,
+                                  intr->dest.ssa.bit_size, NULL);
+
+                /* Convert the uniform offset to bytes. If it happens
+                 * to be a constant, constant-folding will clean up
+                 * the shift for us.
+                 */
+                nir_intrinsic_set_base(intr_comp,
+                                       nir_intrinsic_base(intr) * 16 +
+                                       i * 4);
+                nir_intrinsic_set_range(intr_comp,
+                                        nir_intrinsic_range(intr) * 16 - i * 4);
+
+                intr_comp->src[0] =
+                        nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
+                                                 nir_imm_int(b, 4)));
+
+                dests[i] = &intr_comp->dest.ssa;
 
-                assert(intr->src[0].is_ssa);
-                intr_comp->src[0] = nir_src_for_ssa(nir_swizzle(b,
-                                                                intr->src[0].ssa,
-                                                                &i, 1, false));
                 nir_builder_instr_insert(b, &intr_comp->instr);
         }
 
-        nir_instr_remove(&intr->instr);
+        replace_intrinsic_with_vec(b, intr, dests);
 }
 
 static void
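
The addressing math in vc4_nir_lower_uniform() above converts vec4-slot
offsets into byte offsets: scalar component i of vec4 slot base lives at
byte base * 16 + i * 4, and the indirect offset (still in vec4 units) is
shifted left by 4 to become bytes. In plain C (a model of the arithmetic,
not a driver API), component 2 of slot 3 comes out at 3 * 16 + 2 * 4 = 56:

#include <stdint.h>

static uint32_t uniform_byte_address(uint32_t base_vec4, unsigned comp,
                                     uint32_t indirect_vec4)
{
        uint32_t base = base_vec4 * 16 + comp * 4; /* nir_intrinsic_set_base() */
        uint32_t indirect = indirect_vec4 << 4;    /* nir_ishl(..., imm 4) */
        return base + indirect;
}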
+ */ + nir_intrinsic_set_base(intr_comp, + nir_intrinsic_base(intr) * 16 + + i * 4); + nir_intrinsic_set_range(intr_comp, + nir_intrinsic_range(intr) * 16 - i * 4); + + intr_comp->src[0] = + nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa, + nir_imm_int(b, 4))); + + dests[i] = &intr_comp->dest.ssa; - assert(intr->src[0].is_ssa); - intr_comp->src[0] = nir_src_for_ssa(nir_swizzle(b, - intr->src[0].ssa, - &i, 1, false)); nir_builder_instr_insert(b, &intr_comp->instr); } - nir_instr_remove(&intr->instr); + replace_intrinsic_with_vec(b, intr, dests); } static void @@ -121,38 +364,36 @@ vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b, switch (intr->intrinsic) { case nir_intrinsic_load_input: - vc4_nir_lower_input(c, b, intr); + if (c->stage == QSTAGE_FRAG) + vc4_nir_lower_fs_input(c, b, intr); + else + vc4_nir_lower_vertex_attr(c, b, intr); break; case nir_intrinsic_store_output: vc4_nir_lower_output(c, b, intr); break; + case nir_intrinsic_load_uniform: + vc4_nir_lower_uniform(c, b, intr); + break; + + case nir_intrinsic_load_user_clip_plane: default: break; } } static bool -vc4_nir_lower_io_block(nir_block *block, void *arg) +vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl) { - struct vc4_compile *c = arg; - nir_function_impl *impl = - nir_cf_node_get_function(&block->cf_node); - nir_builder b; nir_builder_init(&b, impl); - nir_foreach_instr_safe(block, instr) - vc4_nir_lower_io_instr(c, &b, instr); - - return true; -} - -static bool -vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl) -{ - nir_foreach_block(impl, vc4_nir_lower_io_block, c); + nir_foreach_block(block, impl) { + nir_foreach_instr_safe(instr, block) + vc4_nir_lower_io_instr(c, &b, instr); + } nir_metadata_preserve(impl, nir_metadata_block_index | nir_metadata_dominance); @@ -161,10 +402,10 @@ vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl) } void -vc4_nir_lower_io(struct vc4_compile *c) +vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c) { - nir_foreach_overload(c->s, overload) { - if (overload->impl) - vc4_nir_lower_io_impl(c, overload->impl); + nir_foreach_function(function, s) { + if (function->impl) + vc4_nir_lower_io_impl(c, function->impl); } }