nir: Add explicit signs to image min/max intrinsics
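
On the radv side this diff replaces the sign-agnostic
nir_intrinsic_image_deref_atomic_min/max cases with the explicit
imin/umin/imax/umax variants, drops the old nir_deref_var and
*_image_var_* paths in favour of nir_deref_instr, and extends the info
pass to gather push-constant ranges, streamout (xfb) outputs and
per-stream GS output counts. The new widen_writemask() helper doubles
each write-mask bit because a 64-bit component occupies two 32-bit
slots (e.g. 0x5 widens to 0x33).

As a rough sketch (not part of this patch; the helper name is made up
for illustration), emitters that previously used the sign-agnostic
intrinsic now have to pick the variant from the operation's signedness:

    static nir_intrinsic_op
    image_atomic_min_op(bool is_signed)
    {
            /* Hypothetical helper: choose the explicitly signed or
             * unsigned image atomic min intrinsic. */
            return is_signed ? nir_intrinsic_image_deref_atomic_imin
                             : nir_intrinsic_image_deref_atomic_umin;
    }
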
diff --git a/src/amd/vulkan/radv_shader_info.c b/src/amd/vulkan/radv_shader_info.c
index 448babb3ca026fa16b580c86fbc5c2314a10854b..4742036a1dc139353ce1ab210fda9e3854c92da1 100644
--- a/src/amd/vulkan/radv_shader_info.c
+++ b/src/amd/vulkan/radv_shader_info.c
@@ -23,6 +23,8 @@
 #include "radv_private.h"
 #include "radv_shader.h"
 #include "nir/nir.h"
+#include "nir/nir_deref.h"
+#include "nir/nir_xfb_info.h"
 
 static void mark_sampler_desc(const nir_variable *var,
                              struct radv_shader_info *info)
@@ -47,57 +49,56 @@ static void mark_tess_output(struct radv_shader_info *info,
                info->tcs.outputs_written |= (mask << param);
 }
 
-static void get_deref_offset(nir_deref_var *deref, unsigned *const_out)
+static void
+get_deref_offset(nir_deref_instr *instr,
+                 unsigned *const_out)
 {
-       nir_deref *tail = &deref->deref;
-       unsigned const_offset = 0;
-
-       if (deref->var->data.compact) {
-               assert(tail->child->deref_type == nir_deref_type_array);
-               assert(glsl_type_is_scalar(glsl_without_array(deref->var->type)));
+        nir_variable *var = nir_deref_instr_get_variable(instr);
+        nir_deref_path path;
+        unsigned idx_lvl = 1;
 
-               nir_deref_array *deref_array = nir_deref_as_array(tail->child);
-               /* We always lower indirect dereferences for "compact" array vars. */
-               assert(deref_array->deref_array_type == nir_deref_array_type_direct);
-
-               *const_out = deref_array->base_offset;
+       if (var->data.compact) {
+               assert(instr->deref_type == nir_deref_type_array);
+               *const_out = nir_src_as_uint(instr->arr.index);
                return;
        }
 
-       while (tail->child != NULL) {
-               const struct glsl_type *parent_type = tail->type;
-               tail = tail->child;
+       nir_deref_path_init(&path, instr, NULL);
 
-               if (tail->deref_type == nir_deref_type_array) {
-                       nir_deref_array *deref_array = nir_deref_as_array(tail);
-                       unsigned size = glsl_count_attribute_slots(tail->type, false);
+       uint32_t const_offset = 0;
 
-                       const_offset += size * deref_array->base_offset;
-               } else if (tail->deref_type == nir_deref_type_struct) {
-                       nir_deref_struct *deref_struct = nir_deref_as_struct(tail);
+       for (; path.path[idx_lvl]; ++idx_lvl) {
+               const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
+               if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
+                       unsigned index = path.path[idx_lvl]->strct.index;
 
-                       for (unsigned i = 0; i < deref_struct->index; i++) {
+                       for (unsigned i = 0; i < index; i++) {
                                const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
                                const_offset += glsl_count_attribute_slots(ft, false);
                        }
+               } else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
+                       unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, false);
+                       if (nir_src_is_const(path.path[idx_lvl]->arr.index))
+                               const_offset += nir_src_as_uint(path.path[idx_lvl]->arr.index) * size;
                } else
-                       unreachable("unsupported deref type");
+                       unreachable("Unhandled deref type in get_deref_instr_offset");
        }
 
        *const_out = const_offset;
+
+       nir_deref_path_finish(&path);
 }
 
 static void
-gather_intrinsic_load_var_info(const nir_shader *nir,
+gather_intrinsic_load_deref_info(const nir_shader *nir,
                               const nir_intrinsic_instr *instr,
                               struct radv_shader_info *info)
 {
        switch (nir->info.stage) {
        case MESA_SHADER_VERTEX: {
-               nir_deref_var *dvar = instr->variables[0];
-               nir_variable *var = dvar->var;
+               nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
 
-               if (var->data.mode == nir_var_shader_in) {
+               if (var && var->data.mode == nir_var_shader_in) {
                        unsigned idx = var->data.location;
                        uint8_t mask = nir_ssa_def_components_read(&instr->dest.ssa);
 
@@ -111,40 +112,67 @@ gather_intrinsic_load_var_info(const nir_shader *nir,
        }
 }
 
+static uint32_t
+widen_writemask(uint32_t wrmask)
+{
+       uint32_t new_wrmask = 0;
+       for (unsigned i = 0; i < 4; i++)
+               new_wrmask |= (wrmask & (1 << i) ? 0x3 : 0x0) << (i * 2);
+       return new_wrmask;
+}
+
+static void
+set_output_usage_mask(const nir_shader *nir, const nir_intrinsic_instr *instr,
+                     uint8_t *output_usage_mask)
+{
+       nir_deref_instr *deref_instr =
+               nir_instr_as_deref(instr->src[0].ssa->parent_instr);
+       nir_variable *var = nir_deref_instr_get_variable(deref_instr);
+       unsigned attrib_count = glsl_count_attribute_slots(deref_instr->type, false);
+       unsigned idx = var->data.location;
+       unsigned comp = var->data.location_frac;
+       unsigned const_offset = 0;
+
+       get_deref_offset(deref_instr, &const_offset);
+
+       if (var->data.compact) {
+               assert(!glsl_type_is_64bit(deref_instr->type));
+               const_offset += comp;
+               output_usage_mask[idx + const_offset / 4] |= 1 << (const_offset % 4);
+               return;
+       }
+
+       uint32_t wrmask = nir_intrinsic_write_mask(instr);
+       if (glsl_type_is_64bit(deref_instr->type))
+               wrmask = widen_writemask(wrmask);
+
+       for (unsigned i = 0; i < attrib_count; i++)
+               output_usage_mask[idx + i + const_offset] |=
+                       ((wrmask >> (i * 4)) & 0xf) << comp;
+}
+
 static void
-gather_intrinsic_store_var_info(const nir_shader *nir,
+gather_intrinsic_store_deref_info(const nir_shader *nir,
                                const nir_intrinsic_instr *instr,
                                struct radv_shader_info *info)
 {
-       nir_deref_var *dvar = instr->variables[0];
-       nir_variable *var = dvar->var;
+       nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
 
-       if (var->data.mode == nir_var_shader_out) {
-               unsigned attrib_count = glsl_count_attribute_slots(var->type, false);
+       if (var && var->data.mode == nir_var_shader_out) {
                unsigned idx = var->data.location;
-               unsigned comp = var->data.location_frac;
-               unsigned const_offset = 0;
-
-               get_deref_offset(dvar, &const_offset);
 
                switch (nir->info.stage) {
                case MESA_SHADER_VERTEX:
-                       for (unsigned i = 0; i < attrib_count; i++) {
-                               info->vs.output_usage_mask[idx + i + const_offset] |=
-                                       instr->const_index[0] << comp;
-                       }
+                       set_output_usage_mask(nir, instr,
+                                             info->vs.output_usage_mask);
                        break;
                case MESA_SHADER_GEOMETRY:
-                       for (unsigned i = 0; i < attrib_count; i++) {
-                               info->gs.output_usage_mask[idx + i + const_offset] |=
-                                       instr->const_index[0] << comp;
-                       }
+                       set_output_usage_mask(nir, instr,
+                                             info->gs.output_usage_mask);
                        break;
                case MESA_SHADER_TESS_EVAL:
-                       for (unsigned i = 0; i < attrib_count; i++) {
-                               info->tes.output_usage_mask[idx + i + const_offset] |=
-                                       instr->const_index[0] << comp;
-                       }
+                       set_output_usage_mask(nir, instr,
+                                             info->tes.output_usage_mask);
                        break;
                case MESA_SHADER_TESS_CTRL: {
                        unsigned param = shader_io_get_unique_index(idx);
@@ -154,13 +182,9 @@ gather_intrinsic_store_var_info(const nir_shader *nir,
                                type = glsl_get_array_element(var->type);
 
                        unsigned slots =
-                               var->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4)
+                               var->data.compact ? DIV_ROUND_UP(var->data.location_frac + glsl_get_length(type), 4)
                                                  : glsl_count_attribute_slots(type, false);
 
-                       if (idx == VARYING_SLOT_CLIP_DIST0)
-                               slots = (nir->info.clip_distance_array_size +
-                                        nir->info.cull_distance_array_size > 4) ? 2 : 1;
-
                        mark_tess_output(info, var->data.patch, param, slots);
                        break;
                }
@@ -170,12 +194,37 @@ gather_intrinsic_store_var_info(const nir_shader *nir,
        }
 }
 
+static void
+gather_push_constant_info(const nir_shader *nir,
+                         const nir_intrinsic_instr *instr,
+                         struct radv_shader_info *info)
+{
+       int base = nir_intrinsic_base(instr);
+
+       if (!nir_src_is_const(instr->src[0])) {
+               info->has_indirect_push_constants = true;
+       } else {
+               uint32_t min = base + nir_src_as_uint(instr->src[0]);
+               uint32_t max = min + instr->num_components * 4;
+
+               info->max_push_constant_used =
+                       MAX2(max, info->max_push_constant_used);
+               info->min_push_constant_used =
+                       MIN2(min, info->min_push_constant_used);
+       }
+
+       if (instr->dest.ssa.bit_size != 32)
+               info->has_only_32bit_push_constants = false;
+
+       info->loads_push_constants = true;
+}
+
 static void
 gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr,
                      struct radv_shader_info *info)
 {
        switch (instr->intrinsic) {
-       case nir_intrinsic_interp_var_at_sample:
+       case nir_intrinsic_load_barycentric_at_sample:
                info->ps.needs_sample_positions = true;
                break;
        case nir_intrinsic_load_draw_id:
@@ -216,6 +265,10 @@ gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr,
                if (nir->info.stage == MESA_SHADER_FRAGMENT)
                        info->ps.layer_input = true;
                break;
+       case nir_intrinsic_load_layer_id:
+               if (nir->info.stage == MESA_SHADER_FRAGMENT)
+                       info->ps.layer_input = true;
+               break;
        case nir_intrinsic_load_invocation_id:
                info->uses_invocation_id = true;
                break;
@@ -223,51 +276,18 @@ gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr,
                info->uses_prim_id = true;
                break;
        case nir_intrinsic_load_push_constant:
-               info->loads_push_constants = true;
+               gather_push_constant_info(nir, instr, info);
                break;
        case nir_intrinsic_vulkan_resource_index:
                info->desc_set_used_mask |= (1 << nir_intrinsic_desc_set(instr));
                break;
-       case nir_intrinsic_image_var_load:
-       case nir_intrinsic_image_var_store:
-       case nir_intrinsic_image_var_atomic_add:
-       case nir_intrinsic_image_var_atomic_min:
-       case nir_intrinsic_image_var_atomic_max:
-       case nir_intrinsic_image_var_atomic_and:
-       case nir_intrinsic_image_var_atomic_or:
-       case nir_intrinsic_image_var_atomic_xor:
-       case nir_intrinsic_image_var_atomic_exchange:
-       case nir_intrinsic_image_var_atomic_comp_swap:
-       case nir_intrinsic_image_var_size: {
-               const struct glsl_type *type = glsl_without_array(instr->variables[0]->var->type);
-
-               enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
-               if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
-                   dim == GLSL_SAMPLER_DIM_SUBPASS_MS) {
-                       info->ps.layer_input = true;
-                       info->ps.uses_input_attachments = true;
-               }
-               mark_sampler_desc(instr->variables[0]->var, info);
-
-               if (nir_intrinsic_image_var_store ||
-                   nir_intrinsic_image_var_atomic_add ||
-                   nir_intrinsic_image_var_atomic_min ||
-                   nir_intrinsic_image_var_atomic_max ||
-                   nir_intrinsic_image_var_atomic_and ||
-                   nir_intrinsic_image_var_atomic_or ||
-                   nir_intrinsic_image_var_atomic_xor ||
-                   nir_intrinsic_image_var_atomic_exchange ||
-                   nir_intrinsic_image_var_atomic_comp_swap) {
-                       if (nir->info.stage == MESA_SHADER_FRAGMENT)
-                               info->ps.writes_memory = true;
-               }
-               break;
-       }
        case nir_intrinsic_image_deref_load:
        case nir_intrinsic_image_deref_store:
        case nir_intrinsic_image_deref_atomic_add:
-       case nir_intrinsic_image_deref_atomic_min:
-       case nir_intrinsic_image_deref_atomic_max:
+       case nir_intrinsic_image_deref_atomic_imin:
+       case nir_intrinsic_image_deref_atomic_umin:
+       case nir_intrinsic_image_deref_atomic_imax:
+       case nir_intrinsic_image_deref_atomic_umax:
        case nir_intrinsic_image_deref_atomic_and:
        case nir_intrinsic_image_deref_atomic_or:
        case nir_intrinsic_image_deref_atomic_xor:
@@ -275,25 +295,19 @@ gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr,
        case nir_intrinsic_image_deref_atomic_comp_swap:
        case nir_intrinsic_image_deref_size: {
                nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
-               const struct glsl_type *type = glsl_without_array(var->type);
-
-               enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
-               if (dim == GLSL_SAMPLER_DIM_SUBPASS ||
-                   dim == GLSL_SAMPLER_DIM_SUBPASS_MS) {
-                       info->ps.layer_input = true;
-                       info->ps.uses_input_attachments = true;
-               }
                mark_sampler_desc(var, info);
 
-               if (nir_intrinsic_image_deref_store ||
-                   nir_intrinsic_image_deref_atomic_add ||
-                   nir_intrinsic_image_deref_atomic_min ||
-                   nir_intrinsic_image_deref_atomic_max ||
-                   nir_intrinsic_image_deref_atomic_and ||
-                   nir_intrinsic_image_deref_atomic_or ||
-                   nir_intrinsic_image_deref_atomic_xor ||
-                   nir_intrinsic_image_deref_atomic_exchange ||
-                   nir_intrinsic_image_deref_atomic_comp_swap) {
+               if (instr->intrinsic == nir_intrinsic_image_deref_store ||
+                   instr->intrinsic == nir_intrinsic_image_deref_atomic_add ||
+                   instr->intrinsic == nir_intrinsic_image_deref_atomic_imin ||
+                   instr->intrinsic == nir_intrinsic_image_deref_atomic_umin ||
+                   instr->intrinsic == nir_intrinsic_image_deref_atomic_imax ||
+                   instr->intrinsic == nir_intrinsic_image_deref_atomic_umax ||
+                   instr->intrinsic == nir_intrinsic_image_deref_atomic_and ||
+                   instr->intrinsic == nir_intrinsic_image_deref_atomic_or ||
+                   instr->intrinsic == nir_intrinsic_image_deref_atomic_xor ||
+                   instr->intrinsic == nir_intrinsic_image_deref_atomic_exchange ||
+                   instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
                        if (nir->info.stage == MESA_SHADER_FRAGMENT)
                                info->ps.writes_memory = true;
                }
@@ -313,11 +327,11 @@ gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr,
                if (nir->info.stage == MESA_SHADER_FRAGMENT)
                        info->ps.writes_memory = true;
                break;
-       case nir_intrinsic_load_var:
-               gather_intrinsic_load_var_info(nir, instr, info);
+       case nir_intrinsic_load_deref:
+               gather_intrinsic_load_deref_info(nir, instr, info);
                break;
-       case nir_intrinsic_store_var:
-               gather_intrinsic_store_var_info(nir, instr, info);
+       case nir_intrinsic_store_deref:
+               gather_intrinsic_store_deref_info(nir, instr, info);
                break;
        default:
                break;
@@ -340,11 +354,6 @@ gather_tex_info(const nir_shader *nir, const nir_tex_instr *instr,
                        break;
                }
        }
-
-       if (instr->sampler)
-               mark_sampler_desc(instr->sampler->var, info);
-       if (instr->texture)
-               mark_sampler_desc(instr->texture->var, info);
 }
 
 static void
@@ -367,18 +376,28 @@ gather_info_block(const nir_shader *nir, const nir_block *block,
 
 static void
 gather_info_input_decl_vs(const nir_shader *nir, const nir_variable *var,
-                         struct radv_shader_info *info)
+                         struct radv_shader_info *info,
+                         const struct radv_nir_compiler_options *options)
 {
+       unsigned attrib_count = glsl_count_attribute_slots(var->type, true);
        int idx = var->data.location;
 
        if (idx >= VERT_ATTRIB_GENERIC0 && idx <= VERT_ATTRIB_GENERIC15)
                info->vs.has_vertex_buffers = true;
+
+       for (unsigned i = 0; i < attrib_count; ++i) {
+               unsigned attrib_index = var->data.location + i - VERT_ATTRIB_GENERIC0;
+
+               if (options->key.vs.instance_rate_inputs & (1u << attrib_index))
+                       info->vs.needs_instance_id = true;
+       }
 }
 
 static void
 gather_info_input_decl_ps(const nir_shader *nir, const nir_variable *var,
                          struct radv_shader_info *info)
 {
+       unsigned attrib_count = glsl_count_attribute_slots(var->type, false);
        const struct glsl_type *type = glsl_without_array(var->type);
        int idx = var->data.location;
 
@@ -392,6 +411,10 @@ gather_info_input_decl_ps(const nir_shader *nir, const nir_variable *var,
        case VARYING_SLOT_LAYER:
                info->ps.layer_input = true;
                break;
+       case VARYING_SLOT_CLIP_DIST0:
+       case VARYING_SLOT_CLIP_DIST1:
+               info->ps.num_input_clips_culls += attrib_count;
+               break;
        default:
                break;
        }
@@ -404,11 +427,12 @@ gather_info_input_decl_ps(const nir_shader *nir, const nir_variable *var,
 
 static void
 gather_info_input_decl(const nir_shader *nir, const nir_variable *var,
-                      struct radv_shader_info *info)
+                      struct radv_shader_info *info,
+                      const struct radv_nir_compiler_options *options)
 {
        switch (nir->info.stage) {
        case MESA_SHADER_VERTEX:
-               gather_info_input_decl_vs(nir, var, info);
+               gather_info_input_decl_vs(nir, var, info, options);
                break;
        case MESA_SHADER_FRAGMENT:
                gather_info_input_decl_ps(nir, var, info);
@@ -425,8 +449,8 @@ gather_info_output_decl_ls(const nir_shader *nir, const nir_variable *var,
        int idx = var->data.location;
        unsigned param = shader_io_get_unique_index(idx);
        int num_slots = glsl_count_attribute_slots(var->type, false);
-       if (idx == VARYING_SLOT_CLIP_DIST0)
-               num_slots = (nir->info.clip_distance_array_size + nir->info.cull_distance_array_size > 4) ? 2 : 1;
+       if (var->data.compact)
+               num_slots = DIV_ROUND_UP(var->data.location_frac + glsl_get_length(var->type), 4);
        mark_ls_output(info, param, num_slots);
 }
 
@@ -451,6 +475,21 @@ gather_info_output_decl_ps(const nir_shader *nir, const nir_variable *var,
        }
 }
 
+static void
+gather_info_output_decl_gs(const nir_shader *nir, const nir_variable *var,
+                          struct radv_shader_info *info)
+{
+       unsigned num_components = glsl_get_component_slots(var->type);
+       unsigned stream = var->data.stream;
+       unsigned idx = var->data.location;
+
+       assert(stream < 4);
+
+       info->gs.max_stream = MAX2(info->gs.max_stream, stream);
+       info->gs.num_stream_output_components[stream] += num_components;
+       info->gs.output_streams[idx] = stream;
+}
+
 static void
 gather_info_output_decl(const nir_shader *nir, const nir_variable *var,
                        struct radv_shader_info *info,
@@ -461,14 +500,58 @@ gather_info_output_decl(const nir_shader *nir, const nir_variable *var,
                gather_info_output_decl_ps(nir, var, info);
                break;
        case MESA_SHADER_VERTEX:
-               if (options->key.vs.as_ls)
+               if (options->key.vs_common_out.as_ls)
                        gather_info_output_decl_ls(nir, var, info);
                break;
+       case MESA_SHADER_GEOMETRY:
+               gather_info_output_decl_gs(nir, var, info);
+               break;
        default:
                break;
        }
 }
 
+static void
+gather_xfb_info(const nir_shader *nir, struct radv_shader_info *info)
+{
+       nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);
+       struct radv_streamout_info *so = &info->so;
+
+       if (!xfb)
+               return;
+
+       assert(xfb->output_count < MAX_SO_OUTPUTS);
+       so->num_outputs = xfb->output_count;
+
+       for (unsigned i = 0; i < xfb->output_count; i++) {
+               struct radv_stream_output *output = &so->outputs[i];
+
+               output->buffer = xfb->outputs[i].buffer;
+               output->stream = xfb->buffer_to_stream[xfb->outputs[i].buffer];
+               output->offset = xfb->outputs[i].offset;
+               output->location = xfb->outputs[i].location;
+               output->component_mask = xfb->outputs[i].component_mask;
+
+               so->enabled_stream_buffers_mask |=
+                       (1 << output->buffer) << (output->stream * 4);
+
+       }
+
+       for (unsigned i = 0; i < NIR_MAX_XFB_BUFFERS; i++) {
+               so->strides[i] = xfb->buffers[i].stride / 4;
+       }
+
+       ralloc_free(xfb);
+}
+
+void
+radv_nir_shader_info_init(struct radv_shader_info *info)
+{
+       /* Assume that shaders only have 32-bit push constants by default. */
+       info->min_push_constant_used = UINT8_MAX;
+       info->has_only_32bit_push_constants = true;
+}
+
 void
 radv_nir_shader_info_pass(const struct nir_shader *nir,
                          const struct radv_nir_compiler_options *options,
@@ -477,11 +560,14 @@ radv_nir_shader_info_pass(const struct nir_shader *nir,
        struct nir_function *func =
                (struct nir_function *)exec_list_get_head_const(&nir->functions);
 
-       if (options->layout && options->layout->dynamic_offset_count)
+       if (options->layout && options->layout->dynamic_offset_count &&
+           (options->layout->dynamic_shader_stages & mesa_to_vk_shader_stage(nir->info.stage))) {
                info->loads_push_constants = true;
+               info->loads_dynamic_offsets = true;
+       }
 
        nir_foreach_variable(variable, &nir->inputs)
-               gather_info_input_decl(nir, variable, info);
+               gather_info_input_decl(nir, variable, info, options);
 
        nir_foreach_block(block, func->impl) {
                gather_info_block(nir, block, info);
@@ -489,4 +575,26 @@ radv_nir_shader_info_pass(const struct nir_shader *nir,
 
        nir_foreach_variable(variable, &nir->outputs)
                gather_info_output_decl(nir, variable, info, options);
+
+       if (nir->info.stage == MESA_SHADER_VERTEX ||
+           nir->info.stage == MESA_SHADER_TESS_EVAL ||
+           nir->info.stage == MESA_SHADER_GEOMETRY)
+               gather_xfb_info(nir, info);
+
+       /* Make sure to export the LayerID if the fragment shader needs it. */
+       if (options->key.vs_common_out.export_layer_id) {
+               switch (nir->info.stage) {
+               case MESA_SHADER_VERTEX:
+                       info->vs.output_usage_mask[VARYING_SLOT_LAYER] |= 0x1;
+                       break;
+               case MESA_SHADER_TESS_EVAL:
+                       info->tes.output_usage_mask[VARYING_SLOT_LAYER] |= 0x1;
+                       break;
+               case MESA_SHADER_GEOMETRY:
+                       info->gs.output_usage_mask[VARYING_SLOT_LAYER] |= 0x1;
+                       break;
+               default:
+                       break;
+               }
+       }
 }