nir/algebraic: mark some optimizations with fsat(NaN) as inexact
diff --git a/src/compiler/nir/nir_opt_constant_folding.c b/src/compiler/nir/nir_opt_constant_folding.c
index 28a73f86f95565918ddb199c452f3064037777db..9dcb464c72452f1fcb94fea128fba5c7c7d48500 100644
--- a/src/compiler/nir/nir_opt_constant_folding.c
+++ b/src/compiler/nir/nir_opt_constant_folding.c
  */
 
 struct constant_fold_state {
-   void *mem_ctx;
-   nir_function_impl *impl;
-   bool progress;
+   nir_shader *shader;
+   unsigned execution_mode;
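+   /* Track load_constant intrinsics so unused constant data can be freed. */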
+   bool has_load_constant;
+   bool has_indirect_load_const;
 };
 
 static bool
-constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
+constant_fold_alu_instr(struct constant_fold_state *state, nir_alu_instr *instr)
 {
-   nir_const_value src[4];
+   nir_const_value src[NIR_MAX_VEC_COMPONENTS][NIR_MAX_VEC_COMPONENTS];
 
    if (!instr->dest.dest.is_ssa)
       return false;
 
+   /* If any inputs or outputs have an unsized type, we need to guess the
+    * bit-size. In that case, the validator ensures that all bit-sizes
+    * match, so we can just take the bit-size from the first input/output
+    * with an unsized type. If all the inputs and outputs are sized, we
+    * don't need to guess at all, because the code we generate for
+    * constant opcodes in that case already knows the sizes of the types
+    * involved and does not use the provided bit-size for anything
+    * (although it still requires a valid one).
+    */
+   unsigned bit_size = 0;
+   if (!nir_alu_type_get_type_size(nir_op_infos[instr->op].output_type))
+      bit_size = instr->dest.dest.ssa.bit_size;
+
    for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
       if (!instr->src[i].src.is_ssa)
          return false;
 
+      if (bit_size == 0 &&
+          !nir_alu_type_get_type_size(nir_op_infos[instr->op].input_types[i]))
+         bit_size = instr->src[i].src.ssa->bit_size;
+
       nir_instr *src_instr = instr->src[i].src.ssa->parent_instr;
 
       if (src_instr->type != nir_instr_type_load_const)
@@ -58,25 +76,33 @@ constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
 
       for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i);
            j++) {
-         src[i].u[j] = load_const->value.u[instr->src[i].swizzle[j]];
+         src[i][j] = load_const->value[instr->src[i].swizzle[j]];
       }
 
       /* We shouldn't have any source modifiers in the optimization loop. */
       assert(!instr->src[i].abs && !instr->src[i].negate);
    }
 
+   /* All the inputs and outputs were sized, so any valid bit-size will do. */
+   if (bit_size == 0)
+      bit_size = 32;
+
    /* We shouldn't have any saturate modifiers in the optimization loop. */
    assert(!instr->dest.saturate);
 
-   nir_const_value dest =
-      nir_eval_const_opcode(instr->op, instr->dest.dest.ssa.num_components,
-                            src);
+   nir_const_value dest[NIR_MAX_VEC_COMPONENTS];
+   nir_const_value *srcs[NIR_MAX_VEC_COMPONENTS];
+   memset(dest, 0, sizeof(dest));
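+   /* nir_eval_const_opcode expects an array of pointers to the constant
+    * values of each source.
+    */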
+   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; ++i)
+      srcs[i] = src[i];
+   nir_eval_const_opcode(instr->op, dest, instr->dest.dest.ssa.num_components,
+                         bit_size, srcs, state->execution_mode);
 
    nir_load_const_instr *new_instr =
-      nir_load_const_instr_create(mem_ctx,
-                                  instr->dest.dest.ssa.num_components);
+      nir_load_const_instr_create(state->shader,
+                                  instr->dest.dest.ssa.num_components,
+                                  instr->dest.dest.ssa.bit_size);
 
-   new_instr->value = dest;
+   memcpy(new_instr->value, dest, sizeof(*new_instr->value) * new_instr->def.num_components);
 
    nir_instr_insert_before(&instr->instr, &new_instr->instr);
 
@@ -90,75 +116,88 @@ constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
 }
 
 static bool
-constant_fold_deref(nir_instr *instr, nir_deref_var *deref)
+constant_fold_intrinsic_instr(struct constant_fold_state *state, nir_intrinsic_instr *instr)
 {
    bool progress = false;
 
-   for (nir_deref *tail = deref->deref.child; tail; tail = tail->child) {
-      if (tail->deref_type != nir_deref_type_array)
-         continue;
-
-      nir_deref_array *arr = nir_deref_as_array(tail);
-
-      if (arr->deref_array_type == nir_deref_array_type_indirect &&
-          arr->indirect.is_ssa &&
-          arr->indirect.ssa->parent_instr->type == nir_instr_type_load_const) {
-         nir_load_const_instr *indirect =
-            nir_instr_as_load_const(arr->indirect.ssa->parent_instr);
-
-         arr->base_offset += indirect->value.u[0];
-
-         /* Clear out the source */
-         nir_instr_rewrite_src(instr, &arr->indirect, nir_src_for_ssa(NULL));
-
-         arr->deref_array_type = nir_deref_array_type_direct;
-
+   if ((instr->intrinsic == nir_intrinsic_demote_if ||
+        instr->intrinsic == nir_intrinsic_discard_if) &&
+       nir_src_is_const(instr->src[0])) {
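+      /* The condition is constant. A true condition always
+       * discards/demotes, so it can be replaced with the unconditional
+       * form of the intrinsic; a false condition makes it a no-op.
+       */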
+      if (nir_src_as_bool(instr->src[0])) {
+         nir_intrinsic_op op = instr->intrinsic == nir_intrinsic_discard_if ?
+                               nir_intrinsic_discard :
+                               nir_intrinsic_demote;
+         nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(state->shader, op);
+         nir_instr_insert_before(&instr->instr, &new_instr->instr);
+         nir_instr_remove(&instr->instr);
+         progress = true;
+      } else {
+         /* We're not discarding, so just delete the instruction. */
+         nir_instr_remove(&instr->instr);
          progress = true;
       }
-   }
+   } else if (instr->intrinsic == nir_intrinsic_load_constant) {
+      state->has_load_constant = true;
 
-   return progress;
-}
+      if (!nir_src_is_const(instr->src[0])) {
+         state->has_indirect_load_const = true;
+         return progress;
+      }
 
-static bool
-constant_fold_intrinsic_instr(nir_intrinsic_instr *instr)
-{
-   bool progress = false;
+      unsigned offset = nir_src_as_uint(instr->src[0]);
+      unsigned base = nir_intrinsic_base(instr);
+      unsigned range = nir_intrinsic_range(instr);
+      assert(base + range <= state->shader->constant_data_size);
+
+      nir_instr *new_instr = NULL;
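+      /* A load past the declared range yields an undefined value, so it
+       * can be folded to an undef.
+       */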
+      if (offset >= range) {
+         nir_ssa_undef_instr *undef =
+            nir_ssa_undef_instr_create(state->shader,
+                                       instr->num_components,
+                                       instr->dest.ssa.bit_size);
+
+         nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&undef->def));
+         new_instr = &undef->instr;
+      } else {
+         nir_load_const_instr *load_const =
+            nir_load_const_instr_create(state->shader,
+                                        instr->num_components,
+                                        instr->dest.ssa.bit_size);
+
+         uint8_t *data = (uint8_t*)state->shader->constant_data + base;
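+         /* Copy one component at a time, clamping the final copy to the
+          * end of the declared range.
+          */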
+         for (unsigned i = 0; i < instr->num_components; i++) {
+            unsigned bytes = instr->dest.ssa.bit_size / 8;
+            bytes = MIN2(bytes, range - offset);
+
+            memcpy(&load_const->value[i].u64, data + offset, bytes);
+            offset += bytes;
+         }
+
+         nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load_const->def));
+         new_instr = &load_const->instr;
+      }
 
-   unsigned num_vars = nir_intrinsic_infos[instr->intrinsic].num_variables;
-   for (unsigned i = 0; i < num_vars; i++) {
-      progress |= constant_fold_deref(&instr->instr, instr->variables[i]);
+      nir_instr_insert_before(&instr->instr, new_instr);
+      nir_instr_remove(&instr->instr);
+      progress = true;
    }
 
    return progress;
 }
 
 static bool
-constant_fold_tex_instr(nir_tex_instr *instr)
+constant_fold_block(struct constant_fold_state *state, nir_block *block)
 {
-   if (instr->sampler)
-      return constant_fold_deref(&instr->instr, instr->sampler);
-   else
-      return false;
-}
-
-static bool
-constant_fold_block(nir_block *block, void *void_state)
-{
-   struct constant_fold_state *state = void_state;
+   bool progress = false;
 
-   nir_foreach_instr_safe(block, instr) {
+   nir_foreach_instr_safe(instr, block) {
       switch (instr->type) {
       case nir_instr_type_alu:
-         state->progress |= constant_fold_alu_instr(nir_instr_as_alu(instr),
-                                                    state->mem_ctx);
+         progress |= constant_fold_alu_instr(state, nir_instr_as_alu(instr));
          break;
       case nir_instr_type_intrinsic:
-         state->progress |=
-            constant_fold_intrinsic_instr(nir_instr_as_intrinsic(instr));
-         break;
-      case nir_instr_type_tex:
-         state->progress |= constant_fold_tex_instr(nir_instr_as_tex(instr));
+         progress |=
+            constant_fold_intrinsic_instr(state, nir_instr_as_intrinsic(instr));
          break;
       default:
          /* Don't know how to constant fold */
@@ -166,35 +205,51 @@ constant_fold_block(nir_block *block, void *void_state)
       }
    }
 
-   return true;
+   return progress;
 }
 
 static bool
-nir_opt_constant_folding_impl(nir_function_impl *impl)
+nir_opt_constant_folding_impl(struct constant_fold_state *state, nir_function_impl *impl)
 {
-   struct constant_fold_state state;
-
-   state.mem_ctx = ralloc_parent(impl);
-   state.impl = impl;
-   state.progress = false;
+   bool progress = false;
 
-   nir_foreach_block(impl, constant_fold_block, &state);
+   nir_foreach_block(block, impl) {
+      progress |= constant_fold_block(state, block);
+   }
 
-   if (state.progress)
+   if (progress) {
       nir_metadata_preserve(impl, nir_metadata_block_index |
                                   nir_metadata_dominance);
+   } else {
+      nir_metadata_preserve(impl, nir_metadata_all);
+   }
 
-   return state.progress;
+   return progress;
 }
 
 bool
 nir_opt_constant_folding(nir_shader *shader)
 {
    bool progress = false;
+   struct constant_fold_state state;
+   state.shader = shader;
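+   /* Constant evaluation must honour the shader's float-controls
+    * execution mode.
+    */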
+   state.execution_mode = shader->info.float_controls_execution_mode;
+   state.has_load_constant = false;
+   state.has_indirect_load_const = false;
 
-   nir_foreach_function(shader, function) {
+   nir_foreach_function(function, shader) {
       if (function->impl)
-         progress |= nir_opt_constant_folding_impl(function->impl);
+         progress |= nir_opt_constant_folding_impl(&state, function->impl);
+   }
+
+   /* Don't free the constant data if there are no constant loads: the
+    * loads may have been lowered to load_ubo while the data is still in
+    * use.
+    */
+   if (state.has_load_constant && !state.has_indirect_load_const &&
+       shader->constant_data_size) {
+      ralloc_free(shader->constant_data);
+      shader->constant_data = NULL;
+      shader->constant_data_size = 0;
    }
 
    return progress;
 }