glsl/builtin_types.cpp \
glsl/builtin_variables.cpp \
glsl/generate_ir.cpp \
+ glsl/gl_nir_lower_atomics.c \
+ glsl/gl_nir_lower_samplers.c \
+ glsl/gl_nir_lower_samplers_as_deref.c \
+ glsl/gl_nir.h \
glsl/glsl_parser_extras.cpp \
glsl/glsl_parser_extras.h \
glsl/glsl_symbol_table.cpp \
nir/nir_lower_64bit_packing.c \
nir/nir_lower_alpha_test.c \
nir/nir_lower_alu_to_scalar.c \
- nir/nir_lower_atomics.c \
nir/nir_lower_atomics_to_ssbo.c \
nir/nir_lower_bitmap.c \
nir/nir_lower_clamp_color_outputs.c \
nir/nir_lower_phis_to_scalar.c \
nir/nir_lower_regs_to_ssa.c \
nir/nir_lower_returns.c \
- nir/nir_lower_samplers.c \
- nir/nir_lower_samplers_as_deref.c \
nir/nir_lower_subgroups.c \
nir/nir_lower_system_values.c \
nir/nir_lower_tex.c \
--- /dev/null
+/*
+ * Copyright © 2018 Timothy Arceri
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef GL_NIR_H
+#define GL_NIR_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Forward declarations keep this header self-contained: only an incomplete
+ * struct type is needed since all parameters are pointers.  Using the
+ * elaborated form "struct nir_shader *" (rather than the bare typedef
+ * "nir_shader", which is only declared in nir.h) lets this header compile
+ * even when nir.h has not been included first.
+ */
+struct nir_shader;
+struct gl_shader_program;
+
+bool gl_nir_lower_atomics(struct nir_shader *shader,
+                          const struct gl_shader_program *shader_program,
+                          bool use_binding_as_idx);
+
+bool gl_nir_lower_samplers(struct nir_shader *shader,
+                           const struct gl_shader_program *shader_program);
+bool gl_nir_lower_samplers_as_deref(struct nir_shader *shader,
+                                    const struct gl_shader_program *shader_program);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GL_NIR_H */
--- /dev/null
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Connor Abbott (cwabbott0@gmail.com)
+ *
+ */
+
+#include "compiler/nir/nir.h"
+#include "gl_nir.h"
+#include "ir_uniform.h"
+
+#include "main/config.h"
+#include "main/mtypes.h"
+#include <assert.h>
+
+/*
+ * replace atomic counter intrinsics that use a variable with intrinsics
+ * that directly store the buffer index and byte offset
+ */
+
+static bool
+lower_instr(nir_intrinsic_instr *instr,
+ const struct gl_shader_program *shader_program,
+ nir_shader *shader, bool use_binding_as_idx)
+{
+ nir_intrinsic_op op;
+ switch (instr->intrinsic) {
+ case nir_intrinsic_atomic_counter_read_var:
+ op = nir_intrinsic_atomic_counter_read;
+ break;
+
+ case nir_intrinsic_atomic_counter_inc_var:
+ op = nir_intrinsic_atomic_counter_inc;
+ break;
+
+ case nir_intrinsic_atomic_counter_dec_var:
+ op = nir_intrinsic_atomic_counter_dec;
+ break;
+
+ case nir_intrinsic_atomic_counter_add_var:
+ op = nir_intrinsic_atomic_counter_add;
+ break;
+
+ case nir_intrinsic_atomic_counter_min_var:
+ op = nir_intrinsic_atomic_counter_min;
+ break;
+
+ case nir_intrinsic_atomic_counter_max_var:
+ op = nir_intrinsic_atomic_counter_max;
+ break;
+
+ case nir_intrinsic_atomic_counter_and_var:
+ op = nir_intrinsic_atomic_counter_and;
+ break;
+
+ case nir_intrinsic_atomic_counter_or_var:
+ op = nir_intrinsic_atomic_counter_or;
+ break;
+
+ case nir_intrinsic_atomic_counter_xor_var:
+ op = nir_intrinsic_atomic_counter_xor;
+ break;
+
+ case nir_intrinsic_atomic_counter_exchange_var:
+ op = nir_intrinsic_atomic_counter_exchange;
+ break;
+
+ case nir_intrinsic_atomic_counter_comp_swap_var:
+ op = nir_intrinsic_atomic_counter_comp_swap;
+ break;
+
+ default:
+ return false;
+ }
+
+ if (instr->variables[0]->var->data.mode != nir_var_uniform &&
+ instr->variables[0]->var->data.mode != nir_var_shader_storage &&
+ instr->variables[0]->var->data.mode != nir_var_shared)
+ return false; /* atomics passed as function arguments can't be lowered */
+
+ void *mem_ctx = ralloc_parent(instr);
+ unsigned uniform_loc = instr->variables[0]->var->data.location;
+
+ unsigned idx = use_binding_as_idx ?
+ instr->variables[0]->var->data.binding :
+ shader_program->data->UniformStorage[uniform_loc].opaque[shader->info.stage].index;
+
+ nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(mem_ctx, op);
+ nir_intrinsic_set_base(new_instr, idx);
+
+ nir_load_const_instr *offset_const =
+ nir_load_const_instr_create(mem_ctx, 1, 32);
+ offset_const->value.u32[0] = instr->variables[0]->var->data.offset;
+
+ nir_instr_insert_before(&instr->instr, &offset_const->instr);
+
+ nir_ssa_def *offset_def = &offset_const->def;
+
+ nir_deref *tail = &instr->variables[0]->deref;
+ while (tail->child != NULL) {
+ nir_deref_array *deref_array = nir_deref_as_array(tail->child);
+ tail = tail->child;
+
+ unsigned child_array_elements = tail->child != NULL ?
+ glsl_get_aoa_size(tail->type) : 1;
+
+ offset_const->value.u32[0] += deref_array->base_offset *
+ child_array_elements * ATOMIC_COUNTER_SIZE;
+
+ if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
+ nir_load_const_instr *atomic_counter_size =
+ nir_load_const_instr_create(mem_ctx, 1, 32);
+ atomic_counter_size->value.u32[0] = child_array_elements * ATOMIC_COUNTER_SIZE;
+ nir_instr_insert_before(&instr->instr, &atomic_counter_size->instr);
+
+ nir_alu_instr *mul = nir_alu_instr_create(mem_ctx, nir_op_imul);
+ nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32, NULL);
+ mul->dest.write_mask = 0x1;
+ nir_src_copy(&mul->src[0].src, &deref_array->indirect, mul);
+ mul->src[1].src.is_ssa = true;
+ mul->src[1].src.ssa = &atomic_counter_size->def;
+ nir_instr_insert_before(&instr->instr, &mul->instr);
+
+ nir_alu_instr *add = nir_alu_instr_create(mem_ctx, nir_op_iadd);
+ nir_ssa_dest_init(&add->instr, &add->dest.dest, 1, 32, NULL);
+ add->dest.write_mask = 0x1;
+ add->src[0].src.is_ssa = true;
+ add->src[0].src.ssa = &mul->dest.dest.ssa;
+ add->src[1].src.is_ssa = true;
+ add->src[1].src.ssa = offset_def;
+ nir_instr_insert_before(&instr->instr, &add->instr);
+
+ offset_def = &add->dest.dest.ssa;
+ }
+ }
+
+ new_instr->src[0].is_ssa = true;
+ new_instr->src[0].ssa = offset_def;
+
+ /* Copy the other sources, if any, from the original instruction to the new
+ * instruction.
+ */
+ for (unsigned i = 0; i < nir_intrinsic_infos[instr->intrinsic].num_srcs; i++)
+ nir_src_copy(&new_instr->src[i + 1], &instr->src[i], new_instr);
+
+ if (instr->dest.is_ssa) {
+ nir_ssa_dest_init(&new_instr->instr, &new_instr->dest,
+ instr->dest.ssa.num_components, 32, NULL);
+ nir_ssa_def_rewrite_uses(&instr->dest.ssa,
+ nir_src_for_ssa(&new_instr->dest.ssa));
+ } else {
+ nir_dest_copy(&new_instr->dest, &instr->dest, mem_ctx);
+ }
+
+ nir_instr_insert_before(&instr->instr, &new_instr->instr);
+ nir_instr_remove(&instr->instr);
+
+ return true;
+}
+
+bool
+gl_nir_lower_atomics(nir_shader *shader,
+ const struct gl_shader_program *shader_program,
+ bool use_binding_as_idx)
+{
+ bool progress = false;
+
+ nir_foreach_function(function, shader) {
+ if (!function->impl)
+ continue;
+
+ bool impl_progress = false;
+
+ nir_foreach_block(block, function->impl) {
+ nir_foreach_instr_safe(instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ impl_progress |= lower_instr(nir_instr_as_intrinsic(instr),
+ shader_program, shader,
+ use_binding_as_idx);
+ }
+ }
+
+ if (impl_progress) {
+ nir_metadata_preserve(function->impl, nir_metadata_block_index |
+ nir_metadata_dominance);
+ progress = true;
+ }
+ }
+
+ return progress;
+}
--- /dev/null
+/*
+ * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
+ * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "compiler/nir/nir.h"
+#include "compiler/nir/nir_builder.h"
+#include "gl_nir.h"
+#include "ir_uniform.h"
+
+#include "main/compiler.h"
+#include "main/mtypes.h"
+
+/* Calculate the sampler index based on array indices and also
+ * calculate the base uniform location for struct members.
+ */
+static void
+calc_sampler_offsets(nir_deref *tail, nir_tex_instr *instr,
+ unsigned *array_elements, nir_ssa_def **indirect,
+ nir_builder *b, unsigned *location)
+{
+ if (tail->child == NULL)
+ return;
+
+ switch (tail->child->deref_type) {
+ case nir_deref_type_array: {
+ nir_deref_array *deref_array = nir_deref_as_array(tail->child);
+
+ assert(deref_array->deref_array_type != nir_deref_array_type_wildcard);
+
+ calc_sampler_offsets(tail->child, instr, array_elements,
+ indirect, b, location);
+ instr->texture_index += deref_array->base_offset * *array_elements;
+
+ if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
+ nir_ssa_def *mul =
+ nir_imul(b, nir_imm_int(b, *array_elements),
+ nir_ssa_for_src(b, deref_array->indirect, 1));
+
+ nir_instr_rewrite_src(&instr->instr, &deref_array->indirect,
+ NIR_SRC_INIT);
+
+ if (*indirect) {
+ *indirect = nir_iadd(b, *indirect, mul);
+ } else {
+ *indirect = mul;
+ }
+ }
+
+ *array_elements *= glsl_get_length(tail->type);
+ break;
+ }
+
+ case nir_deref_type_struct: {
+ nir_deref_struct *deref_struct = nir_deref_as_struct(tail->child);
+ *location += glsl_get_record_location_offset(tail->type, deref_struct->index);
+ calc_sampler_offsets(tail->child, instr, array_elements,
+ indirect, b, location);
+ break;
+ }
+
+ default:
+ unreachable("Invalid deref type");
+ break;
+ }
+}
+
+static bool
+lower_sampler(nir_tex_instr *instr, const struct gl_shader_program *shader_program,
+ gl_shader_stage stage, nir_builder *b)
+{
+ if (instr->texture == NULL)
+ return false;
+
+ /* In GLSL, we only fill out the texture field. The sampler is inferred */
+ assert(instr->sampler == NULL);
+
+ instr->texture_index = 0;
+ unsigned location = instr->texture->var->data.location;
+ unsigned array_elements = 1;
+ nir_ssa_def *indirect = NULL;
+
+ b->cursor = nir_before_instr(&instr->instr);
+ calc_sampler_offsets(&instr->texture->deref, instr, &array_elements,
+ &indirect, b, &location);
+
+ if (indirect) {
+ assert(array_elements >= 1);
+ indirect = nir_umin(b, indirect, nir_imm_int(b, array_elements - 1));
+
+ nir_tex_instr_add_src(instr, nir_tex_src_texture_offset,
+ nir_src_for_ssa(indirect));
+ nir_tex_instr_add_src(instr, nir_tex_src_sampler_offset,
+ nir_src_for_ssa(indirect));
+
+ instr->texture_array_size = array_elements;
+ }
+
+ assert(location < shader_program->data->NumUniformStorage &&
+ shader_program->data->UniformStorage[location].opaque[stage].active);
+
+ instr->texture_index +=
+ shader_program->data->UniformStorage[location].opaque[stage].index;
+
+ instr->sampler_index = instr->texture_index;
+
+ instr->texture = NULL;
+
+ return true;
+}
+
+static bool
+lower_impl(nir_function_impl *impl, const struct gl_shader_program *shader_program,
+ gl_shader_stage stage)
+{
+ nir_builder b;
+ nir_builder_init(&b, impl);
+ bool progress = false;
+
+ nir_foreach_block(block, impl) {
+ nir_foreach_instr(instr, block) {
+ if (instr->type == nir_instr_type_tex)
+ progress |= lower_sampler(nir_instr_as_tex(instr),
+ shader_program, stage, &b);
+ }
+ }
+
+ return progress;
+}
+
+bool
+gl_nir_lower_samplers(nir_shader *shader,
+ const struct gl_shader_program *shader_program)
+{
+ bool progress = false;
+
+ nir_foreach_function(function, shader) {
+ if (function->impl)
+ progress |= lower_impl(function->impl, shader_program,
+ shader->info.stage);
+ }
+
+ return progress;
+}
--- /dev/null
+/*
+ * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
+ * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
+ * Copyright © 2014 Intel Corporation
+ * Copyright © 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file
+ *
+ * Lower sampler and image references of (non-bindless) uniforms by removing
+ * struct dereferences, and synthesizing new uniform variables without structs
+ * if required.
+ *
+ * This will allow backends to have a simple, uniform treatment of bindless and
+ * non-bindless samplers and images.
+ *
+ * Example:
+ *
+ * struct S {
+ * sampler2D tex[2];
+ * sampler2D other;
+ * };
+ * uniform S s[2];
+ *
+ * tmp = texture(s[n].tex[m], coord);
+ *
+ * Becomes:
+ *
+ * decl_var uniform INTERP_MODE_NONE sampler2D[2][2] lower@s.tex (...)
+ *
+ * vec1 32 ssa_idx = $(2 * n + m)
+ * vec4 32 ssa_out = tex ssa_coord (coord), lower@s.tex[n][m] (texture), lower@s.tex[n][m] (sampler)
+ *
+ * and lower@s.tex has var->data.binding set to the base index as defined by
+ * the opaque uniform mapping.
+ */
+
+#include "compiler/nir/nir.h"
+#include "compiler/nir/nir_builder.h"
+#include "gl_nir.h"
+#include "ir_uniform.h"
+
+#include "main/compiler.h"
+#include "main/mtypes.h"
+
+struct lower_samplers_as_deref_state {
+ nir_shader *shader;
+ const struct gl_shader_program *shader_program;
+ struct hash_table *remap_table;
+};
+
+static void
+remove_struct_derefs(nir_deref *tail,
+ struct lower_samplers_as_deref_state *state,
+ nir_builder *b, char **path, unsigned *location)
+{
+ if (!tail->child)
+ return;
+
+ switch (tail->child->deref_type) {
+ case nir_deref_type_array: {
+ unsigned length = glsl_get_length(tail->type);
+
+ remove_struct_derefs(tail->child, state, b, path, location);
+
+ tail->type = glsl_get_array_instance(tail->child->type, length);
+ break;
+ }
+
+ case nir_deref_type_struct: {
+ nir_deref_struct *deref_struct = nir_deref_as_struct(tail->child);
+
+ *location += glsl_get_record_location_offset(tail->type, deref_struct->index);
+ ralloc_asprintf_append(path, ".%s",
+ glsl_get_struct_elem_name(tail->type, deref_struct->index));
+
+ remove_struct_derefs(tail->child, state, b, path, location);
+
+ /* Drop the struct deref and re-parent. */
+ ralloc_steal(tail, tail->child->child);
+ tail->type = tail->child->type;
+ tail->child = tail->child->child;
+ break;
+ }
+
+ default:
+ unreachable("Invalid deref type");
+ break;
+ }
+}
+
+static void
+lower_deref(nir_deref_var *deref,
+ struct lower_samplers_as_deref_state *state,
+ nir_builder *b)
+{
+ nir_variable *var = deref->var;
+ gl_shader_stage stage = state->shader->info.stage;
+ unsigned location = var->data.location;
+ unsigned binding;
+ const struct glsl_type *orig_type = deref->deref.type;
+ char *path;
+
+ assert(var->data.mode == nir_var_uniform);
+
+ path = ralloc_asprintf(state->remap_table, "lower@%s", var->name);
+ remove_struct_derefs(&deref->deref, state, b, &path, &location);
+
+ assert(location < state->shader_program->data->NumUniformStorage &&
+ state->shader_program->data->UniformStorage[location].opaque[stage].active);
+
+ binding = state->shader_program->data->UniformStorage[location].opaque[stage].index;
+
+ if (orig_type == deref->deref.type) {
+ /* Fast path: We did not encounter any struct derefs. */
+ var->data.binding = binding;
+ return;
+ }
+
+ uint32_t hash = _mesa_key_hash_string(path);
+ struct hash_entry *h =
+ _mesa_hash_table_search_pre_hashed(state->remap_table, hash, path);
+
+ if (h) {
+ var = (nir_variable *)h->data;
+ } else {
+ var = nir_variable_create(state->shader, nir_var_uniform, deref->deref.type, path);
+ var->data.binding = binding;
+ _mesa_hash_table_insert_pre_hashed(state->remap_table, hash, path, var);
+ }
+
+ deref->var = var;
+}
+
+static bool
+lower_sampler(nir_tex_instr *instr, struct lower_samplers_as_deref_state *state,
+ nir_builder *b)
+{
+ if (!instr->texture || instr->texture->var->data.bindless ||
+ instr->texture->var->data.mode != nir_var_uniform)
+ return false;
+
+ /* In GLSL, we only fill out the texture field. The sampler is inferred */
+ assert(instr->sampler == NULL);
+
+ b->cursor = nir_before_instr(&instr->instr);
+ lower_deref(instr->texture, state, b);
+
+ if (instr->op != nir_texop_txf_ms &&
+ instr->op != nir_texop_txf_ms_mcs &&
+ instr->op != nir_texop_samples_identical) {
+ nir_instr_rewrite_deref(&instr->instr, &instr->sampler,
+ nir_deref_var_clone(instr->texture, instr));
+ } else {
+ assert(!instr->sampler);
+ }
+
+ return true;
+}
+
+static bool
+lower_intrinsic(nir_intrinsic_instr *instr,
+ struct lower_samplers_as_deref_state *state,
+ nir_builder *b)
+{
+ if (instr->intrinsic == nir_intrinsic_image_var_load ||
+ instr->intrinsic == nir_intrinsic_image_var_store ||
+ instr->intrinsic == nir_intrinsic_image_var_atomic_add ||
+ instr->intrinsic == nir_intrinsic_image_var_atomic_min ||
+ instr->intrinsic == nir_intrinsic_image_var_atomic_max ||
+ instr->intrinsic == nir_intrinsic_image_var_atomic_and ||
+ instr->intrinsic == nir_intrinsic_image_var_atomic_or ||
+ instr->intrinsic == nir_intrinsic_image_var_atomic_xor ||
+ instr->intrinsic == nir_intrinsic_image_var_atomic_exchange ||
+ instr->intrinsic == nir_intrinsic_image_var_atomic_comp_swap ||
+ instr->intrinsic == nir_intrinsic_image_var_size) {
+ b->cursor = nir_before_instr(&instr->instr);
+
+ if (instr->variables[0]->var->data.bindless ||
+ instr->variables[0]->var->data.mode != nir_var_uniform)
+ return false;
+
+ lower_deref(instr->variables[0], state, b);
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+lower_impl(nir_function_impl *impl, struct lower_samplers_as_deref_state *state)
+{
+ nir_builder b;
+ nir_builder_init(&b, impl);
+ bool progress = false;
+
+ nir_foreach_block(block, impl) {
+ nir_foreach_instr(instr, block) {
+ if (instr->type == nir_instr_type_tex)
+ progress |= lower_sampler(nir_instr_as_tex(instr), state, &b);
+ else if (instr->type == nir_instr_type_intrinsic)
+ progress |= lower_intrinsic(nir_instr_as_intrinsic(instr), state, &b);
+ }
+ }
+
+ return progress;
+}
+
+bool
+gl_nir_lower_samplers_as_deref(nir_shader *shader,
+ const struct gl_shader_program *shader_program)
+{
+ bool progress = false;
+ struct lower_samplers_as_deref_state state;
+
+ state.shader = shader;
+ state.shader_program = shader_program;
+ state.remap_table = _mesa_hash_table_create(NULL, _mesa_key_hash_string,
+ _mesa_key_string_equal);
+
+ nir_foreach_function(function, shader) {
+ if (function->impl)
+ progress |= lower_impl(function->impl, &state);
+ }
+
+ /* keys are freed automatically by ralloc */
+ _mesa_hash_table_destroy(state.remap_table, NULL);
+
+ return progress;
+}
'builtin_types.cpp',
'builtin_variables.cpp',
'generate_ir.cpp',
+ 'gl_nir_lower_atomics.c',
+ 'gl_nir_lower_samplers.c',
+ 'gl_nir_lower_samplers_as_deref.c',
+ 'gl_nir.h',
'glsl_parser_extras.cpp',
'glsl_parser_extras.h',
'glsl_symbol_table.cpp',
'nir_lower_64bit_packing.c',
'nir_lower_alu_to_scalar.c',
'nir_lower_alpha_test.c',
- 'nir_lower_atomics.c',
'nir_lower_atomics_to_ssbo.c',
'nir_lower_bitmap.c',
'nir_lower_clamp_color_outputs.c',
'nir_lower_phis_to_scalar.c',
'nir_lower_regs_to_ssa.c',
'nir_lower_returns.c',
- 'nir_lower_samplers.c',
- 'nir_lower_samplers_as_deref.c',
'nir_lower_subgroups.c',
'nir_lower_system_values.c',
'nir_lower_tex.c',
extern "C" {
#endif
-struct gl_program;
-struct gl_shader_program;
-
#define NIR_FALSE 0u
#define NIR_TRUE (~0u)
void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
void nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);
-bool nir_lower_samplers(nir_shader *shader,
- const struct gl_shader_program *shader_program);
-bool nir_lower_samplers_as_deref(nir_shader *shader,
- const struct gl_shader_program *shader_program);
-
typedef struct nir_lower_subgroups_options {
uint8_t subgroup_size;
uint8_t ballot_bit_size;
void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
-bool nir_lower_atomics(nir_shader *shader,
- const struct gl_shader_program *shader_program,
- bool use_binding_as_idx);
bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);
bool nir_lower_to_source_mods(nir_shader *shader);
+++ /dev/null
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Connor Abbott (cwabbott0@gmail.com)
- *
- */
-
-#include "compiler/glsl/ir_uniform.h"
-#include "nir.h"
-#include "main/config.h"
-#include "main/mtypes.h"
-#include <assert.h>
-
-/*
- * replace atomic counter intrinsics that use a variable with intrinsics
- * that directly store the buffer index and byte offset
- */
-
-static bool
-lower_instr(nir_intrinsic_instr *instr,
- const struct gl_shader_program *shader_program,
- nir_shader *shader, bool use_binding_as_idx)
-{
- nir_intrinsic_op op;
- switch (instr->intrinsic) {
- case nir_intrinsic_atomic_counter_read_var:
- op = nir_intrinsic_atomic_counter_read;
- break;
-
- case nir_intrinsic_atomic_counter_inc_var:
- op = nir_intrinsic_atomic_counter_inc;
- break;
-
- case nir_intrinsic_atomic_counter_dec_var:
- op = nir_intrinsic_atomic_counter_dec;
- break;
-
- case nir_intrinsic_atomic_counter_add_var:
- op = nir_intrinsic_atomic_counter_add;
- break;
-
- case nir_intrinsic_atomic_counter_min_var:
- op = nir_intrinsic_atomic_counter_min;
- break;
-
- case nir_intrinsic_atomic_counter_max_var:
- op = nir_intrinsic_atomic_counter_max;
- break;
-
- case nir_intrinsic_atomic_counter_and_var:
- op = nir_intrinsic_atomic_counter_and;
- break;
-
- case nir_intrinsic_atomic_counter_or_var:
- op = nir_intrinsic_atomic_counter_or;
- break;
-
- case nir_intrinsic_atomic_counter_xor_var:
- op = nir_intrinsic_atomic_counter_xor;
- break;
-
- case nir_intrinsic_atomic_counter_exchange_var:
- op = nir_intrinsic_atomic_counter_exchange;
- break;
-
- case nir_intrinsic_atomic_counter_comp_swap_var:
- op = nir_intrinsic_atomic_counter_comp_swap;
- break;
-
- default:
- return false;
- }
-
- if (instr->variables[0]->var->data.mode != nir_var_uniform &&
- instr->variables[0]->var->data.mode != nir_var_shader_storage &&
- instr->variables[0]->var->data.mode != nir_var_shared)
- return false; /* atomics passed as function arguments can't be lowered */
-
- void *mem_ctx = ralloc_parent(instr);
- unsigned uniform_loc = instr->variables[0]->var->data.location;
-
- unsigned idx = use_binding_as_idx ?
- instr->variables[0]->var->data.binding :
- shader_program->data->UniformStorage[uniform_loc].opaque[shader->info.stage].index;
-
- nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(mem_ctx, op);
- nir_intrinsic_set_base(new_instr, idx);
-
- nir_load_const_instr *offset_const =
- nir_load_const_instr_create(mem_ctx, 1, 32);
- offset_const->value.u32[0] = instr->variables[0]->var->data.offset;
-
- nir_instr_insert_before(&instr->instr, &offset_const->instr);
-
- nir_ssa_def *offset_def = &offset_const->def;
-
- nir_deref *tail = &instr->variables[0]->deref;
- while (tail->child != NULL) {
- nir_deref_array *deref_array = nir_deref_as_array(tail->child);
- tail = tail->child;
-
- unsigned child_array_elements = tail->child != NULL ?
- glsl_get_aoa_size(tail->type) : 1;
-
- offset_const->value.u32[0] += deref_array->base_offset *
- child_array_elements * ATOMIC_COUNTER_SIZE;
-
- if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
- nir_load_const_instr *atomic_counter_size =
- nir_load_const_instr_create(mem_ctx, 1, 32);
- atomic_counter_size->value.u32[0] = child_array_elements * ATOMIC_COUNTER_SIZE;
- nir_instr_insert_before(&instr->instr, &atomic_counter_size->instr);
-
- nir_alu_instr *mul = nir_alu_instr_create(mem_ctx, nir_op_imul);
- nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32, NULL);
- mul->dest.write_mask = 0x1;
- nir_src_copy(&mul->src[0].src, &deref_array->indirect, mul);
- mul->src[1].src.is_ssa = true;
- mul->src[1].src.ssa = &atomic_counter_size->def;
- nir_instr_insert_before(&instr->instr, &mul->instr);
-
- nir_alu_instr *add = nir_alu_instr_create(mem_ctx, nir_op_iadd);
- nir_ssa_dest_init(&add->instr, &add->dest.dest, 1, 32, NULL);
- add->dest.write_mask = 0x1;
- add->src[0].src.is_ssa = true;
- add->src[0].src.ssa = &mul->dest.dest.ssa;
- add->src[1].src.is_ssa = true;
- add->src[1].src.ssa = offset_def;
- nir_instr_insert_before(&instr->instr, &add->instr);
-
- offset_def = &add->dest.dest.ssa;
- }
- }
-
- new_instr->src[0].is_ssa = true;
- new_instr->src[0].ssa = offset_def;
-
- /* Copy the other sources, if any, from the original instruction to the new
- * instruction.
- */
- for (unsigned i = 0; i < nir_intrinsic_infos[instr->intrinsic].num_srcs; i++)
- nir_src_copy(&new_instr->src[i + 1], &instr->src[i], new_instr);
-
- if (instr->dest.is_ssa) {
- nir_ssa_dest_init(&new_instr->instr, &new_instr->dest,
- instr->dest.ssa.num_components, 32, NULL);
- nir_ssa_def_rewrite_uses(&instr->dest.ssa,
- nir_src_for_ssa(&new_instr->dest.ssa));
- } else {
- nir_dest_copy(&new_instr->dest, &instr->dest, mem_ctx);
- }
-
- nir_instr_insert_before(&instr->instr, &new_instr->instr);
- nir_instr_remove(&instr->instr);
-
- return true;
-}
-
-bool
-nir_lower_atomics(nir_shader *shader,
- const struct gl_shader_program *shader_program,
- bool use_binding_as_idx)
-{
- bool progress = false;
-
- nir_foreach_function(function, shader) {
- if (!function->impl)
- continue;
-
- bool impl_progress = false;
-
- nir_foreach_block(block, function->impl) {
- nir_foreach_instr_safe(instr, block) {
- if (instr->type != nir_instr_type_intrinsic)
- continue;
-
- impl_progress |= lower_instr(nir_instr_as_intrinsic(instr),
- shader_program, shader,
- use_binding_as_idx);
- }
- }
-
- if (impl_progress) {
- nir_metadata_preserve(function->impl, nir_metadata_block_index |
- nir_metadata_dominance);
- progress = true;
- }
- }
-
- return progress;
-}
+++ /dev/null
-/*
- * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
- * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "nir.h"
-#include "nir_builder.h"
-#include "compiler/glsl/ir_uniform.h"
-
-#include "main/compiler.h"
-#include "main/mtypes.h"
-
-/* Calculate the sampler index based on array indicies and also
- * calculate the base uniform location for struct members.
- */
-static void
-calc_sampler_offsets(nir_deref *tail, nir_tex_instr *instr,
- unsigned *array_elements, nir_ssa_def **indirect,
- nir_builder *b, unsigned *location)
-{
- if (tail->child == NULL)
- return;
-
- switch (tail->child->deref_type) {
- case nir_deref_type_array: {
- nir_deref_array *deref_array = nir_deref_as_array(tail->child);
-
- assert(deref_array->deref_array_type != nir_deref_array_type_wildcard);
-
- calc_sampler_offsets(tail->child, instr, array_elements,
- indirect, b, location);
- instr->texture_index += deref_array->base_offset * *array_elements;
-
- if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
- nir_ssa_def *mul =
- nir_imul(b, nir_imm_int(b, *array_elements),
- nir_ssa_for_src(b, deref_array->indirect, 1));
-
- nir_instr_rewrite_src(&instr->instr, &deref_array->indirect,
- NIR_SRC_INIT);
-
- if (*indirect) {
- *indirect = nir_iadd(b, *indirect, mul);
- } else {
- *indirect = mul;
- }
- }
-
- *array_elements *= glsl_get_length(tail->type);
- break;
- }
-
- case nir_deref_type_struct: {
- nir_deref_struct *deref_struct = nir_deref_as_struct(tail->child);
- *location += glsl_get_record_location_offset(tail->type, deref_struct->index);
- calc_sampler_offsets(tail->child, instr, array_elements,
- indirect, b, location);
- break;
- }
-
- default:
- unreachable("Invalid deref type");
- break;
- }
-}
-
-static bool
-lower_sampler(nir_tex_instr *instr, const struct gl_shader_program *shader_program,
- gl_shader_stage stage, nir_builder *b)
-{
- if (instr->texture == NULL)
- return false;
-
- /* In GLSL, we only fill out the texture field. The sampler is inferred */
- assert(instr->sampler == NULL);
-
- instr->texture_index = 0;
- unsigned location = instr->texture->var->data.location;
- unsigned array_elements = 1;
- nir_ssa_def *indirect = NULL;
-
- b->cursor = nir_before_instr(&instr->instr);
- calc_sampler_offsets(&instr->texture->deref, instr, &array_elements,
- &indirect, b, &location);
-
- if (indirect) {
- assert(array_elements >= 1);
- indirect = nir_umin(b, indirect, nir_imm_int(b, array_elements - 1));
-
- nir_tex_instr_add_src(instr, nir_tex_src_texture_offset,
- nir_src_for_ssa(indirect));
- nir_tex_instr_add_src(instr, nir_tex_src_sampler_offset,
- nir_src_for_ssa(indirect));
-
- instr->texture_array_size = array_elements;
- }
-
- assert(location < shader_program->data->NumUniformStorage &&
- shader_program->data->UniformStorage[location].opaque[stage].active);
-
- instr->texture_index +=
- shader_program->data->UniformStorage[location].opaque[stage].index;
-
- instr->sampler_index = instr->texture_index;
-
- instr->texture = NULL;
-
- return true;
-}
-
-static bool
-lower_impl(nir_function_impl *impl, const struct gl_shader_program *shader_program,
- gl_shader_stage stage)
-{
- nir_builder b;
- nir_builder_init(&b, impl);
- bool progress = false;
-
- nir_foreach_block(block, impl) {
- nir_foreach_instr(instr, block) {
- if (instr->type == nir_instr_type_tex)
- progress |= lower_sampler(nir_instr_as_tex(instr),
- shader_program, stage, &b);
- }
- }
-
- return progress;
-}
-
-bool
-nir_lower_samplers(nir_shader *shader,
- const struct gl_shader_program *shader_program)
-{
- bool progress = false;
-
- nir_foreach_function(function, shader) {
- if (function->impl)
- progress |= lower_impl(function->impl, shader_program,
- shader->info.stage);
- }
-
- return progress;
-}
+++ /dev/null
-/*
- * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
- * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
- * Copyright © 2014 Intel Corporation
- * Copyright © 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-/**
- * \file
- *
- * Lower sampler and image references of (non-bindless) uniforms by removing
- * struct dereferences, and synthesizing new uniform variables without structs
- * if required.
- *
- * This will allow backends to have a simple, uniform treatment of bindless and
- * non-bindless samplers and images.
- *
- * Example:
- *
- * struct S {
- * sampler2D tex[2];
- * sampler2D other;
- * };
- * uniform S s[2];
- *
- * tmp = texture(s[n].tex[m], coord);
- *
- * Becomes:
- *
- * decl_var uniform INTERP_MODE_NONE sampler2D[2][2] lower@s.tex (...)
- *
- * vec1 32 ssa_idx = $(2 * n + m)
- * vec4 32 ssa_out = tex ssa_coord (coord), lower@s.tex[n][m] (texture), lower@s.tex[n][m] (sampler)
- *
- * and lower@s.tex has var->data.binding set to the base index as defined by
- * the opaque uniform mapping.
- */
-
-#include "nir.h"
-#include "nir_builder.h"
-#include "compiler/glsl/ir_uniform.h"
-
-#include "main/compiler.h"
-#include "main/mtypes.h"
-
-struct lower_samplers_as_deref_state {
- nir_shader *shader;
- const struct gl_shader_program *shader_program;
- struct hash_table *remap_table;
-};
-
-static void
-remove_struct_derefs(nir_deref *tail,
- struct lower_samplers_as_deref_state *state,
- nir_builder *b, char **path, unsigned *location)
-{
- if (!tail->child)
- return;
-
- switch (tail->child->deref_type) {
- case nir_deref_type_array: {
- unsigned length = glsl_get_length(tail->type);
-
- remove_struct_derefs(tail->child, state, b, path, location);
-
- tail->type = glsl_get_array_instance(tail->child->type, length);
- break;
- }
-
- case nir_deref_type_struct: {
- nir_deref_struct *deref_struct = nir_deref_as_struct(tail->child);
-
- *location += glsl_get_record_location_offset(tail->type, deref_struct->index);
- ralloc_asprintf_append(path, ".%s",
- glsl_get_struct_elem_name(tail->type, deref_struct->index));
-
- remove_struct_derefs(tail->child, state, b, path, location);
-
- /* Drop the struct deref and re-parent. */
- ralloc_steal(tail, tail->child->child);
- tail->type = tail->child->type;
- tail->child = tail->child->child;
- break;
- }
-
- default:
- unreachable("Invalid deref type");
- break;
- }
-}
-
-static void
-lower_deref(nir_deref_var *deref,
- struct lower_samplers_as_deref_state *state,
- nir_builder *b)
-{
- nir_variable *var = deref->var;
- gl_shader_stage stage = state->shader->info.stage;
- unsigned location = var->data.location;
- unsigned binding;
- const struct glsl_type *orig_type = deref->deref.type;
- char *path;
-
- assert(var->data.mode == nir_var_uniform);
-
- path = ralloc_asprintf(state->remap_table, "lower@%s", var->name);
- remove_struct_derefs(&deref->deref, state, b, &path, &location);
-
- assert(location < state->shader_program->data->NumUniformStorage &&
- state->shader_program->data->UniformStorage[location].opaque[stage].active);
-
- binding = state->shader_program->data->UniformStorage[location].opaque[stage].index;
-
- if (orig_type == deref->deref.type) {
- /* Fast path: We did not encounter any struct derefs. */
- var->data.binding = binding;
- return;
- }
-
- uint32_t hash = _mesa_key_hash_string(path);
- struct hash_entry *h =
- _mesa_hash_table_search_pre_hashed(state->remap_table, hash, path);
-
- if (h) {
- var = (nir_variable *)h->data;
- } else {
- var = nir_variable_create(state->shader, nir_var_uniform, deref->deref.type, path);
- var->data.binding = binding;
- _mesa_hash_table_insert_pre_hashed(state->remap_table, hash, path, var);
- }
-
- deref->var = var;
-}
-
-static bool
-lower_sampler(nir_tex_instr *instr, struct lower_samplers_as_deref_state *state,
- nir_builder *b)
-{
- if (!instr->texture || instr->texture->var->data.bindless ||
- instr->texture->var->data.mode != nir_var_uniform)
- return false;
-
- /* In GLSL, we only fill out the texture field. The sampler is inferred */
- assert(instr->sampler == NULL);
-
- b->cursor = nir_before_instr(&instr->instr);
- lower_deref(instr->texture, state, b);
-
- if (instr->op != nir_texop_txf_ms &&
- instr->op != nir_texop_txf_ms_mcs &&
- instr->op != nir_texop_samples_identical) {
- nir_instr_rewrite_deref(&instr->instr, &instr->sampler,
- nir_deref_var_clone(instr->texture, instr));
- } else {
- assert(!instr->sampler);
- }
-
- return true;
-}
-
-static bool
-lower_intrinsic(nir_intrinsic_instr *instr,
- struct lower_samplers_as_deref_state *state,
- nir_builder *b)
-{
- if (instr->intrinsic == nir_intrinsic_image_var_load ||
- instr->intrinsic == nir_intrinsic_image_var_store ||
- instr->intrinsic == nir_intrinsic_image_var_atomic_add ||
- instr->intrinsic == nir_intrinsic_image_var_atomic_min ||
- instr->intrinsic == nir_intrinsic_image_var_atomic_max ||
- instr->intrinsic == nir_intrinsic_image_var_atomic_and ||
- instr->intrinsic == nir_intrinsic_image_var_atomic_or ||
- instr->intrinsic == nir_intrinsic_image_var_atomic_xor ||
- instr->intrinsic == nir_intrinsic_image_var_atomic_exchange ||
- instr->intrinsic == nir_intrinsic_image_var_atomic_comp_swap ||
- instr->intrinsic == nir_intrinsic_image_var_size) {
- b->cursor = nir_before_instr(&instr->instr);
-
- if (instr->variables[0]->var->data.bindless ||
- instr->variables[0]->var->data.mode != nir_var_uniform)
- return false;
-
- lower_deref(instr->variables[0], state, b);
- return true;
- }
-
- return false;
-}
-
-static bool
-lower_impl(nir_function_impl *impl, struct lower_samplers_as_deref_state *state)
-{
- nir_builder b;
- nir_builder_init(&b, impl);
- bool progress = false;
-
- nir_foreach_block(block, impl) {
- nir_foreach_instr(instr, block) {
- if (instr->type == nir_instr_type_tex)
- progress |= lower_sampler(nir_instr_as_tex(instr), state, &b);
- else if (instr->type == nir_instr_type_intrinsic)
- progress |= lower_intrinsic(nir_instr_as_intrinsic(instr), state, &b);
- }
- }
-
- return progress;
-}
-
-bool
-nir_lower_samplers_as_deref(nir_shader *shader,
- const struct gl_shader_program *shader_program)
-{
- bool progress = false;
- struct lower_samplers_as_deref_state state;
-
- state.shader = shader;
- state.shader_program = shader_program;
- state.remap_table = _mesa_hash_table_create(NULL, _mesa_key_hash_string,
- _mesa_key_string_equal);
-
- nir_foreach_function(function, shader) {
- if (function->impl)
- progress |= lower_impl(function->impl, &state);
- }
-
- /* keys are freed automatically by ralloc */
- _mesa_hash_table_destroy(state.remap_table, NULL);
-
- return progress;
-}
NIR_PASS_V(nir, nir_lower_system_values);
NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);
- NIR_PASS_V(nir, nir_lower_samplers, prog);
+ NIR_PASS_V(nir, gl_nir_lower_samplers, prog);
return nir;
}
#include "brw_context.h"
#include "compiler/brw_nir.h"
#include "brw_program.h"
+#include "compiler/glsl/gl_nir.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/ir_optimization.h"
#include "compiler/glsl/program.h"
struct gl_program *prog = shader->Program;
brw_shader_gather_info(prog->nir, prog);
- NIR_PASS_V(prog->nir, nir_lower_samplers, shProg);
- NIR_PASS_V(prog->nir, nir_lower_atomics, shProg, false);
+ NIR_PASS_V(prog->nir, gl_nir_lower_samplers, shProg);
+ NIR_PASS_V(prog->nir, gl_nir_lower_atomics, shProg, false);
NIR_PASS_V(prog->nir, nir_lower_atomics_to_ssbo,
prog->nir->info.num_abos);
extern "C" {
#endif
+struct gl_shader_program;
struct gl_context;
struct gl_shader;
#include "compiler/nir/nir.h"
#include "compiler/glsl_types.h"
#include "compiler/glsl/glsl_to_nir.h"
+#include "compiler/glsl/gl_nir.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/string_to_uint_map.h"
st_set_prog_affected_state_flags(prog);
NIR_PASS_V(nir, st_nir_lower_builtin);
- NIR_PASS_V(nir, nir_lower_atomics, shader_program, true);
+ NIR_PASS_V(nir, gl_nir_lower_atomics, shader_program, true);
if (st->ctx->_Shader->Flags & GLSL_DUMP) {
_mesa_log("\n");
}
if (screen->get_param(screen, PIPE_CAP_NIR_SAMPLERS_AS_DEREF))
- NIR_PASS_V(nir, nir_lower_samplers_as_deref, shader_program);
+ NIR_PASS_V(nir, gl_nir_lower_samplers_as_deref, shader_program);
else
- NIR_PASS_V(nir, nir_lower_samplers, shader_program);
+ NIR_PASS_V(nir, gl_nir_lower_samplers, shader_program);
}
} /* extern "C" */