nir/nir_lower_tex.c \
nir/nir_lower_to_source_mods.c \
nir/nir_lower_two_sided_color.c \
- nir/nir_lower_uniforms_to_ubo.c \
nir/nir_lower_vars_to_ssa.c \
nir/nir_lower_var_copies.c \
nir/nir_lower_vec_to_movs.c \
'nir_lower_tex.c',
'nir_lower_to_source_mods.c',
'nir_lower_two_sided_color.c',
- 'nir_lower_uniforms_to_ubo.c',
'nir_lower_vars_to_ssa.c',
'nir_lower_var_copies.c',
'nir_lower_vec_to_movs.c',
bool nir_lower_atomics(nir_shader *shader,
const struct gl_shader_program *shader_program);
bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);
-bool nir_lower_uniforms_to_ubo(nir_shader *shader);
bool nir_lower_to_source_mods(nir_shader *shader);
bool nir_lower_gs_intrinsics(nir_shader *shader);
+++ /dev/null
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * on the rights to use, copy, modify, merge, publish, distribute, sub
- * license, and/or sell copies of the Software, and to permit persons to whom
- * the Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * Remap load_uniform intrinsics to UBO accesses of UBO binding point 0. Both
- * the base and the offset are interpreted as 16-byte units.
- *
- * Simultaneously, remap existing UBO accesses by increasing their binding
- * point by 1.
- */
-
-#include "nir.h"
-#include "nir_builder.h"
-
-static bool
-lower_instr(nir_intrinsic_instr *instr, nir_builder *b)
-{
- b->cursor = nir_before_instr(&instr->instr);
-
- if (instr->intrinsic == nir_intrinsic_load_ubo) {
- nir_ssa_def *old_idx = nir_ssa_for_src(b, instr->src[0], 1);
- nir_ssa_def *new_idx = nir_iadd(b, old_idx, nir_imm_int(b, 1));
- nir_instr_rewrite_src(&instr->instr, &instr->src[0],
- nir_src_for_ssa(new_idx));
- return true;
- }
-
- if (instr->intrinsic == nir_intrinsic_load_uniform) {
- nir_ssa_def *ubo_idx = nir_imm_int(b, 0);
- nir_ssa_def *ubo_offset =
- nir_imul(b, nir_imm_int(b, 16),
- nir_iadd(b, nir_imm_int(b, nir_intrinsic_base(instr)),
- nir_ssa_for_src(b, instr->src[0], 1)));
-
- nir_intrinsic_instr *load =
- nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
- load->num_components = instr->num_components;
- load->src[0] = nir_src_for_ssa(ubo_idx);
- load->src[1] = nir_src_for_ssa(ubo_offset);
- nir_ssa_dest_init(&load->instr, &load->dest,
- load->num_components, instr->dest.ssa.bit_size,
- instr->dest.ssa.name);
- nir_builder_instr_insert(b, &load->instr);
- nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));
-
- nir_instr_remove(&instr->instr);
- return true;
- }
-
- return false;
-}
-
-bool
-nir_lower_uniforms_to_ubo(nir_shader *shader)
-{
- bool progress = false;
-
- nir_foreach_function(function, shader) {
- if (function->impl) {
- nir_builder builder;
- nir_builder_init(&builder, function->impl);
- nir_foreach_block(block, function->impl) {
- nir_foreach_instr_safe(instr, block) {
- if (instr->type == nir_instr_type_intrinsic)
- progress |= lower_instr(nir_instr_as_intrinsic(instr),
- &builder);
- }
- }
-
- nir_metadata_preserve(function->impl, nir_metadata_block_index |
- nir_metadata_dominance);
- }
- }
-
- return progress;
-}
-
#include "compiler/nir_types.h"
-static int
-type_size(const struct glsl_type *type)
-{
- return glsl_count_attribute_slots(type, false);
-}
-
static void scan_instruction(struct tgsi_shader_info *info,
nir_instr *instr)
{
* - ensure constant offsets for texture instructions are folded
* and copy-propagated
*/
- NIR_PASS_V(sel->nir, nir_lower_io, nir_var_uniform, type_size,
- (nir_lower_io_options)0);
- NIR_PASS_V(sel->nir, nir_lower_uniforms_to_ubo);
-
NIR_PASS_V(sel->nir, nir_lower_returns);
NIR_PASS_V(sel->nir, nir_lower_vars_to_ssa);
NIR_PASS_V(sel->nir, nir_lower_alu_to_scalar);
state_tracker/st_nir.h \
state_tracker/st_nir_lower_builtin.c \
state_tracker/st_nir_lower_tex_src_plane.c \
+ state_tracker/st_nir_lower_uniforms_to_ubo.c \
state_tracker/st_pbo.c \
state_tracker/st_pbo.h \
state_tracker/st_program.c \
'state_tracker/st_nir.h',
'state_tracker/st_nir_lower_builtin.c',
'state_tracker/st_nir_lower_tex_src_plane.c',
+ 'state_tracker/st_nir_lower_uniforms_to_ubo.c',
'state_tracker/st_pbo.c',
'state_tracker/st_pbo.h',
'state_tracker/st_program.c',
#include "main/uniforms.h"
#include "st_context.h"
+#include "st_glsl_types.h"
#include "st_program.h"
#include "compiler/nir/nir.h"
st_nir_assign_uniform_locations(st->ctx, prog, shader_program,
&nir->uniforms, &nir->num_uniforms);
+   /* Below is a quick hack so that uniform lowering only runs on radeonsi
+    * (the only NIR backend that currently supports tess). Once we enable
+    * uniform packing support, we will just use
+    * ctx->Const.PackedDriverUniformStorage for this check.
+    */
+ if (screen->get_shader_param(screen, PIPE_SHADER_TESS_CTRL,
+ PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) {
+ NIR_PASS_V(nir, nir_lower_io, nir_var_uniform, type_size,
+ (nir_lower_io_options)0);
+ NIR_PASS_V(nir, st_nir_lower_uniforms_to_ubo);
+ }
+
if (screen->get_param(screen, PIPE_CAP_NIR_SAMPLERS_AS_DEREF))
NIR_PASS_V(nir, nir_lower_samplers_as_deref, shader_program);
else
void st_nir_lower_builtin(struct nir_shader *shader);
void st_nir_lower_tex_src_plane(struct nir_shader *shader, unsigned free_slots,
unsigned lower_2plane, unsigned lower_3plane);
+bool st_nir_lower_uniforms_to_ubo(struct nir_shader *shader);
void st_finalize_nir(struct st_context *st, struct gl_program *prog,
struct gl_shader_program *shader_program,
--- /dev/null
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Remap load_uniform intrinsics to UBO accesses of UBO binding point 0. Both
+ * the base and the offset are interpreted as 16-byte units.
+ *
+ * Simultaneously, remap existing UBO accesses by increasing their binding
+ * point by 1.
+ */
+
+#include "nir.h"
+#include "nir_builder.h"
+#include "st_nir.h"
+
+static bool
+lower_instr(nir_intrinsic_instr *instr, nir_builder *b)
+{
+ b->cursor = nir_before_instr(&instr->instr);
+
+ if (instr->intrinsic == nir_intrinsic_load_ubo) {
+ nir_ssa_def *old_idx = nir_ssa_for_src(b, instr->src[0], 1);
+ nir_ssa_def *new_idx = nir_iadd(b, old_idx, nir_imm_int(b, 1));
+ nir_instr_rewrite_src(&instr->instr, &instr->src[0],
+ nir_src_for_ssa(new_idx));
+ return true;
+ }
+
+ if (instr->intrinsic == nir_intrinsic_load_uniform) {
+ nir_ssa_def *ubo_idx = nir_imm_int(b, 0);
+ nir_ssa_def *ubo_offset =
+ nir_imul(b, nir_imm_int(b, 16),
+ nir_iadd(b, nir_imm_int(b, nir_intrinsic_base(instr)),
+ nir_ssa_for_src(b, instr->src[0], 1)));
+
+ nir_intrinsic_instr *load =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
+ load->num_components = instr->num_components;
+ load->src[0] = nir_src_for_ssa(ubo_idx);
+ load->src[1] = nir_src_for_ssa(ubo_offset);
+ nir_ssa_dest_init(&load->instr, &load->dest,
+ load->num_components, instr->dest.ssa.bit_size,
+ instr->dest.ssa.name);
+ nir_builder_instr_insert(b, &load->instr);
+ nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));
+
+ nir_instr_remove(&instr->instr);
+ return true;
+ }
+
+ return false;
+}
+
+bool
+st_nir_lower_uniforms_to_ubo(nir_shader *shader)
+{
+ bool progress = false;
+
+ nir_foreach_function(function, shader) {
+ if (function->impl) {
+ nir_builder builder;
+ nir_builder_init(&builder, function->impl);
+ nir_foreach_block(block, function->impl) {
+ nir_foreach_instr_safe(instr, block) {
+ if (instr->type == nir_instr_type_intrinsic)
+ progress |= lower_instr(nir_instr_as_intrinsic(instr),
+ &builder);
+ }
+ }
+
+ nir_metadata_preserve(function->impl, nir_metadata_block_index |
+ nir_metadata_dominance);
+ }
+ }
+
+ return progress;
+}
+