#include "brw_nir.h"
#include "brw_shader.h"
+#include "compiler/glsl_types.h"
#include "compiler/nir/glsl_to_nir.h"
#include "compiler/nir/nir_builder.h"
#include "program/prog_to_nir.h"
+#include "program/prog_parameter.h"
static bool
is_input(nir_intrinsic_instr *intrin)
* we don't know what part of a compound variable is accessed, we allocate
* storage for the entire thing.
*/
-struct add_const_offset_to_base_params {
- nir_builder b;
- nir_variable_mode mode;
-};
static bool
-add_const_offset_to_base_block(nir_block *block, void *closure)
+add_const_offset_to_base_block(nir_block *block, nir_builder *b,
+ nir_variable_mode mode)
{
- struct add_const_offset_to_base_params *params = closure;
- nir_builder *b = &params->b;
-
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
- if ((params->mode == nir_var_shader_in && is_input(intrin)) ||
- (params->mode == nir_var_shader_out && is_output(intrin))) {
+ if ((mode == nir_var_shader_in && is_input(intrin)) ||
+ (mode == nir_var_shader_out && is_output(intrin))) {
nir_src *offset = nir_get_io_offset_src(intrin);
nir_const_value *const_offset = nir_src_as_const_value(*offset);
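+ /* Fold a constant offset into the base. Illustrative example (hypothetical
+ * numbers): a load_input with base 7 and a constant offset of 2 becomes
+ * base 9, with the offset source rewritten to the constant 0.
+ */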
if (const_offset) {
- intrin->const_index[0] += const_offset->u[0];
+ intrin->const_index[0] += const_offset->u32[0];
b->cursor = nir_before_instr(&intrin->instr);
nir_instr_rewrite_src(&intrin->instr, offset,
nir_src_for_ssa(nir_imm_int(b, 0)));
static void
add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
{
- struct add_const_offset_to_base_params params = { .mode = mode };
-
- nir_foreach_function(nir, f) {
+ nir_foreach_function(f, nir) {
if (f->impl) {
- nir_builder_init(&params.b, f->impl);
- nir_foreach_block(f->impl, add_const_offset_to_base_block, &params);
+ nir_builder b;
+ nir_builder_init(&b, f->impl);
+ nir_foreach_block(block, f->impl) {
+ add_const_offset_to_base_block(block, &b, mode);
+ }
}
}
}
static bool
-remap_vs_attrs(nir_block *block, void *closure)
+remap_vs_attrs(nir_block *block, struct nir_shader_info *nir_info)
{
- GLbitfield64 inputs_read = *((GLbitfield64 *) closure);
-
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
* before it and counting the bits.
*/
int attr = intrin->const_index[0];
- int slot = _mesa_bitcount_64(inputs_read & BITFIELD64_MASK(attr));
-
- intrin->const_index[0] = 4 * slot;
+ int slot = _mesa_bitcount_64(nir_info->inputs_read &
+ BITFIELD64_MASK(attr));
+ int dslot = _mesa_bitcount_64(nir_info->double_inputs_read &
+ BITFIELD64_MASK(attr));
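+ /* Double-precision inputs take two vec4 slots, so each double input below
+ * this one shifts it by one extra slot; dslot counts those. Four scalar
+ * components per slot then gives the final base.
+ */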
+ intrin->const_index[0] = 4 * (slot + dslot);
}
}
return true;
}
static bool
-remap_inputs_with_vue_map(nir_block *block, void *closure)
+remap_inputs_with_vue_map(nir_block *block, const struct brw_vue_map *vue_map)
{
- const struct brw_vue_map *vue_map = closure;
-
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
return true;
}
-struct remap_patch_urb_offsets_state {
- nir_builder b;
- struct brw_vue_map vue_map;
-};
-
static bool
-remap_patch_urb_offsets(nir_block *block, void *closure)
+remap_patch_urb_offsets(nir_block *block, nir_builder *b,
+ const struct brw_vue_map *vue_map)
{
- struct remap_patch_urb_offsets_state *state = closure;
-
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
- gl_shader_stage stage = state->b.shader->stage;
+ gl_shader_stage stage = b->shader->stage;
if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
(stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
- int vue_slot = state->vue_map.varying_to_slot[intrin->const_index[0]];
+ int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
assert(vue_slot != -1);
intrin->const_index[0] = vue_slot;
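+ /* For per-vertex access, fold the vertex index into the slot as well:
+ * total = vue_slot + vertex * num_per_vertex_slots + indirect offset.
+ */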
if (vertex) {
nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
if (const_vertex) {
- intrin->const_index[0] += const_vertex->u[0] *
- state->vue_map.num_per_vertex_slots;
+ intrin->const_index[0] += const_vertex->u32[0] *
+ vue_map->num_per_vertex_slots;
} else {
- state->b.cursor = nir_before_instr(&intrin->instr);
+ b->cursor = nir_before_instr(&intrin->instr);
/* Multiply by the number of per-vertex slots. */
nir_ssa_def *vertex_offset =
- nir_imul(&state->b,
- nir_ssa_for_src(&state->b, *vertex, 1),
- nir_imm_int(&state->b,
- state->vue_map.num_per_vertex_slots));
+ nir_imul(b,
+ nir_ssa_for_src(b, *vertex, 1),
+ nir_imm_int(b,
+ vue_map->num_per_vertex_slots));
/* Add it to the existing offset */
nir_src *offset = nir_get_io_offset_src(intrin);
nir_ssa_def *total_offset =
- nir_iadd(&state->b, vertex_offset,
- nir_ssa_for_src(&state->b, *offset, 1));
+ nir_iadd(b, vertex_offset,
+ nir_ssa_for_src(b, *offset, 1));
nir_instr_rewrite_src(&intrin->instr, offset,
nir_src_for_ssa(total_offset));
return true;
}
-static void
-brw_nir_lower_inputs(nir_shader *nir,
- const struct brw_device_info *devinfo,
- bool is_scalar,
- bool use_legacy_snorm_formula,
- const uint8_t *vs_attrib_wa_flags)
+void
+brw_nir_lower_vs_inputs(nir_shader *nir,
+ const struct brw_device_info *devinfo,
+ bool is_scalar,
+ bool use_legacy_snorm_formula,
+ const uint8_t *vs_attrib_wa_flags)
{
- switch (nir->stage) {
- case MESA_SHADER_VERTEX:
- /* Start with the location of the variable's base. */
- foreach_list_typed(nir_variable, var, node, &nir->inputs) {
- var->data.driver_location = var->data.location;
- }
+ /* Start with the location of the variable's base. */
+ foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+ var->data.driver_location = var->data.location;
+ }
- /* Now use nir_lower_io to walk dereference chains. Attribute arrays
- * are loaded as one vec4 per element (or matrix column), so we use
- * type_size_vec4 here.
- */
- nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+ /* Now use nir_lower_io to walk dereference chains. Attribute arrays are
+ * loaded as one vec4 or dvec4 per element (or matrix column), depending on
+ * whether the type is double-precision.
+ */
+ nir_lower_io(nir, nir_var_shader_in, type_size_vs_input);
- /* This pass needs actual constants */
- nir_opt_constant_folding(nir);
+ /* This pass needs actual constants */
+ nir_opt_constant_folding(nir);
- add_const_offset_to_base(nir, nir_var_shader_in);
+ add_const_offset_to_base(nir, nir_var_shader_in);
- brw_nir_apply_attribute_workarounds(nir, use_legacy_snorm_formula,
- vs_attrib_wa_flags);
+ brw_nir_apply_attribute_workarounds(nir, use_legacy_snorm_formula,
+ vs_attrib_wa_flags);
- if (is_scalar) {
- /* Finally, translate VERT_ATTRIB_* values into the actual registers.
- *
- * Note that we can use nir->info.inputs_read instead of
- * key->inputs_read since the two are identical aside from Gen4-5
- * edge flag differences.
- */
- GLbitfield64 inputs_read = nir->info.inputs_read;
+ if (is_scalar) {
+ /* Finally, translate VERT_ATTRIB_* values into the actual registers. */
- nir_foreach_function(nir, function) {
- if (function->impl) {
- nir_foreach_block(function->impl, remap_vs_attrs, &inputs_read);
+ nir_foreach_function(function, nir) {
+ if (function->impl) {
+ nir_foreach_block(block, function->impl) {
+ remap_vs_attrs(block, &nir->info);
}
}
}
- break;
- case MESA_SHADER_TESS_CTRL:
- case MESA_SHADER_GEOMETRY: {
- if (!is_scalar && nir->stage == MESA_SHADER_GEOMETRY) {
- foreach_list_typed(nir_variable, var, node, &nir->inputs) {
- var->data.driver_location = var->data.location;
- }
- } else {
- /* The GLSL linker will have already matched up GS inputs and
- * the outputs of prior stages. The driver does extend VS outputs
- * in some cases, but only for legacy OpenGL or Gen4-5 hardware,
- * neither of which offer geometry shader support. So we can
- * safely ignore that.
- *
- * For SSO pipelines, we use a fixed VUE map layout based on variable
- * locations, so we can rely on rendezvous-by-location to make this
- * work.
- *
- * However, we need to ignore VARYING_SLOT_PRIMITIVE_ID, as it's not
- * written by previous stages and shows up via payload magic.
- */
- struct brw_vue_map input_vue_map;
- GLbitfield64 inputs_read =
- nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID;
- brw_compute_vue_map(devinfo, &input_vue_map, inputs_read,
- nir->info.separate_shader ||
- nir->stage == MESA_SHADER_TESS_CTRL);
-
- foreach_list_typed(nir_variable, var, node, &nir->inputs) {
- var->data.driver_location = var->data.location;
- }
+ }
+}
+
+void
+brw_nir_lower_vue_inputs(nir_shader *nir, bool is_scalar,
+ const struct brw_vue_map *vue_map)
+{
+ foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+ var->data.driver_location = var->data.location;
+ }
- /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
- nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+ /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
+ nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
- /* This pass needs actual constants */
- nir_opt_constant_folding(nir);
+ if (is_scalar || nir->stage != MESA_SHADER_GEOMETRY) {
+ /* This pass needs actual constants */
+ nir_opt_constant_folding(nir);
- add_const_offset_to_base(nir, nir_var_shader_in);
+ add_const_offset_to_base(nir, nir_var_shader_in);
- nir_foreach_function(nir, function) {
- if (function->impl) {
- nir_foreach_block(function->impl, remap_inputs_with_vue_map,
- &input_vue_map);
+ nir_foreach_function(function, nir) {
+ if (function->impl) {
+ nir_foreach_block(block, function->impl) {
+ remap_inputs_with_vue_map(block, vue_map);
}
}
}
- break;
}
- case MESA_SHADER_TESS_EVAL: {
- struct remap_patch_urb_offsets_state state;
- brw_compute_tess_vue_map(&state.vue_map,
- nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
- nir->info.patch_inputs_read);
+}
- foreach_list_typed(nir_variable, var, node, &nir->inputs) {
- var->data.driver_location = var->data.location;
- }
+void
+brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
+{
+ foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+ var->data.driver_location = var->data.location;
+ }
- nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+ nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
- /* This pass needs actual constants */
- nir_opt_constant_folding(nir);
+ /* This pass needs actual constants */
+ nir_opt_constant_folding(nir);
- add_const_offset_to_base(nir, nir_var_shader_in);
+ add_const_offset_to_base(nir, nir_var_shader_in);
- nir_foreach_function(nir, function) {
- if (function->impl) {
- nir_builder_init(&state.b, function->impl);
- nir_foreach_block(function->impl, remap_patch_urb_offsets, &state);
+ nir_foreach_function(function, nir) {
+ if (function->impl) {
+ nir_builder b;
+ nir_builder_init(&b, function->impl);
+ nir_foreach_block(block, function->impl) {
+ remap_patch_urb_offsets(block, &b, vue_map);
}
}
- break;
- }
- case MESA_SHADER_FRAGMENT:
- assert(is_scalar);
- nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
- type_size_scalar);
- break;
- case MESA_SHADER_COMPUTE:
- /* Compute shaders have no inputs. */
- assert(exec_list_is_empty(&nir->inputs));
- break;
- default:
- unreachable("unsupported shader stage");
}
}
-static void
-brw_nir_lower_outputs(nir_shader *nir,
- const struct brw_device_info *devinfo,
- bool is_scalar)
+void
+brw_nir_lower_fs_inputs(nir_shader *nir)
{
- switch (nir->stage) {
- case MESA_SHADER_VERTEX:
- case MESA_SHADER_TESS_EVAL:
- case MESA_SHADER_GEOMETRY:
- if (is_scalar) {
- nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
- type_size_vec4_times_4);
- nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4);
- } else {
- nir_foreach_variable(var, &nir->outputs)
- var->data.driver_location = var->data.location;
- }
- break;
- case MESA_SHADER_TESS_CTRL: {
- struct remap_patch_urb_offsets_state state;
- brw_compute_tess_vue_map(&state.vue_map, nir->info.outputs_written,
- nir->info.patch_outputs_written);
+ nir_assign_var_locations(&nir->inputs, &nir->num_inputs, type_size_scalar);
+ nir_lower_io(nir, nir_var_shader_in, type_size_scalar);
+}
- nir_foreach_variable(var, &nir->outputs) {
+void
+brw_nir_lower_vue_outputs(nir_shader *nir,
+ bool is_scalar)
+{
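+ /* In scalar mode, outputs are laid out and addressed per scalar component
+ * (four per vec4 slot), hence type_size_vec4_times_4; in vec4 mode each
+ * varying simply keeps its vec4 slot.
+ */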
+ if (is_scalar) {
+ nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
+ type_size_vec4_times_4);
+ nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4);
+ } else {
+ nir_foreach_variable(var, &nir->outputs)
var->data.driver_location = var->data.location;
- }
-
nir_lower_io(nir, nir_var_shader_out, type_size_vec4);
+ }
+}
- /* This pass needs actual constants */
- nir_opt_constant_folding(nir);
+void
+brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map)
+{
+ nir_foreach_variable(var, &nir->outputs) {
+ var->data.driver_location = var->data.location;
+ }
- add_const_offset_to_base(nir, nir_var_shader_out);
+ nir_lower_io(nir, nir_var_shader_out, type_size_vec4);
- nir_foreach_function(nir, function) {
- if (function->impl) {
- nir_builder_init(&state.b, function->impl);
- nir_foreach_block(function->impl, remap_patch_urb_offsets, &state);
+ /* This pass needs actual constants */
+ nir_opt_constant_folding(nir);
+
+ add_const_offset_to_base(nir, nir_var_shader_out);
+
+ nir_foreach_function(function, nir) {
+ if (function->impl) {
+ nir_builder b;
+ nir_builder_init(&b, function->impl);
+ nir_foreach_block(block, function->impl) {
+ remap_patch_urb_offsets(block, &b, vue_map);
}
}
- break;
- }
- case MESA_SHADER_FRAGMENT:
- nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
- type_size_scalar);
- break;
- case MESA_SHADER_COMPUTE:
- /* Compute shaders have no outputs. */
- assert(exec_list_is_empty(&nir->outputs));
- break;
- default:
- unreachable("unsupported shader stage");
}
}
+void
+brw_nir_lower_fs_outputs(nir_shader *nir)
+{
+ nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
+ type_size_scalar);
+ nir_lower_io(nir, nir_var_shader_out, type_size_scalar);
+}
+
static int
type_size_scalar_bytes(const struct glsl_type *type)
{
}
}
-static void
-brw_nir_lower_shared(nir_shader *nir)
+void
+brw_nir_lower_cs_shared(nir_shader *nir)
{
nir_assign_var_locations(&nir->shared, &nir->num_shared,
type_size_scalar_bytes);
OPT(nir_opt_dead_cf);
OPT(nir_opt_remove_phis);
OPT(nir_opt_undef);
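+ /* Lower double-precision operations that lack native hardware support
+ * (rcp, sqrt, rounding, mod, ...) into sequences of supported operations.
+ */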
+ OPT_V(nir_lower_doubles, nir_lower_drcp |
+ nir_lower_dsqrt |
+ nir_lower_drsq |
+ nir_lower_dtrunc |
+ nir_lower_dfloor |
+ nir_lower_dceil |
+ nir_lower_dfract |
+ nir_lower_dround_even |
+ nir_lower_dmod);
+ OPT_V(nir_lower_double_pack);
} while (progress);
return nir;
* is_scalar = true to scalarize everything prior to code gen.
*/
nir_shader *
-brw_preprocess_nir(nir_shader *nir, bool is_scalar)
+brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
{
bool progress; /* Written by OPT and OPT_V */
(void)progress;
+ const bool is_scalar = compiler->scalar_stage[nir->stage];
+
if (nir->stage == MESA_SHADER_GEOMETRY)
OPT(nir_lower_gs_intrinsics);
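+ /* The hardware SIN/COS instructions trade accuracy for speed; apply
+ * workarounds when the application requires precise results.
+ */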
+ if (compiler->precise_trig)
+ OPT(brw_nir_apply_trig_workarounds);
+
static const nir_lower_tex_options tex_options = {
.lower_txp = ~0,
};
return nir;
}
-/** Lower input and output loads and stores for i965. */
-nir_shader *
-brw_nir_lower_io(nir_shader *nir,
- const struct brw_device_info *devinfo,
- bool is_scalar,
- bool use_legacy_snorm_formula,
- const uint8_t *vs_attrib_wa_flags)
-{
- bool progress; /* Written by OPT and OPT_V */
- (void)progress;
-
- OPT_V(brw_nir_lower_inputs, devinfo, is_scalar,
- use_legacy_snorm_formula, vs_attrib_wa_flags);
- OPT_V(brw_nir_lower_outputs, devinfo, is_scalar);
- if (nir->stage == MESA_SHADER_COMPUTE)
- OPT_V(brw_nir_lower_shared);
- OPT_V(nir_lower_io, nir_var_all, is_scalar ? type_size_scalar : type_size_vec4);
-
- return nir_optimize(nir, is_scalar);
-}
-
/* Prepare the given shader for codegen
*
* This function is intended to be called right before going into the actual
bool progress; /* Written by OPT and OPT_V */
(void)progress;
+ nir = nir_optimize(nir, is_scalar);
+
if (devinfo->gen >= 6) {
/* Try and fuse multiply-adds */
-// OPT(brw_nir_opt_peephole_ffma);
+ OPT(brw_nir_opt_peephole_ffma);
}
OPT(nir_opt_algebraic_late);
if (unlikely(debug_enabled)) {
/* Re-index SSA defs so we print more sensible numbers. */
- nir_foreach_function(nir, function) {
+ nir_foreach_function(function, nir) {
if (function->impl)
nir_index_ssa_defs(function->impl);
}
bool is_scalar)
{
struct gl_context *ctx = &brw->ctx;
- const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
const nir_shader_compiler_options *options =
ctx->Const.ShaderCompilerOptions[stage].NirOptions;
bool progress;
/* First, lower the GLSL IR or Mesa IR to NIR */
if (shader_prog) {
nir = glsl_to_nir(shader_prog, stage, options);
+ nir_remove_dead_variables(nir, nir_var_shader_in | nir_var_shader_out);
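+ /* Redirect output writes to temporaries that are copied to the real
+ * outputs once at the end of the shader (the trailing arguments request
+ * outputs only, not inputs).
+ */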
+ OPT_V(nir_lower_io_to_temporaries,
+ nir_shader_get_entrypoint(nir),
+ true, false);
} else {
nir = prog_to_nir(prog, options);
OPT_V(nir_convert_to_ssa); /* turn registers into SSA */
(void)progress;
- nir = brw_preprocess_nir(nir, is_scalar);
+ nir = brw_preprocess_nir(brw->intelScreen->compiler, nir);
+
+ if (stage == MESA_SHADER_FRAGMENT) {
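+ /* gl_FragCoord's y origin differs between window-system and FBO
+ * rendering, so lower it to a transform driven by this state variable.
+ */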
+ static const struct nir_lower_wpos_ytransform_options wpos_options = {
+ .state_tokens = {STATE_INTERNAL, STATE_FB_WPOS_Y_TRANSFORM, 0, 0, 0},
+ .fs_coord_pixel_center_integer = 1,
+ .fs_coord_origin_upper_left = 1,
+ };
+ _mesa_add_state_reference(prog->Parameters,
+ (gl_state_index *) wpos_options.state_tokens);
+
+ OPT(nir_lower_wpos_ytransform, &wpos_options);
+ }
OPT(nir_lower_system_values);
OPT_V(brw_nir_lower_uniforms, is_scalar);
OPT_V(nir_lower_atomics, shader_prog);
}
- if (nir->stage != MESA_SHADER_VERTEX &&
- nir->stage != MESA_SHADER_TESS_CTRL &&
- nir->stage != MESA_SHADER_TESS_EVAL &&
- nir->stage != MESA_SHADER_FRAGMENT) {
- nir = brw_nir_lower_io(nir, devinfo, is_scalar, false, NULL);
- }
-
return nir;
}
{
switch (type) {
case nir_type_uint:
+ case nir_type_uint32:
return BRW_REGISTER_TYPE_UD;
case nir_type_bool:
case nir_type_int:
+ case nir_type_bool32:
+ case nir_type_int32:
return BRW_REGISTER_TYPE_D;
case nir_type_float:
+ case nir_type_float32:
return BRW_REGISTER_TYPE_F;
+ case nir_type_float64:
+ return BRW_REGISTER_TYPE_DF;
+ case nir_type_int64:
+ case nir_type_uint64:
+ /* TODO: we should only see these in moves, so returning DF is OK for
+ * now, but this needs to be fixed once we add real 64-bit integer
+ * support.
+ */
+ return BRW_REGISTER_TYPE_DF;
default:
unreachable("unknown type");
}
{
switch (type) {
case nir_type_float:
+ case nir_type_float32:
return GLSL_TYPE_FLOAT;
+ case nir_type_float64:
+ return GLSL_TYPE_DOUBLE;
+
case nir_type_int:
+ case nir_type_int32:
return GLSL_TYPE_INT;
case nir_type_uint:
+ case nir_type_uint32:
return GLSL_TYPE_UINT;
default: