#include "brw_nir.h"
#include "brw_shader.h"
+#include "common/gen_debug.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
}
static bool
-remap_vs_attrs(nir_block *block, struct nir_shader_info *nir_info)
+remap_vs_attrs(nir_block *block, shader_info *nir_info)
{
nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
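/* Attributes come in a contiguous block, ordered by their
 * gl_vert_attrib value.  We can therefore compute the slot for an
 * attribute by masking out the enabled attributes before it and
 * counting the bits.
 */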
int attr = intrin->const_index[0];
int slot = _mesa_bitcount_64(nir_info->inputs_read &
BITFIELD64_MASK(attr));
- int dslot = _mesa_bitcount_64(nir_info->double_inputs_read &
- BITFIELD64_MASK(attr));
- intrin->const_index[0] = 4 * (slot + dslot);
+ intrin->const_index[0] = 4 * slot;
}
}
return true;
}
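+/* Remap accesses to gl_TessLevelInner/Outer onto their packed DWord
+ * positions in the patch URB header.  The layout depends on the
+ * tessellation domain (see the per-case comments below); accesses to
+ * components that do not exist for the current domain are deleted, with
+ * reads replaced by undef.
+ */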
+static bool
+remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
+ GLenum primitive_mode)
+{
+ const int location = nir_intrinsic_base(intr);
+ const unsigned component = nir_intrinsic_component(intr);
+ bool out_of_bounds;
+
+ if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
+ switch (primitive_mode) {
+ case GL_QUADS:
+ /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
+ nir_intrinsic_set_base(intr, 0);
+ nir_intrinsic_set_component(intr, 3 - component);
+ out_of_bounds = false;
+ break;
+ case GL_TRIANGLES:
+ /* gl_TessLevelInner[0] lives at DWord 4. */
+ nir_intrinsic_set_base(intr, 1);
+ out_of_bounds = component > 0;
+ break;
+ case GL_ISOLINES:
+ out_of_bounds = true;
+ break;
+ default:
+ unreachable("Bogus tessellation domain");
+ }
+ } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
+ if (primitive_mode == GL_ISOLINES) {
+ /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
+ nir_intrinsic_set_base(intr, 1);
+ nir_intrinsic_set_component(intr, 2 + nir_intrinsic_component(intr));
+ out_of_bounds = component > 1;
+ } else {
+ /* Triangles use DWords 7-5 (reversed); Quads use 7-4 (reversed) */
+ nir_intrinsic_set_base(intr, 1);
+ nir_intrinsic_set_component(intr, 3 - nir_intrinsic_component(intr));
+ out_of_bounds = component == 3 && primitive_mode == GL_TRIANGLES;
+ }
+ } else {
+ return false;
+ }
+
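+ /* Components that do not exist for this domain are dropped entirely:
+ * rewrite any uses of the intrinsic's destination to undef, then remove
+ * the instruction.
+ */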
+ if (out_of_bounds) {
+ if (nir_intrinsic_infos[intr->intrinsic].has_dest) {
+ b->cursor = nir_before_instr(&intr->instr);
+ nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
+ nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(undef));
+ }
+ nir_instr_remove(&intr->instr);
+ }
+
+ return true;
+}
+
static bool
remap_patch_urb_offsets(nir_block *block, nir_builder *b,
- const struct brw_vue_map *vue_map)
+ const struct brw_vue_map *vue_map,
+ GLenum tes_primitive_mode)
{
+ const bool is_passthrough_tcs = b->shader->info->name &&
+ strcmp(b->shader->info->name, "passthrough") == 0;
+
nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
(stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
+
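+ /* The driver-generated passthrough TCS presumably already writes the
+ * tessellation levels at their final patch-header offsets, so skip the
+ * tess level remap for it.
+ */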
+ if (!is_passthrough_tcs &&
+ remap_tess_levels(b, intrin, tes_primitive_mode))
+ continue;
+
int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
assert(vue_slot != -1);
intrin->const_index[0] = vue_slot;
* loaded as one vec4 or dvec4 per element (or matrix column), depending on
* whether it is a double-precision type or not.
*/
- nir_lower_io(nir, nir_var_shader_in, type_size_vs_input, 0);
+ nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
nir_foreach_function(function, nir) {
if (function->impl) {
nir_foreach_block(block, function->impl) {
- remap_vs_attrs(block, &nir->info);
+ remap_vs_attrs(block, nir->info);
}
}
}
nir_builder b;
nir_builder_init(&b, function->impl);
nir_foreach_block(block, function->impl) {
- remap_patch_urb_offsets(block, &b, vue_map);
+ remap_patch_urb_offsets(block, &b, vue_map,
+ nir->info->tess.primitive_mode);
}
}
}
}
void
-brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map)
+brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map,
+ GLenum tes_primitive_mode)
{
nir_foreach_variable(var, &nir->outputs) {
var->data.driver_location = var->data.location;
nir_builder b;
nir_builder_init(&b, function->impl);
nir_foreach_block(block, function->impl) {
- remap_patch_urb_offsets(block, &b, vue_map);
+ remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
}
}
}
void
brw_nir_lower_cs_shared(nir_shader *nir)
{
- nir_assign_var_locations(&nir->shared, &nir->num_shared, 0,
+ nir_assign_var_locations(&nir->shared, &nir->num_shared,
type_size_scalar_bytes);
nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes, 0);
}
#define OPT_V(pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)
static nir_shader *
-nir_optimize(nir_shader *nir, bool is_scalar)
+nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
+ bool is_scalar)
{
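+ /* Gather the variable modes for which this stage cannot handle indirect
+ * addressing; nir_opt_loop_unroll uses this mask to force unrolling of
+ * loops whose unrolling would turn such indirect accesses into direct ones.
+ */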
+ nir_variable_mode indirect_mask = 0;
+ if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectInput)
+ indirect_mask |= nir_var_shader_in;
+ if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectOutput)
+ indirect_mask |= nir_var_shader_out;
+ if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectTemp)
+ indirect_mask |= nir_var_local;
+
bool progress;
do {
progress = false;
OPT_V(nir_lower_vars_to_ssa);
+ OPT(nir_opt_copy_prop_vars);
if (is_scalar) {
OPT(nir_lower_alu_to_scalar);
OPT(nir_opt_algebraic);
OPT(nir_opt_constant_folding);
OPT(nir_opt_dead_cf);
+ if (OPT(nir_opt_trivial_continues)) {
+ /* If nir_opt_trivial_continues makes progress, then we need to clean
+ * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
+ * to make progress.
+ */
+ OPT(nir_copy_prop);
+ OPT(nir_opt_dce);
+ }
+ OPT(nir_opt_if);
+ if (nir->options->max_unroll_iterations != 0) {
+ OPT(nir_opt_loop_unroll, indirect_mask);
+ }
OPT(nir_opt_remove_phis);
OPT(nir_opt_undef);
OPT_V(nir_lower_doubles, nir_lower_drcp |
nir_lower_dfract |
nir_lower_dround_even |
nir_lower_dmod);
- OPT_V(nir_lower_double_pack);
+ OPT_V(nir_lower_64bit_pack);
} while (progress);
return nir;
nir_shader *
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
{
+ const struct gen_device_info *devinfo = compiler->devinfo;
bool progress; /* Written by OPT and OPT_V */
(void)progress;
if (nir->stage == MESA_SHADER_GEOMETRY)
OPT(nir_lower_gs_intrinsics);
- if (compiler->precise_trig)
+ /* See also brw_nir_trig_workarounds.py */
+ if (compiler->precise_trig &&
+ !(devinfo->gen >= 10 || devinfo->is_kabylake))
OPT(brw_nir_apply_trig_workarounds);
static const nir_lower_tex_options tex_options = {
.lower_txp = ~0,
.lower_txf_offset = true,
.lower_rect_offset = true,
+ .lower_txd_cube_map = true,
};
OPT(nir_lower_tex, &tex_options);
OPT(nir_split_var_copies);
- nir = nir_optimize(nir, is_scalar);
+ nir = nir_optimize(nir, compiler, is_scalar);
if (is_scalar) {
OPT_V(nir_lower_load_const_to_scalar);
/* Lower a bunch of stuff */
OPT_V(nir_lower_var_copies);
+ OPT_V(nir_lower_clip_cull_distance_arrays);
+
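+ /* Rebuild the per-stage mask of unsupported indirect modes and lower any
+ * remaining indirect dereferences of those modes to chains of direct,
+ * conditional accesses.
+ */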
+ nir_variable_mode indirect_mask = 0;
+ if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectInput)
+ indirect_mask |= nir_var_shader_in;
+ if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectOutput)
+ indirect_mask |= nir_var_shader_out;
+ if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectTemp)
+ indirect_mask |= nir_var_local;
+
+ nir_lower_indirect_derefs(nir, indirect_mask);
+
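+ /* Lower 64-bit integer multiply, sign, and division/modulo to sequences
+ * of 32-bit operations; the hardware has no native support for these.
+ */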
+ nir_lower_int64(nir, nir_lower_imul64 |
+ nir_lower_isign64 |
+ nir_lower_divmod64);
+
/* Get rid of split copies */
- nir = nir_optimize(nir, is_scalar);
+ nir = nir_optimize(nir, compiler, is_scalar);
OPT(nir_remove_dead_variables, nir_var_local);
* will not work.
*/
nir_shader *
-brw_postprocess_nir(nir_shader *nir,
- const struct gen_device_info *devinfo,
+brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
bool is_scalar)
{
+ const struct gen_device_info *devinfo = compiler->devinfo;
bool debug_enabled =
(INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->stage));
bool progress; /* Written by OPT and OPT_V */
(void)progress;
- nir = nir_optimize(nir, is_scalar);
+ nir = nir_optimize(nir, compiler, is_scalar);
if (devinfo->gen >= 6) {
/* Try and fuse multiply-adds */
OPT(nir_opt_algebraic_late);
- OPT(nir_lower_locals_to_regs);
-
OPT_V(nir_lower_to_source_mods);
OPT(nir_copy_prop);
OPT(nir_opt_dce);
+ OPT(nir_opt_move_comparisons);
+
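+ /* Lower local variable access to registers only after the SSA cleanup
+ * passes above, presumably so values that copy-prop/DCE would remove
+ * never become registers.
+ */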
+ OPT(nir_lower_locals_to_regs);
if (unlikely(debug_enabled)) {
/* Re-index SSA defs so we print more sensible numbers. */
nir_shader *
brw_nir_apply_sampler_key(nir_shader *nir,
- const struct gen_device_info *devinfo,
+ const struct brw_compiler *compiler,
const struct brw_sampler_prog_key_data *key_tex,
bool is_scalar)
{
+ const struct gen_device_info *devinfo = compiler->devinfo;
nir_lower_tex_options tex_options = { 0 };
/* Iron Lake and prior require lowering of all rectangle textures */
tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
}
+ /* Prior to Haswell, we have to lower gradients on shadow samplers */
+ tex_options.lower_txd_shadow = devinfo->gen < 8 && !devinfo->is_haswell;
+
tex_options.lower_y_uv_external = key_tex->y_uv_image_mask;
tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;
if (nir_lower_tex(nir, &tex_options)) {
nir_validate_shader(nir);
- nir = nir_optimize(nir, is_scalar);
+ nir = nir_optimize(nir, compiler, is_scalar);
}
return nir;
}
enum brw_reg_type
-brw_type_for_nir_type(nir_alu_type type)
+brw_type_for_nir_type(const struct gen_device_info *devinfo, nir_alu_type type)
{
switch (type) {
case nir_type_uint:
case nir_type_float64:
return BRW_REGISTER_TYPE_DF;
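+ /* 64-bit integer (Q/UQ) register types only exist on gen8+; older
+ * hardware falls back to DF here.
+ */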
case nir_type_int64:
+ return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_Q;
case nir_type_uint64:
- /* TODO we should only see these in moves, so for now it's ok, but when
- * we add actual 64-bit integer support we should fix this.
- */
- return BRW_REGISTER_TYPE_DF;
+ return devinfo->gen < 8 ? BRW_REGISTER_TYPE_DF : BRW_REGISTER_TYPE_UQ;
default:
unreachable("unknown type");
}