X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmesa%2Fdrivers%2Fdri%2Fi965%2Fbrw_nir.c;h=24a64cb09f0dfce70d09effe7013ceda8adb72da;hb=55364ab5b7136e09a61d858f1167dee81e17bd9f;hp=91358d8f389ac9be1832be783cdc84fa9e5726ca;hpb=d9b8fde963a53d4e06570d8bece97f806714507a;p=mesa.git

diff --git a/src/mesa/drivers/dri/i965/brw_nir.c b/src/mesa/drivers/dri/i965/brw_nir.c
index 91358d8f389..24a64cb09f0 100644
--- a/src/mesa/drivers/dri/i965/brw_nir.c
+++ b/src/mesa/drivers/dri/i965/brw_nir.c
@@ -23,23 +23,86 @@
 #include "brw_nir.h"
 #include "brw_shader.h"
-#include "glsl/glsl_parser_extras.h"
-#include "glsl/nir/glsl_to_nir.h"
-#include "program/prog_to_nir.h"
+#include "compiler/glsl_types.h"
+#include "compiler/nir/nir_builder.h"
 
 static bool
-remap_vs_attrs(nir_block *block, void *closure)
+is_input(nir_intrinsic_instr *intrin)
 {
-   GLbitfield64 inputs_read = *((GLbitfield64 *) closure);
+   return intrin->intrinsic == nir_intrinsic_load_input ||
+          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
+          intrin->intrinsic == nir_intrinsic_load_interpolated_input;
+}
+
+static bool
+is_output(nir_intrinsic_instr *intrin)
+{
+   return intrin->intrinsic == nir_intrinsic_load_output ||
+          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
+          intrin->intrinsic == nir_intrinsic_store_output ||
+          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
+}
 
-   nir_foreach_instr(block, instr) {
+/**
+ * In many cases, we just add the base and offset together, so there's no
+ * reason to keep them separate. Sometimes, combining them is essential:
+ * if a shader only accesses part of a compound variable (such as a matrix
+ * or array), the variable's base may not actually exist in the VUE map.
+ *
+ * This pass adds constant offsets to instr->const_index[0], and resets
+ * the offset source to 0. Non-constant offsets remain unchanged - since
+ * we don't know what part of a compound variable is accessed, we allocate
+ * storage for the entire thing.
+ */
+
+static bool
+add_const_offset_to_base_block(nir_block *block, nir_builder *b,
+                               nir_variable_mode mode)
+{
+   nir_foreach_instr_safe(instr, block) {
       if (instr->type != nir_instr_type_intrinsic)
          continue;
 
       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 
-      /* We set EmitNoIndirect for VS inputs, so there are no indirects. */
-      assert(intrin->intrinsic != nir_intrinsic_load_input_indirect);
+      if ((mode == nir_var_shader_in && is_input(intrin)) ||
+          (mode == nir_var_shader_out && is_output(intrin))) {
+         nir_src *offset = nir_get_io_offset_src(intrin);
+         nir_const_value *const_offset = nir_src_as_const_value(*offset);
+
+         if (const_offset) {
+            intrin->const_index[0] += const_offset->u32[0];
+            b->cursor = nir_before_instr(&intrin->instr);
+            nir_instr_rewrite_src(&intrin->instr, offset,
+                                  nir_src_for_ssa(nir_imm_int(b, 0)));
+         }
+      }
+   }
+   return true;
+}
+
+static void
+add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
+{
+   nir_foreach_function(f, nir) {
+      if (f->impl) {
+         nir_builder b;
+         nir_builder_init(&b, f->impl);
+         nir_foreach_block(block, f->impl) {
+            add_const_offset_to_base_block(block, &b, mode);
+         }
+      }
+   }
+}
+
+static bool
+remap_vs_attrs(nir_block *block, struct nir_shader_info *nir_info)
+{
+   nir_foreach_instr(instr, block) {
+      if (instr->type != nir_instr_type_intrinsic)
+         continue;
+
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 
       if (intrin->intrinsic == nir_intrinsic_load_input) {
          /* Attributes come in a contiguous block, ordered by their
@@ -48,163 +111,260 @@ remap_vs_attrs(nir_block *block, void *closure)
           * before it and counting the bits.
           */
          int attr = intrin->const_index[0];
-         int slot = _mesa_bitcount_64(inputs_read & BITFIELD64_MASK(attr));
-         intrin->const_index[0] = 4 * slot;
+         int slot = _mesa_bitcount_64(nir_info->inputs_read &
+                                      BITFIELD64_MASK(attr));
+         int dslot = _mesa_bitcount_64(nir_info->double_inputs_read &
+                                       BITFIELD64_MASK(attr));
+         intrin->const_index[0] = 4 * (slot + dslot);
       }
    }
    return true;
 }
 
-static void
-brw_nir_lower_inputs(nir_shader *nir,
-                     const struct brw_device_info *devinfo,
-                     bool is_scalar)
+static bool
+remap_inputs_with_vue_map(nir_block *block, const struct brw_vue_map *vue_map)
 {
-   switch (nir->stage) {
-   case MESA_SHADER_VERTEX:
-      /* For now, leave the vec4 backend doing the old method. */
-      if (!is_scalar) {
-         nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
-                                  type_size_vec4);
-         break;
-      }
+   nir_foreach_instr(instr, block) {
+      if (instr->type != nir_instr_type_intrinsic)
+         continue;
 
-      /* Start with the location of the variable's base. */
-      foreach_list_typed(nir_variable, var, node, &nir->inputs) {
-         var->data.driver_location = var->data.location;
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+      if (intrin->intrinsic == nir_intrinsic_load_input ||
+          intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
+         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
+         assert(vue_slot != -1);
+         intrin->const_index[0] = vue_slot;
       }
+   }
+   return true;
+}
 
-      /* Now use nir_lower_io to walk dereference chains. Attribute arrays
-       * are loaded as one vec4 per element (or matrix column), so we use
-       * type_size_vec4 here.
-       */
-      nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+static bool
+remap_patch_urb_offsets(nir_block *block, nir_builder *b,
+                        const struct brw_vue_map *vue_map)
+{
+   nir_foreach_instr_safe(instr, block) {
+      if (instr->type != nir_instr_type_intrinsic)
+         continue;
 
-      /* Finally, translate VERT_ATTRIB_* values into the actual registers.
-       *
-       * Note that we can use nir->info.inputs_read instead of key->inputs_read
-       * since the two are identical aside from Gen4-5 edge flag differences.
-       */
-      GLbitfield64 inputs_read = nir->info.inputs_read;
-      nir_foreach_overload(nir, overload) {
-         if (overload->impl) {
-            nir_foreach_block(overload->impl, remap_vs_attrs, &inputs_read);
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+      gl_shader_stage stage = b->shader->stage;
+
+      if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
+          (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
+         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
+         assert(vue_slot != -1);
+         intrin->const_index[0] = vue_slot;
+
+         nir_src *vertex = nir_get_io_vertex_index_src(intrin);
+         if (vertex) {
+            nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
+            if (const_vertex) {
+               intrin->const_index[0] += const_vertex->u32[0] *
+                                         vue_map->num_per_vertex_slots;
+            } else {
+               b->cursor = nir_before_instr(&intrin->instr);
+
+               /* Multiply by the number of per-vertex slots. */
+               nir_ssa_def *vertex_offset =
+                  nir_imul(b,
+                           nir_ssa_for_src(b, *vertex, 1),
+                           nir_imm_int(b,
+                                       vue_map->num_per_vertex_slots));
+
+               /* Add it to the existing offset */
+               nir_src *offset = nir_get_io_offset_src(intrin);
+               nir_ssa_def *total_offset =
+                  nir_iadd(b, vertex_offset,
+                           nir_ssa_for_src(b, *offset, 1));
+
+               nir_instr_rewrite_src(&intrin->instr, offset,
+                                     nir_src_for_ssa(total_offset));
+            }
          }
       }
-      break;
-   case MESA_SHADER_GEOMETRY: {
-      if (!is_scalar) {
-         foreach_list_typed(nir_variable, var, node, &nir->inputs) {
-            var->data.driver_location = var->data.location;
+   }
+   return true;
+}
+
+void
+brw_nir_lower_vs_inputs(nir_shader *nir,
+                        const struct brw_device_info *devinfo,
+                        bool is_scalar,
+                        bool use_legacy_snorm_formula,
+                        const uint8_t *vs_attrib_wa_flags)
+{
+   /* Start with the location of the variable's base. */
+   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+      var->data.driver_location = var->data.location;
+   }
+
+   /* Now use nir_lower_io to walk dereference chains. Attribute arrays are
+    * loaded as one vec4 or dvec4 per element (or matrix column), depending on
+    * whether it is a double-precision type or not.
+    */
+   nir_lower_io(nir, nir_var_shader_in, type_size_vs_input);
+
+   /* This pass needs actual constants */
+   nir_opt_constant_folding(nir);
+
+   add_const_offset_to_base(nir, nir_var_shader_in);
+
+   brw_nir_apply_attribute_workarounds(nir, use_legacy_snorm_formula,
+                                       vs_attrib_wa_flags);
+
+   if (is_scalar) {
+      /* Finally, translate VERT_ATTRIB_* values into the actual registers. */
+
+      nir_foreach_function(function, nir) {
+         if (function->impl) {
+            nir_foreach_block(block, function->impl) {
+               remap_vs_attrs(block, &nir->info);
+            }
          }
-      } else {
-         /* The GLSL linker will have already matched up GS inputs and
-          * the outputs of prior stages. The driver does extend VS outputs
-          * in some cases, but only for legacy OpenGL or Gen4-5 hardware,
-          * neither of which offer geometry shader support. So we can
-          * safely ignore that.
-          *
-          * For SSO pipelines, we use a fixed VUE map layout based on variable
-          * locations, so we can rely on rendezvous-by-location to make this
-          * work.
-          *
-          * However, we need to ignore VARYING_SLOT_PRIMITIVE_ID, as it's not
-          * written by previous stages and shows up via payload magic.
-          */
-         struct brw_vue_map input_vue_map;
-         GLbitfield64 inputs_read =
-            nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID;
-         brw_compute_vue_map(devinfo, &input_vue_map, inputs_read,
-                             nir->info.separate_shader);
-
-         /* Start with the slot for the variable's base. */
-         foreach_list_typed(nir_variable, var, node, &nir->inputs) {
-            assert(input_vue_map.varying_to_slot[var->data.location] != -1);
-            var->data.driver_location =
-               input_vue_map.varying_to_slot[var->data.location];
+      }
+   }
+}
+
+void
+brw_nir_lower_vue_inputs(nir_shader *nir, bool is_scalar,
+                         const struct brw_vue_map *vue_map)
+{
+   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+      var->data.driver_location = var->data.location;
+   }
+
+   /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
+   nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+
+   if (is_scalar || nir->stage != MESA_SHADER_GEOMETRY) {
+      /* This pass needs actual constants */
+      nir_opt_constant_folding(nir);
+
+      add_const_offset_to_base(nir, nir_var_shader_in);
+
+      nir_foreach_function(function, nir) {
+         if (function->impl) {
+            nir_foreach_block(block, function->impl) {
+               remap_inputs_with_vue_map(block, vue_map);
+            }
          }
+      }
+   }
+}
 
-         /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
-         nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+void
+brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
+{
+   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+      var->data.driver_location = var->data.location;
+   }
+
+   nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+
+   /* This pass needs actual constants */
+   nir_opt_constant_folding(nir);
+
+   add_const_offset_to_base(nir, nir_var_shader_in);
+
+   nir_foreach_function(function, nir) {
+      if (function->impl) {
+         nir_builder b;
+         nir_builder_init(&b, function->impl);
+         nir_foreach_block(block, function->impl) {
+            remap_patch_urb_offsets(block, &b, vue_map);
+         }
       }
-      break;
    }
-   case MESA_SHADER_FRAGMENT:
-      assert(is_scalar);
-      nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
-                               type_size_scalar);
-      break;
-   case MESA_SHADER_COMPUTE:
-      /* Compute shaders have no inputs. */
-      assert(exec_list_is_empty(&nir->inputs));
-      break;
-   default:
-      unreachable("unsupported shader stage");
+}
+
+void
+brw_nir_lower_fs_inputs(nir_shader *nir)
+{
+   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+      var->data.driver_location = var->data.location;
    }
+
+   nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+
+   /* This pass needs actual constants */
+   nir_opt_constant_folding(nir);
+
+   add_const_offset_to_base(nir, nir_var_shader_in);
 }
 
-static void
-brw_nir_lower_outputs(nir_shader *nir, bool is_scalar)
+void
+brw_nir_lower_vue_outputs(nir_shader *nir,
+                          bool is_scalar)
 {
-   switch (nir->stage) {
-   case MESA_SHADER_VERTEX:
-   case MESA_SHADER_GEOMETRY:
-      if (is_scalar) {
-         nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
-                                  type_size_vec4_times_4);
-         nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4);
-      } else {
-         nir_foreach_variable(var, &nir->outputs)
-            var->data.driver_location = var->data.location;
-      }
-      break;
-   case MESA_SHADER_FRAGMENT:
+   if (is_scalar) {
       nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
-                               type_size_scalar);
-      break;
-   case MESA_SHADER_COMPUTE:
-      /* Compute shaders have no outputs. */
-      assert(exec_list_is_empty(&nir->outputs));
-      break;
-   default:
-      unreachable("unsupported shader stage");
+                               VARYING_SLOT_VAR0,
+                               type_size_vec4_times_4);
+      nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4);
+   } else {
+      nir_foreach_variable(var, &nir->outputs)
+         var->data.driver_location = var->data.location;
+      nir_lower_io(nir, nir_var_shader_out, type_size_vec4);
    }
 }
 
-static bool
-should_clone_nir()
+void
+brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map)
 {
-   static int should_clone = -1;
-   if (should_clone < 1)
-      should_clone = brw_env_var_as_boolean("NIR_TEST_CLONE", false);
+   nir_foreach_variable(var, &nir->outputs) {
+      var->data.driver_location = var->data.location;
+   }
+
+   nir_lower_io(nir, nir_var_shader_out, type_size_vec4);
 
-   return should_clone;
+   /* This pass needs actual constants */
+   nir_opt_constant_folding(nir);
+
+   add_const_offset_to_base(nir, nir_var_shader_out);
+
+   nir_foreach_function(function, nir) {
+      if (function->impl) {
+         nir_builder b;
+         nir_builder_init(&b, function->impl);
+         nir_foreach_block(block, function->impl) {
+            remap_patch_urb_offsets(block, &b, vue_map);
+         }
+      }
+   }
 }
 
-#define _OPT(do_pass) (({                                  \
-   bool this_progress = true;                              \
-   do_pass                                                 \
-   nir_validate_shader(nir);                               \
-   if (should_clone_nir()) {                               \
-      nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
-      ralloc_free(nir);                                    \
-      nir = clone;                                         \
-   }                                                       \
-   this_progress;                                          \
-}))
-
-#define OPT(pass, ...) _OPT(                   \
-   nir_metadata_set_validation_flag(nir);      \
-   this_progress = pass(nir ,##__VA_ARGS__);   \
-   if (this_progress) {                        \
-      progress = true;                         \
-      nir_metadata_check_validation_flag(nir); \
-   }                                           \
-)
-
-#define OPT_V(pass, ...) _OPT( \
-   pass(nir, ##__VA_ARGS__);   \
-)
+void
+brw_nir_lower_fs_outputs(nir_shader *nir)
+{
+   nir_foreach_variable(var, &nir->outputs) {
+      var->data.driver_location =
+         SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
+         SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
+   }
+
+   nir_lower_io(nir, nir_var_shader_out, type_size_dvec4);
+}
+
+void
+brw_nir_lower_cs_shared(nir_shader *nir)
+{
+   nir_assign_var_locations(&nir->shared, &nir->num_shared, 0,
+                            type_size_scalar_bytes);
+   nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes);
+}
+
+#define OPT(pass, ...) ({                                  \
+   bool this_progress = false;                             \
+   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
+   if (this_progress)                                      \
+      progress = true;                                     \
+   this_progress;                                          \
+})
+
+#define OPT_V(pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)
 
 static nir_shader *
 nir_optimize(nir_shader *nir, bool is_scalar)
@@ -233,6 +393,16 @@ nir_optimize(nir_shader *nir, bool is_scalar)
       OPT(nir_opt_dead_cf);
       OPT(nir_opt_remove_phis);
      OPT(nir_opt_undef);
+      OPT_V(nir_lower_doubles, nir_lower_drcp |
+                               nir_lower_dsqrt |
+                               nir_lower_drsq |
+                               nir_lower_dtrunc |
+                               nir_lower_dfloor |
+                               nir_lower_dceil |
+                               nir_lower_dfract |
+                               nir_lower_dround_even |
+                               nir_lower_dmod);
+      OPT_V(nir_lower_double_pack);
    } while (progress);
 
    return nir;
@@ -248,16 +418,23 @@ nir_optimize(nir_shader *nir, bool is_scalar)
  * is_scalar = true to scalarize everything prior to code gen.
  */
 nir_shader *
-brw_preprocess_nir(nir_shader *nir, bool is_scalar)
+brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
 {
    bool progress; /* Written by OPT and OPT_V */
    (void)progress;
 
+   const bool is_scalar = compiler->scalar_stage[nir->stage];
+
    if (nir->stage == MESA_SHADER_GEOMETRY)
       OPT(nir_lower_gs_intrinsics);
 
+   if (compiler->precise_trig)
+      OPT(brw_nir_apply_trig_workarounds);
+
    static const nir_lower_tex_options tex_options = {
       .lower_txp = ~0,
+      .lower_txf_offset = true,
+      .lower_rect_offset = true,
    };
 
    OPT(nir_lower_tex, &tex_options);
@@ -269,53 +446,21 @@ brw_preprocess_nir(nir_shader *nir, bool is_scalar)
 
    nir = nir_optimize(nir, is_scalar);
 
+   if (is_scalar) {
+      OPT_V(nir_lower_load_const_to_scalar);
+   }
+
    /* Lower a bunch of stuff */
    OPT_V(nir_lower_var_copies);
 
    /* Get rid of split copies */
    nir = nir_optimize(nir, is_scalar);
 
-   OPT(nir_remove_dead_variables);
+   OPT(nir_remove_dead_variables, nir_var_local);
 
    return nir;
 }
 
-/* Lowers inputs, outputs, uniforms, and samplers for i965
- *
- * This function does all of the standard lowering prior to post-processing.
- * The lowering done is highly gen, stage, and backend-specific.  The
- * shader_prog parameter is optional and is used only for lowering sampler
- * derefs and atomics for GLSL shaders.
- */
-nir_shader *
-brw_lower_nir(nir_shader *nir,
-              const struct brw_device_info *devinfo,
-              const struct gl_shader_program *shader_prog,
-              bool is_scalar)
-{
-   bool progress; /* Written by OPT and OPT_V */
-   (void)progress;
-
-   OPT_V(brw_nir_lower_inputs, devinfo, is_scalar);
-   OPT_V(brw_nir_lower_outputs, is_scalar);
-   nir_assign_var_locations(&nir->uniforms,
-                            &nir->num_uniforms,
-                            is_scalar ? type_size_scalar : type_size_vec4);
-   OPT_V(nir_lower_io, nir_var_all, is_scalar ? type_size_scalar : type_size_vec4);
-
-   if (shader_prog) {
-      OPT_V(nir_lower_samplers, shader_prog);
-   }
-
-   OPT(nir_lower_system_values);
-
-   if (shader_prog) {
-      OPT_V(nir_lower_atomics, shader_prog);
-   }
-
-   return nir_optimize(nir, is_scalar);
-}
-
 /* Prepare the given shader for codegen
  *
  * This function is intended to be called right before going into the actual
@@ -334,6 +479,8 @@ brw_postprocess_nir(nir_shader *nir,
    bool progress; /* Written by OPT and OPT_V */
    (void)progress;
 
+   nir = nir_optimize(nir, is_scalar);
+
    if (devinfo->gen >= 6) {
       /* Try and fuse multiply-adds */
       OPT(brw_nir_opt_peephole_ffma);
@@ -349,9 +496,9 @@ brw_postprocess_nir(nir_shader *nir,
 
    if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
-      nir_foreach_overload(nir, overload) {
-         if (overload->impl)
-            nir_index_ssa_defs(overload->impl);
+      nir_foreach_function(function, nir) {
+         if (function->impl)
+            nir_index_ssa_defs(function->impl);
       }
 
       fprintf(stderr, "NIR (SSA form) for %s shader:\n",
@@ -385,37 +532,6 @@ brw_postprocess_nir(nir_shader *nir,
    return nir;
 }
 
-nir_shader *
-brw_create_nir(struct brw_context *brw,
-               const struct gl_shader_program *shader_prog,
-               const struct gl_program *prog,
-               gl_shader_stage stage,
-               bool is_scalar)
-{
-   struct gl_context *ctx = &brw->ctx;
-   const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
-   const nir_shader_compiler_options *options =
-      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
-   bool progress;
-   nir_shader *nir;
-
-   /* First, lower the GLSL IR or Mesa IR to NIR */
-   if (shader_prog) {
-      nir = glsl_to_nir(shader_prog, stage, options);
-   } else {
-      nir = prog_to_nir(prog, options);
-      OPT_V(nir_convert_to_ssa); /* turn registers into SSA */
-   }
-   nir_validate_shader(nir);
-
-   (void)progress;
-
-   nir = brw_preprocess_nir(nir, is_scalar);
-   nir = brw_lower_nir(nir, devinfo, shader_prog, is_scalar);
-
-   return nir;
-}
-
 nir_shader *
 brw_nir_apply_sampler_key(nir_shader *nir,
                           const struct brw_device_info *devinfo,
@@ -445,6 +561,10 @@ brw_nir_apply_sampler_key(nir_shader *nir,
            tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
       }
 
+      tex_options.lower_y_uv_external = key_tex->y_uv_image_mask;
+      tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
+      tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;
+
    if (nir_lower_tex(nir, &tex_options)) {
       nir_validate_shader(nir);
       nir = nir_optimize(nir, is_scalar);
@@ -458,12 +578,24 @@ brw_type_for_nir_type(nir_alu_type type)
 {
    switch (type) {
    case nir_type_uint:
+   case nir_type_uint32:
      return BRW_REGISTER_TYPE_UD;
    case nir_type_bool:
    case nir_type_int:
+   case nir_type_bool32:
+   case nir_type_int32:
      return BRW_REGISTER_TYPE_D;
    case nir_type_float:
+   case nir_type_float32:
      return BRW_REGISTER_TYPE_F;
+   case nir_type_float64:
+      return BRW_REGISTER_TYPE_DF;
+   case nir_type_int64:
+   case nir_type_uint64:
+      /* TODO we should only see these in moves, so for now it's ok, but when
+       * we add actual 64-bit integer support we should fix this.
+       */
+      return BRW_REGISTER_TYPE_DF;
    default:
      unreachable("unknown type");
    }
@@ -479,12 +611,18 @@ brw_glsl_base_type_for_nir_type(nir_alu_type type)
 {
    switch (type) {
    case nir_type_float:
+   case nir_type_float32:
      return GLSL_TYPE_FLOAT;
+   case nir_type_float64:
+      return GLSL_TYPE_DOUBLE;
+
    case nir_type_int:
+   case nir_type_int32:
      return GLSL_TYPE_INT;
    case nir_type_uint:
+   case nir_type_uint32:
      return GLSL_TYPE_UINT;
    default:
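
Editor's illustration (not part of the patch above): the VS attribute remapping in remap_vs_attrs() boils down to a popcount over the inputs_read bitfield. The standalone sketch below reproduces that arithmetic under stated assumptions -- the helper name is invented, and __builtin_popcountll stands in for Mesa's _mesa_bitcount_64/BITFIELD64_MASK.

   #include <stdint.h>
   #include <stdio.h>

   /* Sketch of remap_vs_attrs(): an attribute's slot is the number of
    * enabled attributes below it (plus one extra slot per double-precision
    * input below it), and each slot spans four components. */
   static int
   vs_attr_to_component_base(uint64_t inputs_read, uint64_t double_inputs_read,
                             int attr)
   {
      uint64_t below = (attr >= 64) ? ~0ull : ((1ull << attr) - 1);
      int slot  = __builtin_popcountll(inputs_read & below);
      int dslot = __builtin_popcountll(double_inputs_read & below);
      return 4 * (slot + dslot);
   }

   int main(void)
   {
      /* Attributes 0, 3 and 7 are read; attribute 7 is the third enabled
       * attribute, so its base component is 4 * 2 = 8. */
      uint64_t inputs_read = (1ull << 0) | (1ull << 3) | (1ull << 7);
      printf("%d\n", vs_attr_to_component_base(inputs_read, 0, 7));
      return 0;
   }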