diff --git a/src/intel/compiler/brw_compiler.h b/src/intel/compiler/brw_compiler.h
index 66d6a6f5ee8..736d85669dc 100644
--- a/src/intel/compiler/brw_compiler.h
+++ b/src/intel/compiler/brw_compiler.h
@@ -25,9 +25,10 @@
 #define BRW_COMPILER_H
 
 #include <stdint.h>
-#include "common/gen_device_info.h"
-#include "main/mtypes.h"
+#include "dev/gen_device_info.h"
 #include "main/macros.h"
+#include "main/mtypes.h"
+#include "util/ralloc.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -36,7 +37,6 @@ extern "C" {
 struct ra_regs;
 struct nir_shader;
 struct brw_program;
-union gl_constant_value;
 
 struct brw_compiler {
    const struct gen_device_info *devinfo;
@@ -93,6 +93,7 @@ struct brw_compiler {
    void (*shader_perf_log)(void *, const char *str, ...) PRINTFLIKE(2, 3);
 
    bool scalar_stage[MESA_SHADER_STAGES];
+   bool use_tcs_8_patch;
    struct gl_shader_compiler_options glsl_compiler_options[MESA_SHADER_STAGES];
 
    /**
@@ -106,8 +107,28 @@ struct brw_compiler {
    * Base Address? (If not, it's a normal GPU address.)
    */
   bool constant_buffer_0_is_relative;
+
+  /**
+   * Whether or not the driver supports pull constants. If not, the compiler
+   * will attempt to push everything.
+   */
+  bool supports_pull_constants;
+
+  /**
+   * Whether or not the driver supports NIR shader constants. This controls
+   * whether nir_opt_large_constants will be run.
+   */
+  bool supports_shader_constants;
 };
 
+/**
+ * We use a constant subgroup size of 32. It really only needs to be a
+ * maximum and, since we do SIMD32 for compute shaders in some cases, it
+ * needs to be at least 32. SIMD8 and SIMD16 shaders will still claim a
+ * subgroup size of 32 but will act as if 16 or 24 of those channels are
+ * disabled.
+ */
+#define BRW_SUBGROUP_SIZE 32
 
 /**
  * Program key structures.
@@ -175,6 +196,35 @@ struct brw_sampler_prog_key_data {
    uint32_t y_uv_image_mask;
    uint32_t yx_xuxv_image_mask;
    uint32_t xy_uxvx_image_mask;
+   uint32_t ayuv_image_mask;
+   uint32_t xyuv_image_mask;
+
+   /* Scale factor for each texture. */
+   float scale_factors[32];
+};
+
+/** An enum representing what kind of input gl_SubgroupSize is. */
+enum PACKED brw_subgroup_size_type
+{
+   BRW_SUBGROUP_SIZE_API_CONSTANT,  /**< Default Vulkan behavior */
+   BRW_SUBGROUP_SIZE_UNIFORM,       /**< OpenGL behavior */
+   BRW_SUBGROUP_SIZE_VARYING,       /**< VK_EXT_subgroup_size_control */
+
+   /* These enums are specifically chosen so that the value of the enum is
+    * also the subgroup size. If any new values are added, they must respect
+    * this invariant.
+    */
+   BRW_SUBGROUP_SIZE_REQUIRE_8  = 8,  /**< VK_EXT_subgroup_size_control */
+   BRW_SUBGROUP_SIZE_REQUIRE_16 = 16, /**< VK_EXT_subgroup_size_control */
+   BRW_SUBGROUP_SIZE_REQUIRE_32 = 32, /**< VK_EXT_subgroup_size_control */
+};
+
+struct brw_base_prog_key {
+   unsigned program_string_id;
+
+   enum brw_subgroup_size_type subgroup_size_type;
+
+   struct brw_sampler_prog_key_data tex;
 };
 
 /**
@@ -199,7 +249,7 @@ struct brw_sampler_prog_key_data {
 /** The program key for Vertex Shaders. */
 struct brw_vs_prog_key {
-   unsigned program_string_id;
+   struct brw_base_prog_key base;
 
    /**
    * Per-attribute workaround flags
@@ -237,14 +287,12 @@ struct brw_vs_prog_key
    * the VUE, even if they aren't written by the vertex shader.
*/ uint8_t point_coord_replace; - - struct brw_sampler_prog_key_data tex; }; /** The program key for Tessellation Control Shaders. */ struct brw_tcs_prog_key { - unsigned program_string_id; + struct brw_base_prog_key base; GLenum tes_primitive_mode; @@ -257,30 +305,24 @@ struct brw_tcs_prog_key uint64_t outputs_written; bool quads_workaround; - - struct brw_sampler_prog_key_data tex; }; /** The program key for Tessellation Evaluation Shaders. */ struct brw_tes_prog_key { - unsigned program_string_id; + struct brw_base_prog_key base; /** A bitfield of per-patch inputs read. */ uint32_t patch_inputs_read; /** A bitfield of per-vertex inputs read. */ uint64_t inputs_read; - - struct brw_sampler_prog_key_data tex; }; /** The program key for Geometry Shaders. */ struct brw_gs_prog_key { - unsigned program_string_id; - - struct brw_sampler_prog_key_data tex; + struct brw_base_prog_key base; }; enum brw_sf_primitive { @@ -368,12 +410,15 @@ enum brw_wm_aa_enable { /** The program key for Fragment/Pixel Shaders. */ struct brw_wm_prog_key { + struct brw_base_prog_key base; + /* Some collection of BRW_WM_IZ_* */ uint8_t iz_lookup; bool stats_wm:1; bool flat_shade:1; unsigned nr_color_regions:5; - bool replicate_alpha:1; + bool alpha_test_replicate_alpha:1; + bool alpha_to_coverage:1; bool clamp_fragment_color:1; bool persample_interp:1; bool multisample_fbo:1; @@ -383,18 +428,25 @@ struct brw_wm_prog_key { bool force_dual_color_blend:1; bool coherent_fb_fetch:1; - uint16_t drawable_height; + uint8_t color_outputs_valid; uint64_t input_slots_valid; - unsigned program_string_id; GLenum alpha_test_func; /* < For Gen4/5 MRT alpha test */ float alpha_test_ref; - - struct brw_sampler_prog_key_data tex; }; struct brw_cs_prog_key { - uint32_t program_string_id; - struct brw_sampler_prog_key_data tex; + struct brw_base_prog_key base; +}; + +/* brw_any_prog_key is any of the keys that map to an API stage */ +union brw_any_prog_key { + struct brw_base_prog_key base; + struct brw_vs_prog_key vs; + struct brw_tcs_prog_key tcs; + struct brw_tes_prog_key tes; + struct brw_gs_prog_key gs; + struct brw_wm_prog_key wm; + struct brw_cs_prog_key cs; }; /* @@ -404,18 +456,14 @@ struct brw_cs_prog_key { * entries [most of them except when we're doing untyped surface * access] will be removed by the uniform packing pass. */ -#define BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET 0 -#define BRW_IMAGE_PARAM_OFFSET_OFFSET 4 -#define BRW_IMAGE_PARAM_SIZE_OFFSET 8 -#define BRW_IMAGE_PARAM_STRIDE_OFFSET 12 -#define BRW_IMAGE_PARAM_TILING_OFFSET 16 -#define BRW_IMAGE_PARAM_SWIZZLING_OFFSET 20 -#define BRW_IMAGE_PARAM_SIZE 24 +#define BRW_IMAGE_PARAM_OFFSET_OFFSET 0 +#define BRW_IMAGE_PARAM_SIZE_OFFSET 4 +#define BRW_IMAGE_PARAM_STRIDE_OFFSET 8 +#define BRW_IMAGE_PARAM_TILING_OFFSET 12 +#define BRW_IMAGE_PARAM_SWIZZLING_OFFSET 16 +#define BRW_IMAGE_PARAM_SIZE 20 struct brw_image_param { - /** Surface binding table index. */ - uint32_t surface_idx; - /** Offset applied to the X and Y surface coordinates. 
*/ uint32_t offset[2]; @@ -491,6 +539,73 @@ struct brw_ubo_range uint8_t length; }; +/* We reserve the first 2^16 values for builtins */ +#define BRW_PARAM_IS_BUILTIN(param) (((param) & 0xffff0000) == 0) + +enum brw_param_builtin { + BRW_PARAM_BUILTIN_ZERO, + + BRW_PARAM_BUILTIN_CLIP_PLANE_0_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_0_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_0_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_0_W, + BRW_PARAM_BUILTIN_CLIP_PLANE_1_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_1_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_1_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_1_W, + BRW_PARAM_BUILTIN_CLIP_PLANE_2_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_2_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_2_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_2_W, + BRW_PARAM_BUILTIN_CLIP_PLANE_3_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_3_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_3_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_3_W, + BRW_PARAM_BUILTIN_CLIP_PLANE_4_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_4_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_4_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_4_W, + BRW_PARAM_BUILTIN_CLIP_PLANE_5_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_5_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_5_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_5_W, + BRW_PARAM_BUILTIN_CLIP_PLANE_6_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_6_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_6_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_6_W, + BRW_PARAM_BUILTIN_CLIP_PLANE_7_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_7_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_7_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_7_W, + + BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X, + BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y, + BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Z, + BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_W, + BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X, + BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y, + + BRW_PARAM_BUILTIN_PATCH_VERTICES_IN, + + BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_X, + BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Y, + BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Z, + BRW_PARAM_BUILTIN_SUBGROUP_ID, +}; + +#define BRW_PARAM_BUILTIN_CLIP_PLANE(idx, comp) \ + (BRW_PARAM_BUILTIN_CLIP_PLANE_0_X + ((idx) << 2) + (comp)) + +#define BRW_PARAM_BUILTIN_IS_CLIP_PLANE(param) \ + ((param) >= BRW_PARAM_BUILTIN_CLIP_PLANE_0_X && \ + (param) <= BRW_PARAM_BUILTIN_CLIP_PLANE_7_W) + +#define BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(param) \ + (((param) - BRW_PARAM_BUILTIN_CLIP_PLANE_0_X) >> 2) + +#define BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(param) \ + (((param) - BRW_PARAM_BUILTIN_CLIP_PLANE_0_X) & 0x3) + struct brw_stage_prog_data { struct { /** size of our binding table. */ @@ -504,7 +619,6 @@ struct brw_stage_prog_data { uint32_t gather_texture_start; uint32_t ubo_start; uint32_t ssbo_start; - uint32_t abo_start; uint32_t image_start; uint32_t shader_time_start; uint32_t plane_start[3]; @@ -515,12 +629,13 @@ struct brw_stage_prog_data { GLuint nr_params; /**< number of float params/constants */ GLuint nr_pull_params; - unsigned nr_image_params; unsigned curb_read_length; unsigned total_scratch; unsigned total_shared; + unsigned program_size; + /** * Register where the thread expects to find input data from the URB * (typically uniforms, followed by vertex or fragment attributes). @@ -529,27 +644,26 @@ struct brw_stage_prog_data { bool use_alt_mode; /**< Use ALT floating point mode? Otherwise, IEEE. */ - /* Pointers to tracked values (only valid once - * _mesa_load_state_parameters has been called at runtime). + /* 32-bit identifiers for all push/pull parameters. These can be anything + * the driver wishes them to be; the core of the back-end compiler simply + * re-arranges them. 
The one restriction is that the bottom 2^16 values + * are reserved for builtins defined in the brw_param_builtin enum defined + * above. */ - const union gl_constant_value **param; - const union gl_constant_value **pull_param; - - /** Image metadata passed to the shader as uniforms. */ - struct brw_image_param *image_param; + uint32_t *param; + uint32_t *pull_param; }; -static inline void -brw_mark_surface_used(struct brw_stage_prog_data *prog_data, - unsigned surf_index) +static inline uint32_t * +brw_stage_prog_data_add_params(struct brw_stage_prog_data *prog_data, + unsigned nr_new_params) { - /* A binding table index is 8 bits and the top 3 values are reserved for - * special things (stateless and SLM). - */ - assert(surf_index <= 252); - - prog_data->binding_table.size_bytes = - MAX2(prog_data->binding_table.size_bytes, (surf_index + 1) * 4); + unsigned old_nr_params = prog_data->nr_params; + prog_data->nr_params += nr_new_params; + prog_data->param = reralloc(ralloc_parent(prog_data->param), + prog_data->param, uint32_t, + prog_data->nr_params); + return prog_data->param + old_nr_params; } enum brw_barycentric_mode { @@ -583,17 +697,19 @@ struct brw_wm_prog_data { GLuint num_varying_inputs; - uint8_t reg_blocks_0; - uint8_t reg_blocks_2; + uint8_t reg_blocks_8; + uint8_t reg_blocks_16; + uint8_t reg_blocks_32; - uint8_t dispatch_grf_start_reg_2; - uint32_t prog_offset_2; + uint8_t dispatch_grf_start_reg_16; + uint8_t dispatch_grf_start_reg_32; + uint32_t prog_offset_16; + uint32_t prog_offset_32; struct { /** @{ * surface indices the WM-specific surfaces */ - uint32_t render_target_start; uint32_t render_target_read_start; /** @} */ } binding_table; @@ -606,7 +722,9 @@ struct brw_wm_prog_data { bool inner_coverage; bool dispatch_8; bool dispatch_16; + bool dispatch_32; bool dual_src_blend; + bool replicate_alpha; bool persample_dispatch; bool uses_pos_offset; bool uses_omask; @@ -614,6 +732,7 @@ struct brw_wm_prog_data { bool uses_src_depth; bool uses_src_w; bool uses_sample_mask; + bool has_render_target_reads; bool has_side_effects; bool pulls_bary; @@ -645,6 +764,91 @@ struct brw_wm_prog_data { int urb_setup[VARYING_SLOT_MAX]; }; +/** Returns the SIMD width corresponding to a given KSP index + * + * The "Variable Pixel Dispatch" table in the PRM (which can be found, for + * example in Vol. 7 of the SKL PRM) has a mapping from dispatch widths to + * kernel start pointer (KSP) indices that is based on what dispatch widths + * are enabled. This function provides, effectively, the reverse mapping. + * + * If the given KSP is valid with respect to the SIMD8/16/32 enables, a SIMD + * width of 8, 16, or 32 is returned. If the KSP is invalid, 0 is returned. + */ +static inline unsigned +brw_fs_simd_width_for_ksp(unsigned ksp_idx, bool simd8_enabled, + bool simd16_enabled, bool simd32_enabled) +{ + /* This function strictly ignores contiguous dispatch */ + switch (ksp_idx) { + case 0: + return simd8_enabled ? 8 : + (simd16_enabled && !simd32_enabled) ? 16 : + (simd32_enabled && !simd16_enabled) ? 32 : 0; + case 1: + return (simd32_enabled && (simd16_enabled || simd8_enabled)) ? 32 : 0; + case 2: + return (simd16_enabled && (simd32_enabled || simd8_enabled)) ? 
16 : 0; + default: + unreachable("Invalid KSP index"); + } +} + +#define brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx) \ + brw_fs_simd_width_for_ksp((ksp_idx), (wm_state)._8PixelDispatchEnable, \ + (wm_state)._16PixelDispatchEnable, \ + (wm_state)._32PixelDispatchEnable) + +#define brw_wm_state_has_ksp(wm_state, ksp_idx) \ + (brw_wm_state_simd_width_for_ksp((wm_state), (ksp_idx)) != 0) + +static inline uint32_t +_brw_wm_prog_data_prog_offset(const struct brw_wm_prog_data *prog_data, + unsigned simd_width) +{ + switch (simd_width) { + case 8: return 0; + case 16: return prog_data->prog_offset_16; + case 32: return prog_data->prog_offset_32; + default: return 0; + } +} + +#define brw_wm_prog_data_prog_offset(prog_data, wm_state, ksp_idx) \ + _brw_wm_prog_data_prog_offset(prog_data, \ + brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx)) + +static inline uint8_t +_brw_wm_prog_data_dispatch_grf_start_reg(const struct brw_wm_prog_data *prog_data, + unsigned simd_width) +{ + switch (simd_width) { + case 8: return prog_data->base.dispatch_grf_start_reg; + case 16: return prog_data->dispatch_grf_start_reg_16; + case 32: return prog_data->dispatch_grf_start_reg_32; + default: return 0; + } +} + +#define brw_wm_prog_data_dispatch_grf_start_reg(prog_data, wm_state, ksp_idx) \ + _brw_wm_prog_data_dispatch_grf_start_reg(prog_data, \ + brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx)) + +static inline uint8_t +_brw_wm_prog_data_reg_blocks(const struct brw_wm_prog_data *prog_data, + unsigned simd_width) +{ + switch (simd_width) { + case 8: return prog_data->reg_blocks_8; + case 16: return prog_data->reg_blocks_16; + case 32: return prog_data->reg_blocks_32; + default: return 0; + } +} + +#define brw_wm_prog_data_reg_blocks(prog_data, wm_state, ksp_idx) \ + _brw_wm_prog_data_reg_blocks(prog_data, \ + brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx)) + struct brw_push_const_block { unsigned dwords; /* Dword count, not reg aligned */ unsigned regs; @@ -654,13 +858,11 @@ struct brw_push_const_block { struct brw_cs_prog_data { struct brw_stage_prog_data base; - GLuint dispatch_grf_start_reg_16; unsigned local_size[3]; unsigned simd_size; unsigned threads; bool uses_barrier; bool uses_num_work_groups; - int thread_local_id_index; struct { struct brw_push_const_block cross_thread; @@ -809,14 +1011,16 @@ void brw_compute_tess_vue_map(struct brw_vue_map *const vue_map, /* brw_interpolation_map.c */ void brw_setup_vue_interpolation(struct brw_vue_map *vue_map, struct nir_shader *nir, - struct brw_wm_prog_data *prog_data, - const struct gen_device_info *devinfo); + struct brw_wm_prog_data *prog_data); enum shader_dispatch_mode { DISPATCH_MODE_4X1_SINGLE = 0, DISPATCH_MODE_4X2_DUAL_INSTANCE = 1, DISPATCH_MODE_4X2_DUAL_OBJECT = 2, DISPATCH_MODE_SIMD8 = 3, + + DISPATCH_MODE_TCS_SINGLE_PATCH = 0, + DISPATCH_MODE_TCS_8_PATCH = 2, }; /** @@ -875,12 +1079,12 @@ struct brw_vs_prog_data { GLbitfield64 inputs_read; GLbitfield64 double_inputs_read; - unsigned nr_attributes; unsigned nr_attribute_slots; bool uses_vertexid; bool uses_instanceid; - bool uses_basevertex; + bool uses_is_indexed_draw; + bool uses_firstvertex; bool uses_baseinstance; bool uses_drawid; }; @@ -889,6 +1093,9 @@ struct brw_tcs_prog_data { struct brw_vue_prog_data base; + /** Should the non-SINGLE_PATCH payload provide primitive ID? 
 */
+   bool include_primitive_id;
+
    /** Number of vertices in output patch */
    int instances;
 };
 
@@ -985,6 +1192,18 @@ struct brw_clip_prog_data {
    uint32_t total_grf;
 };
 
+/* brw_any_prog_data is prog_data for any stage that maps to an API stage */
+union brw_any_prog_data {
+   struct brw_stage_prog_data base;
+   struct brw_vue_prog_data vue;
+   struct brw_vs_prog_data vs;
+   struct brw_tcs_prog_data tcs;
+   struct brw_tes_prog_data tes;
+   struct brw_gs_prog_data gs;
+   struct brw_wm_prog_data wm;
+   struct brw_cs_prog_data cs;
+};
+
 #define DEFINE_PROG_DATA_DOWNCAST(stage) \
 static inline struct brw_##stage##_prog_data * \
 brw_##stage##_prog_data(struct brw_stage_prog_data *prog_data) \
@@ -1008,6 +1227,27 @@ DEFINE_PROG_DATA_DOWNCAST(sf)
 struct brw_compiler *
 brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo);
 
+/**
+ * Returns a compiler configuration for use with disk shader cache
+ *
+ * This value only needs to change for settings that can cause different
+ * program generation between two runs on the same hardware.
+ *
+ * For example, it doesn't need to be different for gen 8 and gen 9 hardware,
+ * but it does need to be different if INTEL_DEBUG=nocompact is or isn't used.
+ */
+uint64_t
+brw_get_compiler_config_value(const struct brw_compiler *compiler);
+
+unsigned
+brw_prog_data_size(gl_shader_stage stage);
+
+unsigned
+brw_prog_key_size(gl_shader_stage stage);
+
+void
+brw_prog_key_set_id(union brw_any_prog_key *key, gl_shader_stage, unsigned id);
+
 /**
  * Compile a vertex shader.
  *
@@ -1018,11 +1258,8 @@
 brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
                void *mem_ctx,
                const struct brw_vs_prog_key *key,
                struct brw_vs_prog_data *prog_data,
-               const struct nir_shader *shader,
-               gl_clip_plane *clip_planes,
-               bool use_legacy_snorm_formula,
+               struct nir_shader *shader,
                int shader_time_index,
-               unsigned *final_assembly_size,
                char **error_str);
 
 /**
@@ -1036,9 +1273,8 @@ brw_compile_tcs(const struct brw_compiler *compiler,
                 void *mem_ctx,
                 const struct brw_tcs_prog_key *key,
                 struct brw_tcs_prog_data *prog_data,
-                const struct nir_shader *nir,
+                struct nir_shader *nir,
                 int shader_time_index,
-                unsigned *final_assembly_size,
                 char **error_str);
 
 /**
@@ -1052,10 +1288,9 @@ brw_compile_tes(const struct brw_compiler *compiler, void *log_data,
                 const struct brw_tes_prog_key *key,
                 const struct brw_vue_map *input_vue_map,
                 struct brw_tes_prog_data *prog_data,
-                const struct nir_shader *shader,
+                struct nir_shader *shader,
                 struct gl_program *prog,
                 int shader_time_index,
-                unsigned *final_assembly_size,
                 char **error_str);
 
 /**
@@ -1068,10 +1303,9 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
                void *mem_ctx,
                const struct brw_gs_prog_key *key,
                struct brw_gs_prog_data *prog_data,
-               const struct nir_shader *shader,
+               struct nir_shader *shader,
               struct gl_program *prog,
               int shader_time_index,
-              unsigned *final_assembly_size,
               char **error_str);
 
 /**
@@ -1116,13 +1350,13 @@ brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_wm_prog_key *key,
               struct brw_wm_prog_data *prog_data,
-              const struct nir_shader *shader,
+              struct nir_shader *shader,
              struct gl_program *prog,
              int shader_time_index8,
              int shader_time_index16,
+             int shader_time_index32,
              bool allow_spilling,
              bool use_rep_send, struct brw_vue_map *vue_map,
-             unsigned *final_assembly_size,
             char **error_str);
 
 /**
@@ -1137,9 +1371,13 @@ brw_compile_cs(const struct brw_compiler *compiler, void *log_data,
              void *mem_ctx,
              const struct brw_cs_prog_key *key,
              struct brw_cs_prog_data *prog_data,
              const struct nir_shader *shader,
              int shader_time_index,
-             unsigned *final_assembly_size,
              char **error_str);
 
+void brw_debug_key_recompile(const struct brw_compiler *c, void *log,
+                             gl_shader_stage stage,
+                             const struct brw_base_prog_key *old_key,
+                             const struct brw_base_prog_key *key);
+
 static inline uint32_t
 encode_slm_size(unsigned gen, uint32_t bytes)
 {
@@ -1179,7 +1417,7 @@ encode_slm_size(unsigned gen, uint32_t bytes)
  * '2^n - 1' for some n.
  */
 static inline bool
-brw_stage_has_packed_dispatch(const struct gen_device_info *devinfo,
+brw_stage_has_packed_dispatch(ASSERTED const struct gen_device_info *devinfo,
                               gl_shader_stage stage,
                               const struct brw_stage_prog_data *prog_data)
 {
@@ -1188,7 +1426,7 @@ brw_stage_has_packed_dispatch(const struct gen_device_info *devinfo,
    * to do a full test run with brw_fs_test_dispatch_packing() hooked up to
    * the NIR front-end before changing this assertion.
    */
-   assert(devinfo->gen <= 10);
+   assert(devinfo->gen <= 11);
 
    switch (stage) {
    case MESA_SHADER_FRAGMENT: {
@@ -1221,6 +1459,35 @@
    }
 }
 
+/**
+ * Computes the first varying slot in the URB produced by the previous stage
+ * that is used in the next stage. We do this by testing the varying slots in
+ * the previous stage's VUE map against the inputs read in the next stage.
+ *
+ * Note that:
+ *
+ * - Each URB offset contains two varying slots and we can only skip a
+ *   full offset if both slots are unused, so the value we return here is
+ *   always rounded down to the closest multiple of two.
+ *
+ * - gl_Layer and gl_ViewportIndex don't have their own varying slots; they
+ *   are part of the VUE header, so if either is read we can't skip anything.
+ */
+static inline int
+brw_compute_first_urb_slot_required(uint64_t inputs_read,
+                                    const struct brw_vue_map *prev_stage_vue_map)
+{
+   if ((inputs_read & (VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT)) == 0) {
+      for (int i = 0; i < prev_stage_vue_map->num_slots; i++) {
+         int varying = prev_stage_vue_map->slot_to_varying[i];
+         if (varying > 0 && (inputs_read & BITFIELD64_BIT(varying)) != 0)
+            return ROUND_DOWN_TO(i, 2);
+      }
+   }
+
+   return 0;
+}
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
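
Illustrative sketches for the interfaces added above. None of the example_*
helpers below exist in the header; they are hypothetical code showing
intended usage.

The brw_subgroup_size_type comment guarantees that the REQUIRE_* enum values
equal the subgroup size itself, so a driver can resolve the size it reports
for gl_SubgroupSize without a lookup table. A minimal sketch, assuming a
hypothetical dispatch_width argument for the non-required modes:

   static unsigned
   example_reported_subgroup_size(enum brw_subgroup_size_type type,
                                  unsigned dispatch_width)
   {
      switch (type) {
      case BRW_SUBGROUP_SIZE_API_CONSTANT:
         return BRW_SUBGROUP_SIZE;   /* Vulkan default: always report 32 */
      case BRW_SUBGROUP_SIZE_UNIFORM:
      case BRW_SUBGROUP_SIZE_VARYING:
         return dispatch_width;      /* report the SIMD width actually used */
      case BRW_SUBGROUP_SIZE_REQUIRE_8:
      case BRW_SUBGROUP_SIZE_REQUIRE_16:
      case BRW_SUBGROUP_SIZE_REQUIRE_32:
         return (unsigned)type;      /* the enum value is the size */
      }
      return 0;
   }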
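
The param/pull_param rework replaces gl_constant_value pointers with plain
32-bit identifiers, and brw_stage_prog_data_add_params() grows the array with
reralloc, so it assumes prog_data->param was ralloc-allocated by the caller.
A sketch of pushing the eight user clip planes as builtins and decoding one
entry back (example_push_clip_planes is hypothetical):

   static void
   example_push_clip_planes(struct brw_stage_prog_data *prog_data)
   {
      uint32_t *param = brw_stage_prog_data_add_params(prog_data, 8 * 4);

      for (unsigned plane = 0; plane < 8; plane++) {
         for (unsigned comp = 0; comp < 4; comp++)
            *param++ = BRW_PARAM_BUILTIN_CLIP_PLANE(plane, comp);
      }

      /* Identifiers below 2^16 are builtins; the companion macros recover
       * the plane index and component from the encoded value.
       */
      uint32_t p = BRW_PARAM_BUILTIN_CLIP_PLANE(1, 1);
      assert(BRW_PARAM_IS_BUILTIN(p));
      assert(BRW_PARAM_BUILTIN_IS_CLIP_PLANE(p));
      assert(BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(p) == 1);
      assert(BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(p) == 1);
   }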
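
brw_fs_simd_width_for_ksp() is the reverse of the PRM's "Variable Pixel
Dispatch" table: for example, with only SIMD8 and SIMD16 dispatch enabled it
maps KSP index 0 to 8, index 1 to 0 (unused) and index 2 to 16. A sketch of
how state setup might walk all three kernel start pointers
(example_emit_fs_kernels is hypothetical; real state setup would go through
the brw_wm_prog_data_*(prog_data, wm_state, ksp_idx) wrappers):

   static void
   example_emit_fs_kernels(const struct brw_wm_prog_data *prog_data)
   {
      for (unsigned ksp_idx = 0; ksp_idx < 3; ksp_idx++) {
         unsigned simd_width =
            brw_fs_simd_width_for_ksp(ksp_idx,
                                      prog_data->dispatch_8,
                                      prog_data->dispatch_16,
                                      prog_data->dispatch_32);
         if (simd_width == 0)
            continue;   /* this KSP slot is unused */

         /* Each enabled width has its own code offset, start GRF and
          * register demand, hence the new _16/_32 field variants.
          */
         uint32_t offset = _brw_wm_prog_data_prog_offset(prog_data, simd_width);
         uint8_t grf_start =
            _brw_wm_prog_data_dispatch_grf_start_reg(prog_data, simd_width);
         uint8_t reg_blocks =
            _brw_wm_prog_data_reg_blocks(prog_data, simd_width);

         (void)offset; (void)grf_start; (void)reg_blocks;
      }
   }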
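
brw_any_prog_key and brw_any_prog_data let stage-agnostic code, such as a
disk shader cache sized with brw_prog_data_size()/brw_prog_key_size(), store
any stage's data in a single object; the DEFINE_PROG_DATA_DOWNCAST functions
then recover the stage-specific view. A sketch of the round trip, assuming
the generated downcast is the usual first-member pointer cast (the wm
instantiation itself is outside the hunks shown here):

   static void
   example_any_prog_data_roundtrip(void)
   {
      union brw_any_prog_data any = { .wm.dispatch_16 = true };

      /* Every stage's prog_data embeds struct brw_stage_prog_data as its
       * first member, so downcasting from the base pointer is safe.
       */
      struct brw_wm_prog_data *wm = brw_wm_prog_data(&any.base);
      assert(wm->dispatch_16);
   }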
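
As a worked example of brw_compute_first_urb_slot_required(): suppose the
previous stage's VUE map is slot 0 = VARYING_SLOT_POS, slot 1 =
VARYING_SLOT_CLIP_DIST0, slot 2 = VARYING_SLOT_CLIP_DIST1, slot 3 =
VARYING_SLOT_VAR0, and the next stage reads only VAR0. The first slot read is
3, but slots 2 and 3 share one URB offset, so ROUND_DOWN_TO(3, 2) yields 2 and
the stage must still be fed starting at slot 2. If the stage also read
gl_Layer, the function would return 0, since the layer value lives in the VUE
header.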