X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fintel%2Fcompiler%2Fbrw_compiler.h;h=ab39af226840e14a236d1a0de1aafdc536ba55a5;hb=d6ae079771bc8f5ae3a9e8a333c50a6cacb7a77c;hp=0e27c898203f2db7466782030ed11b6b5a3ec070;hpb=03c07ac5480886ef5f5bd4cff4a7b6d20e142bc9;p=mesa.git

diff --git a/src/intel/compiler/brw_compiler.h b/src/intel/compiler/brw_compiler.h
index 0e27c898203..ab39af22684 100644
--- a/src/intel/compiler/brw_compiler.h
+++ b/src/intel/compiler/brw_compiler.h
@@ -27,6 +27,7 @@
 #include <stdio.h>
 #include "dev/gen_device_info.h"
 #include "main/macros.h"
+#include "main/mtypes.h"
 #include "util/ralloc.h"
 
 #ifdef __cplusplus
@@ -82,16 +83,17 @@ struct brw_compiler {
       uint8_t *ra_reg_to_grf;
 
       /**
-       * ra class for the aligned pairs we use for PLN, which doesn't
+       * ra class for the aligned barycentrics we use for PLN, which doesn't
        * appear in *classes.
        */
-      int aligned_pairs_class;
+      int aligned_bary_class;
    } fs_reg_sets[3];
 
    void (*shader_debug_log)(void *, const char *str, ...) PRINTFLIKE(2, 3);
    void (*shader_perf_log)(void *, const char *str, ...) PRINTFLIKE(2, 3);
 
    bool scalar_stage[MESA_SHADER_STAGES];
+   bool use_tcs_8_patch;
    struct gl_shader_compiler_options glsl_compiler_options[MESA_SHADER_STAGES];
 
    /**
@@ -111,6 +113,18 @@ struct brw_compiler {
    * will attempt to push everything.
    */
    bool supports_pull_constants;
+
+   /**
+    * Whether or not the driver supports NIR shader constants.  This controls
+    * whether nir_opt_large_constants will be run.
+    */
+   bool supports_shader_constants;
+
+   /**
+    * Whether or not the driver wants uniform params to be compacted by the
+    * back-end compiler.
+    */
+   bool compact_params;
 };
 
 /**
@@ -188,6 +202,35 @@ struct brw_sampler_prog_key_data {
    uint32_t y_uv_image_mask;
    uint32_t yx_xuxv_image_mask;
    uint32_t xy_uxvx_image_mask;
+   uint32_t ayuv_image_mask;
+   uint32_t xyuv_image_mask;
+
+   /* Scale factor for each texture. */
+   float scale_factors[32];
+};
+
+/** An enum representing what kind of input gl_SubgroupSize is. */
+enum PACKED brw_subgroup_size_type
+{
+   BRW_SUBGROUP_SIZE_API_CONSTANT,  /**< Default Vulkan behavior */
+   BRW_SUBGROUP_SIZE_UNIFORM,       /**< OpenGL behavior */
+   BRW_SUBGROUP_SIZE_VARYING,       /**< VK_EXT_subgroup_size_control */
+
+   /* These enums are specifically chosen so that the value of the enum is
+    * also the subgroup size.  If any new values are added, they must respect
+    * this invariant.
+    */
+   BRW_SUBGROUP_SIZE_REQUIRE_8  = 8,  /**< VK_EXT_subgroup_size_control */
+   BRW_SUBGROUP_SIZE_REQUIRE_16 = 16, /**< VK_EXT_subgroup_size_control */
+   BRW_SUBGROUP_SIZE_REQUIRE_32 = 32, /**< VK_EXT_subgroup_size_control */
+};
+
+struct brw_base_prog_key {
+   unsigned program_string_id;
+
+   enum brw_subgroup_size_type subgroup_size_type;
+
+   struct brw_sampler_prog_key_data tex;
 };
 
 /**
@@ -212,7 +255,7 @@ struct brw_sampler_prog_key_data {
 
 /** The program key for Vertex Shaders. */
 struct brw_vs_prog_key {
-   unsigned program_string_id;
+   struct brw_base_prog_key base;
 
    /**
    * Per-attribute workaround flags
@@ -250,14 +293,12 @@
    * the VUE, even if they aren't written by the vertex shader.
    */
    uint8_t point_coord_replace;
-
-   struct brw_sampler_prog_key_data tex;
 };
 
 /** The program key for Tessellation Control Shaders. */
 struct brw_tcs_prog_key
 {
-   unsigned program_string_id;
+   struct brw_base_prog_key base;
 
    GLenum tes_primitive_mode;
 
@@ -270,14 +311,12 @@
    uint64_t outputs_written;
 
    bool quads_workaround;
-
-   struct brw_sampler_prog_key_data tex;
 };
 
 /** The program key for Tessellation Evaluation Shaders. */
 struct brw_tes_prog_key
 {
-   unsigned program_string_id;
+   struct brw_base_prog_key base;
 
    /** A bitfield of per-patch inputs read. */
    uint32_t patch_inputs_read;
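
The hunks above fold the per-stage program_string_id and brw_sampler_prog_key_data fields into a common struct brw_base_prog_key that sits first in every stage key. A minimal sketch (not part of the diff; the helper name is illustrative) of what that layout buys, using only declarations visible in this patch:

    /* Because 'base' is the first member of every stage key, any key can be
     * viewed through union brw_any_prog_key's 'base' member, so
     * stage-independent code can reach the shared fields without a switch. */
    static inline unsigned
    example_any_key_string_id(const union brw_any_prog_key *key)
    {
       return key->base.program_string_id;
    }
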
@@ -285,15 +324,29 @@
    /** A bitfield of per-vertex inputs read. */
    uint64_t inputs_read;
 
-   struct brw_sampler_prog_key_data tex;
+   /**
+    * How many user clipping planes are being uploaded to the tessellation
+    * evaluation shader as push constants.
+    *
+    * These are used for lowering legacy gl_ClipVertex/gl_Position clipping to
+    * clip distances.
+    */
+   unsigned nr_userclip_plane_consts:4;
 };
 
 /** The program key for Geometry Shaders. */
 struct brw_gs_prog_key
 {
-   unsigned program_string_id;
+   struct brw_base_prog_key base;
 
-   struct brw_sampler_prog_key_data tex;
+   /**
+    * How many user clipping planes are being uploaded to the geometry shader
+    * as push constants.
+    *
+    * These are used for lowering legacy gl_ClipVertex/gl_Position clipping to
+    * clip distances.
+    */
+   unsigned nr_userclip_plane_consts:4;
 };
 
 enum brw_sf_primitive {
@@ -381,12 +434,15 @@ enum brw_wm_aa_enable {
 
 /** The program key for Fragment/Pixel Shaders. */
 struct brw_wm_prog_key {
+   struct brw_base_prog_key base;
+
    /* Some collection of BRW_WM_IZ_* */
    uint8_t iz_lookup;
    bool stats_wm:1;
    bool flat_shade:1;
    unsigned nr_color_regions:5;
-   bool replicate_alpha:1;
+   bool alpha_test_replicate_alpha:1;
+   bool alpha_to_coverage:1;
    bool clamp_fragment_color:1;
    bool persample_interp:1;
    bool multisample_fbo:1;
@@ -396,21 +452,19 @@ struct brw_wm_prog_key {
    bool force_dual_color_blend:1;
    bool coherent_fb_fetch:1;
 
+   uint8_t color_outputs_valid;
    uint64_t input_slots_valid;
-   unsigned program_string_id;
    GLenum alpha_test_func; /* < For Gen4/5 MRT alpha test */
    float alpha_test_ref;
-
-   struct brw_sampler_prog_key_data tex;
 };
 
 struct brw_cs_prog_key {
-   uint32_t program_string_id;
-   struct brw_sampler_prog_key_data tex;
+   struct brw_base_prog_key base;
 };
 
 /* brw_any_prog_key is any of the keys that map to an API stage */
 union brw_any_prog_key {
+   struct brw_base_prog_key base;
    struct brw_vs_prog_key vs;
    struct brw_tcs_prog_key tcs;
    struct brw_tes_prog_key tes;
@@ -426,18 +480,14 @@
  * entries [most of them except when we're doing untyped surface
  * access] will be removed by the uniform packing pass.
  */
-#define BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET      0
-#define BRW_IMAGE_PARAM_OFFSET_OFFSET           4
-#define BRW_IMAGE_PARAM_SIZE_OFFSET             8
-#define BRW_IMAGE_PARAM_STRIDE_OFFSET           12
-#define BRW_IMAGE_PARAM_TILING_OFFSET           16
-#define BRW_IMAGE_PARAM_SWIZZLING_OFFSET        20
-#define BRW_IMAGE_PARAM_SIZE                    24
+#define BRW_IMAGE_PARAM_OFFSET_OFFSET           0
+#define BRW_IMAGE_PARAM_SIZE_OFFSET             4
+#define BRW_IMAGE_PARAM_STRIDE_OFFSET           8
+#define BRW_IMAGE_PARAM_TILING_OFFSET           12
+#define BRW_IMAGE_PARAM_SWIZZLING_OFFSET        16
+#define BRW_IMAGE_PARAM_SIZE                    20
 
 struct brw_image_param {
-   /** Surface binding table index. */
-   uint32_t surface_idx;
-
    /** Offset applied to the X and Y surface coordinates. */
    uint32_t offset[2];
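
The renumbered BRW_IMAGE_PARAM_*_OFFSET constants above drop the old surface-index slot, so every remaining field moves down by one vec4 (4 dwords) and BRW_IMAGE_PARAM_SIZE shrinks from 24 to 20. A sketch of the packing these offsets imply — assuming the brw_image_param arrays are offset[2], size[3], stride[4], tiling[3] and swizzling[2] (only offset[2] is visible in this diff), with each field starting on a 4-dword slot; the function is illustrative, not part of the header:

    #include <stdint.h>
    #include <string.h>

    /* Copy one brw_image_param into the dword layout the offsets describe. */
    static void
    example_pack_image_param(uint32_t dst[BRW_IMAGE_PARAM_SIZE],
                             const struct brw_image_param *p)
    {
       memcpy(dst + BRW_IMAGE_PARAM_OFFSET_OFFSET,    p->offset,    sizeof(p->offset));
       memcpy(dst + BRW_IMAGE_PARAM_SIZE_OFFSET,      p->size,      sizeof(p->size));
       memcpy(dst + BRW_IMAGE_PARAM_STRIDE_OFFSET,    p->stride,    sizeof(p->stride));
       memcpy(dst + BRW_IMAGE_PARAM_TILING_OFFSET,    p->tiling,    sizeof(p->tiling));
       memcpy(dst + BRW_IMAGE_PARAM_SWIZZLING_OFFSET, p->swizzling, sizeof(p->swizzling));
    }
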
@@ -559,10 +609,15 @@
    BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X,
    BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y,
 
+   BRW_PARAM_BUILTIN_PATCH_VERTICES_IN,
+
    BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_X,
    BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Y,
    BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Z,
    BRW_PARAM_BUILTIN_SUBGROUP_ID,
+   BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X,
+   BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Y,
+   BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Z,
 };
 
 #define BRW_PARAM_BUILTIN_CLIP_PLANE(idx, comp) \
@@ -602,12 +657,28 @@ struct brw_stage_prog_data {
    GLuint nr_params;       /**< number of float params/constants */
    GLuint nr_pull_params;
 
+   /* zero_push_reg is a bitfield which indicates what push registers (if any)
+    * should be zeroed by SW at the start of the shader.  The corresponding
+    * push_reg_mask_param specifies the param index (in 32-bit units) where
+    * the actual runtime 64-bit mask will be pushed.  The shader will zero
+    * push reg i if
+    *
+    *    reg_used & zero_push_reg & ~*push_reg_mask_param & (1ull << i)
+    *
+    * If this field is set, brw_compiler::compact_params must be false.
+    */
+   uint64_t zero_push_reg;
+   unsigned push_reg_mask_param;
+
    unsigned curb_read_length;
    unsigned total_scratch;
    unsigned total_shared;
 
    unsigned program_size;
 
+   /** Does this program pull from any UBO or other constant buffers? */
+   bool has_ubo_pull;
+
    /**
    * Register where the thread expects to find input data from the URB
    * (typically uniforms, followed by vertex or fragment attributes).
@@ -624,6 +695,9 @@ struct brw_stage_prog_data {
    */
    uint32_t *param;
    uint32_t *pull_param;
+
+   /* Whether shader uses atomic operations. */
+   bool uses_atomic_load_store;
 };
 
 static inline uint32_t *
@@ -638,19 +712,6 @@ brw_stage_prog_data_add_params(struct brw_stage_prog_data *prog_data,
    return prog_data->param + old_nr_params;
 }
 
-static inline void
-brw_mark_surface_used(struct brw_stage_prog_data *prog_data,
-                      unsigned surf_index)
-{
-   /* A binding table index is 8 bits and the top 3 values are reserved for
-    * special things (stateless and SLM).
-    */
-   assert(surf_index <= 252);
-
-   prog_data->binding_table.size_bytes =
-      MAX2(prog_data->binding_table.size_bytes, (surf_index + 1) * 4);
-}
-
 enum brw_barycentric_mode {
    BRW_BARYCENTRIC_PERSPECTIVE_PIXEL       = 0,
    BRW_BARYCENTRIC_PERSPECTIVE_CENTROID    = 1,
@@ -682,11 +743,14 @@ struct brw_wm_prog_data {
 
    GLuint num_varying_inputs;
 
-   uint8_t reg_blocks_0;
-   uint8_t reg_blocks_2;
+   uint8_t reg_blocks_8;
+   uint8_t reg_blocks_16;
+   uint8_t reg_blocks_32;
 
-   uint8_t dispatch_grf_start_reg_2;
-   uint32_t prog_offset_2;
+   uint8_t dispatch_grf_start_reg_16;
+   uint8_t dispatch_grf_start_reg_32;
+   uint32_t prog_offset_16;
+   uint32_t prog_offset_32;
 
    struct {
       /** @{
@@ -704,6 +768,7 @@
    bool inner_coverage;
    bool dispatch_8;
    bool dispatch_16;
+   bool dispatch_32;
    bool dual_src_blend;
    bool persample_dispatch;
    bool uses_pos_offset;
@@ -731,6 +796,11 @@
    */
    uint32_t flat_inputs;
 
+   /**
+    * The FS inputs
+    */
+   uint64_t inputs;
+
    /* Mapping of VUE slots to interpolation modes.
    * Used by the Gen4-5 clip/sf/wm stages.
    */
@@ -742,8 +812,101 @@
    * For varying slots that are not used by the FS, the value is -1.
    */
    int urb_setup[VARYING_SLOT_MAX];
+
+   /**
+    * Cache structure into the urb_setup array above that contains the
+    * attribute numbers of active varyings out of urb_setup.
+    * The actual count is stored in urb_setup_attribs_count.
+    */
+   uint8_t urb_setup_attribs[VARYING_SLOT_MAX];
+   uint8_t urb_setup_attribs_count;
 };
 
+/** Returns the SIMD width corresponding to a given KSP index
+ *
+ * The "Variable Pixel Dispatch" table in the PRM (which can be found, for
+ * example in Vol. 7 of the SKL PRM) has a mapping from dispatch widths to
+ * kernel start pointer (KSP) indices that is based on what dispatch widths
+ * are enabled.  This function provides, effectively, the reverse mapping.
+ *
+ * If the given KSP is valid with respect to the SIMD8/16/32 enables, a SIMD
+ * width of 8, 16, or 32 is returned.  If the KSP is invalid, 0 is returned.
+ */
+static inline unsigned
+brw_fs_simd_width_for_ksp(unsigned ksp_idx, bool simd8_enabled,
+                          bool simd16_enabled, bool simd32_enabled)
+{
+   /* This function strictly ignores contiguous dispatch */
+   switch (ksp_idx) {
+   case 0:
+      return simd8_enabled ? 8 :
+             (simd16_enabled && !simd32_enabled) ? 16 :
+             (simd32_enabled && !simd16_enabled) ? 32 : 0;
+   case 1:
+      return (simd32_enabled && (simd16_enabled || simd8_enabled)) ? 32 : 0;
+   case 2:
+      return (simd16_enabled && (simd32_enabled || simd8_enabled)) ? 16 : 0;
+   default:
+      unreachable("Invalid KSP index");
+   }
+}
+
+#define brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx) \
+   brw_fs_simd_width_for_ksp((ksp_idx), (wm_state)._8PixelDispatchEnable, \
+                             (wm_state)._16PixelDispatchEnable, \
+                             (wm_state)._32PixelDispatchEnable)
+
+#define brw_wm_state_has_ksp(wm_state, ksp_idx) \
+   (brw_wm_state_simd_width_for_ksp((wm_state), (ksp_idx)) != 0)
+
+static inline uint32_t
+_brw_wm_prog_data_prog_offset(const struct brw_wm_prog_data *prog_data,
+                              unsigned simd_width)
+{
+   switch (simd_width) {
+   case 8: return 0;
+   case 16: return prog_data->prog_offset_16;
+   case 32: return prog_data->prog_offset_32;
+   default: return 0;
+   }
+}
+
+#define brw_wm_prog_data_prog_offset(prog_data, wm_state, ksp_idx) \
+   _brw_wm_prog_data_prog_offset(prog_data, \
+      brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx))
+
+static inline uint8_t
+_brw_wm_prog_data_dispatch_grf_start_reg(const struct brw_wm_prog_data *prog_data,
+                                         unsigned simd_width)
+{
+   switch (simd_width) {
+   case 8: return prog_data->base.dispatch_grf_start_reg;
+   case 16: return prog_data->dispatch_grf_start_reg_16;
+   case 32: return prog_data->dispatch_grf_start_reg_32;
+   default: return 0;
+   }
+}
+
+#define brw_wm_prog_data_dispatch_grf_start_reg(prog_data, wm_state, ksp_idx) \
+   _brw_wm_prog_data_dispatch_grf_start_reg(prog_data, \
+      brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx))
+
+static inline uint8_t
+_brw_wm_prog_data_reg_blocks(const struct brw_wm_prog_data *prog_data,
+                             unsigned simd_width)
+{
+   switch (simd_width) {
+   case 8: return prog_data->reg_blocks_8;
+   case 16: return prog_data->reg_blocks_16;
+   case 32: return prog_data->reg_blocks_32;
+   default: return 0;
+   }
+}
+
+#define brw_wm_prog_data_reg_blocks(prog_data, wm_state, ksp_idx) \
+   _brw_wm_prog_data_reg_blocks(prog_data, \
+      brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx))
+
 struct brw_push_const_block {
    unsigned dwords; /* Dword count, not reg aligned */
    unsigned regs;
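
brw_fs_simd_width_for_ksp(), added in the hunk above, is the inverse of the PRM's "Variable Pixel Dispatch" table: given a KSP index and the SIMD8/16/32 enables, it recovers the dispatch width. A few spot checks of the mapping, read directly off the switch in the function (the checking function itself is illustrative):

    #include <assert.h>
    #include <stdbool.h>

    static void
    example_ksp_width_checks(void)
    {
       /* SIMD8 and SIMD16 enabled, SIMD32 disabled: */
       assert(brw_fs_simd_width_for_ksp(0, true, true, false) == 8);
       assert(brw_fs_simd_width_for_ksp(1, true, true, false) == 0); /* unused KSP */
       assert(brw_fs_simd_width_for_ksp(2, true, true, false) == 16);

       /* SIMD16 and SIMD32 enabled, SIMD8 disabled: KSP 0 carries nothing. */
       assert(brw_fs_simd_width_for_ksp(0, false, true, true) == 0);
       assert(brw_fs_simd_width_for_ksp(1, false, true, true) == 32);
       assert(brw_fs_simd_width_for_ksp(2, false, true, true) == 16);
    }
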
@@ -754,15 +917,16 @@ struct brw_cs_prog_data {
    struct brw_stage_prog_data base;
 
    unsigned local_size[3];
+   unsigned max_variable_local_size;
    unsigned simd_size;
-   unsigned threads;
+   unsigned slm_size;
    bool uses_barrier;
    bool uses_num_work_groups;
+   bool uses_variable_group_size;
 
    struct {
       struct brw_push_const_block cross_thread;
       struct brw_push_const_block per_thread;
-      struct brw_push_const_block total;
    } push;
 
    struct {
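
With the precomputed 'total' block and the 'threads' field gone from brw_cs_prog_data, a total push-constant size can no longer be read straight off the struct; it has to be derived from the two remaining blocks at a known thread count. A sketch of the likely arithmetic — cross-thread constants pushed once, per-thread constants replicated per hardware thread; the real helper is brw_cs_push_const_total_size(), declared near the end of this diff:

    /* Illustrative stand-in, in register units: */
    static inline unsigned
    example_cs_push_const_regs(const struct brw_cs_prog_data *cs_prog_data,
                               unsigned threads)
    {
       return cs_prog_data->push.cross_thread.regs +
              cs_prog_data->push.per_thread.regs * threads;
    }
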
@@ -897,7 +1061,8 @@ GLuint brw_varying_to_offset(const struct brw_vue_map *vue_map, GLuint varying)
 void brw_compute_vue_map(const struct gen_device_info *devinfo,
                          struct brw_vue_map *vue_map,
                          uint64_t slots_valid,
-                         bool separate_shader);
+                         bool separate_shader,
+                         uint32_t pos_slots);
 
 void brw_compute_tess_vue_map(struct brw_vue_map *const vue_map,
                               uint64_t slots_valid,
@@ -906,14 +1071,16 @@ void brw_compute_tess_vue_map(struct brw_vue_map *const vue_map,
 /* brw_interpolation_map.c */
 void brw_setup_vue_interpolation(struct brw_vue_map *vue_map,
                                  struct nir_shader *nir,
-                                 struct brw_wm_prog_data *prog_data,
-                                 const struct gen_device_info *devinfo);
+                                 struct brw_wm_prog_data *prog_data);
 
 enum shader_dispatch_mode {
    DISPATCH_MODE_4X1_SINGLE = 0,
    DISPATCH_MODE_4X2_DUAL_INSTANCE = 1,
    DISPATCH_MODE_4X2_DUAL_OBJECT = 2,
    DISPATCH_MODE_SIMD8 = 3,
+
+   DISPATCH_MODE_TCS_SINGLE_PATCH = 0,
+   DISPATCH_MODE_TCS_8_PATCH = 2,
 };
 
 /**
@@ -976,7 +1143,8 @@ struct brw_vs_prog_data {
 
    bool uses_vertexid;
    bool uses_instanceid;
-   bool uses_basevertex;
+   bool uses_is_indexed_draw;
+   bool uses_firstvertex;
    bool uses_baseinstance;
    bool uses_drawid;
 };
@@ -985,8 +1153,14 @@
 struct brw_tcs_prog_data
 {
    struct brw_vue_prog_data base;
 
+   /** Should the non-SINGLE_PATCH payload provide primitive ID? */
+   bool include_primitive_id;
+
    /** Number vertices in output patch */
    int instances;
+
+   /** Track patch count threshold */
+   int patch_count_threshold;
 };
 
@@ -1093,11 +1267,16 @@ union brw_any_prog_data {
    struct brw_cs_prog_data cs;
 };
 
-#define DEFINE_PROG_DATA_DOWNCAST(stage)                       \
-static inline struct brw_##stage##_prog_data *                 \
-brw_##stage##_prog_data(struct brw_stage_prog_data *prog_data) \
-{                                                              \
-   return (struct brw_##stage##_prog_data *) prog_data;        \
+#define DEFINE_PROG_DATA_DOWNCAST(stage)                                   \
+static inline struct brw_##stage##_prog_data *                             \
+brw_##stage##_prog_data(struct brw_stage_prog_data *prog_data)             \
+{                                                                          \
+   return (struct brw_##stage##_prog_data *) prog_data;                    \
+}                                                                          \
+static inline const struct brw_##stage##_prog_data *                       \
+brw_##stage##_prog_data_const(const struct brw_stage_prog_data *prog_data) \
+{                                                                          \
+   return (const struct brw_##stage##_prog_data *) prog_data;              \
 }
 DEFINE_PROG_DATA_DOWNCAST(vue)
 DEFINE_PROG_DATA_DOWNCAST(vs)
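
DEFINE_PROG_DATA_DOWNCAST now also emits a const-qualified downcast, so read-only callers no longer have to cast away const. A usage sketch, assuming the wm instantiation among the invocations elided between vs and clip here; the surrounding function is hypothetical:

    static unsigned
    example_fs_grf_start(const struct brw_stage_prog_data *prog_data)
    {
       /* The _const variant keeps const-correctness end to end. */
       const struct brw_wm_prog_data *wm = brw_wm_prog_data_const(prog_data);
       return wm->base.dispatch_grf_start_reg;
    }
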
@@ -1111,17 +1290,42 @@ DEFINE_PROG_DATA_DOWNCAST(clip)
 DEFINE_PROG_DATA_DOWNCAST(sf)
 #undef DEFINE_PROG_DATA_DOWNCAST
 
+struct brw_compile_stats {
+   uint32_t dispatch_width; /**< 0 for vec4 */
+   uint32_t instructions;
+   uint32_t sends;
+   uint32_t loops;
+   uint32_t cycles;
+   uint32_t spills;
+   uint32_t fills;
+};
+
 /** @} */
 
 struct brw_compiler *
 brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo);
 
+/**
+ * Returns a compiler configuration for use with disk shader cache
+ *
+ * This value only needs to change for settings that can cause different
+ * program generation between two runs on the same hardware.
+ *
+ * For example, it doesn't need to be different for gen 8 and gen 9 hardware,
+ * but it does need to be different if INTEL_DEBUG=nocompact is or isn't used.
+ */
+uint64_t
+brw_get_compiler_config_value(const struct brw_compiler *compiler);
+
 unsigned
 brw_prog_data_size(gl_shader_stage stage);
 
 unsigned
 brw_prog_key_size(gl_shader_stage stage);
 
+void
+brw_prog_key_set_id(union brw_any_prog_key *key, gl_shader_stage, unsigned id);
+
 /**
  * Compile a vertex shader.
 *
@@ -1132,8 +1336,9 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
                void *mem_ctx,
                const struct brw_vs_prog_key *key,
                struct brw_vs_prog_data *prog_data,
-               const struct nir_shader *shader,
+               struct nir_shader *shader,
                int shader_time_index,
+               struct brw_compile_stats *stats,
                char **error_str);
 
 /**
@@ -1147,8 +1352,9 @@ brw_compile_tcs(const struct brw_compiler *compiler,
                 void *mem_ctx,
                 const struct brw_tcs_prog_key *key,
                 struct brw_tcs_prog_data *prog_data,
-                const struct nir_shader *nir,
+                struct nir_shader *nir,
                 int shader_time_index,
+                struct brw_compile_stats *stats,
                 char **error_str);
 
 /**
@@ -1162,9 +1368,9 @@ brw_compile_tes(const struct brw_compiler *compiler, void *log_data,
                 const struct brw_tes_prog_key *key,
                 const struct brw_vue_map *input_vue_map,
                 struct brw_tes_prog_data *prog_data,
-                const struct nir_shader *shader,
-                struct gl_program *prog,
+                struct nir_shader *shader,
                 int shader_time_index,
+                struct brw_compile_stats *stats,
                 char **error_str);
 
 /**
@@ -1177,9 +1383,10 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
                void *mem_ctx,
                const struct brw_gs_prog_key *key,
                struct brw_gs_prog_data *prog_data,
-               const struct nir_shader *shader,
+               struct nir_shader *shader,
                struct gl_program *prog,
                int shader_time_index,
+               struct brw_compile_stats *stats,
                char **error_str);
 
 /**
@@ -1224,12 +1431,13 @@ brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
                void *mem_ctx,
                const struct brw_wm_prog_key *key,
                struct brw_wm_prog_data *prog_data,
-               const struct nir_shader *shader,
-               struct gl_program *prog,
+               struct nir_shader *shader,
                int shader_time_index8,
                int shader_time_index16,
+               int shader_time_index32,
                bool allow_spilling,
                bool use_rep_send, struct brw_vue_map *vue_map,
+               struct brw_compile_stats *stats, /**< Array of three stats */
                char **error_str);
 
 /**
@@ -1244,8 +1452,14 @@ brw_compile_cs(const struct brw_compiler *compiler, void *log_data,
                struct brw_cs_prog_data *prog_data,
                const struct nir_shader *shader,
                int shader_time_index,
+               struct brw_compile_stats *stats,
                char **error_str);
 
+void brw_debug_key_recompile(const struct brw_compiler *c, void *log,
+                             gl_shader_stage stage,
+                             const struct brw_base_prog_key *old_key,
+                             const struct brw_base_prog_key *key);
+
 static inline uint32_t
 encode_slm_size(unsigned gen, uint32_t bytes)
 {
@@ -1278,6 +1492,10 @@ encode_slm_size(unsigned gen, uint32_t bytes)
    return slm_size;
 }
 
+unsigned
+brw_cs_push_const_total_size(const struct brw_cs_prog_data *cs_prog_data,
+                             unsigned threads);
+
 /**
  * Return true if the given shader stage is dispatched contiguously by the
  * relevant fixed function starting from channel 0 of the SIMD thread, which
@@ -1285,7 +1503,7 @@ encode_slm_size(unsigned gen, uint32_t bytes)
  * '2^n - 1' for some n.
  */
 static inline bool
-brw_stage_has_packed_dispatch(MAYBE_UNUSED const struct gen_device_info *devinfo,
+brw_stage_has_packed_dispatch(ASSERTED const struct gen_device_info *devinfo,
                               gl_shader_stage stage,
                               const struct brw_stage_prog_data *prog_data)
 {
@@ -1294,7 +1512,7 @@ brw_stage_has_packed_dispatch(MAYBE_UNUSED const struct gen_device_info *devinfo
    * to do a full test run with brw_fs_test_dispatch_packing() hooked up to
    * the NIR front-end before changing this assertion.
    */
-   assert(devinfo->gen <= 10);
+   assert(devinfo->gen <= 12);
 
    switch (stage) {
    case MESA_SHADER_FRAGMENT: {
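
The brw_compile_stats out-parameter threaded through every brw_compile_* entry point above lets drivers report per-variant compile statistics; brw_compile_fs takes an array of three, one per SIMD8/16/32 variant. A sketch of how a driver might consume it, assuming a slot's dispatch_width stays 0 when that variant was not compiled (the struct only documents 0 for vec4); names are illustrative:

    #include <stdio.h>

    static void
    example_log_fs_stats(const struct brw_compile_stats stats[3])
    {
       for (unsigned i = 0; i < 3; i++) {
          if (stats[i].dispatch_width == 0)
             continue; /* empty slot */
          fprintf(stderr, "SIMD%u: %u inst, %u sends, %u loops, %u cycles, "
                  "%u spills, %u fills\n",
                  stats[i].dispatch_width, stats[i].instructions,
                  stats[i].sends, stats[i].loops, stats[i].cycles,
                  stats[i].spills, stats[i].fills);
       }
    }
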