diff --git a/src/intel/compiler/brw_compiler.h b/src/intel/compiler/brw_compiler.h
index 3f383403883..8df2ee59207 100644
--- a/src/intel/compiler/brw_compiler.h
+++ b/src/intel/compiler/brw_compiler.h
@@ -25,9 +25,10 @@
 #define BRW_COMPILER_H
 
 #include <stdio.h>
-#include "common/gen_device_info.h"
-#include "main/mtypes.h"
+#include "dev/gen_device_info.h"
 #include "main/macros.h"
+#include "main/mtypes.h"
+#include "util/ralloc.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -36,7 +37,6 @@ extern "C" {
 struct ra_regs;
 struct nir_shader;
 struct brw_program;
-union gl_constant_value;
 
 struct brw_compiler {
    const struct gen_device_info *devinfo;
@@ -83,25 +83,64 @@ struct brw_compiler {
       uint8_t *ra_reg_to_grf;
 
       /**
-       * ra class for the aligned pairs we use for PLN, which doesn't
+       * ra class for the aligned barycentrics we use for PLN, which doesn't
        * appear in *classes.
        */
-      int aligned_pairs_class;
+      int aligned_bary_class;
    } fs_reg_sets[3];
 
    void (*shader_debug_log)(void *, const char *str, ...) PRINTFLIKE(2, 3);
    void (*shader_perf_log)(void *, const char *str, ...) PRINTFLIKE(2, 3);
 
-   bool scalar_stage[MESA_SHADER_STAGES];
-   struct gl_shader_compiler_options glsl_compiler_options[MESA_SHADER_STAGES];
+   bool scalar_stage[MESA_ALL_SHADER_STAGES];
+   bool use_tcs_8_patch;
+   struct gl_shader_compiler_options glsl_compiler_options[MESA_ALL_SHADER_STAGES];
 
    /**
     * Apply workarounds for SIN and COS output range problems.
     * This can negatively impact performance.
     */
    bool precise_trig;
+
+   /**
+    * Is 3DSTATE_CONSTANT_*'s Constant Buffer 0 relative to Dynamic State
+    * Base Address?  (If not, it's a normal GPU address.)
+    */
+   bool constant_buffer_0_is_relative;
+
+   /**
+    * Whether or not the driver supports pull constants.  If not, the compiler
+    * will attempt to push everything.
+    */
+   bool supports_pull_constants;
+
+   /**
+    * Whether or not the driver supports NIR shader constants.  This controls
+    * whether nir_opt_large_constants will be run.
+    */
+   bool supports_shader_constants;
+
+   /**
+    * Whether or not the driver wants uniform params to be compacted by the
+    * back-end compiler.
+    */
+   bool compact_params;
+
+   /**
+    * Whether or not the driver wants variable group size to be lowered by the
+    * back-end compiler.
+    */
+   bool lower_variable_group_size;
 };
 
+/**
+ * We use a constant subgroup size of 32.  It really only needs to be a
+ * maximum and, since we do SIMD32 for compute shaders in some cases, it
+ * needs to be at least 32.  SIMD8 and SIMD16 shaders will still claim a
+ * subgroup size of 32 but will act as if 16 or 24 of those channels are
+ * disabled.
+ */
+#define BRW_SUBGROUP_SIZE 32
 
 /**
  * Program key structures.
@@ -169,6 +208,37 @@ struct brw_sampler_prog_key_data {
    uint32_t y_uv_image_mask;
    uint32_t yx_xuxv_image_mask;
    uint32_t xy_uxvx_image_mask;
+   uint32_t ayuv_image_mask;
+   uint32_t xyuv_image_mask;
+   uint32_t bt709_mask;
+   uint32_t bt2020_mask;
+
+   /* Scale factor for each texture. */
+   float scale_factors[32];
+};
+
+/** An enum representing what kind of input gl_SubgroupSize is.
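+ *
+ * As an illustration of the "enum value == subgroup size" invariant noted
+ * inside the enum below, a hypothetical helper (not part of this header)
+ * could recover a required size like so:
+ *
+ *    static inline unsigned
+ *    brw_required_subgroup_size(enum brw_subgroup_size_type type)
+ *    {
+ *       // REQUIRE_8/16/32 encode the size directly; the remaining
+ *       // types all have small, non-size values.
+ *       return (type >= BRW_SUBGROUP_SIZE_REQUIRE_8) ? (unsigned)type : 0;
+ *    }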
+ */
+enum PACKED brw_subgroup_size_type
+{
+   BRW_SUBGROUP_SIZE_API_CONSTANT,    /**< Default Vulkan behavior */
+   BRW_SUBGROUP_SIZE_UNIFORM,         /**< OpenGL behavior */
+   BRW_SUBGROUP_SIZE_VARYING,         /**< VK_EXT_subgroup_size_control */
+
+   /* These enums are specifically chosen so that the value of the enum is
+    * also the subgroup size.  If any new values are added, they must respect
+    * this invariant.
+    */
+   BRW_SUBGROUP_SIZE_REQUIRE_8  = 8,  /**< VK_EXT_subgroup_size_control */
+   BRW_SUBGROUP_SIZE_REQUIRE_16 = 16, /**< VK_EXT_subgroup_size_control */
+   BRW_SUBGROUP_SIZE_REQUIRE_32 = 32, /**< VK_EXT_subgroup_size_control */
+};
+
+struct brw_base_prog_key {
+   unsigned program_string_id;
+
+   enum brw_subgroup_size_type subgroup_size_type;
+
+   struct brw_sampler_prog_key_data tex;
 };
 
 /**
@@ -182,16 +252,32 @@ struct brw_sampler_prog_key_data {
 #define BRW_ATTRIB_WA_SIGN          32  /* interpret as signed in shader */
 #define BRW_ATTRIB_WA_SCALE         64  /* interpret as scaled in shader */
 
+/**
+ * OpenGL attribute slots fall in [0, VERT_ATTRIB_MAX - 1] with the range
+ * [VERT_ATTRIB_GENERIC0, VERT_ATTRIB_MAX - 1] reserved for up to 16 user
+ * input vertex attributes.  In Vulkan, we expose up to 28 user vertex input
+ * attributes that are mapped to slots also starting at VERT_ATTRIB_GENERIC0.
+ */
+#define MAX_GL_VERT_ATTRIB     VERT_ATTRIB_MAX
+#define MAX_VK_VERT_ATTRIB     (VERT_ATTRIB_GENERIC0 + 28)
+
 /** The program key for Vertex Shaders. */
 struct brw_vs_prog_key {
-   unsigned program_string_id;
+   struct brw_base_prog_key base;
 
    /**
     * Per-attribute workaround flags
     *
     * For each attribute, a combination of BRW_ATTRIB_WA_*.
+    *
+    * For OpenGL, where we expose at most 16 user input attributes, we only
+    * need up to VERT_ATTRIB_MAX slots.  In Vulkan, however, the slots
+    * preceding VERT_ATTRIB_GENERIC0 are unused and we can expose up to 28
+    * user input vertex attributes mapped to slots starting at
+    * VERT_ATTRIB_GENERIC0, so this array needs to be large enough to hold
+    * that many slots.
     */
-   uint8_t gl_attrib_wa_flags[VERT_ATTRIB_MAX];
+   uint8_t gl_attrib_wa_flags[MAX2(MAX_GL_VERT_ATTRIB, MAX_VK_VERT_ATTRIB)];
 
    bool copy_edgeflag:1;
 
@@ -215,14 +301,12 @@ struct brw_vs_prog_key {
     * the VUE, even if they aren't written by the vertex shader.
     */
    uint8_t point_coord_replace;
-
-   struct brw_sampler_prog_key_data tex;
 };
 
 /** The program key for Tessellation Control Shaders. */
 struct brw_tcs_prog_key
 {
-   unsigned program_string_id;
+   struct brw_base_prog_key base;
 
    GLenum tes_primitive_mode;
 
@@ -235,14 +319,12 @@ struct brw_tcs_prog_key
    uint64_t outputs_written;
 
    bool quads_workaround;
-
-   struct brw_sampler_prog_key_data tex;
 };
 
 /** The program key for Tessellation Evaluation Shaders. */
 struct brw_tes_prog_key
 {
-   unsigned program_string_id;
+   struct brw_base_prog_key base;
 
    /** A bitfield of per-patch inputs read. */
    uint32_t patch_inputs_read;
@@ -250,15 +332,29 @@ struct brw_tes_prog_key
    /** A bitfield of per-vertex inputs read. */
    uint64_t inputs_read;
 
-   struct brw_sampler_prog_key_data tex;
+   /**
+    * How many user clipping planes are being uploaded to the tessellation
+    * evaluation shader as push constants.
+    *
+    * These are used for lowering legacy gl_ClipVertex/gl_Position clipping to
+    * clip distances.
+    */
+   unsigned nr_userclip_plane_consts:4;
 };
 
 /** The program key for Geometry Shaders.
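 *
 * Like the TES key above, this key carries nr_userclip_plane_consts.  A
 * driver filling it in might derive that count from the API clip-plane
 * enable mask with a sketch along these lines (ctx and clip_plane_enables
 * are illustrative names, not a real API):
 *
 *    key.nr_userclip_plane_consts = util_last_bit(ctx->clip_plane_enables);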
*/ struct brw_gs_prog_key { - unsigned program_string_id; + struct brw_base_prog_key base; - struct brw_sampler_prog_key_data tex; + /** + * How many user clipping planes are being uploaded to the geometry shader + * as push constants. + * + * These are used for lowering legacy gl_ClipVertex/gl_Position clipping to + * clip distances. + */ + unsigned nr_userclip_plane_consts:4; }; enum brw_sf_primitive { @@ -346,12 +442,15 @@ enum brw_wm_aa_enable { /** The program key for Fragment/Pixel Shaders. */ struct brw_wm_prog_key { + struct brw_base_prog_key base; + /* Some collection of BRW_WM_IZ_* */ uint8_t iz_lookup; bool stats_wm:1; bool flat_shade:1; unsigned nr_color_regions:5; - bool replicate_alpha:1; + bool alpha_test_replicate_alpha:1; + bool alpha_to_coverage:1; bool clamp_fragment_color:1; bool persample_interp:1; bool multisample_fbo:1; @@ -360,19 +459,27 @@ struct brw_wm_prog_key { bool high_quality_derivatives:1; bool force_dual_color_blend:1; bool coherent_fb_fetch:1; + bool ignore_sample_mask_out:1; - uint16_t drawable_height; + uint8_t color_outputs_valid; uint64_t input_slots_valid; - unsigned program_string_id; GLenum alpha_test_func; /* < For Gen4/5 MRT alpha test */ float alpha_test_ref; - - struct brw_sampler_prog_key_data tex; }; struct brw_cs_prog_key { - uint32_t program_string_id; - struct brw_sampler_prog_key_data tex; + struct brw_base_prog_key base; +}; + +/* brw_any_prog_key is any of the keys that map to an API stage */ +union brw_any_prog_key { + struct brw_base_prog_key base; + struct brw_vs_prog_key vs; + struct brw_tcs_prog_key tcs; + struct brw_tes_prog_key tes; + struct brw_gs_prog_key gs; + struct brw_wm_prog_key wm; + struct brw_cs_prog_key cs; }; /* @@ -382,18 +489,14 @@ struct brw_cs_prog_key { * entries [most of them except when we're doing untyped surface * access] will be removed by the uniform packing pass. */ -#define BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET 0 -#define BRW_IMAGE_PARAM_OFFSET_OFFSET 4 -#define BRW_IMAGE_PARAM_SIZE_OFFSET 8 -#define BRW_IMAGE_PARAM_STRIDE_OFFSET 12 -#define BRW_IMAGE_PARAM_TILING_OFFSET 16 -#define BRW_IMAGE_PARAM_SWIZZLING_OFFSET 20 -#define BRW_IMAGE_PARAM_SIZE 24 +#define BRW_IMAGE_PARAM_OFFSET_OFFSET 0 +#define BRW_IMAGE_PARAM_SIZE_OFFSET 4 +#define BRW_IMAGE_PARAM_STRIDE_OFFSET 8 +#define BRW_IMAGE_PARAM_TILING_OFFSET 12 +#define BRW_IMAGE_PARAM_SWIZZLING_OFFSET 16 +#define BRW_IMAGE_PARAM_SIZE 20 struct brw_image_param { - /** Surface binding table index. */ - uint32_t surface_idx; - /** Offset applied to the X and Y surface coordinates. 
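 *
 * Conceptually the shader adds these before doing any address math, i.e.
 * x' = x + offset[0] and y' = y + offset[1].  (This is a simplified
 * description; the precise lowering lives in the back-end compiler.)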
*/ uint32_t offset[2]; @@ -462,6 +565,83 @@ struct brw_image_param { */ #define BRW_SHADER_TIME_STRIDE 64 +struct brw_ubo_range +{ + uint16_t block; + uint8_t start; + uint8_t length; +}; + +/* We reserve the first 2^16 values for builtins */ +#define BRW_PARAM_IS_BUILTIN(param) (((param) & 0xffff0000) == 0) + +enum brw_param_builtin { + BRW_PARAM_BUILTIN_ZERO, + + BRW_PARAM_BUILTIN_CLIP_PLANE_0_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_0_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_0_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_0_W, + BRW_PARAM_BUILTIN_CLIP_PLANE_1_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_1_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_1_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_1_W, + BRW_PARAM_BUILTIN_CLIP_PLANE_2_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_2_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_2_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_2_W, + BRW_PARAM_BUILTIN_CLIP_PLANE_3_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_3_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_3_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_3_W, + BRW_PARAM_BUILTIN_CLIP_PLANE_4_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_4_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_4_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_4_W, + BRW_PARAM_BUILTIN_CLIP_PLANE_5_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_5_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_5_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_5_W, + BRW_PARAM_BUILTIN_CLIP_PLANE_6_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_6_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_6_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_6_W, + BRW_PARAM_BUILTIN_CLIP_PLANE_7_X, + BRW_PARAM_BUILTIN_CLIP_PLANE_7_Y, + BRW_PARAM_BUILTIN_CLIP_PLANE_7_Z, + BRW_PARAM_BUILTIN_CLIP_PLANE_7_W, + + BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X, + BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y, + BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Z, + BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_W, + BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X, + BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y, + + BRW_PARAM_BUILTIN_PATCH_VERTICES_IN, + + BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_X, + BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Y, + BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Z, + BRW_PARAM_BUILTIN_SUBGROUP_ID, + BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X, + BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Y, + BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Z, +}; + +#define BRW_PARAM_BUILTIN_CLIP_PLANE(idx, comp) \ + (BRW_PARAM_BUILTIN_CLIP_PLANE_0_X + ((idx) << 2) + (comp)) + +#define BRW_PARAM_BUILTIN_IS_CLIP_PLANE(param) \ + ((param) >= BRW_PARAM_BUILTIN_CLIP_PLANE_0_X && \ + (param) <= BRW_PARAM_BUILTIN_CLIP_PLANE_7_W) + +#define BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(param) \ + (((param) - BRW_PARAM_BUILTIN_CLIP_PLANE_0_X) >> 2) + +#define BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(param) \ + (((param) - BRW_PARAM_BUILTIN_CLIP_PLANE_0_X) & 0x3) + struct brw_stage_prog_data { struct { /** size of our binding table. */ @@ -475,21 +655,39 @@ struct brw_stage_prog_data { uint32_t gather_texture_start; uint32_t ubo_start; uint32_t ssbo_start; - uint32_t abo_start; uint32_t image_start; uint32_t shader_time_start; uint32_t plane_start[3]; /** @} */ } binding_table; + struct brw_ubo_range ubo_ranges[4]; + GLuint nr_params; /**< number of float params/constants */ GLuint nr_pull_params; - unsigned nr_image_params; + + /* zero_push_reg is a bitfield which indicates what push registers (if any) + * should be zeroed by SW at the start of the shader. The corresponding + * push_reg_mask_param specifies the param index (in 32-bit units) where + * the actual runtime 64-bit mask will be pushed. The shader will zero + * push reg i if + * + * reg_used & zero_push_reg & ~*push_reg_mask_param & (1ull << i) + * + * If this field is set, brw_compiler::compact_params must be false. 
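+ *
+ * A worked example with illustrative values: if zero_push_reg is 0x30
+ * (registers 4 and 5 are candidates for zeroing) and the runtime mask
+ * pushed at *push_reg_mask_param is 0x10 (register 4 turned out to be
+ * validly pushed), then 0x30 & ~0x10 = 0x20, so only register 5 is
+ * zeroed (assuming reg_used has bit 5 set).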
+ */ + uint64_t zero_push_reg; + unsigned push_reg_mask_param; unsigned curb_read_length; unsigned total_scratch; unsigned total_shared; + unsigned program_size; + + /** Does this program pull from any UBO or other constant buffers? */ + bool has_ubo_pull; + /** * Register where the thread expects to find input data from the URB * (typically uniforms, followed by vertex or fragment attributes). @@ -498,27 +696,29 @@ struct brw_stage_prog_data { bool use_alt_mode; /**< Use ALT floating point mode? Otherwise, IEEE. */ - /* Pointers to tracked values (only valid once - * _mesa_load_state_parameters has been called at runtime). + /* 32-bit identifiers for all push/pull parameters. These can be anything + * the driver wishes them to be; the core of the back-end compiler simply + * re-arranges them. The one restriction is that the bottom 2^16 values + * are reserved for builtins defined in the brw_param_builtin enum defined + * above. */ - const union gl_constant_value **param; - const union gl_constant_value **pull_param; + uint32_t *param; + uint32_t *pull_param; - /** Image metadata passed to the shader as uniforms. */ - struct brw_image_param *image_param; + /* Whether shader uses atomic operations. */ + bool uses_atomic_load_store; }; -static inline void -brw_mark_surface_used(struct brw_stage_prog_data *prog_data, - unsigned surf_index) +static inline uint32_t * +brw_stage_prog_data_add_params(struct brw_stage_prog_data *prog_data, + unsigned nr_new_params) { - /* A binding table index is 8 bits and the top 3 values are reserved for - * special things (stateless and SLM). - */ - assert(surf_index <= 252); - - prog_data->binding_table.size_bytes = - MAX2(prog_data->binding_table.size_bytes, (surf_index + 1) * 4); + unsigned old_nr_params = prog_data->nr_params; + prog_data->nr_params += nr_new_params; + prog_data->param = reralloc(ralloc_parent(prog_data->param), + prog_data->param, uint32_t, + prog_data->nr_params); + return prog_data->param + old_nr_params; } enum brw_barycentric_mode { @@ -552,17 +752,19 @@ struct brw_wm_prog_data { GLuint num_varying_inputs; - uint8_t reg_blocks_0; - uint8_t reg_blocks_2; + uint8_t reg_blocks_8; + uint8_t reg_blocks_16; + uint8_t reg_blocks_32; - uint8_t dispatch_grf_start_reg_2; - uint32_t prog_offset_2; + uint8_t dispatch_grf_start_reg_16; + uint8_t dispatch_grf_start_reg_32; + uint32_t prog_offset_16; + uint32_t prog_offset_32; struct { /** @{ * surface indices the WM-specific surfaces */ - uint32_t render_target_start; uint32_t render_target_read_start; /** @} */ } binding_table; @@ -575,6 +777,7 @@ struct brw_wm_prog_data { bool inner_coverage; bool dispatch_8; bool dispatch_16; + bool dispatch_32; bool dual_src_blend; bool persample_dispatch; bool uses_pos_offset; @@ -583,6 +786,7 @@ struct brw_wm_prog_data { bool uses_src_depth; bool uses_src_w; bool uses_sample_mask; + bool has_render_target_reads; bool has_side_effects; bool pulls_bary; @@ -601,6 +805,11 @@ struct brw_wm_prog_data { */ uint32_t flat_inputs; + /** + * The FS inputs + */ + uint64_t inputs; + /* Mapping of VUE slots to interpolation modes. * Used by the Gen4-5 clip/sf/wm stages. */ @@ -612,8 +821,101 @@ struct brw_wm_prog_data { * For varying slots that are not used by the FS, the value is -1. */ int urb_setup[VARYING_SLOT_MAX]; + + /** + * Cache structure into the urb_setup array above that contains the + * attribute numbers of active varyings out of urb_setup. + * The actual count is stored in urb_setup_attribs_count. 
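+ *
+ * A consumer would walk the active varyings roughly like this (sketch):
+ *
+ *    for (uint8_t i = 0; i < prog_data->urb_setup_attribs_count; i++) {
+ *       gl_varying_slot attr = prog_data->urb_setup_attribs[i];
+ *       assert(prog_data->urb_setup[attr] >= 0);
+ *       ...
+ *    }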
+ */ + uint8_t urb_setup_attribs[VARYING_SLOT_MAX]; + uint8_t urb_setup_attribs_count; }; +/** Returns the SIMD width corresponding to a given KSP index + * + * The "Variable Pixel Dispatch" table in the PRM (which can be found, for + * example in Vol. 7 of the SKL PRM) has a mapping from dispatch widths to + * kernel start pointer (KSP) indices that is based on what dispatch widths + * are enabled. This function provides, effectively, the reverse mapping. + * + * If the given KSP is valid with respect to the SIMD8/16/32 enables, a SIMD + * width of 8, 16, or 32 is returned. If the KSP is invalid, 0 is returned. + */ +static inline unsigned +brw_fs_simd_width_for_ksp(unsigned ksp_idx, bool simd8_enabled, + bool simd16_enabled, bool simd32_enabled) +{ + /* This function strictly ignores contiguous dispatch */ + switch (ksp_idx) { + case 0: + return simd8_enabled ? 8 : + (simd16_enabled && !simd32_enabled) ? 16 : + (simd32_enabled && !simd16_enabled) ? 32 : 0; + case 1: + return (simd32_enabled && (simd16_enabled || simd8_enabled)) ? 32 : 0; + case 2: + return (simd16_enabled && (simd32_enabled || simd8_enabled)) ? 16 : 0; + default: + unreachable("Invalid KSP index"); + } +} + +#define brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx) \ + brw_fs_simd_width_for_ksp((ksp_idx), (wm_state)._8PixelDispatchEnable, \ + (wm_state)._16PixelDispatchEnable, \ + (wm_state)._32PixelDispatchEnable) + +#define brw_wm_state_has_ksp(wm_state, ksp_idx) \ + (brw_wm_state_simd_width_for_ksp((wm_state), (ksp_idx)) != 0) + +static inline uint32_t +_brw_wm_prog_data_prog_offset(const struct brw_wm_prog_data *prog_data, + unsigned simd_width) +{ + switch (simd_width) { + case 8: return 0; + case 16: return prog_data->prog_offset_16; + case 32: return prog_data->prog_offset_32; + default: return 0; + } +} + +#define brw_wm_prog_data_prog_offset(prog_data, wm_state, ksp_idx) \ + _brw_wm_prog_data_prog_offset(prog_data, \ + brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx)) + +static inline uint8_t +_brw_wm_prog_data_dispatch_grf_start_reg(const struct brw_wm_prog_data *prog_data, + unsigned simd_width) +{ + switch (simd_width) { + case 8: return prog_data->base.dispatch_grf_start_reg; + case 16: return prog_data->dispatch_grf_start_reg_16; + case 32: return prog_data->dispatch_grf_start_reg_32; + default: return 0; + } +} + +#define brw_wm_prog_data_dispatch_grf_start_reg(prog_data, wm_state, ksp_idx) \ + _brw_wm_prog_data_dispatch_grf_start_reg(prog_data, \ + brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx)) + +static inline uint8_t +_brw_wm_prog_data_reg_blocks(const struct brw_wm_prog_data *prog_data, + unsigned simd_width) +{ + switch (simd_width) { + case 8: return prog_data->reg_blocks_8; + case 16: return prog_data->reg_blocks_16; + case 32: return prog_data->reg_blocks_32; + default: return 0; + } +} + +#define brw_wm_prog_data_reg_blocks(prog_data, wm_state, ksp_idx) \ + _brw_wm_prog_data_reg_blocks(prog_data, \ + brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx)) + struct brw_push_const_block { unsigned dwords; /* Dword count, not reg aligned */ unsigned regs; @@ -623,18 +925,27 @@ struct brw_push_const_block { struct brw_cs_prog_data { struct brw_stage_prog_data base; - GLuint dispatch_grf_start_reg_16; unsigned local_size[3]; - unsigned simd_size; - unsigned threads; + unsigned slm_size; + + /* Program offsets for the 8/16/32 SIMD variants. Multiple variants are + * kept when using variable group size, and the right one can only be + * decided at dispatch time. 
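+ *
+ * The variants are indexed by dispatch width / 16, i.e. index 0/1/2 for
+ * SIMD8/SIMD16/SIMD32, as brw_cs_prog_data_prog_offset() below computes;
+ * prog_mask records which of the three entries are populated.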
+ */ + unsigned prog_offset[3]; + + /* Bitmask indicating which program offsets are valid. */ + unsigned prog_mask; + + /* Bitmask indicating which programs have spilled. */ + unsigned prog_spilled; + bool uses_barrier; bool uses_num_work_groups; - int thread_local_id_index; struct { struct brw_push_const_block cross_thread; struct brw_push_const_block per_thread; - struct brw_push_const_block total; } push; struct { @@ -646,6 +957,18 @@ struct brw_cs_prog_data { } binding_table; }; +static inline uint32_t +brw_cs_prog_data_prog_offset(const struct brw_cs_prog_data *prog_data, + unsigned dispatch_width) +{ + assert(dispatch_width == 8 || + dispatch_width == 16 || + dispatch_width == 32); + const unsigned index = dispatch_width / 16; + assert(prog_data->prog_mask & (1 << index)); + return prog_data->prog_offset[index]; +} + /** * Enum representing the i965-specific vertex results that don't correspond * exactly to any element of gl_varying_slot. The values of this enum are @@ -769,7 +1092,8 @@ GLuint brw_varying_to_offset(const struct brw_vue_map *vue_map, GLuint varying) void brw_compute_vue_map(const struct gen_device_info *devinfo, struct brw_vue_map *vue_map, uint64_t slots_valid, - bool separate_shader); + bool separate_shader, + uint32_t pos_slots); void brw_compute_tess_vue_map(struct brw_vue_map *const vue_map, uint64_t slots_valid, @@ -778,14 +1102,16 @@ void brw_compute_tess_vue_map(struct brw_vue_map *const vue_map, /* brw_interpolation_map.c */ void brw_setup_vue_interpolation(struct brw_vue_map *vue_map, struct nir_shader *nir, - struct brw_wm_prog_data *prog_data, - const struct gen_device_info *devinfo); + struct brw_wm_prog_data *prog_data); enum shader_dispatch_mode { DISPATCH_MODE_4X1_SINGLE = 0, DISPATCH_MODE_4X2_DUAL_INSTANCE = 1, DISPATCH_MODE_4X2_DUAL_OBJECT = 2, DISPATCH_MODE_SIMD8 = 3, + + DISPATCH_MODE_TCS_SINGLE_PATCH = 0, + DISPATCH_MODE_TCS_8_PATCH = 2, }; /** @@ -844,12 +1170,12 @@ struct brw_vs_prog_data { GLbitfield64 inputs_read; GLbitfield64 double_inputs_read; - unsigned nr_attributes; unsigned nr_attribute_slots; bool uses_vertexid; bool uses_instanceid; - bool uses_basevertex; + bool uses_is_indexed_draw; + bool uses_firstvertex; bool uses_baseinstance; bool uses_drawid; }; @@ -858,8 +1184,14 @@ struct brw_tcs_prog_data { struct brw_vue_prog_data base; + /** Should the non-SINGLE_PATCH payload provide primitive ID? 
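+ * (Here "non-SINGLE_PATCH" means DISPATCH_MODE_TCS_8_PATCH, the only
+ * other TCS dispatch mode in shader_dispatch_mode above.)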
*/ + bool include_primitive_id; + /** Number vertices in output patch */ int instances; + + /** Track patch count threshold */ + int patch_count_threshold; }; @@ -954,11 +1286,28 @@ struct brw_clip_prog_data { uint32_t total_grf; }; -#define DEFINE_PROG_DATA_DOWNCAST(stage) \ -static inline struct brw_##stage##_prog_data * \ -brw_##stage##_prog_data(struct brw_stage_prog_data *prog_data) \ -{ \ - return (struct brw_##stage##_prog_data *) prog_data; \ +/* brw_any_prog_data is prog_data for any stage that maps to an API stage */ +union brw_any_prog_data { + struct brw_stage_prog_data base; + struct brw_vue_prog_data vue; + struct brw_vs_prog_data vs; + struct brw_tcs_prog_data tcs; + struct brw_tes_prog_data tes; + struct brw_gs_prog_data gs; + struct brw_wm_prog_data wm; + struct brw_cs_prog_data cs; +}; + +#define DEFINE_PROG_DATA_DOWNCAST(stage) \ +static inline struct brw_##stage##_prog_data * \ +brw_##stage##_prog_data(struct brw_stage_prog_data *prog_data) \ +{ \ + return (struct brw_##stage##_prog_data *) prog_data; \ +} \ +static inline const struct brw_##stage##_prog_data * \ +brw_##stage##_prog_data_const(const struct brw_stage_prog_data *prog_data) \ +{ \ + return (const struct brw_##stage##_prog_data *) prog_data; \ } DEFINE_PROG_DATA_DOWNCAST(vue) DEFINE_PROG_DATA_DOWNCAST(vs) @@ -972,11 +1321,42 @@ DEFINE_PROG_DATA_DOWNCAST(clip) DEFINE_PROG_DATA_DOWNCAST(sf) #undef DEFINE_PROG_DATA_DOWNCAST +struct brw_compile_stats { + uint32_t dispatch_width; /**< 0 for vec4 */ + uint32_t instructions; + uint32_t sends; + uint32_t loops; + uint32_t cycles; + uint32_t spills; + uint32_t fills; +}; + /** @} */ struct brw_compiler * brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo); +/** + * Returns a compiler configuration for use with disk shader cache + * + * This value only needs to change for settings that can cause different + * program generation between two runs on the same hardware. + * + * For example, it doesn't need to be different for gen 8 and gen 9 hardware, + * but it does need to be different if INTEL_DEBUG=nocompact is or isn't used. + */ +uint64_t +brw_get_compiler_config_value(const struct brw_compiler *compiler); + +unsigned +brw_prog_data_size(gl_shader_stage stage); + +unsigned +brw_prog_key_size(gl_shader_stage stage); + +void +brw_prog_key_set_id(union brw_any_prog_key *key, gl_shader_stage, unsigned id); + /** * Compile a vertex shader. 
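 *
 * A typical invocation looks roughly like the sketch below (the local
 * names are illustrative; a shader_time_index of -1 means shader time is
 * not being recorded, and on failure the result is NULL with *error_str
 * set):
 *
 *    char *error = NULL;
 *    const unsigned *assembly =
 *       brw_compile_vs(compiler, log_data, mem_ctx, &key, &prog_data,
 *                      nir, -1, stats, &error);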
* @@ -987,11 +1367,9 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data, void *mem_ctx, const struct brw_vs_prog_key *key, struct brw_vs_prog_data *prog_data, - const struct nir_shader *shader, - gl_clip_plane *clip_planes, - bool use_legacy_snorm_formula, + struct nir_shader *shader, int shader_time_index, - unsigned *final_assembly_size, + struct brw_compile_stats *stats, char **error_str); /** @@ -1005,9 +1383,9 @@ brw_compile_tcs(const struct brw_compiler *compiler, void *mem_ctx, const struct brw_tcs_prog_key *key, struct brw_tcs_prog_data *prog_data, - const struct nir_shader *nir, + struct nir_shader *nir, int shader_time_index, - unsigned *final_assembly_size, + struct brw_compile_stats *stats, char **error_str); /** @@ -1021,10 +1399,9 @@ brw_compile_tes(const struct brw_compiler *compiler, void *log_data, const struct brw_tes_prog_key *key, const struct brw_vue_map *input_vue_map, struct brw_tes_prog_data *prog_data, - const struct nir_shader *shader, - struct gl_program *prog, + struct nir_shader *shader, int shader_time_index, - unsigned *final_assembly_size, + struct brw_compile_stats *stats, char **error_str); /** @@ -1037,10 +1414,10 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data, void *mem_ctx, const struct brw_gs_prog_key *key, struct brw_gs_prog_data *prog_data, - const struct nir_shader *shader, + struct nir_shader *shader, struct gl_program *prog, int shader_time_index, - unsigned *final_assembly_size, + struct brw_compile_stats *stats, char **error_str); /** @@ -1085,13 +1462,13 @@ brw_compile_fs(const struct brw_compiler *compiler, void *log_data, void *mem_ctx, const struct brw_wm_prog_key *key, struct brw_wm_prog_data *prog_data, - const struct nir_shader *shader, - struct gl_program *prog, + struct nir_shader *shader, int shader_time_index8, int shader_time_index16, + int shader_time_index32, bool allow_spilling, bool use_rep_send, struct brw_vue_map *vue_map, - unsigned *final_assembly_size, + struct brw_compile_stats *stats, /**< Array of three stats */ char **error_str); /** @@ -1106,9 +1483,14 @@ brw_compile_cs(const struct brw_compiler *compiler, void *log_data, struct brw_cs_prog_data *prog_data, const struct nir_shader *shader, int shader_time_index, - unsigned *final_assembly_size, + struct brw_compile_stats *stats, char **error_str); +void brw_debug_key_recompile(const struct brw_compiler *c, void *log, + gl_shader_stage stage, + const struct brw_base_prog_key *old_key, + const struct brw_base_prog_key *key); + static inline uint32_t encode_slm_size(unsigned gen, uint32_t bytes) { @@ -1141,6 +1523,28 @@ encode_slm_size(unsigned gen, uint32_t bytes) return slm_size; } +unsigned +brw_cs_push_const_total_size(const struct brw_cs_prog_data *cs_prog_data, + unsigned threads); + +unsigned +brw_cs_simd_size_for_group_size(const struct gen_device_info *devinfo, + const struct brw_cs_prog_data *cs_prog_data, + unsigned group_size); + +/** + * Calculate the RightExecutionMask field used in GPGPU_WALKER. + */ +static inline unsigned +brw_cs_right_mask(unsigned group_size, unsigned simd_size) +{ + const uint32_t remainder = group_size & (simd_size - 1); + if (remainder > 0) + return ~0u >> (32 - remainder); + else + return ~0u >> (32 - simd_size); +} + /** * Return true if the given shader stage is dispatched contiguously by the * relevant fixed function starting from channel 0 of the SIMD thread, which @@ -1148,7 +1552,7 @@ encode_slm_size(unsigned gen, uint32_t bytes) * '2^n - 1' for some n. 
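 *
 * For example, a SIMD8 fragment thread covering only five pixels would be
 * dispatched with execution mask 0x1f (binary 11111, i.e. 2^5 - 1); when
 * this returns true, enabled channels never have gaps.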
*/ static inline bool -brw_stage_has_packed_dispatch(const struct gen_device_info *devinfo, +brw_stage_has_packed_dispatch(ASSERTED const struct gen_device_info *devinfo, gl_shader_stage stage, const struct brw_stage_prog_data *prog_data) { @@ -1157,7 +1561,7 @@ brw_stage_has_packed_dispatch(const struct gen_device_info *devinfo, * to do a full test run with brw_fs_test_dispatch_packing() hooked up to * the NIR front-end before changing this assertion. */ - assert(devinfo->gen <= 10); + assert(devinfo->gen <= 12); switch (stage) { case MESA_SHADER_FRAGMENT: { @@ -1190,6 +1594,35 @@ brw_stage_has_packed_dispatch(const struct gen_device_info *devinfo, } } +/** + * Computes the first varying slot in the URB produced by the previous stage + * that is used in the next stage. We do this by testing the varying slots in + * the previous stage's vue map against the inputs read in the next stage. + * + * Note that: + * + * - Each URB offset contains two varying slots and we can only skip a + * full offset if both slots are unused, so the value we return here is always + * rounded down to the closest multiple of two. + * + * - gl_Layer and gl_ViewportIndex don't have their own varying slots, they are + * part of the vue header, so if these are read we can't skip anything. + */ +static inline int +brw_compute_first_urb_slot_required(uint64_t inputs_read, + const struct brw_vue_map *prev_stage_vue_map) +{ + if ((inputs_read & (VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT)) == 0) { + for (int i = 0; i < prev_stage_vue_map->num_slots; i++) { + int varying = prev_stage_vue_map->slot_to_varying[i]; + if (varying > 0 && (inputs_read & BITFIELD64_BIT(varying)) != 0) + return ROUND_DOWN_TO(i, 2); + } + } + + return 0; +} + #ifdef __cplusplus } /* extern "C" */ #endif