diff --git a/src/intel/compiler/brw_compiler.h b/src/intel/compiler/brw_compiler.h
index d4128bccbc1..01fd0d034e5 100644
--- a/src/intel/compiler/brw_compiler.h
+++ b/src/intel/compiler/brw_compiler.h
@@ -25,9 +25,10 @@
 #define BRW_COMPILER_H
 
 #include <stdint.h>
-#include "common/gen_device_info.h"
-#include "main/mtypes.h"
+#include "dev/gen_device_info.h"
 #include "main/macros.h"
+#include "main/mtypes.h"
+#include "util/ralloc.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -36,7 +37,6 @@ extern "C" {
 struct ra_regs;
 struct nir_shader;
 struct brw_program;
-union gl_constant_value;
 
 struct brw_compiler {
    const struct gen_device_info *devinfo;
@@ -83,25 +83,64 @@ struct brw_compiler {
       uint8_t *ra_reg_to_grf;
 
       /**
-       * ra class for the aligned pairs we use for PLN, which doesn't
+       * ra class for the aligned barycentrics we use for PLN, which doesn't
        * appear in *classes.
        */
-      int aligned_pairs_class;
+      int aligned_bary_class;
    } fs_reg_sets[3];
 
    void (*shader_debug_log)(void *, const char *str, ...) PRINTFLIKE(2, 3);
    void (*shader_perf_log)(void *, const char *str, ...) PRINTFLIKE(2, 3);
 
-   bool scalar_stage[MESA_SHADER_STAGES];
-   struct gl_shader_compiler_options glsl_compiler_options[MESA_SHADER_STAGES];
+   bool scalar_stage[MESA_ALL_SHADER_STAGES];
+   bool use_tcs_8_patch;
+   struct gl_shader_compiler_options glsl_compiler_options[MESA_ALL_SHADER_STAGES];
 
    /**
     * Apply workarounds for SIN and COS output range problems.
    * This can negatively impact performance.
    */
    bool precise_trig;
+
+   /**
+    * Is 3DSTATE_CONSTANT_*'s Constant Buffer 0 relative to Dynamic State
+    * Base Address?  (If not, it's a normal GPU address.)
+    */
+   bool constant_buffer_0_is_relative;
+
+   /**
+    * Whether or not the driver supports pull constants.  If not, the compiler
+    * will attempt to push everything.
+    */
+   bool supports_pull_constants;
+
+   /**
+    * Whether or not the driver supports NIR shader constants.  This controls
+    * whether nir_opt_large_constants will be run.
+    */
+   bool supports_shader_constants;
+
+   /**
+    * Whether or not the driver wants uniform params to be compacted by the
+    * back-end compiler.
+    */
+   bool compact_params;
+
+   /**
+    * Whether or not the driver wants variable group size to be lowered by the
+    * back-end compiler.
+    */
+   bool lower_variable_group_size;
 };
 
+/**
+ * We use a constant subgroup size of 32.  It really only needs to be a
+ * maximum and, since we do SIMD32 for compute shaders in some cases, it
+ * needs to be at least 32.  SIMD8 and SIMD16 shaders will still claim a
+ * subgroup size of 32 but will act as if 16 or 24 of those channels are
+ * disabled.
+ */
+#define BRW_SUBGROUP_SIZE 32
 
 /**
  * Program key structures.
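Because SIMD8 and SIMD16 shaders still report a gl_SubgroupSize of 32, only the
low 8 or 16 channels can ever be live. A minimal sketch of that relationship
(helper name hypothetical, not part of this header):

   #include <assert.h>
   #include <stdint.h>

   /* Execution mask visible to a SIMD-n thread when the subgroup size is
    * reported as BRW_SUBGROUP_SIZE (32): the top 32-n channels stay disabled. */
   static inline uint32_t
   example_live_channel_mask(unsigned dispatch_width)
   {
      assert(dispatch_width == 8 || dispatch_width == 16 ||
             dispatch_width == 32);
      return ~0u >> (32 - dispatch_width);
   }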
@@ -168,6 +207,38 @@ struct brw_sampler_prog_key_data {
    uint32_t y_u_v_image_mask;
    uint32_t y_uv_image_mask;
    uint32_t yx_xuxv_image_mask;
+   uint32_t xy_uxvx_image_mask;
+   uint32_t ayuv_image_mask;
+   uint32_t xyuv_image_mask;
+   uint32_t bt709_mask;
+   uint32_t bt2020_mask;
+
+   /* Scale factor for each texture. */
+   float scale_factors[32];
+};
+
+/** An enum representing what kind of input gl_SubgroupSize is. */
+enum PACKED brw_subgroup_size_type
+{
+   BRW_SUBGROUP_SIZE_API_CONSTANT,  /**< Default Vulkan behavior */
+   BRW_SUBGROUP_SIZE_UNIFORM,       /**< OpenGL behavior */
+   BRW_SUBGROUP_SIZE_VARYING,       /**< VK_EXT_subgroup_size_control */
+
+   /* These enums are specifically chosen so that the value of the enum is
+    * also the subgroup size.  If any new values are added, they must respect
+    * this invariant.
+    */
+   BRW_SUBGROUP_SIZE_REQUIRE_8  = 8,  /**< VK_EXT_subgroup_size_control */
+   BRW_SUBGROUP_SIZE_REQUIRE_16 = 16, /**< VK_EXT_subgroup_size_control */
+   BRW_SUBGROUP_SIZE_REQUIRE_32 = 32, /**< VK_EXT_subgroup_size_control */
+};
+
+struct brw_base_prog_key {
+   unsigned program_string_id;
+
+   enum brw_subgroup_size_type subgroup_size_type;
+
+   struct brw_sampler_prog_key_data tex;
 };
 
 /**
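Because the BRW_SUBGROUP_SIZE_REQUIRE_* values equal the sizes they request, a
consumer can recover the required size without a lookup table. A sketch relying
only on that invariant (helper name hypothetical):

   /* Returns the size required by VK_EXT_subgroup_size_control, or 0 when
    * the type is one of the API_CONSTANT/UNIFORM/VARYING behaviors. */
   static inline unsigned
   example_required_subgroup_size(enum brw_subgroup_size_type type)
   {
      return (type >= BRW_SUBGROUP_SIZE_REQUIRE_8) ? (unsigned)type : 0;
   }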
@@ -181,16 +252,32 @@
 #define BRW_ATTRIB_WA_SIGN          32  /* interpret as signed in shader */
 #define BRW_ATTRIB_WA_SCALE         64  /* interpret as scaled in shader */
 
+/**
+ * OpenGL attribute slots fall in [0, VERT_ATTRIB_MAX - 1] with the range
+ * [VERT_ATTRIB_GENERIC0, VERT_ATTRIB_MAX - 1] reserved for up to 16 user
+ * input vertex attributes.  In Vulkan, we expose up to 28 user vertex input
+ * attributes that are mapped to slots also starting at VERT_ATTRIB_GENERIC0.
+ */
+#define MAX_GL_VERT_ATTRIB     VERT_ATTRIB_MAX
+#define MAX_VK_VERT_ATTRIB     (VERT_ATTRIB_GENERIC0 + 28)
+
 /** The program key for Vertex Shaders. */
 struct brw_vs_prog_key {
-   unsigned program_string_id;
+   struct brw_base_prog_key base;
 
    /**
    * Per-attribute workaround flags
    *
    * For each attribute, a combination of BRW_ATTRIB_WA_*.
+    *
+    * For OpenGL, where we expose a maximum of 16 user input attributes,
+    * we only need up to VERT_ATTRIB_MAX slots.  In Vulkan, however, the
+    * slots preceding VERT_ATTRIB_GENERIC0 are unused and we can expose up
+    * to 28 user input vertex attributes that are mapped to slots starting
+    * at VERT_ATTRIB_GENERIC0, so this array needs to be large enough to
+    * hold that many slots.
    */
-   uint8_t gl_attrib_wa_flags[VERT_ATTRIB_MAX];
+   uint8_t gl_attrib_wa_flags[MAX2(MAX_GL_VERT_ATTRIB, MAX_VK_VERT_ATTRIB)];
 
    bool copy_edgeflag:1;
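A driver fills one gl_attrib_wa_flags entry per vertex input slot; user
attributes start at VERT_ATTRIB_GENERIC0 in both APIs. A hedged sketch (the
attribute index `a` and the flag combination are illustrative only):

   /* Mark user attribute a as needing the signed and scaled interpretations,
    * per the BRW_ATTRIB_WA_* comments above. */
   static void
   example_set_attrib_wa(struct brw_vs_prog_key *key, unsigned a)
   {
      key->gl_attrib_wa_flags[VERT_ATTRIB_GENERIC0 + a] =
         BRW_ATTRIB_WA_SIGN | BRW_ATTRIB_WA_SCALE;
   }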
@@ -214,14 +301,12 @@ struct brw_vs_prog_key {
    * the VUE, even if they aren't written by the vertex shader.
    */
    uint8_t point_coord_replace;
-
-   struct brw_sampler_prog_key_data tex;
 };
 
 /** The program key for Tessellation Control Shaders. */
 struct brw_tcs_prog_key
 {
-   unsigned program_string_id;
+   struct brw_base_prog_key base;
 
    GLenum tes_primitive_mode;
 
@@ -234,14 +319,12 @@ struct brw_tcs_prog_key
    uint64_t outputs_written;
 
    bool quads_workaround;
-
-   struct brw_sampler_prog_key_data tex;
 };
 
 /** The program key for Tessellation Evaluation Shaders. */
 struct brw_tes_prog_key
 {
-   unsigned program_string_id;
+   struct brw_base_prog_key base;
 
    /** A bitfield of per-patch inputs read. */
    uint32_t patch_inputs_read;
@@ -249,15 +332,91 @@ struct brw_tes_prog_key
    /** A bitfield of per-vertex inputs read. */
    uint64_t inputs_read;
 
-   struct brw_sampler_prog_key_data tex;
+   /**
+    * How many user clipping planes are being uploaded to the tessellation
+    * evaluation shader as push constants.
+    *
+    * These are used for lowering legacy gl_ClipVertex/gl_Position clipping
+    * to clip distances.
+    */
+   unsigned nr_userclip_plane_consts:4;
 };
 
 /** The program key for Geometry Shaders. */
 struct brw_gs_prog_key
 {
-   unsigned program_string_id;
+   struct brw_base_prog_key base;
 
-   struct brw_sampler_prog_key_data tex;
+   /**
+    * How many user clipping planes are being uploaded to the geometry shader
+    * as push constants.
+    *
+    * These are used for lowering legacy gl_ClipVertex/gl_Position clipping
+    * to clip distances.
+    */
+   unsigned nr_userclip_plane_consts:4;
+};
+
+enum brw_sf_primitive {
+   BRW_SF_PRIM_POINTS = 0,
+   BRW_SF_PRIM_LINES = 1,
+   BRW_SF_PRIM_TRIANGLES = 2,
+   BRW_SF_PRIM_UNFILLED_TRIS = 3,
+};
+
+struct brw_sf_prog_key {
+   uint64_t attrs;
+   bool contains_flat_varying;
+   unsigned char interp_mode[65]; /* BRW_VARYING_SLOT_COUNT */
+   uint8_t point_sprite_coord_replace;
+   enum brw_sf_primitive primitive:2;
+   bool do_twoside_color:1;
+   bool frontface_ccw:1;
+   bool do_point_sprite:1;
+   bool do_point_coord:1;
+   bool sprite_origin_lower_left:1;
+   bool userclip_active:1;
+};
+
+enum brw_clip_mode {
+   BRW_CLIP_MODE_NORMAL             = 0,
+   BRW_CLIP_MODE_CLIP_ALL           = 1,
+   BRW_CLIP_MODE_CLIP_NON_REJECTED  = 2,
+   BRW_CLIP_MODE_REJECT_ALL         = 3,
+   BRW_CLIP_MODE_ACCEPT_ALL         = 4,
+   BRW_CLIP_MODE_KERNEL_CLIP        = 5,
+};
+
+enum brw_clip_fill_mode {
+   BRW_CLIP_FILL_MODE_LINE = 0,
+   BRW_CLIP_FILL_MODE_POINT = 1,
+   BRW_CLIP_FILL_MODE_FILL = 2,
+   BRW_CLIP_FILL_MODE_CULL = 3,
+};
+
+/* Note that if unfilled primitives are being emitted, we have to fix
+ * up polygon offset and flatshading at this point:
+ */
+struct brw_clip_prog_key {
+   uint64_t attrs;
+   bool contains_flat_varying;
+   bool contains_noperspective_varying;
+   unsigned char interp_mode[65]; /* BRW_VARYING_SLOT_COUNT */
+   unsigned primitive:4;
+   unsigned nr_userclip:4;
+   bool pv_first:1;
+   bool do_unfilled:1;
+   enum brw_clip_fill_mode fill_cw:2;  /* includes cull information */
+   enum brw_clip_fill_mode fill_ccw:2; /* includes cull information */
+   bool offset_cw:1;
+   bool offset_ccw:1;
+   bool copy_bfc_cw:1;
+   bool copy_bfc_ccw:1;
+   enum brw_clip_mode clip_mode:3;
+
+   float offset_factor;
+   float offset_units;
+   float offset_clamp;
 };
 
 /* A big lookup table is used to figure out which and how many
@@ -283,32 +442,44 @@ enum brw_wm_aa_enable {
 /** The program key for Fragment/Pixel Shaders. */
 struct brw_wm_prog_key {
+   struct brw_base_prog_key base;
+
    /* Some collection of BRW_WM_IZ_* */
    uint8_t iz_lookup;
    bool stats_wm:1;
    bool flat_shade:1;
    unsigned nr_color_regions:5;
-   bool replicate_alpha:1;
+   bool alpha_test_replicate_alpha:1;
+   bool alpha_to_coverage:1;
    bool clamp_fragment_color:1;
    bool persample_interp:1;
    bool multisample_fbo:1;
+   bool frag_coord_adds_sample_pos:1;
    enum brw_wm_aa_enable line_aa:2;
    bool high_quality_derivatives:1;
    bool force_dual_color_blend:1;
    bool coherent_fb_fetch:1;
+   bool ignore_sample_mask_out:1;
 
-   uint16_t drawable_height;
+   uint8_t color_outputs_valid;
    uint64_t input_slots_valid;
-   unsigned program_string_id;
    GLenum alpha_test_func;          /**< For Gen4/5 MRT alpha test */
    float alpha_test_ref;
-
-   struct brw_sampler_prog_key_data tex;
 };
 
 struct brw_cs_prog_key {
-   uint32_t program_string_id;
-   struct brw_sampler_prog_key_data tex;
+   struct brw_base_prog_key base;
+};
+
+/* brw_any_prog_key is any of the keys that map to an API stage */
+union brw_any_prog_key {
+   struct brw_base_prog_key base;
+   struct brw_vs_prog_key vs;
+   struct brw_tcs_prog_key tcs;
+   struct brw_tes_prog_key tes;
+   struct brw_gs_prog_key gs;
+   struct brw_wm_prog_key wm;
+   struct brw_cs_prog_key cs;
 };
 
 /*
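Every stage key now begins with a brw_base_prog_key, so stage-agnostic code can
go through the union's base member; brw_prog_key_set_id(), declared further
down in this header, relies on the same layout. A sketch (function name
hypothetical):

   static void
   example_init_key_base(union brw_any_prog_key *key, unsigned id)
   {
      key->base.program_string_id = id;
      key->base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM;
   }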
@@ -318,18 +489,14 @@ struct brw_cs_prog_key {
  * entries [most of them except when we're doing untyped surface
  * access] will be removed by the uniform packing pass.
  */
-#define BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET      0
-#define BRW_IMAGE_PARAM_OFFSET_OFFSET           4
-#define BRW_IMAGE_PARAM_SIZE_OFFSET             8
-#define BRW_IMAGE_PARAM_STRIDE_OFFSET           12
-#define BRW_IMAGE_PARAM_TILING_OFFSET           16
-#define BRW_IMAGE_PARAM_SWIZZLING_OFFSET        20
-#define BRW_IMAGE_PARAM_SIZE                    24
+#define BRW_IMAGE_PARAM_OFFSET_OFFSET           0
+#define BRW_IMAGE_PARAM_SIZE_OFFSET             4
+#define BRW_IMAGE_PARAM_STRIDE_OFFSET           8
+#define BRW_IMAGE_PARAM_TILING_OFFSET           12
+#define BRW_IMAGE_PARAM_SWIZZLING_OFFSET        16
+#define BRW_IMAGE_PARAM_SIZE                    20
 
 struct brw_image_param {
-   /** Surface binding table index. */
-   uint32_t surface_idx;
-
    /** Offset applied to the X and Y surface coordinates. */
    uint32_t offset[2];
 
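The BRW_IMAGE_PARAM_*_OFFSET values are dword offsets into a push-constant
block, with each field padded to a vec4 boundary; they are not offsetof()
values for the struct. A sketch of an upload loop under that layout (`dst` is
hypothetical, and the field names past offset[2] follow the full struct in the
header, which this hunk elides):

   #include <string.h>

   static void
   example_upload_image_param(uint32_t *dst /* BRW_IMAGE_PARAM_SIZE dwords */,
                              const struct brw_image_param *p)
   {
      memcpy(dst + BRW_IMAGE_PARAM_OFFSET_OFFSET,    p->offset,    2 * 4);
      memcpy(dst + BRW_IMAGE_PARAM_SIZE_OFFSET,      p->size,      3 * 4);
      memcpy(dst + BRW_IMAGE_PARAM_STRIDE_OFFSET,    p->stride,    4 * 4);
      memcpy(dst + BRW_IMAGE_PARAM_TILING_OFFSET,    p->tiling,    3 * 4);
      memcpy(dst + BRW_IMAGE_PARAM_SWIZZLING_OFFSET, p->swizzling, 2 * 4);
   }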
@@ -398,6 +565,109 @@ struct brw_image_param {
  */
 #define BRW_SHADER_TIME_STRIDE 64
 
+struct brw_ubo_range
+{
+   uint16_t block;
+   uint8_t start;
+   uint8_t length;
+};
+
+/* We reserve the first 2^16 values for builtins */
+#define BRW_PARAM_IS_BUILTIN(param) (((param) & 0xffff0000) == 0)
+
+enum brw_param_builtin {
+   BRW_PARAM_BUILTIN_ZERO,
+
+   BRW_PARAM_BUILTIN_CLIP_PLANE_0_X,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_0_Y,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_0_Z,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_0_W,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_1_X,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_1_Y,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_1_Z,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_1_W,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_2_X,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_2_Y,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_2_Z,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_2_W,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_3_X,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_3_Y,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_3_Z,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_3_W,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_4_X,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_4_Y,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_4_Z,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_4_W,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_5_X,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_5_Y,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_5_Z,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_5_W,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_6_X,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_6_Y,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_6_Z,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_6_W,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_7_X,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_7_Y,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_7_Z,
+   BRW_PARAM_BUILTIN_CLIP_PLANE_7_W,
+
+   BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X,
+   BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y,
+   BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Z,
+   BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_W,
+   BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X,
+   BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y,
+
+   BRW_PARAM_BUILTIN_PATCH_VERTICES_IN,
+
+   BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_X,
+   BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Y,
+   BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Z,
+   BRW_PARAM_BUILTIN_SUBGROUP_ID,
+   BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X,
+   BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Y,
+   BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Z,
+};
+
+#define BRW_PARAM_BUILTIN_CLIP_PLANE(idx, comp) \
+   (BRW_PARAM_BUILTIN_CLIP_PLANE_0_X + ((idx) << 2) + (comp))
+
+#define BRW_PARAM_BUILTIN_IS_CLIP_PLANE(param)  \
+   ((param) >= BRW_PARAM_BUILTIN_CLIP_PLANE_0_X && \
+    (param) <= BRW_PARAM_BUILTIN_CLIP_PLANE_7_W)
+
+#define BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(param) \
+   (((param) - BRW_PARAM_BUILTIN_CLIP_PLANE_0_X) >> 2)
+
+#define BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(param) \
+   (((param) - BRW_PARAM_BUILTIN_CLIP_PLANE_0_X) & 0x3)
+
+/** Represents a code relocation
+ *
+ * Relocatable constants are immediates in the code which we want to be able
+ * to replace post-compile with the actual value.
+ */
+struct brw_shader_reloc {
+   /** The 32-bit ID of the relocatable constant */
+   uint32_t id;
+
+   /** The offset in the shader to the relocatable instruction
+    *
+    * This is the offset to the instruction rather than the immediate value
+    * itself.  This allows us to do some sanity checking while we relocate.
+    */
+   uint32_t offset;
+};
+
+/** A value to write to a relocation */
+struct brw_shader_reloc_value {
+   /** The 32-bit ID of the relocatable constant */
+   uint32_t id;
+
+   /** The value with which to replace the relocated immediate */
+   uint32_t value;
+};
+
 struct brw_stage_prog_data {
    struct {
       /** size of our binding table. */
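The reloc/value pairs above are consumed by brw_write_shader_relocs(), declared
near the end of this header. A hedged sketch of the patch step (the ID and
address are illustrative):

   #define EXAMPLE_RELOC_ID 1  /* hypothetical; any 32-bit ID the driver chose */

   /* After copying the kernel into its final location, splat the real value
    * over every immediate that was tagged with EXAMPLE_RELOC_ID. */
   static void
   example_apply_relocs(const struct gen_device_info *devinfo,
                        void *program,
                        const struct brw_stage_prog_data *prog_data,
                        uint32_t address)
   {
      struct brw_shader_reloc_value value = {
         .id = EXAMPLE_RELOC_ID,
         .value = address,
      };
      brw_write_shader_relocs(devinfo, program, prog_data, &value, 1);
   }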
- */ - assert(surf_index <= 252); - - prog_data->binding_table.size_bytes = - MAX2(prog_data->binding_table.size_bytes, (surf_index + 1) * 4); + unsigned old_nr_params = prog_data->nr_params; + prog_data->nr_params += nr_new_params; + prog_data->param = reralloc(ralloc_parent(prog_data->param), + prog_data->param, uint32_t, + prog_data->nr_params); + return prog_data->param + old_nr_params; } +enum brw_barycentric_mode { + BRW_BARYCENTRIC_PERSPECTIVE_PIXEL = 0, + BRW_BARYCENTRIC_PERSPECTIVE_CENTROID = 1, + BRW_BARYCENTRIC_PERSPECTIVE_SAMPLE = 2, + BRW_BARYCENTRIC_NONPERSPECTIVE_PIXEL = 3, + BRW_BARYCENTRIC_NONPERSPECTIVE_CENTROID = 4, + BRW_BARYCENTRIC_NONPERSPECTIVE_SAMPLE = 5, + BRW_BARYCENTRIC_MODE_COUNT = 6 +}; +#define BRW_BARYCENTRIC_NONPERSPECTIVE_BITS \ + ((1 << BRW_BARYCENTRIC_NONPERSPECTIVE_PIXEL) | \ + (1 << BRW_BARYCENTRIC_NONPERSPECTIVE_CENTROID) | \ + (1 << BRW_BARYCENTRIC_NONPERSPECTIVE_SAMPLE)) + +enum brw_pixel_shader_computed_depth_mode { + BRW_PSCDEPTH_OFF = 0, /* PS does not compute depth */ + BRW_PSCDEPTH_ON = 1, /* PS computes depth; no guarantee about value */ + BRW_PSCDEPTH_ON_GE = 2, /* PS guarantees output depth >= source depth */ + BRW_PSCDEPTH_ON_LE = 3, /* PS guarantees output depth <= source depth */ +}; + /* Data about a particular attempt to compile a program. Note that * there can be many of these, each in a different GL state * corresponding to a different brw_wm_prog_key struct, with different @@ -467,17 +784,19 @@ struct brw_wm_prog_data { GLuint num_varying_inputs; - uint8_t reg_blocks_0; - uint8_t reg_blocks_2; + uint8_t reg_blocks_8; + uint8_t reg_blocks_16; + uint8_t reg_blocks_32; - uint8_t dispatch_grf_start_reg_2; - uint32_t prog_offset_2; + uint8_t dispatch_grf_start_reg_16; + uint8_t dispatch_grf_start_reg_32; + uint32_t prog_offset_16; + uint32_t prog_offset_32; struct { /** @{ * surface indices the WM-specific surfaces */ - uint32_t render_target_start; uint32_t render_target_read_start; /** @} */ } binding_table; @@ -490,6 +809,7 @@ struct brw_wm_prog_data { bool inner_coverage; bool dispatch_8; bool dispatch_16; + bool dispatch_32; bool dual_src_blend; bool persample_dispatch; bool uses_pos_offset; @@ -498,6 +818,7 @@ struct brw_wm_prog_data { bool uses_src_depth; bool uses_src_w; bool uses_sample_mask; + bool has_render_target_reads; bool has_side_effects; bool pulls_bary; @@ -516,6 +837,11 @@ struct brw_wm_prog_data { */ uint32_t flat_inputs; + /** + * The FS inputs + */ + uint64_t inputs; + /* Mapping of VUE slots to interpolation modes. * Used by the Gen4-5 clip/sf/wm stages. */ @@ -527,8 +853,101 @@ struct brw_wm_prog_data { * For varying slots that are not used by the FS, the value is -1. */ int urb_setup[VARYING_SLOT_MAX]; + + /** + * Cache structure into the urb_setup array above that contains the + * attribute numbers of active varyings out of urb_setup. + * The actual count is stored in urb_setup_attribs_count. + */ + uint8_t urb_setup_attribs[VARYING_SLOT_MAX]; + uint8_t urb_setup_attribs_count; }; +/** Returns the SIMD width corresponding to a given KSP index + * + * The "Variable Pixel Dispatch" table in the PRM (which can be found, for + * example in Vol. 7 of the SKL PRM) has a mapping from dispatch widths to + * kernel start pointer (KSP) indices that is based on what dispatch widths + * are enabled. This function provides, effectively, the reverse mapping. + * + * If the given KSP is valid with respect to the SIMD8/16/32 enables, a SIMD + * width of 8, 16, or 32 is returned. If the KSP is invalid, 0 is returned. 
+ */ +static inline unsigned +brw_fs_simd_width_for_ksp(unsigned ksp_idx, bool simd8_enabled, + bool simd16_enabled, bool simd32_enabled) +{ + /* This function strictly ignores contiguous dispatch */ + switch (ksp_idx) { + case 0: + return simd8_enabled ? 8 : + (simd16_enabled && !simd32_enabled) ? 16 : + (simd32_enabled && !simd16_enabled) ? 32 : 0; + case 1: + return (simd32_enabled && (simd16_enabled || simd8_enabled)) ? 32 : 0; + case 2: + return (simd16_enabled && (simd32_enabled || simd8_enabled)) ? 16 : 0; + default: + unreachable("Invalid KSP index"); + } +} + +#define brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx) \ + brw_fs_simd_width_for_ksp((ksp_idx), (wm_state)._8PixelDispatchEnable, \ + (wm_state)._16PixelDispatchEnable, \ + (wm_state)._32PixelDispatchEnable) + +#define brw_wm_state_has_ksp(wm_state, ksp_idx) \ + (brw_wm_state_simd_width_for_ksp((wm_state), (ksp_idx)) != 0) + +static inline uint32_t +_brw_wm_prog_data_prog_offset(const struct brw_wm_prog_data *prog_data, + unsigned simd_width) +{ + switch (simd_width) { + case 8: return 0; + case 16: return prog_data->prog_offset_16; + case 32: return prog_data->prog_offset_32; + default: return 0; + } +} + +#define brw_wm_prog_data_prog_offset(prog_data, wm_state, ksp_idx) \ + _brw_wm_prog_data_prog_offset(prog_data, \ + brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx)) + +static inline uint8_t +_brw_wm_prog_data_dispatch_grf_start_reg(const struct brw_wm_prog_data *prog_data, + unsigned simd_width) +{ + switch (simd_width) { + case 8: return prog_data->base.dispatch_grf_start_reg; + case 16: return prog_data->dispatch_grf_start_reg_16; + case 32: return prog_data->dispatch_grf_start_reg_32; + default: return 0; + } +} + +#define brw_wm_prog_data_dispatch_grf_start_reg(prog_data, wm_state, ksp_idx) \ + _brw_wm_prog_data_dispatch_grf_start_reg(prog_data, \ + brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx)) + +static inline uint8_t +_brw_wm_prog_data_reg_blocks(const struct brw_wm_prog_data *prog_data, + unsigned simd_width) +{ + switch (simd_width) { + case 8: return prog_data->reg_blocks_8; + case 16: return prog_data->reg_blocks_16; + case 32: return prog_data->reg_blocks_32; + default: return 0; + } +} + +#define brw_wm_prog_data_reg_blocks(prog_data, wm_state, ksp_idx) \ + _brw_wm_prog_data_reg_blocks(prog_data, \ + brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx)) + struct brw_push_const_block { unsigned dwords; /* Dword count, not reg aligned */ unsigned regs; @@ -538,18 +957,27 @@ struct brw_push_const_block { struct brw_cs_prog_data { struct brw_stage_prog_data base; - GLuint dispatch_grf_start_reg_16; unsigned local_size[3]; - unsigned simd_size; - unsigned threads; + unsigned slm_size; + + /* Program offsets for the 8/16/32 SIMD variants. Multiple variants are + * kept when using variable group size, and the right one can only be + * decided at dispatch time. + */ + unsigned prog_offset[3]; + + /* Bitmask indicating which program offsets are valid. */ + unsigned prog_mask; + + /* Bitmask indicating which programs have spilled. 
@@ -467,17 +784,19 @@ struct brw_wm_prog_data {
 
    GLuint num_varying_inputs;
 
-   uint8_t reg_blocks_0;
-   uint8_t reg_blocks_2;
+   uint8_t reg_blocks_8;
+   uint8_t reg_blocks_16;
+   uint8_t reg_blocks_32;
 
-   uint8_t dispatch_grf_start_reg_2;
-   uint32_t prog_offset_2;
+   uint8_t dispatch_grf_start_reg_16;
+   uint8_t dispatch_grf_start_reg_32;
+   uint32_t prog_offset_16;
+   uint32_t prog_offset_32;
 
    struct {
       /** @{
        * surface indices the WM-specific surfaces
        */
-      uint32_t render_target_start;
       uint32_t render_target_read_start;
       /** @} */
    } binding_table;
@@ -490,6 +809,7 @@ struct brw_wm_prog_data {
    bool inner_coverage;
    bool dispatch_8;
    bool dispatch_16;
+   bool dispatch_32;
    bool dual_src_blend;
    bool persample_dispatch;
    bool uses_pos_offset;
@@ -498,6 +818,7 @@ struct brw_wm_prog_data {
    bool uses_src_depth;
    bool uses_src_w;
    bool uses_sample_mask;
+   bool has_render_target_reads;
    bool has_side_effects;
    bool pulls_bary;
 
@@ -516,6 +837,11 @@ struct brw_wm_prog_data {
    */
   uint32_t flat_inputs;
 
+   /**
+    * The FS inputs
+    */
+   uint64_t inputs;
+
    /* Mapping of VUE slots to interpolation modes.
    * Used by the Gen4-5 clip/sf/wm stages.
    */
@@ -527,8 +853,101 @@ struct brw_wm_prog_data {
    * For varying slots that are not used by the FS, the value is -1.
    */
   int urb_setup[VARYING_SLOT_MAX];
+
+   /**
+    * Cache structure into the urb_setup array above that contains the
+    * attribute numbers of active varyings out of urb_setup.
+    * The actual count is stored in urb_setup_attribs_count.
+    */
+   uint8_t urb_setup_attribs[VARYING_SLOT_MAX];
+   uint8_t urb_setup_attribs_count;
 };
 
+/** Returns the SIMD width corresponding to a given KSP index
+ *
+ * The "Variable Pixel Dispatch" table in the PRM (which can be found, for
+ * example in Vol. 7 of the SKL PRM) has a mapping from dispatch widths to
+ * kernel start pointer (KSP) indices that is based on what dispatch widths
+ * are enabled.  This function provides, effectively, the reverse mapping.
+ *
+ * If the given KSP is valid with respect to the SIMD8/16/32 enables, a SIMD
+ * width of 8, 16, or 32 is returned.  If the KSP is invalid, 0 is returned.
+ */
+static inline unsigned
+brw_fs_simd_width_for_ksp(unsigned ksp_idx, bool simd8_enabled,
+                          bool simd16_enabled, bool simd32_enabled)
+{
+   /* This function strictly ignores contiguous dispatch */
+   switch (ksp_idx) {
+   case 0:
+      return simd8_enabled ? 8 :
+             (simd16_enabled && !simd32_enabled) ? 16 :
+             (simd32_enabled && !simd16_enabled) ? 32 : 0;
+   case 1:
+      return (simd32_enabled && (simd16_enabled || simd8_enabled)) ? 32 : 0;
+   case 2:
+      return (simd16_enabled && (simd32_enabled || simd8_enabled)) ? 16 : 0;
+   default:
+      unreachable("Invalid KSP index");
+   }
+}
+
+#define brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx) \
+   brw_fs_simd_width_for_ksp((ksp_idx), (wm_state)._8PixelDispatchEnable, \
+                             (wm_state)._16PixelDispatchEnable, \
+                             (wm_state)._32PixelDispatchEnable)
+
+#define brw_wm_state_has_ksp(wm_state, ksp_idx) \
+   (brw_wm_state_simd_width_for_ksp((wm_state), (ksp_idx)) != 0)
+
+static inline uint32_t
+_brw_wm_prog_data_prog_offset(const struct brw_wm_prog_data *prog_data,
+                              unsigned simd_width)
+{
+   switch (simd_width) {
+   case 8: return 0;
+   case 16: return prog_data->prog_offset_16;
+   case 32: return prog_data->prog_offset_32;
+   default: return 0;
+   }
+}
+
+#define brw_wm_prog_data_prog_offset(prog_data, wm_state, ksp_idx) \
+   _brw_wm_prog_data_prog_offset(prog_data, \
+      brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx))
+
+static inline uint8_t
+_brw_wm_prog_data_dispatch_grf_start_reg(const struct brw_wm_prog_data *prog_data,
+                                         unsigned simd_width)
+{
+   switch (simd_width) {
+   case 8: return prog_data->base.dispatch_grf_start_reg;
+   case 16: return prog_data->dispatch_grf_start_reg_16;
+   case 32: return prog_data->dispatch_grf_start_reg_32;
+   default: return 0;
+   }
+}
+
+#define brw_wm_prog_data_dispatch_grf_start_reg(prog_data, wm_state, ksp_idx) \
+   _brw_wm_prog_data_dispatch_grf_start_reg(prog_data, \
+      brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx))
+
+static inline uint8_t
+_brw_wm_prog_data_reg_blocks(const struct brw_wm_prog_data *prog_data,
+                             unsigned simd_width)
+{
+   switch (simd_width) {
+   case 8: return prog_data->reg_blocks_8;
+   case 16: return prog_data->reg_blocks_16;
+   case 32: return prog_data->reg_blocks_32;
+   default: return 0;
+   }
+}
+
+#define brw_wm_prog_data_reg_blocks(prog_data, wm_state, ksp_idx) \
+   _brw_wm_prog_data_reg_blocks(prog_data, \
+      brw_wm_state_simd_width_for_ksp(wm_state, ksp_idx))
+
 struct brw_push_const_block {
    unsigned dwords; /* Dword count, not reg aligned */
    unsigned regs;
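The enable-dependent KSP mapping is easiest to see on concrete cases; these
follow directly from the switch in brw_fs_simd_width_for_ksp() above
(illustration only; assumes <assert.h> and <stdbool.h>):

   static inline void
   example_check_ksp_mapping(void)
   {
      /* SIMD8 + SIMD16 enabled, SIMD32 disabled: the PRM's table puts the
       * SIMD8 kernel at KSP 0 and the SIMD16 kernel at KSP 2; KSP 1 is
       * unused in that configuration. */
      assert(brw_fs_simd_width_for_ksp(0, true, true, false) == 8);
      assert(brw_fs_simd_width_for_ksp(1, true, true, false) == 0);
      assert(brw_fs_simd_width_for_ksp(2, true, true, false) == 16);

      /* SIMD32 alone moves down to KSP 0. */
      assert(brw_fs_simd_width_for_ksp(0, false, false, true) == 32);
   }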
@@ -538,18 +957,27 @@ struct brw_push_const_block {
 struct brw_cs_prog_data {
    struct brw_stage_prog_data base;
 
-   GLuint dispatch_grf_start_reg_16;
    unsigned local_size[3];
-   unsigned simd_size;
-   unsigned threads;
+   unsigned slm_size;
+
+   /* Program offsets for the 8/16/32 SIMD variants.  Multiple variants are
+    * kept when using variable group size, and the right one can only be
+    * decided at dispatch time.
+    */
+   unsigned prog_offset[3];
+
+   /* Bitmask indicating which program offsets are valid. */
+   unsigned prog_mask;
+
+   /* Bitmask indicating which programs have spilled. */
+   unsigned prog_spilled;
+
    bool uses_barrier;
    bool uses_num_work_groups;
-   int thread_local_id_index;
 
    struct {
       struct brw_push_const_block cross_thread;
      struct brw_push_const_block per_thread;
-      struct brw_push_const_block total;
    } push;
 
    struct {
@@ -561,6 +989,18 @@ struct brw_cs_prog_data {
    } binding_table;
 };
 
+static inline uint32_t
+brw_cs_prog_data_prog_offset(const struct brw_cs_prog_data *prog_data,
+                             unsigned dispatch_width)
+{
+   assert(dispatch_width == 8 ||
+          dispatch_width == 16 ||
+          dispatch_width == 32);
+   const unsigned index = dispatch_width / 16;
+   assert(prog_data->prog_mask & (1 << index));
+   return prog_data->prog_offset[index];
+}
+
 /**
  * Enum representing the i965-specific vertex results that don't correspond
  * exactly to any element of gl_varying_slot.  The values of this enum are
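With variable group size, the dispatch-time flow pairs
brw_cs_simd_size_for_group_size() (declared further down in this header) with
the accessor above; dispatch_width / 16 maps 8, 16, 32 to indices 0, 1, 2. A
sketch (function name hypothetical):

   static uint32_t
   example_cs_kernel_offset(const struct gen_device_info *devinfo,
                            const struct brw_cs_prog_data *prog_data,
                            unsigned group_size)
   {
      unsigned simd =
         brw_cs_simd_size_for_group_size(devinfo, prog_data, group_size);
      return brw_cs_prog_data_prog_offset(prog_data, simd);
   }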
@@ -684,7 +1124,8 @@ GLuint brw_varying_to_offset(const struct brw_vue_map *vue_map, GLuint varying)
 void brw_compute_vue_map(const struct gen_device_info *devinfo,
                          struct brw_vue_map *vue_map,
                          uint64_t slots_valid,
-                         bool separate_shader);
+                         bool separate_shader,
+                         uint32_t pos_slots);
 
 void brw_compute_tess_vue_map(struct brw_vue_map *const vue_map,
                               uint64_t slots_valid,
@@ -693,14 +1134,16 @@ void brw_compute_tess_vue_map(struct brw_vue_map *const vue_map,
 /* brw_interpolation_map.c */
 void brw_setup_vue_interpolation(struct brw_vue_map *vue_map,
                                  struct nir_shader *nir,
-                                 struct brw_wm_prog_data *prog_data,
-                                 const struct gen_device_info *devinfo);
+                                 struct brw_wm_prog_data *prog_data);
 
 enum shader_dispatch_mode {
    DISPATCH_MODE_4X1_SINGLE = 0,
    DISPATCH_MODE_4X2_DUAL_INSTANCE = 1,
    DISPATCH_MODE_4X2_DUAL_OBJECT = 2,
    DISPATCH_MODE_SIMD8 = 3,
+
+   DISPATCH_MODE_TCS_SINGLE_PATCH = 0,
+   DISPATCH_MODE_TCS_8_PATCH = 2,
 };
 
 /**
@@ -759,12 +1202,12 @@ struct brw_vs_prog_data {
 
    GLbitfield64 inputs_read;
    GLbitfield64 double_inputs_read;
 
-   unsigned nr_attributes;
    unsigned nr_attribute_slots;
 
    bool uses_vertexid;
    bool uses_instanceid;
-   bool uses_basevertex;
+   bool uses_is_indexed_draw;
+   bool uses_firstvertex;
    bool uses_baseinstance;
    bool uses_drawid;
 };
@@ -773,8 +1216,14 @@ struct brw_tcs_prog_data
 {
    struct brw_vue_prog_data base;
 
+   /** Should the non-SINGLE_PATCH payload provide primitive ID? */
+   bool include_primitive_id;
+
    /** Number of vertices in output patch */
    int instances;
+
+   /** Track patch count threshold */
+   int patch_count_threshold;
 };
 
 
@@ -849,11 +1298,48 @@ struct brw_gs_prog_data
    unsigned char transform_feedback_swizzles[64 /* BRW_MAX_SOL_BINDINGS */];
 };
 
-#define DEFINE_PROG_DATA_DOWNCAST(stage)                       \
-static inline struct brw_##stage##_prog_data *                 \
-brw_##stage##_prog_data(struct brw_stage_prog_data *prog_data) \
-{                                                              \
-   return (struct brw_##stage##_prog_data *) prog_data;        \
+struct brw_sf_prog_data {
+   uint32_t urb_read_length;
+   uint32_t total_grf;
+
+   /* Each vertex may have up to 12 attributes, 4 components each,
+    * except WPOS, which requires only 2.  (11*4 + 2) == 46 ==> 12
+    * rows.
+    *
+    * Actually we use 4 for each, so call it 12 rows.
+    */
+   unsigned urb_entry_size;
+};
+
+struct brw_clip_prog_data {
+   uint32_t curb_read_length;  /* user planes? */
+   uint32_t clip_mode;
+   uint32_t urb_read_length;
+   uint32_t total_grf;
+};
+
+/* brw_any_prog_data is prog_data for any stage that maps to an API stage */
+union brw_any_prog_data {
+   struct brw_stage_prog_data base;
+   struct brw_vue_prog_data vue;
+   struct brw_vs_prog_data vs;
+   struct brw_tcs_prog_data tcs;
+   struct brw_tes_prog_data tes;
+   struct brw_gs_prog_data gs;
+   struct brw_wm_prog_data wm;
+   struct brw_cs_prog_data cs;
+};
+
+#define DEFINE_PROG_DATA_DOWNCAST(stage)                                    \
+static inline struct brw_##stage##_prog_data *                              \
+brw_##stage##_prog_data(struct brw_stage_prog_data *prog_data)              \
+{                                                                           \
+   return (struct brw_##stage##_prog_data *) prog_data;                     \
+}                                                                           \
+static inline const struct brw_##stage##_prog_data *                        \
+brw_##stage##_prog_data_const(const struct brw_stage_prog_data *prog_data)  \
+{                                                                           \
+   return (const struct brw_##stage##_prog_data *) prog_data;               \
 }
 DEFINE_PROG_DATA_DOWNCAST(vue)
 DEFINE_PROG_DATA_DOWNCAST(vs)
@@ -867,11 +1353,42 @@ DEFINE_PROG_DATA_DOWNCAST(clip)
 DEFINE_PROG_DATA_DOWNCAST(sf)
 #undef DEFINE_PROG_DATA_DOWNCAST
 
+struct brw_compile_stats {
+   uint32_t dispatch_width; /**< 0 for vec4 */
+   uint32_t instructions;
+   uint32_t sends;
+   uint32_t loops;
+   uint32_t cycles;
+   uint32_t spills;
+   uint32_t fills;
+};
+
 /** @} */
 
 struct brw_compiler *
 brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo);
 
+/**
+ * Returns a compiler configuration for use with disk shader cache
+ *
+ * This value only needs to change for settings that can cause different
+ * program generation between two runs on the same hardware.
+ *
+ * For example, it doesn't need to be different for gen 8 and gen 9 hardware,
+ * but it does need to be different if INTEL_DEBUG=nocompact is or isn't used.
+ */
+uint64_t
+brw_get_compiler_config_value(const struct brw_compiler *compiler);
+
+unsigned
+brw_prog_data_size(gl_shader_stage stage);
+
+unsigned
+brw_prog_key_size(gl_shader_stage stage);
+
+void
+brw_prog_key_set_id(union brw_any_prog_key *key, gl_shader_stage stage,
+                    unsigned id);
+
 /**
  * Compile a vertex shader.
 *
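The downcast helpers generated above are plain casts, in both mutable and const
flavors; the caller is responsible for knowing which stage the prog_data was
built for. A sketch (function name hypothetical):

   static inline bool
   example_fs_dispatches_simd32(const struct brw_stage_prog_data *prog_data)
   {
      /* Only valid when prog_data came from a fragment-shader compile. */
      return brw_wm_prog_data_const(prog_data)->dispatch_32;
   }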
@@ -882,11 +1399,9 @@ brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
                void *mem_ctx,
                const struct brw_vs_prog_key *key,
                struct brw_vs_prog_data *prog_data,
-               const struct nir_shader *shader,
-               gl_clip_plane *clip_planes,
-               bool use_legacy_snorm_formula,
+               struct nir_shader *shader,
                int shader_time_index,
-               unsigned *final_assembly_size,
+               struct brw_compile_stats *stats,
                char **error_str);
 
 /**
@@ -900,9 +1415,9 @@ brw_compile_tcs(const struct brw_compiler *compiler,
                 void *mem_ctx,
                 const struct brw_tcs_prog_key *key,
                 struct brw_tcs_prog_data *prog_data,
-                const struct nir_shader *nir,
+                struct nir_shader *nir,
                 int shader_time_index,
-                unsigned *final_assembly_size,
+                struct brw_compile_stats *stats,
                 char **error_str);
 
 /**
@@ -916,10 +1431,9 @@ brw_compile_tes(const struct brw_compiler *compiler, void *log_data,
                 const struct brw_tes_prog_key *key,
                 const struct brw_vue_map *input_vue_map,
                 struct brw_tes_prog_data *prog_data,
-                const struct nir_shader *shader,
-                struct gl_program *prog,
+                struct nir_shader *shader,
                 int shader_time_index,
-                unsigned *final_assembly_size,
+                struct brw_compile_stats *stats,
                 char **error_str);
 
 /**
@@ -932,12 +1446,44 @@ brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
                void *mem_ctx,
                const struct brw_gs_prog_key *key,
                struct brw_gs_prog_data *prog_data,
-               const struct nir_shader *shader,
+               struct nir_shader *shader,
                struct gl_program *prog,
                int shader_time_index,
-               unsigned *final_assembly_size,
+               struct brw_compile_stats *stats,
                char **error_str);
 
+/**
+ * Compile a strips and fans shader.
+ *
+ * This is a fixed-function shader determined entirely by the shader key and
+ * a VUE map.
+ *
+ * Returns the final assembly and the program's size.
+ */
+const unsigned *
+brw_compile_sf(const struct brw_compiler *compiler,
+               void *mem_ctx,
+               const struct brw_sf_prog_key *key,
+               struct brw_sf_prog_data *prog_data,
+               struct brw_vue_map *vue_map,
+               unsigned *final_assembly_size);
+
+/**
+ * Compile a clipper shader.
+ *
+ * This is a fixed-function shader determined entirely by the shader key and
+ * a VUE map.
+ *
+ * Returns the final assembly and the program's size.
+ */
+const unsigned *
+brw_compile_clip(const struct brw_compiler *compiler,
+                 void *mem_ctx,
+                 const struct brw_clip_prog_key *key,
+                 struct brw_clip_prog_data *prog_data,
+                 struct brw_vue_map *vue_map,
+                 unsigned *final_assembly_size);
+
 /**
  * Compile a fragment shader.
  *
@@ -948,13 +1494,13 @@ brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
                void *mem_ctx,
                const struct brw_wm_prog_key *key,
                struct brw_wm_prog_data *prog_data,
-               const struct nir_shader *shader,
-               struct gl_program *prog,
+               struct nir_shader *shader,
                int shader_time_index8,
                int shader_time_index16,
+               int shader_time_index32,
                bool allow_spilling,
                bool use_rep_send, struct brw_vue_map *vue_map,
-               unsigned *final_assembly_size,
+               struct brw_compile_stats *stats, /**< Array of three stats */
                char **error_str);
 
 /**
@@ -969,9 +1515,14 @@ brw_compile_cs(const struct brw_compiler *compiler, void *log_data,
                struct brw_cs_prog_data *prog_data,
                const struct nir_shader *shader,
                int shader_time_index,
-               unsigned *final_assembly_size,
+               struct brw_compile_stats *stats,
                char **error_str);
 
+void brw_debug_key_recompile(const struct brw_compiler *c, void *log,
+                             gl_shader_stage stage,
+                             const struct brw_base_prog_key *old_key,
+                             const struct brw_base_prog_key *key);
+
 static inline uint32_t
 encode_slm_size(unsigned gen, uint32_t bytes)
 {
@@ -1004,6 +1555,35 @@ encode_slm_size(unsigned gen, uint32_t bytes)
    return slm_size;
 }
 
+unsigned
+brw_cs_push_const_total_size(const struct brw_cs_prog_data *cs_prog_data,
+                             unsigned threads);
+
+unsigned
+brw_cs_simd_size_for_group_size(const struct gen_device_info *devinfo,
+                                const struct brw_cs_prog_data *cs_prog_data,
+                                unsigned group_size);
+
+void
+brw_write_shader_relocs(const struct gen_device_info *devinfo,
+                        void *program,
+                        const struct brw_stage_prog_data *prog_data,
+                        struct brw_shader_reloc_value *values,
+                        unsigned num_values);
+
+/**
+ * Calculate the RightExecutionMask field used in GPGPU_WALKER.
+ */
+static inline unsigned
+brw_cs_right_mask(unsigned group_size, unsigned simd_size)
+{
+   const uint32_t remainder = group_size & (simd_size - 1);
+   if (remainder > 0)
+      return ~0u >> (32 - remainder);
+   else
+      return ~0u >> (32 - simd_size);
+}
+
 /**
  * Return true if the given shader stage is dispatched contiguously by the
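Two worked cases of the mask computation in brw_cs_right_mask() above
(illustration only; assumes <assert.h>):

   static inline void
   example_check_right_mask(void)
   {
      /* A 40-invocation group at SIMD32: the last thread has 40 & 31 == 8
       * live channels, so the right mask keeps only the low 8 bits. */
      assert(brw_cs_right_mask(40, 32) == 0x000000ff);
      /* 64 is a multiple of 32, so the final thread is full. */
      assert(brw_cs_right_mask(64, 32) == 0xffffffffu);
   }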
+ */ +static inline int +brw_compute_first_urb_slot_required(uint64_t inputs_read, + const struct brw_vue_map *prev_stage_vue_map) +{ + if ((inputs_read & (VARYING_BIT_LAYER | VARYING_BIT_VIEWPORT)) == 0) { + for (int i = 0; i < prev_stage_vue_map->num_slots; i++) { + int varying = prev_stage_vue_map->slot_to_varying[i]; + if (varying > 0 && (inputs_read & BITFIELD64_BIT(varying)) != 0) + return ROUND_DOWN_TO(i, 2); + } + } + + return 0; +} + #ifdef __cplusplus } /* extern "C" */ #endif