From: Kenneth Graunke
Date: Tue, 15 Nov 2016 19:43:07 +0000 (-0800)
Subject: intel: Share URB configuration code between GL and Vulkan.
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=9ef2b9277d3bead6dbfa47e95794ca61e8be4e84;p=mesa.git

intel: Share URB configuration code between GL and Vulkan.

This code is far too complicated to cut and paste.

v2: Update the newly added genX_gpu_memcpy.c; const a few things.

Signed-off-by: Kenneth Graunke
Reviewed-by: Topi Pohjolainen
---

diff --git a/src/intel/Makefile.sources b/src/intel/Makefile.sources
index c1740fe3b57..be6bdef7b69 100644
--- a/src/intel/Makefile.sources
+++ b/src/intel/Makefile.sources
@@ -11,6 +11,7 @@ COMMON_FILES = \
 common/gen_device_info.h \
 common/gen_l3_config.c \
 common/gen_l3_config.h \
+ common/gen_urb_config.c \
 common/gen_sample_positions.h

 GENXML_GENERATED_FILES = \
diff --git a/src/intel/common/gen_l3_config.h b/src/intel/common/gen_l3_config.h
index 25a9675f462..8dc7dda0fcc 100644
--- a/src/intel/common/gen_l3_config.h
+++ b/src/intel/common/gen_l3_config.h
@@ -92,4 +92,10 @@ gen_get_l3_config_urb_size(const struct gen_device_info *devinfo,
 void gen_dump_l3_config(const struct gen_l3_config *cfg, FILE *fp);

+void gen_get_urb_config(const struct gen_device_info *devinfo,
+ unsigned push_constant_bytes, unsigned urb_size_bytes,
+ bool tess_present, bool gs_present,
+ const unsigned entry_size[4],
+ unsigned entries[4], unsigned start[4]);
+
 #endif /* GEN_L3_CONFIG_H */
diff --git a/src/intel/common/gen_urb_config.c b/src/intel/common/gen_urb_config.c
new file mode 100644
index 00000000000..c925a0dbb36
--- /dev/null
+++ b/src/intel/common/gen_urb_config.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include 
+#include 
+
+#include "util/macros.h"
+#include "main/macros.h"
+
+#include "gen_l3_config.h"
+
+/**
+ * The following diagram shows how we partition the URB:
+ *
+ *      16kB or 32kB                     Rest of the URB space
+ *   __________-__________   _________________-_________________
+ *  /                     \ /                                   \
+ * +-------------------------------------------------------------+
+ * |  VS/HS/DS/GS/FS Push  |           VS/HS/DS/GS URB           |
+ * |       Constants       |               Entries               |
+ * +-------------------------------------------------------------+
+ *
+ * Push constants must be stored at the beginning of the URB space,
+ * while URB entries can be stored anywhere. We choose to lay them
+ * out in pipeline order (VS -> HS -> DS -> GS).
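+ *
+ * A worked example (numbers invented for illustration, not taken from
+ * any particular L3 configuration): with a 1024kB URB and 32kB reserved
+ * for push constants, the push constants fill the first four 8kB chunks,
+ * so the VS URB entries begin at chunk 4, and the HS, DS and GS regions
+ * are packed immediately after the VS region in pipeline order.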
+ */
+
+/**
+ * Decide how to partition the URB among the various stages.
+ *
+ * \param[in] push_constant_bytes - space allocated for push constants.
+ * \param[in] urb_size_bytes - total size of the URB (from L3 config).
+ * \param[in] tess_present - are tessellation shaders active?
+ * \param[in] gs_present - are geometry shaders active?
+ * \param[in] entry_size - the URB entry size (from the shader compiler).
+ * \param[out] entries - the number of URB entries for each stage.
+ * \param[out] start - the starting offset for each stage.
+ */
+void
+gen_get_urb_config(const struct gen_device_info *devinfo,
+ unsigned push_constant_bytes, unsigned urb_size_bytes,
+ bool tess_present, bool gs_present,
+ const unsigned entry_size[4],
+ unsigned entries[4], unsigned start[4])
+{
+ const bool active[4] = { true, tess_present, tess_present, gs_present };
+
+ /* URB allocations must be done in 8k chunks. */
+ const unsigned chunk_size_bytes = 8192;
+
+ const unsigned push_constant_chunks =
+ push_constant_bytes / chunk_size_bytes;
+ const unsigned urb_chunks = urb_size_bytes / chunk_size_bytes;
+
+ /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
+ *
+ * VS Number of URB Entries must be divisible by 8 if the VS URB Entry
+ * Allocation Size is less than 9 512-bit URB entries.
+ *
+ * Similar text exists for HS, DS and GS.
+ */
+ unsigned granularity[4];
+ for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
+ granularity[i] = (entry_size[i] < 9) ? 8 : 1;
+ }
+
+ unsigned min_entries[4] = {
+ /* VS has a lower limit on the number of URB entries.
+ *
+ * From the Broadwell PRM, 3DSTATE_URB_VS instruction:
+ * "When tessellation is enabled, the VS Number of URB Entries must be
+ * greater than or equal to 192."
+ */
+ [MESA_SHADER_VERTEX] = tess_present && devinfo->gen == 8 ?
+ 192 : devinfo->urb.min_entries[MESA_SHADER_VERTEX],
+
+ /* There are two constraints on the minimum amount of URB space we can
+ * allocate:
+ *
+ * (1) We need room for at least 2 URB entries, since we always operate
+ * the GS in DUAL_OBJECT mode.
+ *
+ * (2) We can't allocate less than nr_gs_entries_granularity.
+ */
+ [MESA_SHADER_GEOMETRY] = gs_present ? 2 : 0,
+
+ [MESA_SHADER_TESS_CTRL] = tess_present ? 1 : 0,
+
+ [MESA_SHADER_TESS_EVAL] = tess_present ?
+ devinfo->urb.min_entries[MESA_SHADER_TESS_EVAL] : 0,
+ };
+
+ /* Min VS Entries isn't a multiple of 8 on Cherryview/Broxton; round up.
+ * Round all stages' minimums up to their granularity.
+ */
+ for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
+ min_entries[i] = ALIGN(min_entries[i], granularity[i]);
+ }
+
+ unsigned entry_size_bytes[4];
+ for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
+ entry_size_bytes[i] = 64 * entry_size[i];
+ }
+
+ /* Initially, assign each stage the minimum amount of URB space it needs,
+ * and make a note of how much additional space it "wants" (the amount of
+ * additional space it could actually make use of).
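+ *
+ * A sketch with invented numbers: for a VS entry size of 2 (128 bytes)
+ * and min_entries[VS] = 64, the VS needs
+ * DIV_ROUND_UP(64 * 128, 8192) = 1 chunk, and with max_entries[VS] = 2560
+ * it wants up to DIV_ROUND_UP(2560 * 128, 8192) - 1 = 39 chunks more.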
+ */ + unsigned chunks[4]; + unsigned wants[4]; + unsigned total_needs = push_constant_chunks; + unsigned total_wants = 0; + + for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) { + if (active[i]) { + chunks[i] = DIV_ROUND_UP(min_entries[i] * entry_size_bytes[i], + chunk_size_bytes); + + wants[i] = + DIV_ROUND_UP(devinfo->urb.max_entries[i] * entry_size_bytes[i], + chunk_size_bytes) - chunks[i]; + } else { + chunks[i] = 0; + wants[i] = 0; + } + + total_needs += chunks[i]; + total_wants += wants[i]; + } + + assert(total_needs <= urb_chunks); + + /* Mete out remaining space (if any) in proportion to "wants". */ + unsigned remaining_space = MIN2(urb_chunks - total_needs, total_wants); + + if (remaining_space > 0) { + for (int i = MESA_SHADER_VERTEX; + total_wants > 0 && i <= MESA_SHADER_TESS_EVAL; i++) { + unsigned additional = (unsigned) + roundf(wants[i] * (((float) remaining_space) / total_wants)); + chunks[i] += additional; + remaining_space -= additional; + total_wants -= additional; + } + + chunks[MESA_SHADER_GEOMETRY] += remaining_space; + } + + /* Sanity check that we haven't over-allocated. */ + unsigned total_chunks = push_constant_chunks; + for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) { + total_chunks += chunks[i]; + } + assert(total_chunks <= urb_chunks); + + /* Finally, compute the number of entries that can fit in the space + * allocated to each stage. + */ + for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) { + entries[i] = chunks[i] * chunk_size_bytes / entry_size_bytes[i]; + + /* Since we rounded up when computing wants[], this may be slightly + * more than the maximum allowed amount, so correct for that. + */ + entries[i] = MIN2(entries[i], devinfo->urb.max_entries[i]); + + /* Ensure that we program a multiple of the granularity. */ + entries[i] = ROUND_DOWN_TO(entries[i], granularity[i]); + + /* Finally, sanity check to make sure we have at least the minimum + * number of entries needed for each stage. + */ + assert(entries[i] >= min_entries[i]); + } + + /* Lay out the URB in pipeline order: push constants, VS, HS, DS, GS. 
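(Continuing the invented numbers above: push constants in chunks 0-3, start[VS] = 4, and each later stage starting where the previous one ends.)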
*/ + start[0] = push_constant_chunks; + for (int i = MESA_SHADER_TESS_CTRL; i <= MESA_SHADER_GEOMETRY; i++) { + start[i] = start[i - 1] + chunks[i - 1]; + } +} diff --git a/src/intel/vulkan/anv_genX.h b/src/intel/vulkan/anv_genX.h index 793717012b6..44868936eb2 100644 --- a/src/intel/vulkan/anv_genX.h +++ b/src/intel/vulkan/anv_genX.h @@ -55,9 +55,9 @@ void genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer); void genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch, + const struct gen_l3_config *l3_config, VkShaderStageFlags active_stages, - unsigned vs_entry_size, unsigned gs_entry_size, - const struct gen_l3_config *l3_config); + const unsigned entry_size[4]); void genX(cmd_buffer_emit_hz_op)(struct anv_cmd_buffer *cmd_buffer, enum blorp_hiz_op op); diff --git a/src/intel/vulkan/genX_blorp_exec.c b/src/intel/vulkan/genX_blorp_exec.c index a705de0d345..a07f370a14e 100644 --- a/src/intel/vulkan/genX_blorp_exec.c +++ b/src/intel/vulkan/genX_blorp_exec.c @@ -124,11 +124,13 @@ blorp_emit_urb_config(struct blorp_batch *batch, unsigned vs_entry_size) struct anv_device *device = batch->blorp->driver_ctx; struct anv_cmd_buffer *cmd_buffer = batch->driver_batch; + const unsigned entry_size[4] = { vs_entry_size, 1, 1, 1 }; + genX(emit_urb_setup)(device, &cmd_buffer->batch, + cmd_buffer->state.current_l3_config, VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, - vs_entry_size, 0, - cmd_buffer->state.current_l3_config); + entry_size); } void genX(blorp_exec)(struct blorp_batch *batch, diff --git a/src/intel/vulkan/genX_gpu_memcpy.c b/src/intel/vulkan/genX_gpu_memcpy.c index 10751b0a35e..eb11c2f0874 100644 --- a/src/intel/vulkan/genX_gpu_memcpy.c +++ b/src/intel/vulkan/genX_gpu_memcpy.c @@ -149,9 +149,11 @@ genX(cmd_buffer_gpu_memcpy)(struct anv_cmd_buffer *cmd_buffer, * allocate space for the VS. Even though one isn't run, we need VUEs to * store the data that VF is going to pass to SOL. */ + const unsigned entry_size[4] = { DIV_ROUND_UP(32, 64), 1, 1, 1 }; + genX(emit_urb_setup)(cmd_buffer->device, &cmd_buffer->batch, - VK_SHADER_STAGE_VERTEX_BIT, DIV_ROUND_UP(32, 64), 0, - cmd_buffer->state.current_l3_config); + cmd_buffer->state.current_l3_config, + VK_SHADER_STAGE_VERTEX_BIT, entry_size); anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_SO_BUFFER), sob) { sob.SOBufferIndex = 0; diff --git a/src/intel/vulkan/genX_pipeline.c b/src/intel/vulkan/genX_pipeline.c index 89c9caf9fea..c54265c656d 100644 --- a/src/intel/vulkan/genX_pipeline.c +++ b/src/intel/vulkan/genX_pipeline.c @@ -183,129 +183,27 @@ emit_vertex_input(struct anv_pipeline *pipeline, void genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch, + const struct gen_l3_config *l3_config, VkShaderStageFlags active_stages, - unsigned vs_size, unsigned gs_size, - const struct gen_l3_config *l3_config) + const unsigned entry_size[4]) { - if (!(active_stages & VK_SHADER_STAGE_VERTEX_BIT)) - vs_size = 1; - - if (!(active_stages & VK_SHADER_STAGE_GEOMETRY_BIT)) - gs_size = 1; - - unsigned vs_entry_size_bytes = vs_size * 64; - unsigned gs_entry_size_bytes = gs_size * 64; - - /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS): - * - * VS Number of URB Entries must be divisible by 8 if the VS URB Entry - * Allocation Size is less than 9 512-bit URB entries. - * - * Similar text exists for GS. - */ - unsigned vs_granularity = (vs_size < 9) ? 8 : 1; - unsigned gs_granularity = (gs_size < 9) ? 8 : 1; - - /* URB allocations must be done in 8k chunks. 
*/ - unsigned chunk_size_bytes = 8192; - - /* Determine the size of the URB in chunks. */ - const unsigned total_urb_size = - gen_get_l3_config_urb_size(&device->info, l3_config); - const unsigned urb_chunks = total_urb_size * 1024 / chunk_size_bytes; - - /* Reserve space for push constants */ - unsigned push_constant_kb; - if (device->info.gen >= 8) - push_constant_kb = 32; - else if (device->info.is_haswell) - push_constant_kb = device->info.gt == 3 ? 32 : 16; - else - push_constant_kb = 16; - - unsigned push_constant_bytes = push_constant_kb * 1024; - unsigned push_constant_chunks = - push_constant_bytes / chunk_size_bytes; - - /* Initially, assign each stage the minimum amount of URB space it needs, - * and make a note of how much additional space it "wants" (the amount of - * additional space it could actually make use of). - */ - - /* VS has a lower limit on the number of URB entries */ - unsigned vs_chunks = - ALIGN(device->info.urb.min_entries[MESA_SHADER_VERTEX] * - vs_entry_size_bytes, chunk_size_bytes) / chunk_size_bytes; - unsigned vs_wants = - ALIGN(device->info.urb.max_entries[MESA_SHADER_VERTEX] * - vs_entry_size_bytes, - chunk_size_bytes) / chunk_size_bytes - vs_chunks; - - unsigned gs_chunks = 0; - unsigned gs_wants = 0; - if (active_stages & VK_SHADER_STAGE_GEOMETRY_BIT) { - /* There are two constraints on the minimum amount of URB space we can - * allocate: - * - * (1) We need room for at least 2 URB entries, since we always operate - * the GS in DUAL_OBJECT mode. - * - * (2) We can't allocate less than nr_gs_entries_granularity. - */ - gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes, - chunk_size_bytes) / chunk_size_bytes; - gs_wants = - ALIGN(device->info.urb.max_entries[MESA_SHADER_GEOMETRY] * - gs_entry_size_bytes, - chunk_size_bytes) / chunk_size_bytes - gs_chunks; - } - - /* There should always be enough URB space to satisfy the minimum - * requirements of each stage. - */ - unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks; - assert(total_needs <= urb_chunks); - - /* Mete out remaining space (if any) in proportion to "wants". */ - unsigned total_wants = vs_wants + gs_wants; - unsigned remaining_space = urb_chunks - total_needs; - if (remaining_space > total_wants) - remaining_space = total_wants; - if (remaining_space > 0) { - unsigned vs_additional = (unsigned) - round(vs_wants * (((double) remaining_space) / total_wants)); - vs_chunks += vs_additional; - remaining_space -= vs_additional; - gs_chunks += remaining_space; - } - - /* Sanity check that we haven't over-allocated. */ - assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks); - - /* Finally, compute the number of entries that can fit in the space - * allocated to each stage. - */ - unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes; - unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes; - - /* Since we rounded up when computing *_wants, this may be slightly more - * than the maximum allowed amount, so correct for that. - */ - nr_vs_entries = MIN2(nr_vs_entries, - device->info.urb.max_entries[MESA_SHADER_VERTEX]); - nr_gs_entries = MIN2(nr_gs_entries, - device->info.urb.max_entries[MESA_SHADER_GEOMETRY]); + const struct gen_device_info *devinfo = &device->info; +#if GEN_IS_HASWELL + const unsigned push_constant_kb = devinfo->gt == 3 ? 32 : 16; +#else + const unsigned push_constant_kb = GEN_GEN >= 8 ? 32 : 16; +#endif - /* Ensure that we program a multiple of the granularity. 
*/ - nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity); - nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity); + const unsigned urb_size_kb = gen_get_l3_config_urb_size(devinfo, l3_config); - /* Finally, sanity check to make sure we have at least the minimum number - * of entries needed for each stage. - */ - assert(nr_vs_entries >= device->info.urb.min_entries[MESA_SHADER_VERTEX]); - if (active_stages & VK_SHADER_STAGE_GEOMETRY_BIT) - assert(nr_gs_entries >= 2); + unsigned entries[4]; + unsigned start[4]; + gen_get_urb_config(devinfo, + 1024 * push_constant_kb, 1024 * urb_size_kb, + active_stages & + VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, + active_stages & VK_SHADER_STAGE_GEOMETRY_BIT, + entry_size, entries, start); #if GEN_GEN == 7 && !GEN_IS_HASWELL /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1: @@ -323,45 +221,31 @@ genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch, } #endif - /* Lay out the URB in the following order: - * - push constants - * - VS - * - GS - */ - anv_batch_emit(batch, GENX(3DSTATE_URB_VS), urb) { - urb.VSURBStartingAddress = push_constant_chunks; - urb.VSURBEntryAllocationSize = vs_size - 1; - urb.VSNumberofURBEntries = nr_vs_entries; - } - - anv_batch_emit(batch, GENX(3DSTATE_URB_HS), urb) { - urb.HSURBStartingAddress = push_constant_chunks; - } - - anv_batch_emit(batch, GENX(3DSTATE_URB_DS), urb) { - urb.DSURBStartingAddress = push_constant_chunks; - } - - anv_batch_emit(batch, GENX(3DSTATE_URB_GS), urb) { - urb.GSURBStartingAddress = push_constant_chunks + vs_chunks; - urb.GSURBEntryAllocationSize = gs_size - 1; - urb.GSNumberofURBEntries = nr_gs_entries; + for (int i = 0; i <= MESA_SHADER_GEOMETRY; i++) { + anv_batch_emit(batch, GENX(3DSTATE_URB_VS), urb) { + urb._3DCommandSubOpcode += i; + urb.VSURBStartingAddress = start[i]; + urb.VSURBEntryAllocationSize = entry_size[i] - 1; + urb.VSNumberofURBEntries = entries[i]; + } } } static inline void emit_urb_setup(struct anv_pipeline *pipeline) { - unsigned vs_entry_size = - (pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT) ? - get_vs_prog_data(pipeline)->base.urb_entry_size : 0; - unsigned gs_entry_size = - (pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT) ? - get_gs_prog_data(pipeline)->base.urb_entry_size : 0; + unsigned entry_size[4]; + for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) { + const struct brw_vue_prog_data *prog_data = + !anv_pipeline_has_stage(pipeline, i) ? NULL : + (const struct brw_vue_prog_data *) pipeline->shaders[i]->prog_data; + + entry_size[i] = prog_data ? prog_data->urb_entry_size : 1; + } genX(emit_urb_setup)(pipeline->device, &pipeline->batch, - pipeline->active_stages, vs_entry_size, gs_entry_size, - pipeline->urb.l3_config); + pipeline->urb.l3_config, + pipeline->active_stages, entry_size); } static void diff --git a/src/mesa/drivers/dri/i965/gen7_urb.c b/src/mesa/drivers/dri/i965/gen7_urb.c index 72b409cdbbc..18474dfaf7e 100644 --- a/src/mesa/drivers/dri/i965/gen7_urb.c +++ b/src/mesa/drivers/dri/i965/gen7_urb.c @@ -27,6 +27,8 @@ #include "brw_state.h" #include "brw_defines.h" +#include "common/gen_l3_config.h" + /** * The following diagram shows how we partition the URB: * @@ -214,146 +216,10 @@ gen7_upload_urb(struct brw_context *brw, unsigned vs_size, brw->urb.hsize = entry_size[MESA_SHADER_TESS_CTRL]; brw->urb.dsize = entry_size[MESA_SHADER_TESS_EVAL]; - /* URB allocations must be done in 8k chunks. */ - unsigned chunk_size_bytes = 8192; - - /* Determine the size of the URB in chunks. 
- * BRW_NEW_URB_SIZE - */ - unsigned urb_chunks = brw->urb.size * 1024 / chunk_size_bytes; - - /* Reserve space for push constants */ - unsigned push_constant_bytes = 1024 * push_size_kB; - unsigned push_constant_chunks = push_constant_bytes / chunk_size_bytes; - - /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS): - * - * VS Number of URB Entries must be divisible by 8 if the VS URB Entry - * Allocation Size is less than 9 512-bit URB entries. - * - * Similar text exists for HS, DS and GS. - */ - unsigned granularity[4]; - for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) { - granularity[i] = (entry_size[i] < 9) ? 8 : 1; - } - - unsigned min_entries[4] = { - /* VS has a lower limit on the number of URB entries. - * - * From the Broadwell PRM, 3DSTATE_URB_VS instruction: - * "When tessellation is enabled, the VS Number of URB Entries must be - * greater than or equal to 192." - */ - [MESA_SHADER_VERTEX] = tess_present && brw->gen == 8 ? - 192 : devinfo->urb.min_entries[MESA_SHADER_VERTEX], - - /* There are two constraints on the minimum amount of URB space we can - * allocate: - * - * (1) We need room for at least 2 URB entries, since we always operate - * the GS in DUAL_OBJECT mode. - * - * (2) We can't allocate less than nr_gs_entries_granularity. - */ - [MESA_SHADER_GEOMETRY] = gs_present ? 2 : 0, - - [MESA_SHADER_TESS_CTRL] = tess_present ? 1 : 0, - - [MESA_SHADER_TESS_EVAL] = tess_present ? - devinfo->urb.min_entries[MESA_SHADER_TESS_EVAL] : 0, - }; - - /* Min VS Entries isn't a multiple of 8 on Cherryview/Broxton; round up. - * Round them all up. - */ - for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) { - min_entries[i] = ALIGN(min_entries[i], granularity[i]); - } - - unsigned entry_size_bytes[4]; - for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) { - entry_size_bytes[i] = 64 * entry_size[i]; - } - - /* Initially, assign each stage the minimum amount of URB space it needs, - * and make a note of how much additional space it "wants" (the amount of - * additional space it could actually make use of). - */ - unsigned chunks[4]; - unsigned wants[4]; - unsigned total_needs = push_constant_chunks; - unsigned total_wants = 0; - - for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) { - if (active[i]) { - chunks[i] = DIV_ROUND_UP(min_entries[i] * entry_size_bytes[i], - chunk_size_bytes); - - wants[i] = - DIV_ROUND_UP(devinfo->urb.max_entries[i] * entry_size_bytes[i], - chunk_size_bytes) - chunks[i]; - } else { - chunks[i] = 0; - wants[i] = 0; - } - - total_needs += chunks[i]; - total_wants += wants[i]; - } - - assert(total_needs <= urb_chunks); - - /* Mete out remaining space (if any) in proportion to "wants". */ - unsigned remaining_space = MIN2(urb_chunks - total_needs, total_wants); - - if (remaining_space > 0) { - for (int i = MESA_SHADER_VERTEX; - total_wants > 0 && i <= MESA_SHADER_TESS_EVAL; i++) { - unsigned additional = (unsigned) - roundf(wants[i] * (((float) remaining_space) / total_wants)); - chunks[i] += additional; - remaining_space -= additional; - total_wants -= additional; - } - - chunks[MESA_SHADER_GEOMETRY] += remaining_space; - } - - /* Sanity check that we haven't over-allocated. */ - unsigned total_chunks = push_constant_chunks; - for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) { - total_chunks += chunks[i]; - } - assert(total_chunks <= urb_chunks); - - /* Finally, compute the number of entries that can fit in the space - * allocated to each stage. 
- */ unsigned entries[4]; - for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) { - entries[i] = chunks[i] * chunk_size_bytes / entry_size_bytes[i]; - - /* Since we rounded up when computing wants[], this may be slightly - * more than the maximum allowed amount, so correct for that. - */ - entries[i] = MIN2(entries[i], devinfo->urb.max_entries[i]); - - /* Ensure that we program a multiple of the granularity. */ - entries[i] = ROUND_DOWN_TO(entries[i], granularity[i]); - - /* Finally, sanity check to make sure we have at least the minimum - * number of entries needed for each stage. - */ - assert(entries[i] >= min_entries[i]); - } - - /* Lay out the URB in pipeline order: push constants, VS, HS, DS, GS. */ unsigned start[4]; - start[0] = push_constant_chunks; - for (int i = MESA_SHADER_TESS_CTRL; i <= MESA_SHADER_GEOMETRY; i++) { - start[i] = start[i - 1] + chunks[i - 1]; - } + gen_get_urb_config(devinfo, 1024 * push_size_kB, 1024 * brw->urb.size, + tess_present, gs_present, entry_size, entries, start); if (brw->gen == 7 && !brw->is_haswell && !brw->is_baytrail) gen7_emit_vs_workaround_flush(brw);
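
To see the proportional "needs vs. wants" split from gen_get_urb_config() in isolation, here is a standalone C sketch. The limits are invented and it models only two stages instead of VS/HS/DS/GS; it illustrates the same arithmetic, and is not Mesa code:

#include <assert.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
   /* Invented figures: a 256kB URB (32 chunks of 8kB each), with 32kB
    * (4 chunks) reserved for push constants, and two stages with
    * made-up minimum/maximum chunk counts.
    */
   const unsigned urb_chunks = 32;
   const unsigned push_chunks = 4;
   const unsigned min_chunks[2] = { 2, 1 };
   const unsigned max_chunks[2] = { 40, 10 };

   unsigned chunks[2], wants[2];
   unsigned total_needs = push_chunks, total_wants = 0;

   /* Give each stage its minimum, and note how much more it could use. */
   for (int i = 0; i < 2; i++) {
      chunks[i] = min_chunks[i];
      wants[i] = max_chunks[i] - chunks[i];
      total_needs += chunks[i];
      total_wants += wants[i];
   }
   assert(total_needs <= urb_chunks);

   /* Mete out the leftover chunks in proportion to "wants"; the last
    * stage (GS in the real helper) absorbs whatever rounding leaves.
    */
   unsigned remaining = urb_chunks - total_needs;
   if (remaining > total_wants)
      remaining = total_wants;
   if (remaining > 0) {
      unsigned extra = (unsigned)
         roundf(wants[0] * ((float) remaining / total_wants));
      chunks[0] += extra;
      chunks[1] += remaining - extra;
   }

   /* Stages are laid out back-to-back after the push constant chunks. */
   printf("stage0: %u chunks at chunk %u; stage1: %u chunks at chunk %u\n",
          chunks[0], push_chunks, chunks[1], push_chunks + chunks[0]);
   return 0;
}

Note also how the Vulkan side now programs all four URB stages with one loop: 3DSTATE_URB_VS, _HS, _DS and _GS have consecutive command sub-opcodes, so bumping urb._3DCommandSubOpcode by the stage index (as in the genX_pipeline.c hunk above) emits the right packet for each stage.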