radeonsi: switch to 3-spaces style
author Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Fri, 27 Mar 2020 18:32:38 +0000 (19:32 +0100)
committer Marge Bot <eric+marge@anholt.net>
Mon, 30 Mar 2020 11:05:52 +0000 (11:05 +0000)
Generated automatically using clang-format and the following config:

AlignAfterOpenBracket: true
AlignConsecutiveMacros: true
AllowAllArgumentsOnNextLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: false
AlwaysBreakAfterReturnType: None
BasedOnStyle: LLVM
BraceWrapping:
  AfterControlStatement: false
  AfterEnum: true
  AfterFunction: true
  AfterStruct: false
  BeforeElse: false
  SplitEmptyFunction: true
BinPackArguments: true
BinPackParameters: true
BreakBeforeBraces: Custom
ColumnLimit: 100
ContinuationIndentWidth: 3
Cpp11BracedListStyle: false
ForEachMacros:
  - LIST_FOR_EACH_ENTRY
  - LIST_FOR_EACH_ENTRY_SAFE
  - util_dynarray_foreach
  - nir_foreach_variable
  - nir_foreach_variable_safe
  - nir_foreach_register
  - nir_foreach_register_safe
  - nir_foreach_use
  - nir_foreach_use_safe
  - nir_foreach_if_use
  - nir_foreach_if_use_safe
  - nir_foreach_def
  - nir_foreach_def_safe
  - nir_foreach_phi_src
  - nir_foreach_phi_src_safe
  - nir_foreach_parallel_copy_entry
  - nir_foreach_instr
  - nir_foreach_instr_reverse
  - nir_foreach_instr_safe
  - nir_foreach_instr_reverse_safe
  - nir_foreach_function
  - nir_foreach_block
  - nir_foreach_block_safe
  - nir_foreach_block_reverse
  - nir_foreach_block_reverse_safe
  - nir_foreach_block_in_cf_node
IncludeBlocks: Regroup
IncludeCategories:
  - Regex:           '<[[:alnum:].]+>'
    Priority:        2
  - Regex:           '.*'
    Priority:        1
IndentWidth: 3
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyExcessCharacter: 100
SpaceAfterCStyleCast: false
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: false
SpacesInContainerLiterals: false
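
For reference, a minimal sketch of how the reformat can be reproduced (assuming
clang-format is installed and the config above is saved as
src/gallium/drivers/radeonsi/.clang-format):

    clang-format -i src/gallium/drivers/radeonsi/*.c src/gallium/drivers/radeonsi/*.h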

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4319>

52 files changed:
src/gallium/drivers/radeonsi/.editorconfig [deleted file]
src/gallium/drivers/radeonsi/cik_sdma.c
src/gallium/drivers/radeonsi/driinfo_radeonsi.h
src/gallium/drivers/radeonsi/gfx10_query.c
src/gallium/drivers/radeonsi/gfx10_shader_ngg.c
src/gallium/drivers/radeonsi/si_blit.c
src/gallium/drivers/radeonsi/si_buffer.c
src/gallium/drivers/radeonsi/si_build_pm4.h
src/gallium/drivers/radeonsi/si_clear.c
src/gallium/drivers/radeonsi/si_compute.c
src/gallium/drivers/radeonsi/si_compute.h
src/gallium/drivers/radeonsi/si_compute_blit.c
src/gallium/drivers/radeonsi/si_compute_prim_discard.c
src/gallium/drivers/radeonsi/si_cp_dma.c
src/gallium/drivers/radeonsi/si_debug.c
src/gallium/drivers/radeonsi/si_debug_options.h
src/gallium/drivers/radeonsi/si_descriptors.c
src/gallium/drivers/radeonsi/si_dma_cs.c
src/gallium/drivers/radeonsi/si_fence.c
src/gallium/drivers/radeonsi/si_get.c
src/gallium/drivers/radeonsi/si_gfx_cs.c
src/gallium/drivers/radeonsi/si_gpu_load.c
src/gallium/drivers/radeonsi/si_perfcounter.c
src/gallium/drivers/radeonsi/si_pipe.c
src/gallium/drivers/radeonsi/si_pipe.h
src/gallium/drivers/radeonsi/si_pm4.c
src/gallium/drivers/radeonsi/si_pm4.h
src/gallium/drivers/radeonsi/si_query.c
src/gallium/drivers/radeonsi/si_query.h
src/gallium/drivers/radeonsi/si_shader.c
src/gallium/drivers/radeonsi/si_shader.h
src/gallium/drivers/radeonsi/si_shader_internal.h
src/gallium/drivers/radeonsi/si_shader_llvm.c
src/gallium/drivers/radeonsi/si_shader_llvm_gs.c
src/gallium/drivers/radeonsi/si_shader_llvm_ps.c
src/gallium/drivers/radeonsi/si_shader_llvm_resources.c
src/gallium/drivers/radeonsi/si_shader_llvm_tess.c
src/gallium/drivers/radeonsi/si_shader_llvm_vs.c
src/gallium/drivers/radeonsi/si_shader_nir.c
src/gallium/drivers/radeonsi/si_shaderlib_tgsi.c
src/gallium/drivers/radeonsi/si_state.c
src/gallium/drivers/radeonsi/si_state.h
src/gallium/drivers/radeonsi/si_state_binning.c
src/gallium/drivers/radeonsi/si_state_draw.c
src/gallium/drivers/radeonsi/si_state_msaa.c
src/gallium/drivers/radeonsi/si_state_shaders.c
src/gallium/drivers/radeonsi/si_state_streamout.c
src/gallium/drivers/radeonsi/si_state_viewport.c
src/gallium/drivers/radeonsi/si_test_dma.c
src/gallium/drivers/radeonsi/si_test_dma_perf.c
src/gallium/drivers/radeonsi/si_texture.c
src/gallium/drivers/radeonsi/si_uvd.c

diff --git a/src/gallium/drivers/radeonsi/.editorconfig b/src/gallium/drivers/radeonsi/.editorconfig
deleted file mode 100644 (file)
index 21a3c7d..0000000
--- a/src/gallium/drivers/radeonsi/.editorconfig
+++ /dev/null
@@ -1,3 +0,0 @@
-[*.{c,h}]
-indent_style = tab
-indent_size = tab
diff --git a/src/gallium/drivers/radeonsi/cik_sdma.c b/src/gallium/drivers/radeonsi/cik_sdma.c
index df8a2fcd577f9fac92e4ef7d4349645c4e70b501..74c289b01349bce4329d249bb6c03f49fe98cd86 100644 (file)
--- a/src/gallium/drivers/radeonsi/cik_sdma.c
+++ b/src/gallium/drivers/radeonsi/cik_sdma.c
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include "sid.h"
 #include "si_pipe.h"
+#include "sid.h"
 
 static unsigned minify_as_blocks(unsigned width, unsigned level, unsigned blk_w)
 {
-       width = u_minify(width, level);
-       return DIV_ROUND_UP(width, blk_w);
+   width = u_minify(width, level);
+   return DIV_ROUND_UP(width, blk_w);
 }
 
-static unsigned encode_tile_info(struct si_context *sctx,
-                                struct si_texture *tex, unsigned level,
-                                bool set_bpp)
+static unsigned encode_tile_info(struct si_context *sctx, struct si_texture *tex, unsigned level,
+                                 bool set_bpp)
 {
-       struct radeon_info *info = &sctx->screen->info;
-       unsigned tile_index = tex->surface.u.legacy.tiling_index[level];
-       unsigned macro_tile_index = tex->surface.u.legacy.macro_tile_index;
-       unsigned tile_mode = info->si_tile_mode_array[tile_index];
-       unsigned macro_tile_mode = info->cik_macrotile_mode_array[macro_tile_index];
-
-       return (set_bpp ? util_logbase2(tex->surface.bpe) : 0) |
-               (G_009910_ARRAY_MODE(tile_mode) << 3) |
-               (G_009910_MICRO_TILE_MODE_NEW(tile_mode) << 8) |
-               /* Non-depth modes don't have TILE_SPLIT set. */
-               ((util_logbase2(tex->surface.u.legacy.tile_split >> 6)) << 11) |
-               (G_009990_BANK_WIDTH(macro_tile_mode) << 15) |
-               (G_009990_BANK_HEIGHT(macro_tile_mode) << 18) |
-               (G_009990_NUM_BANKS(macro_tile_mode) << 21) |
-               (G_009990_MACRO_TILE_ASPECT(macro_tile_mode) << 24) |
-               (G_009910_PIPE_CONFIG(tile_mode) << 26);
+   struct radeon_info *info = &sctx->screen->info;
+   unsigned tile_index = tex->surface.u.legacy.tiling_index[level];
+   unsigned macro_tile_index = tex->surface.u.legacy.macro_tile_index;
+   unsigned tile_mode = info->si_tile_mode_array[tile_index];
+   unsigned macro_tile_mode = info->cik_macrotile_mode_array[macro_tile_index];
+
+   return (set_bpp ? util_logbase2(tex->surface.bpe) : 0) | (G_009910_ARRAY_MODE(tile_mode) << 3) |
+          (G_009910_MICRO_TILE_MODE_NEW(tile_mode) << 8) |
+          /* Non-depth modes don't have TILE_SPLIT set. */
+          ((util_logbase2(tex->surface.u.legacy.tile_split >> 6)) << 11) |
+          (G_009990_BANK_WIDTH(macro_tile_mode) << 15) |
+          (G_009990_BANK_HEIGHT(macro_tile_mode) << 18) |
+          (G_009990_NUM_BANKS(macro_tile_mode) << 21) |
+          (G_009990_MACRO_TILE_ASPECT(macro_tile_mode) << 24) |
+          (G_009910_PIPE_CONFIG(tile_mode) << 26);
 }
 
-
-static bool si_sdma_v4_copy_texture(struct si_context *sctx,
-                                 struct pipe_resource *dst,
-                                 unsigned dst_level,
-                                 unsigned dstx, unsigned dsty, unsigned dstz,
-                                 struct pipe_resource *src,
-                                 unsigned src_level,
-                                 const struct pipe_box *src_box)
+static bool si_sdma_v4_copy_texture(struct si_context *sctx, struct pipe_resource *dst,
+                                    unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz,
+                                    struct pipe_resource *src, unsigned src_level,
+                                    const struct pipe_box *src_box)
 {
-       struct si_texture *ssrc = (struct si_texture*)src;
-       struct si_texture *sdst = (struct si_texture*)dst;
-
-       unsigned bpp = sdst->surface.bpe;
-       uint64_t dst_address = sdst->buffer.gpu_address +
-               sdst->surface.u.gfx9.surf_offset;
-       uint64_t src_address = ssrc->buffer.gpu_address +
-               ssrc->surface.u.gfx9.surf_offset;
-       unsigned dst_pitch = sdst->surface.u.gfx9.surf_pitch;
-       unsigned src_pitch = ssrc->surface.u.gfx9.surf_pitch;
-       uint64_t dst_slice_pitch = ((uint64_t)sdst->surface.u.gfx9.surf_slice_size) / bpp;
-       uint64_t src_slice_pitch = ((uint64_t)ssrc->surface.u.gfx9.surf_slice_size) / bpp;
-       unsigned srcx = src_box->x / ssrc->surface.blk_w;
-       unsigned srcy = src_box->y / ssrc->surface.blk_h;
-       unsigned srcz = src_box->z;
-       unsigned copy_width = DIV_ROUND_UP(src_box->width, ssrc->surface.blk_w);
-       unsigned copy_height = DIV_ROUND_UP(src_box->height, ssrc->surface.blk_h);
-       unsigned copy_depth = src_box->depth;
-       unsigned xalign = MAX2(1, 4 / bpp);
-
-       assert(src_level <= src->last_level);
-       assert(dst_level <= dst->last_level);
-       assert(sdst->surface.u.gfx9.surf_offset +
-              dst_slice_pitch * bpp * (dstz + src_box->depth) <=
-              sdst->buffer.buf->size);
-       assert(ssrc->surface.u.gfx9.surf_offset +
-              src_slice_pitch * bpp * (srcz + src_box->depth) <=
-              ssrc->buffer.buf->size);
-
-       if (!si_prepare_for_dma_blit(sctx, sdst, dst_level, dstx, dsty,
-                                    dstz, ssrc, src_level, src_box))
-               return false;
-
-       dstx /= sdst->surface.blk_w;
-       dsty /= sdst->surface.blk_h;
-
-       if (srcx >= (1 << 14) ||
-           srcy >= (1 << 14) ||
-           srcz >= (1 << 11) ||
-           dstx >= (1 << 14) ||
-           dsty >= (1 << 14) ||
-           dstz >= (1 << 11))
-               return false;
-
-       /* Linear -> linear sub-window copy. */
-       if (ssrc->surface.is_linear &&
-           sdst->surface.is_linear) {
-               struct radeon_cmdbuf *cs = sctx->sdma_cs;
-
-               /* Check if everything fits into the bitfields */
-               if (!(src_pitch <= (1 << 19) &&
-                     dst_pitch <= (1 << 19) &&
-                     src_slice_pitch <= (1 << 28) &&
-                     dst_slice_pitch <= (1 << 28) &&
-                     copy_width <= (1 << 14) &&
-                     copy_height <= (1 << 14) &&
-                     copy_depth <= (1 << 11)))
-                       return false;
-
-               si_need_dma_space(sctx, 13, &sdst->buffer, &ssrc->buffer);
-
-               src_address += ssrc->surface.u.gfx9.offset[src_level];
-               dst_address += sdst->surface.u.gfx9.offset[dst_level];
-
-               /* Check alignments */
-               if ((src_address % 4) != 0 ||
-                   (dst_address % 4) != 0 ||
-                   (src_pitch % xalign) != 0)
-                       return false;
-
-               radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
-                                               CIK_SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
-                           (util_logbase2(bpp) << 29));
-               radeon_emit(cs, src_address);
-               radeon_emit(cs, src_address >> 32);
-               radeon_emit(cs, srcx | (srcy << 16));
-               radeon_emit(cs, srcz | ((src_pitch - 1) << 13));
-               radeon_emit(cs, src_slice_pitch - 1);
-               radeon_emit(cs, dst_address);
-               radeon_emit(cs, dst_address >> 32);
-               radeon_emit(cs, dstx | (dsty << 16));
-               radeon_emit(cs, dstz | ((dst_pitch - 1) << 13));
-               radeon_emit(cs, dst_slice_pitch - 1);
-               radeon_emit(cs, (copy_width - 1) | ((copy_height - 1) << 16));
-               radeon_emit(cs, (copy_depth - 1));
-               return true;
-       }
-
-       /* Linear <-> Tiled sub-window copy */
-       if (ssrc->surface.is_linear != sdst->surface.is_linear) {
-               struct si_texture *tiled = ssrc->surface.is_linear ? sdst : ssrc;
-               struct si_texture *linear = tiled == ssrc ? sdst : ssrc;
-               unsigned tiled_level =  tiled   == ssrc ? src_level : dst_level;
-               unsigned linear_level = linear  == ssrc ? src_level : dst_level;
-               unsigned tiled_x =      tiled   == ssrc ? srcx : dstx;
-               unsigned linear_x =     linear  == ssrc ? srcx : dstx;
-               unsigned tiled_y =      tiled   == ssrc ? srcy : dsty;
-               unsigned linear_y =     linear  == ssrc ? srcy : dsty;
-               unsigned tiled_z =      tiled   == ssrc ? srcz : dstz;
-               unsigned linear_z =     linear  == ssrc ? srcz : dstz;
-               unsigned tiled_width = tiled == ssrc ?
-                       DIV_ROUND_UP(ssrc->buffer.b.b.width0, ssrc->surface.blk_w) :
-                       DIV_ROUND_UP(sdst->buffer.b.b.width0, sdst->surface.blk_w);
-               unsigned tiled_height = tiled == ssrc ?
-                       DIV_ROUND_UP(ssrc->buffer.b.b.height0, ssrc->surface.blk_h) :
-                       DIV_ROUND_UP(sdst->buffer.b.b.height0, sdst->surface.blk_h);
-               unsigned tiled_depth =  tiled   == ssrc ?
-                       ssrc->buffer.b.b.depth0 :
-                       sdst->buffer.b.b.depth0;
-               unsigned linear_pitch = linear  == ssrc ? src_pitch : dst_pitch;
-               unsigned linear_slice_pitch = linear == ssrc ? src_slice_pitch : dst_slice_pitch;
-               uint64_t tiled_address =  tiled  == ssrc ? src_address : dst_address;
-               uint64_t linear_address = linear == ssrc ? src_address : dst_address;
-               struct radeon_cmdbuf *cs = sctx->sdma_cs;
-
-               linear_address += linear->surface.u.gfx9.offset[linear_level];
-
-               /* Check if everything fits into the bitfields */
-               if (!(tiled_x <= (1 << 14) &&
-                     tiled_y <= (1 << 14) &&
-                     tiled_z <= (1 << 11) &&
-                     tiled_width <= (1 << 14) &&
-                     tiled_height <= (1 << 14) &&
-                     tiled_depth <= (1 << 11) &&
-                     tiled->surface.u.gfx9.surf.epitch <= (1 << 16) &&
-                     linear_x <= (1 << 14) &&
-                     linear_y <= (1 << 14) &&
-                     linear_z <= (1 << 11) &&
-                     linear_pitch <= (1 << 14) &&
-                     linear_slice_pitch <= (1 << 28) &&
-                     copy_width <= (1 << 14) &&
-                     copy_height <= (1 << 14) &&
-                     copy_depth <= (1 << 11)))
-                       return false;
-
-               /* Check alignments */
-               if ((tiled_address % 256 != 0) ||
-                   (linear_address % 4 != 0) ||
-                   (linear_pitch % xalign != 0) ||
-                   (linear_slice_pitch % xalign != 0))
-                       return false;
-
-               si_need_dma_space(sctx, 14, &sdst->buffer, &ssrc->buffer);
-
-               radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
-                                               CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) |
-                                               tiled->buffer.b.b.last_level << 20 |
-                                               tiled_level << 24 |
-                                               (linear == sdst ? 1u : 0) << 31);
-               radeon_emit(cs, (uint32_t) tiled_address);
-               radeon_emit(cs, (uint32_t) (tiled_address >> 32));
-               radeon_emit(cs, tiled_x | (tiled_y << 16));
-               radeon_emit(cs, tiled_z | ((tiled_width - 1) << 16));
-               radeon_emit(cs, (tiled_height - 1) | (tiled_depth - 1) << 16);
-               radeon_emit(cs, util_logbase2(bpp) |
-                               tiled->surface.u.gfx9.surf.swizzle_mode << 3 |
-                               tiled->surface.u.gfx9.resource_type << 9 |
-                               tiled->surface.u.gfx9.surf.epitch << 16);
-               radeon_emit(cs, (uint32_t) linear_address);
-               radeon_emit(cs, (uint32_t) (linear_address >> 32));
-               radeon_emit(cs, linear_x | (linear_y << 16));
-               radeon_emit(cs, linear_z | ((linear_pitch - 1) << 16));
-               radeon_emit(cs, linear_slice_pitch - 1);
-               radeon_emit(cs, (copy_width - 1) | ((copy_height - 1) << 16));
-               radeon_emit(cs, (copy_depth - 1));
-               return true;
-       }
-
-       return false;
+   struct si_texture *ssrc = (struct si_texture *)src;
+   struct si_texture *sdst = (struct si_texture *)dst;
+
+   unsigned bpp = sdst->surface.bpe;
+   uint64_t dst_address = sdst->buffer.gpu_address + sdst->surface.u.gfx9.surf_offset;
+   uint64_t src_address = ssrc->buffer.gpu_address + ssrc->surface.u.gfx9.surf_offset;
+   unsigned dst_pitch = sdst->surface.u.gfx9.surf_pitch;
+   unsigned src_pitch = ssrc->surface.u.gfx9.surf_pitch;
+   uint64_t dst_slice_pitch = ((uint64_t)sdst->surface.u.gfx9.surf_slice_size) / bpp;
+   uint64_t src_slice_pitch = ((uint64_t)ssrc->surface.u.gfx9.surf_slice_size) / bpp;
+   unsigned srcx = src_box->x / ssrc->surface.blk_w;
+   unsigned srcy = src_box->y / ssrc->surface.blk_h;
+   unsigned srcz = src_box->z;
+   unsigned copy_width = DIV_ROUND_UP(src_box->width, ssrc->surface.blk_w);
+   unsigned copy_height = DIV_ROUND_UP(src_box->height, ssrc->surface.blk_h);
+   unsigned copy_depth = src_box->depth;
+   unsigned xalign = MAX2(1, 4 / bpp);
+
+   assert(src_level <= src->last_level);
+   assert(dst_level <= dst->last_level);
+   assert(sdst->surface.u.gfx9.surf_offset + dst_slice_pitch * bpp * (dstz + src_box->depth) <=
+          sdst->buffer.buf->size);
+   assert(ssrc->surface.u.gfx9.surf_offset + src_slice_pitch * bpp * (srcz + src_box->depth) <=
+          ssrc->buffer.buf->size);
+
+   if (!si_prepare_for_dma_blit(sctx, sdst, dst_level, dstx, dsty, dstz, ssrc, src_level, src_box))
+      return false;
+
+   dstx /= sdst->surface.blk_w;
+   dsty /= sdst->surface.blk_h;
+
+   if (srcx >= (1 << 14) || srcy >= (1 << 14) || srcz >= (1 << 11) || dstx >= (1 << 14) ||
+       dsty >= (1 << 14) || dstz >= (1 << 11))
+      return false;
+
+   /* Linear -> linear sub-window copy. */
+   if (ssrc->surface.is_linear && sdst->surface.is_linear) {
+      struct radeon_cmdbuf *cs = sctx->sdma_cs;
+
+      /* Check if everything fits into the bitfields */
+      if (!(src_pitch <= (1 << 19) && dst_pitch <= (1 << 19) && src_slice_pitch <= (1 << 28) &&
+            dst_slice_pitch <= (1 << 28) && copy_width <= (1 << 14) && copy_height <= (1 << 14) &&
+            copy_depth <= (1 << 11)))
+         return false;
+
+      si_need_dma_space(sctx, 13, &sdst->buffer, &ssrc->buffer);
+
+      src_address += ssrc->surface.u.gfx9.offset[src_level];
+      dst_address += sdst->surface.u.gfx9.offset[dst_level];
+
+      /* Check alignments */
+      if ((src_address % 4) != 0 || (dst_address % 4) != 0 || (src_pitch % xalign) != 0)
+         return false;
+
+      radeon_emit(
+         cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY, CIK_SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
+                (util_logbase2(bpp) << 29));
+      radeon_emit(cs, src_address);
+      radeon_emit(cs, src_address >> 32);
+      radeon_emit(cs, srcx | (srcy << 16));
+      radeon_emit(cs, srcz | ((src_pitch - 1) << 13));
+      radeon_emit(cs, src_slice_pitch - 1);
+      radeon_emit(cs, dst_address);
+      radeon_emit(cs, dst_address >> 32);
+      radeon_emit(cs, dstx | (dsty << 16));
+      radeon_emit(cs, dstz | ((dst_pitch - 1) << 13));
+      radeon_emit(cs, dst_slice_pitch - 1);
+      radeon_emit(cs, (copy_width - 1) | ((copy_height - 1) << 16));
+      radeon_emit(cs, (copy_depth - 1));
+      return true;
+   }
+
+   /* Linear <-> Tiled sub-window copy */
+   if (ssrc->surface.is_linear != sdst->surface.is_linear) {
+      struct si_texture *tiled = ssrc->surface.is_linear ? sdst : ssrc;
+      struct si_texture *linear = tiled == ssrc ? sdst : ssrc;
+      unsigned tiled_level = tiled == ssrc ? src_level : dst_level;
+      unsigned linear_level = linear == ssrc ? src_level : dst_level;
+      unsigned tiled_x = tiled == ssrc ? srcx : dstx;
+      unsigned linear_x = linear == ssrc ? srcx : dstx;
+      unsigned tiled_y = tiled == ssrc ? srcy : dsty;
+      unsigned linear_y = linear == ssrc ? srcy : dsty;
+      unsigned tiled_z = tiled == ssrc ? srcz : dstz;
+      unsigned linear_z = linear == ssrc ? srcz : dstz;
+      unsigned tiled_width = tiled == ssrc
+                                ? DIV_ROUND_UP(ssrc->buffer.b.b.width0, ssrc->surface.blk_w)
+                                : DIV_ROUND_UP(sdst->buffer.b.b.width0, sdst->surface.blk_w);
+      unsigned tiled_height = tiled == ssrc
+                                 ? DIV_ROUND_UP(ssrc->buffer.b.b.height0, ssrc->surface.blk_h)
+                                 : DIV_ROUND_UP(sdst->buffer.b.b.height0, sdst->surface.blk_h);
+      unsigned tiled_depth = tiled == ssrc ? ssrc->buffer.b.b.depth0 : sdst->buffer.b.b.depth0;
+      unsigned linear_pitch = linear == ssrc ? src_pitch : dst_pitch;
+      unsigned linear_slice_pitch = linear == ssrc ? src_slice_pitch : dst_slice_pitch;
+      uint64_t tiled_address = tiled == ssrc ? src_address : dst_address;
+      uint64_t linear_address = linear == ssrc ? src_address : dst_address;
+      struct radeon_cmdbuf *cs = sctx->sdma_cs;
+
+      linear_address += linear->surface.u.gfx9.offset[linear_level];
+
+      /* Check if everything fits into the bitfields */
+      if (!(tiled_x <= (1 << 14) && tiled_y <= (1 << 14) && tiled_z <= (1 << 11) &&
+            tiled_width <= (1 << 14) && tiled_height <= (1 << 14) && tiled_depth <= (1 << 11) &&
+            tiled->surface.u.gfx9.surf.epitch <= (1 << 16) && linear_x <= (1 << 14) &&
+            linear_y <= (1 << 14) && linear_z <= (1 << 11) && linear_pitch <= (1 << 14) &&
+            linear_slice_pitch <= (1 << 28) && copy_width <= (1 << 14) &&
+            copy_height <= (1 << 14) && copy_depth <= (1 << 11)))
+         return false;
+
+      /* Check alignments */
+      if ((tiled_address % 256 != 0) || (linear_address % 4 != 0) || (linear_pitch % xalign != 0) ||
+          (linear_slice_pitch % xalign != 0))
+         return false;
+
+      si_need_dma_space(sctx, 14, &sdst->buffer, &ssrc->buffer);
+
+      radeon_emit(
+         cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY, CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) |
+                tiled->buffer.b.b.last_level << 20 | tiled_level << 24 |
+                (linear == sdst ? 1u : 0) << 31);
+      radeon_emit(cs, (uint32_t)tiled_address);
+      radeon_emit(cs, (uint32_t)(tiled_address >> 32));
+      radeon_emit(cs, tiled_x | (tiled_y << 16));
+      radeon_emit(cs, tiled_z | ((tiled_width - 1) << 16));
+      radeon_emit(cs, (tiled_height - 1) | (tiled_depth - 1) << 16);
+      radeon_emit(cs, util_logbase2(bpp) | tiled->surface.u.gfx9.surf.swizzle_mode << 3 |
+                         tiled->surface.u.gfx9.resource_type << 9 |
+                         tiled->surface.u.gfx9.surf.epitch << 16);
+      radeon_emit(cs, (uint32_t)linear_address);
+      radeon_emit(cs, (uint32_t)(linear_address >> 32));
+      radeon_emit(cs, linear_x | (linear_y << 16));
+      radeon_emit(cs, linear_z | ((linear_pitch - 1) << 16));
+      radeon_emit(cs, linear_slice_pitch - 1);
+      radeon_emit(cs, (copy_width - 1) | ((copy_height - 1) << 16));
+      radeon_emit(cs, (copy_depth - 1));
+      return true;
+   }
+
+   return false;
 }
 
-static bool cik_sdma_copy_texture(struct si_context *sctx,
-                                 struct pipe_resource *dst,
-                                 unsigned dst_level,
-                                 unsigned dstx, unsigned dsty, unsigned dstz,
-                                 struct pipe_resource *src,
-                                 unsigned src_level,
-                                 const struct pipe_box *src_box)
+static bool cik_sdma_copy_texture(struct si_context *sctx, struct pipe_resource *dst,
+                                  unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz,
+                                  struct pipe_resource *src, unsigned src_level,
+                                  const struct pipe_box *src_box)
 {
-       struct radeon_info *info = &sctx->screen->info;
-       struct si_texture *ssrc = (struct si_texture*)src;
-       struct si_texture *sdst = (struct si_texture*)dst;
-       unsigned bpp = sdst->surface.bpe;
-       uint64_t dst_address = sdst->buffer.gpu_address +
-                              sdst->surface.u.legacy.level[dst_level].offset;
-       uint64_t src_address = ssrc->buffer.gpu_address +
-                              ssrc->surface.u.legacy.level[src_level].offset;
-       unsigned dst_mode = sdst->surface.u.legacy.level[dst_level].mode;
-       unsigned src_mode = ssrc->surface.u.legacy.level[src_level].mode;
-       unsigned dst_tile_index = sdst->surface.u.legacy.tiling_index[dst_level];
-       unsigned src_tile_index = ssrc->surface.u.legacy.tiling_index[src_level];
-       unsigned dst_tile_mode = info->si_tile_mode_array[dst_tile_index];
-       unsigned src_tile_mode = info->si_tile_mode_array[src_tile_index];
-       unsigned dst_micro_mode = G_009910_MICRO_TILE_MODE_NEW(dst_tile_mode);
-       unsigned src_micro_mode = G_009910_MICRO_TILE_MODE_NEW(src_tile_mode);
-       unsigned dst_tile_swizzle = dst_mode == RADEON_SURF_MODE_2D ?
-                                           sdst->surface.tile_swizzle : 0;
-       unsigned src_tile_swizzle = src_mode == RADEON_SURF_MODE_2D ?
-                                           ssrc->surface.tile_swizzle : 0;
-       unsigned dst_pitch = sdst->surface.u.legacy.level[dst_level].nblk_x;
-       unsigned src_pitch = ssrc->surface.u.legacy.level[src_level].nblk_x;
-       uint64_t dst_slice_pitch = ((uint64_t)sdst->surface.u.legacy.level[dst_level].slice_size_dw * 4) / bpp;
-       uint64_t src_slice_pitch = ((uint64_t)ssrc->surface.u.legacy.level[src_level].slice_size_dw * 4) / bpp;
-       unsigned dst_width = minify_as_blocks(sdst->buffer.b.b.width0,
-                                             dst_level, sdst->surface.blk_w);
-       unsigned src_width = minify_as_blocks(ssrc->buffer.b.b.width0,
-                                             src_level, ssrc->surface.blk_w);
-       unsigned dst_height = minify_as_blocks(sdst->buffer.b.b.height0,
-                                              dst_level, sdst->surface.blk_h);
-       unsigned src_height = minify_as_blocks(ssrc->buffer.b.b.height0,
-                                              src_level, ssrc->surface.blk_h);
-       unsigned srcx = src_box->x / ssrc->surface.blk_w;
-       unsigned srcy = src_box->y / ssrc->surface.blk_h;
-       unsigned srcz = src_box->z;
-       unsigned copy_width = DIV_ROUND_UP(src_box->width, ssrc->surface.blk_w);
-       unsigned copy_height = DIV_ROUND_UP(src_box->height, ssrc->surface.blk_h);
-       unsigned copy_depth = src_box->depth;
-
-       assert(src_level <= src->last_level);
-       assert(dst_level <= dst->last_level);
-       assert(sdst->surface.u.legacy.level[dst_level].offset +
-              dst_slice_pitch * bpp * (dstz + src_box->depth) <=
-              sdst->buffer.buf->size);
-       assert(ssrc->surface.u.legacy.level[src_level].offset +
-              src_slice_pitch * bpp * (srcz + src_box->depth) <=
-              ssrc->buffer.buf->size);
-
-       if (!si_prepare_for_dma_blit(sctx, sdst, dst_level, dstx, dsty,
-                                    dstz, ssrc, src_level, src_box))
-               return false;
-
-       dstx /= sdst->surface.blk_w;
-       dsty /= sdst->surface.blk_h;
-
-       if (srcx >= (1 << 14) ||
-           srcy >= (1 << 14) ||
-           srcz >= (1 << 11) ||
-           dstx >= (1 << 14) ||
-           dsty >= (1 << 14) ||
-           dstz >= (1 << 11))
-               return false;
-
-       dst_address |= dst_tile_swizzle << 8;
-       src_address |= src_tile_swizzle << 8;
-
-       /* Linear -> linear sub-window copy. */
-       if (dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED &&
-           src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED &&
-           /* check if everything fits into the bitfields */
-           src_pitch <= (1 << 14) &&
-           dst_pitch <= (1 << 14) &&
-           src_slice_pitch <= (1 << 28) &&
-           dst_slice_pitch <= (1 << 28) &&
-           copy_width <= (1 << 14) &&
-           copy_height <= (1 << 14) &&
-           copy_depth <= (1 << 11) &&
-           /* HW limitation - GFX7: */
-           (sctx->chip_class != GFX7 ||
-            (copy_width < (1 << 14) &&
-             copy_height < (1 << 14) &&
-             copy_depth < (1 << 11))) &&
-           /* HW limitation - some GFX7 parts: */
-           ((sctx->family != CHIP_BONAIRE &&
-             sctx->family != CHIP_KAVERI) ||
-            (srcx + copy_width != (1 << 14) &&
-             srcy + copy_height != (1 << 14)))) {
-               struct radeon_cmdbuf *cs = sctx->sdma_cs;
-
-               si_need_dma_space(sctx, 13, &sdst->buffer, &ssrc->buffer);
-
-               radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
-                                               CIK_SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
-                           (util_logbase2(bpp) << 29));
-               radeon_emit(cs, src_address);
-               radeon_emit(cs, src_address >> 32);
-               radeon_emit(cs, srcx | (srcy << 16));
-               radeon_emit(cs, srcz | ((src_pitch - 1) << 16));
-               radeon_emit(cs, src_slice_pitch - 1);
-               radeon_emit(cs, dst_address);
-               radeon_emit(cs, dst_address >> 32);
-               radeon_emit(cs, dstx | (dsty << 16));
-               radeon_emit(cs, dstz | ((dst_pitch - 1) << 16));
-               radeon_emit(cs, dst_slice_pitch - 1);
-               if (sctx->chip_class == GFX7) {
-                       radeon_emit(cs, copy_width | (copy_height << 16));
-                       radeon_emit(cs, copy_depth);
-               } else {
-                       radeon_emit(cs, (copy_width - 1) | ((copy_height - 1) << 16));
-                       radeon_emit(cs, (copy_depth - 1));
-               }
-               return true;
-       }
-
-       /* Tiled <-> linear sub-window copy. */
-       if ((src_mode >= RADEON_SURF_MODE_1D) != (dst_mode >= RADEON_SURF_MODE_1D)) {
-               struct si_texture *tiled = src_mode >= RADEON_SURF_MODE_1D ? ssrc : sdst;
-               struct si_texture *linear = tiled == ssrc ? sdst : ssrc;
-               unsigned tiled_level =  tiled   == ssrc ? src_level : dst_level;
-               unsigned linear_level = linear  == ssrc ? src_level : dst_level;
-               unsigned tiled_x =      tiled   == ssrc ? srcx : dstx;
-               unsigned linear_x =     linear  == ssrc ? srcx : dstx;
-               unsigned tiled_y =      tiled   == ssrc ? srcy : dsty;
-               unsigned linear_y =     linear  == ssrc ? srcy : dsty;
-               unsigned tiled_z =      tiled   == ssrc ? srcz : dstz;
-               unsigned linear_z =     linear  == ssrc ? srcz : dstz;
-               unsigned tiled_width =  tiled   == ssrc ? src_width : dst_width;
-               unsigned linear_width = linear  == ssrc ? src_width : dst_width;
-               unsigned tiled_pitch =  tiled   == ssrc ? src_pitch : dst_pitch;
-               unsigned linear_pitch = linear  == ssrc ? src_pitch : dst_pitch;
-               unsigned tiled_slice_pitch  = tiled  == ssrc ? src_slice_pitch : dst_slice_pitch;
-               unsigned linear_slice_pitch = linear == ssrc ? src_slice_pitch : dst_slice_pitch;
-               uint64_t tiled_address =  tiled  == ssrc ? src_address : dst_address;
-               uint64_t linear_address = linear == ssrc ? src_address : dst_address;
-               unsigned tiled_micro_mode = tiled == ssrc ? src_micro_mode : dst_micro_mode;
-
-               assert(tiled_pitch % 8 == 0);
-               assert(tiled_slice_pitch % 64 == 0);
-               unsigned pitch_tile_max = tiled_pitch / 8 - 1;
-               unsigned slice_tile_max = tiled_slice_pitch / 64 - 1;
-               unsigned xalign = MAX2(1, 4 / bpp);
-               unsigned copy_width_aligned = copy_width;
-
-               /* If the region ends at the last pixel and is unaligned, we
-                * can copy the remainder of the line that is not visible to
-                * make it aligned.
-                */
-               if (copy_width % xalign != 0 &&
-                   linear_x + copy_width == linear_width &&
-                   tiled_x  + copy_width == tiled_width &&
-                   linear_x + align(copy_width, xalign) <= linear_pitch &&
-                   tiled_x  + align(copy_width, xalign) <= tiled_pitch)
-                       copy_width_aligned = align(copy_width, xalign);
-
-               /* HW limitations. */
-               if ((sctx->family == CHIP_BONAIRE ||
-                    sctx->family == CHIP_KAVERI) &&
-                   linear_pitch - 1 == 0x3fff &&
-                   bpp == 16)
-                       return false;
-
-               if (sctx->chip_class == GFX7 &&
-                   (copy_width_aligned == (1 << 14) ||
-                    copy_height == (1 << 14) ||
-                    copy_depth == (1 << 11)))
-                       return false;
-
-               if ((sctx->family == CHIP_BONAIRE ||
-                    sctx->family == CHIP_KAVERI ||
-                    sctx->family == CHIP_KABINI) &&
-                   (tiled_x + copy_width == (1 << 14) ||
-                    tiled_y + copy_height == (1 << 14)))
-                       return false;
-
-               /* The hw can read outside of the given linear buffer bounds,
-                * or access those pages but not touch the memory in case
-                * of writes. (it still causes a VM fault)
-                *
-                * Out-of-bounds memory access or page directory access must
-                * be prevented.
-                */
-               int64_t start_linear_address, end_linear_address;
-               unsigned granularity;
-
-               /* Deduce the size of reads from the linear surface. */
-               switch (tiled_micro_mode) {
-               case V_009910_ADDR_SURF_DISPLAY_MICRO_TILING:
-                       granularity = bpp == 1 ? 64 / (8*bpp) :
-                                                128 / (8*bpp);
-                       break;
-               case V_009910_ADDR_SURF_THIN_MICRO_TILING:
-               case V_009910_ADDR_SURF_DEPTH_MICRO_TILING:
-                       if (0 /* TODO: THICK microtiling */)
-                               granularity = bpp == 1 ? 32 / (8*bpp) :
-                                             bpp == 2 ? 64 / (8*bpp) :
-                                             bpp <= 8 ? 128 / (8*bpp) :
-                                                        256 / (8*bpp);
-                       else
-                               granularity = bpp <= 2 ? 64 / (8*bpp) :
-                                             bpp <= 8 ? 128 / (8*bpp) :
-                                                        256 / (8*bpp);
-                       break;
-               default:
-                       return false;
-               }
-
-               /* The linear reads start at tiled_x & ~(granularity - 1).
-                * If linear_x == 0 && tiled_x % granularity != 0, the hw
-                * starts reading from an address preceding linear_address!!!
-                */
-               start_linear_address =
-                       linear->surface.u.legacy.level[linear_level].offset +
-                       bpp * (linear_z * linear_slice_pitch +
-                              linear_y * linear_pitch +
-                              linear_x);
-               start_linear_address -= (int)(bpp * (tiled_x % granularity));
-
-               end_linear_address =
-                       linear->surface.u.legacy.level[linear_level].offset +
-                       bpp * ((linear_z + copy_depth - 1) * linear_slice_pitch +
-                              (linear_y + copy_height - 1) * linear_pitch +
-                              (linear_x + copy_width));
-
-               if ((tiled_x + copy_width) % granularity)
-                       end_linear_address += granularity -
-                                             (tiled_x + copy_width) % granularity;
-
-               if (start_linear_address < 0 ||
-                   end_linear_address > linear->surface.surf_size)
-                       return false;
-
-               /* Check requirements. */
-               if (tiled_address % 256 == 0 &&
-                   linear_address % 4 == 0 &&
-                   linear_pitch % xalign == 0 &&
-                   linear_x % xalign == 0 &&
-                   tiled_x % xalign == 0 &&
-                   copy_width_aligned % xalign == 0 &&
-                   tiled_micro_mode != V_009910_ADDR_SURF_ROTATED_MICRO_TILING &&
-                   /* check if everything fits into the bitfields */
-                   tiled->surface.u.legacy.tile_split <= 4096 &&
-                   pitch_tile_max < (1 << 11) &&
-                   slice_tile_max < (1 << 22) &&
-                   linear_pitch <= (1 << 14) &&
-                   linear_slice_pitch <= (1 << 28) &&
-                   copy_width_aligned <= (1 << 14) &&
-                   copy_height <= (1 << 14) &&
-                   copy_depth <= (1 << 11)) {
-                       struct radeon_cmdbuf *cs = sctx->sdma_cs;
-                       uint32_t direction = linear == sdst ? 1u << 31 : 0;
-
-                       si_need_dma_space(sctx, 14, &sdst->buffer, &ssrc->buffer);
-
-                       radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
-                                                       CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) |
-                                       direction);
-                       radeon_emit(cs, tiled_address);
-                       radeon_emit(cs, tiled_address >> 32);
-                       radeon_emit(cs, tiled_x | (tiled_y << 16));
-                       radeon_emit(cs, tiled_z | (pitch_tile_max << 16));
-                       radeon_emit(cs, slice_tile_max);
-                       radeon_emit(cs, encode_tile_info(sctx, tiled, tiled_level, true));
-                       radeon_emit(cs, linear_address);
-                       radeon_emit(cs, linear_address >> 32);
-                       radeon_emit(cs, linear_x | (linear_y << 16));
-                       radeon_emit(cs, linear_z | ((linear_pitch - 1) << 16));
-                       radeon_emit(cs, linear_slice_pitch - 1);
-                       if (sctx->chip_class == GFX7) {
-                               radeon_emit(cs, copy_width_aligned | (copy_height << 16));
-                               radeon_emit(cs, copy_depth);
-                       } else {
-                               radeon_emit(cs, (copy_width_aligned - 1) | ((copy_height - 1) << 16));
-                               radeon_emit(cs, (copy_depth - 1));
-                       }
-                       return true;
-               }
-       }
-
-       /* Tiled -> Tiled sub-window copy. */
-       if (dst_mode >= RADEON_SURF_MODE_1D &&
-           src_mode >= RADEON_SURF_MODE_1D &&
-           /* check if these fit into the bitfields */
-           src_address % 256 == 0 &&
-           dst_address % 256 == 0 &&
-           ssrc->surface.u.legacy.tile_split <= 4096 &&
-           sdst->surface.u.legacy.tile_split <= 4096 &&
-           dstx % 8 == 0 &&
-           dsty % 8 == 0 &&
-           srcx % 8 == 0 &&
-           srcy % 8 == 0 &&
-           /* this can either be equal, or display->rotated (GFX8+ only) */
-           (src_micro_mode == dst_micro_mode ||
-            (sctx->chip_class >= GFX8 &&
-             src_micro_mode == V_009910_ADDR_SURF_DISPLAY_MICRO_TILING &&
-             dst_micro_mode == V_009910_ADDR_SURF_ROTATED_MICRO_TILING))) {
-               assert(src_pitch % 8 == 0);
-               assert(dst_pitch % 8 == 0);
-               assert(src_slice_pitch % 64 == 0);
-               assert(dst_slice_pitch % 64 == 0);
-               unsigned src_pitch_tile_max = src_pitch / 8 - 1;
-               unsigned dst_pitch_tile_max = dst_pitch / 8 - 1;
-               unsigned src_slice_tile_max = src_slice_pitch / 64 - 1;
-               unsigned dst_slice_tile_max = dst_slice_pitch / 64 - 1;
-               unsigned copy_width_aligned = copy_width;
-               unsigned copy_height_aligned = copy_height;
-
-               /* If the region ends at the last pixel and is unaligned, we
-                * can copy the remainder of the tile that is not visible to
-                * make it aligned.
-                */
-               if (copy_width % 8 != 0 &&
-                   srcx + copy_width == src_width &&
-                   dstx + copy_width == dst_width)
-                       copy_width_aligned = align(copy_width, 8);
-
-               if (copy_height % 8 != 0 &&
-                   srcy + copy_height == src_height &&
-                   dsty + copy_height == dst_height)
-                       copy_height_aligned = align(copy_height, 8);
-
-               /* check if these fit into the bitfields */
-               if (src_pitch_tile_max < (1 << 11) &&
-                   dst_pitch_tile_max < (1 << 11) &&
-                   src_slice_tile_max < (1 << 22) &&
-                   dst_slice_tile_max < (1 << 22) &&
-                   copy_width_aligned <= (1 << 14) &&
-                   copy_height_aligned <= (1 << 14) &&
-                   copy_depth <= (1 << 11) &&
-                   copy_width_aligned % 8 == 0 &&
-                   copy_height_aligned % 8 == 0 &&
-                   /* HW limitation - GFX7: */
-                   (sctx->chip_class != GFX7 ||
-                    (copy_width_aligned < (1 << 14) &&
-                     copy_height_aligned < (1 << 14) &&
-                     copy_depth < (1 << 11))) &&
-                   /* HW limitation - some GFX7 parts: */
-                   ((sctx->family != CHIP_BONAIRE &&
-                     sctx->family != CHIP_KAVERI &&
-                     sctx->family != CHIP_KABINI) ||
-                    (srcx + copy_width_aligned != (1 << 14) &&
-                     srcy + copy_height_aligned != (1 << 14) &&
-                     dstx + copy_width != (1 << 14)))) {
-                       struct radeon_cmdbuf *cs = sctx->sdma_cs;
-
-                       si_need_dma_space(sctx, 15, &sdst->buffer, &ssrc->buffer);
-
-                       radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
-                                                       CIK_SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW, 0));
-                       radeon_emit(cs, src_address);
-                       radeon_emit(cs, src_address >> 32);
-                       radeon_emit(cs, srcx | (srcy << 16));
-                       radeon_emit(cs, srcz | (src_pitch_tile_max << 16));
-                       radeon_emit(cs, src_slice_tile_max);
-                       radeon_emit(cs, encode_tile_info(sctx, ssrc, src_level, true));
-                       radeon_emit(cs, dst_address);
-                       radeon_emit(cs, dst_address >> 32);
-                       radeon_emit(cs, dstx | (dsty << 16));
-                       radeon_emit(cs, dstz | (dst_pitch_tile_max << 16));
-                       radeon_emit(cs, dst_slice_tile_max);
-                       radeon_emit(cs, encode_tile_info(sctx, sdst, dst_level, false));
-                       if (sctx->chip_class == GFX7) {
-                               radeon_emit(cs, copy_width_aligned |
-                                               (copy_height_aligned << 16));
-                               radeon_emit(cs, copy_depth);
-                       } else {
-                               radeon_emit(cs, (copy_width_aligned - 8) |
-                                               ((copy_height_aligned - 8) << 16));
-                               radeon_emit(cs, (copy_depth - 1));
-                       }
-                       return true;
-               }
-       }
-
-       return false;
+   struct radeon_info *info = &sctx->screen->info;
+   struct si_texture *ssrc = (struct si_texture *)src;
+   struct si_texture *sdst = (struct si_texture *)dst;
+   unsigned bpp = sdst->surface.bpe;
+   uint64_t dst_address = sdst->buffer.gpu_address + sdst->surface.u.legacy.level[dst_level].offset;
+   uint64_t src_address = ssrc->buffer.gpu_address + ssrc->surface.u.legacy.level[src_level].offset;
+   unsigned dst_mode = sdst->surface.u.legacy.level[dst_level].mode;
+   unsigned src_mode = ssrc->surface.u.legacy.level[src_level].mode;
+   unsigned dst_tile_index = sdst->surface.u.legacy.tiling_index[dst_level];
+   unsigned src_tile_index = ssrc->surface.u.legacy.tiling_index[src_level];
+   unsigned dst_tile_mode = info->si_tile_mode_array[dst_tile_index];
+   unsigned src_tile_mode = info->si_tile_mode_array[src_tile_index];
+   unsigned dst_micro_mode = G_009910_MICRO_TILE_MODE_NEW(dst_tile_mode);
+   unsigned src_micro_mode = G_009910_MICRO_TILE_MODE_NEW(src_tile_mode);
+   unsigned dst_tile_swizzle = dst_mode == RADEON_SURF_MODE_2D ? sdst->surface.tile_swizzle : 0;
+   unsigned src_tile_swizzle = src_mode == RADEON_SURF_MODE_2D ? ssrc->surface.tile_swizzle : 0;
+   unsigned dst_pitch = sdst->surface.u.legacy.level[dst_level].nblk_x;
+   unsigned src_pitch = ssrc->surface.u.legacy.level[src_level].nblk_x;
+   uint64_t dst_slice_pitch =
+      ((uint64_t)sdst->surface.u.legacy.level[dst_level].slice_size_dw * 4) / bpp;
+   uint64_t src_slice_pitch =
+      ((uint64_t)ssrc->surface.u.legacy.level[src_level].slice_size_dw * 4) / bpp;
+   unsigned dst_width = minify_as_blocks(sdst->buffer.b.b.width0, dst_level, sdst->surface.blk_w);
+   unsigned src_width = minify_as_blocks(ssrc->buffer.b.b.width0, src_level, ssrc->surface.blk_w);
+   unsigned dst_height = minify_as_blocks(sdst->buffer.b.b.height0, dst_level, sdst->surface.blk_h);
+   unsigned src_height = minify_as_blocks(ssrc->buffer.b.b.height0, src_level, ssrc->surface.blk_h);
+   unsigned srcx = src_box->x / ssrc->surface.blk_w;
+   unsigned srcy = src_box->y / ssrc->surface.blk_h;
+   unsigned srcz = src_box->z;
+   unsigned copy_width = DIV_ROUND_UP(src_box->width, ssrc->surface.blk_w);
+   unsigned copy_height = DIV_ROUND_UP(src_box->height, ssrc->surface.blk_h);
+   unsigned copy_depth = src_box->depth;
+
+   assert(src_level <= src->last_level);
+   assert(dst_level <= dst->last_level);
+   assert(sdst->surface.u.legacy.level[dst_level].offset +
+             dst_slice_pitch * bpp * (dstz + src_box->depth) <=
+          sdst->buffer.buf->size);
+   assert(ssrc->surface.u.legacy.level[src_level].offset +
+             src_slice_pitch * bpp * (srcz + src_box->depth) <=
+          ssrc->buffer.buf->size);
+
+   if (!si_prepare_for_dma_blit(sctx, sdst, dst_level, dstx, dsty, dstz, ssrc, src_level, src_box))
+      return false;
+
+   dstx /= sdst->surface.blk_w;
+   dsty /= sdst->surface.blk_h;
+
+   if (srcx >= (1 << 14) || srcy >= (1 << 14) || srcz >= (1 << 11) || dstx >= (1 << 14) ||
+       dsty >= (1 << 14) || dstz >= (1 << 11))
+      return false;
+
+   dst_address |= dst_tile_swizzle << 8;
+   src_address |= src_tile_swizzle << 8;
+
+   /* Linear -> linear sub-window copy. */
+   if (dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED && src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED &&
+       /* check if everything fits into the bitfields */
+       src_pitch <= (1 << 14) && dst_pitch <= (1 << 14) && src_slice_pitch <= (1 << 28) &&
+       dst_slice_pitch <= (1 << 28) && copy_width <= (1 << 14) && copy_height <= (1 << 14) &&
+       copy_depth <= (1 << 11) &&
+       /* HW limitation - GFX7: */
+       (sctx->chip_class != GFX7 ||
+        (copy_width < (1 << 14) && copy_height < (1 << 14) && copy_depth < (1 << 11))) &&
+       /* HW limitation - some GFX7 parts: */
+       ((sctx->family != CHIP_BONAIRE && sctx->family != CHIP_KAVERI) ||
+        (srcx + copy_width != (1 << 14) && srcy + copy_height != (1 << 14)))) {
+      struct radeon_cmdbuf *cs = sctx->sdma_cs;
+
+      si_need_dma_space(sctx, 13, &sdst->buffer, &ssrc->buffer);
+
+      radeon_emit(
+         cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY, CIK_SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
+                (util_logbase2(bpp) << 29));
+      radeon_emit(cs, src_address);
+      radeon_emit(cs, src_address >> 32);
+      radeon_emit(cs, srcx | (srcy << 16));
+      radeon_emit(cs, srcz | ((src_pitch - 1) << 16));
+      radeon_emit(cs, src_slice_pitch - 1);
+      radeon_emit(cs, dst_address);
+      radeon_emit(cs, dst_address >> 32);
+      radeon_emit(cs, dstx | (dsty << 16));
+      radeon_emit(cs, dstz | ((dst_pitch - 1) << 16));
+      radeon_emit(cs, dst_slice_pitch - 1);
+      if (sctx->chip_class == GFX7) {
+         radeon_emit(cs, copy_width | (copy_height << 16));
+         radeon_emit(cs, copy_depth);
+      } else {
+         radeon_emit(cs, (copy_width - 1) | ((copy_height - 1) << 16));
+         radeon_emit(cs, (copy_depth - 1));
+      }
+      return true;
+   }
+
+   /* Tiled <-> linear sub-window copy. */
+   if ((src_mode >= RADEON_SURF_MODE_1D) != (dst_mode >= RADEON_SURF_MODE_1D)) {
+      struct si_texture *tiled = src_mode >= RADEON_SURF_MODE_1D ? ssrc : sdst;
+      struct si_texture *linear = tiled == ssrc ? sdst : ssrc;
+      unsigned tiled_level = tiled == ssrc ? src_level : dst_level;
+      unsigned linear_level = linear == ssrc ? src_level : dst_level;
+      unsigned tiled_x = tiled == ssrc ? srcx : dstx;
+      unsigned linear_x = linear == ssrc ? srcx : dstx;
+      unsigned tiled_y = tiled == ssrc ? srcy : dsty;
+      unsigned linear_y = linear == ssrc ? srcy : dsty;
+      unsigned tiled_z = tiled == ssrc ? srcz : dstz;
+      unsigned linear_z = linear == ssrc ? srcz : dstz;
+      unsigned tiled_width = tiled == ssrc ? src_width : dst_width;
+      unsigned linear_width = linear == ssrc ? src_width : dst_width;
+      unsigned tiled_pitch = tiled == ssrc ? src_pitch : dst_pitch;
+      unsigned linear_pitch = linear == ssrc ? src_pitch : dst_pitch;
+      unsigned tiled_slice_pitch = tiled == ssrc ? src_slice_pitch : dst_slice_pitch;
+      unsigned linear_slice_pitch = linear == ssrc ? src_slice_pitch : dst_slice_pitch;
+      uint64_t tiled_address = tiled == ssrc ? src_address : dst_address;
+      uint64_t linear_address = linear == ssrc ? src_address : dst_address;
+      unsigned tiled_micro_mode = tiled == ssrc ? src_micro_mode : dst_micro_mode;
+
+      assert(tiled_pitch % 8 == 0);
+      assert(tiled_slice_pitch % 64 == 0);
+      unsigned pitch_tile_max = tiled_pitch / 8 - 1;
+      unsigned slice_tile_max = tiled_slice_pitch / 64 - 1;
+      unsigned xalign = MAX2(1, 4 / bpp);
+      unsigned copy_width_aligned = copy_width;
+
+      /* If the region ends at the last pixel and is unaligned, we
+       * can copy the remainder of the line that is not visible to
+       * make it aligned.
+       */
+      if (copy_width % xalign != 0 && linear_x + copy_width == linear_width &&
+          tiled_x + copy_width == tiled_width &&
+          linear_x + align(copy_width, xalign) <= linear_pitch &&
+          tiled_x + align(copy_width, xalign) <= tiled_pitch)
+         copy_width_aligned = align(copy_width, xalign);
+
+      /* HW limitations. */
+      if ((sctx->family == CHIP_BONAIRE || sctx->family == CHIP_KAVERI) &&
+          linear_pitch - 1 == 0x3fff && bpp == 16)
+         return false;
+
+      if (sctx->chip_class == GFX7 &&
+          (copy_width_aligned == (1 << 14) || copy_height == (1 << 14) || copy_depth == (1 << 11)))
+         return false;
+
+      if ((sctx->family == CHIP_BONAIRE || sctx->family == CHIP_KAVERI ||
+           sctx->family == CHIP_KABINI) &&
+          (tiled_x + copy_width == (1 << 14) || tiled_y + copy_height == (1 << 14)))
+         return false;
+
+      /* The hw can read outside of the given linear buffer bounds,
+       * or access those pages but not touch the memory in case
+       * of writes. (it still causes a VM fault)
+       *
+       * Out-of-bounds memory access or page directory access must
+       * be prevented.
+       */
+      int64_t start_linear_address, end_linear_address;
+      unsigned granularity;
+
+      /* Deduce the size of reads from the linear surface. */
+      switch (tiled_micro_mode) {
+      case V_009910_ADDR_SURF_DISPLAY_MICRO_TILING:
+         granularity = bpp == 1 ? 64 / (8 * bpp) : 128 / (8 * bpp);
+         break;
+      case V_009910_ADDR_SURF_THIN_MICRO_TILING:
+      case V_009910_ADDR_SURF_DEPTH_MICRO_TILING:
+         if (0 /* TODO: THICK microtiling */)
+            granularity =
+               bpp == 1 ? 32 / (8 * bpp)
+                        : bpp == 2 ? 64 / (8 * bpp) : bpp <= 8 ? 128 / (8 * bpp) : 256 / (8 * bpp);
+         else
+            granularity = bpp <= 2 ? 64 / (8 * bpp) : bpp <= 8 ? 128 / (8 * bpp) : 256 / (8 * bpp);
+         break;
+      default:
+         return false;
+      }
+
+      /* The linear reads start at tiled_x & ~(granularity - 1).
+       * If linear_x == 0 && tiled_x % granularity != 0, the hw
+       * starts reading from an address preceding linear_address!!!
+       */
+      start_linear_address =
+         linear->surface.u.legacy.level[linear_level].offset +
+         bpp * (linear_z * linear_slice_pitch + linear_y * linear_pitch + linear_x);
+      start_linear_address -= (int)(bpp * (tiled_x % granularity));
+
+      end_linear_address =
+         linear->surface.u.legacy.level[linear_level].offset +
+         bpp * ((linear_z + copy_depth - 1) * linear_slice_pitch +
+                (linear_y + copy_height - 1) * linear_pitch + (linear_x + copy_width));
+
+      if ((tiled_x + copy_width) % granularity)
+         end_linear_address += granularity - (tiled_x + copy_width) % granularity;
+
+      if (start_linear_address < 0 || end_linear_address > linear->surface.surf_size)
+         return false;
+
+      /* Check requirements. */
+      if (tiled_address % 256 == 0 && linear_address % 4 == 0 && linear_pitch % xalign == 0 &&
+          linear_x % xalign == 0 && tiled_x % xalign == 0 && copy_width_aligned % xalign == 0 &&
+          tiled_micro_mode != V_009910_ADDR_SURF_ROTATED_MICRO_TILING &&
+          /* check if everything fits into the bitfields */
+          tiled->surface.u.legacy.tile_split <= 4096 && pitch_tile_max < (1 << 11) &&
+          slice_tile_max < (1 << 22) && linear_pitch <= (1 << 14) &&
+          linear_slice_pitch <= (1 << 28) && copy_width_aligned <= (1 << 14) &&
+          copy_height <= (1 << 14) && copy_depth <= (1 << 11)) {
+         struct radeon_cmdbuf *cs = sctx->sdma_cs;
+         uint32_t direction = linear == sdst ? 1u << 31 : 0;
+
+         si_need_dma_space(sctx, 14, &sdst->buffer, &ssrc->buffer);
+
+         radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
+                                         CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) |
+                            direction);
+         radeon_emit(cs, tiled_address);
+         radeon_emit(cs, tiled_address >> 32);
+         radeon_emit(cs, tiled_x | (tiled_y << 16));
+         radeon_emit(cs, tiled_z | (pitch_tile_max << 16));
+         radeon_emit(cs, slice_tile_max);
+         radeon_emit(cs, encode_tile_info(sctx, tiled, tiled_level, true));
+         radeon_emit(cs, linear_address);
+         radeon_emit(cs, linear_address >> 32);
+         radeon_emit(cs, linear_x | (linear_y << 16));
+         radeon_emit(cs, linear_z | ((linear_pitch - 1) << 16));
+         radeon_emit(cs, linear_slice_pitch - 1);
+         if (sctx->chip_class == GFX7) {
+            radeon_emit(cs, copy_width_aligned | (copy_height << 16));
+            radeon_emit(cs, copy_depth);
+         } else {
+            radeon_emit(cs, (copy_width_aligned - 1) | ((copy_height - 1) << 16));
+            radeon_emit(cs, (copy_depth - 1));
+         }
+         return true;
+      }
+   }
+
+   /* Tiled -> Tiled sub-window copy. */
+   if (dst_mode >= RADEON_SURF_MODE_1D && src_mode >= RADEON_SURF_MODE_1D &&
+       /* check if these fit into the bitfields */
+       src_address % 256 == 0 && dst_address % 256 == 0 &&
+       ssrc->surface.u.legacy.tile_split <= 4096 && sdst->surface.u.legacy.tile_split <= 4096 &&
+       dstx % 8 == 0 && dsty % 8 == 0 && srcx % 8 == 0 && srcy % 8 == 0 &&
+       /* this can either be equal, or display->rotated (GFX8+ only) */
+       (src_micro_mode == dst_micro_mode ||
+        (sctx->chip_class >= GFX8 && src_micro_mode == V_009910_ADDR_SURF_DISPLAY_MICRO_TILING &&
+         dst_micro_mode == V_009910_ADDR_SURF_ROTATED_MICRO_TILING))) {
+      assert(src_pitch % 8 == 0);
+      assert(dst_pitch % 8 == 0);
+      assert(src_slice_pitch % 64 == 0);
+      assert(dst_slice_pitch % 64 == 0);
+      unsigned src_pitch_tile_max = src_pitch / 8 - 1;
+      unsigned dst_pitch_tile_max = dst_pitch / 8 - 1;
+      unsigned src_slice_tile_max = src_slice_pitch / 64 - 1;
+      unsigned dst_slice_tile_max = dst_slice_pitch / 64 - 1;
+      unsigned copy_width_aligned = copy_width;
+      unsigned copy_height_aligned = copy_height;
+
+      /* If the region ends at the last pixel and is unaligned, we
+       * can copy the remainder of the tile that is not visible to
+       * make it aligned.
+       */
+      if (copy_width % 8 != 0 && srcx + copy_width == src_width && dstx + copy_width == dst_width)
+         copy_width_aligned = align(copy_width, 8);
+
+      if (copy_height % 8 != 0 && srcy + copy_height == src_height &&
+          dsty + copy_height == dst_height)
+         copy_height_aligned = align(copy_height, 8);
+
+      /* check if these fit into the bitfields */
+      if (src_pitch_tile_max < (1 << 11) && dst_pitch_tile_max < (1 << 11) &&
+          src_slice_tile_max < (1 << 22) && dst_slice_tile_max < (1 << 22) &&
+          copy_width_aligned <= (1 << 14) && copy_height_aligned <= (1 << 14) &&
+          copy_depth <= (1 << 11) && copy_width_aligned % 8 == 0 && copy_height_aligned % 8 == 0 &&
+          /* HW limitation - GFX7: */
+          (sctx->chip_class != GFX7 ||
+           (copy_width_aligned < (1 << 14) && copy_height_aligned < (1 << 14) &&
+            copy_depth < (1 << 11))) &&
+          /* HW limitation - some GFX7 parts: */
+          ((sctx->family != CHIP_BONAIRE && sctx->family != CHIP_KAVERI &&
+            sctx->family != CHIP_KABINI) ||
+           (srcx + copy_width_aligned != (1 << 14) && srcy + copy_height_aligned != (1 << 14) &&
+            dstx + copy_width != (1 << 14)))) {
+         struct radeon_cmdbuf *cs = sctx->sdma_cs;
+
+         si_need_dma_space(sctx, 15, &sdst->buffer, &ssrc->buffer);
+
+         radeon_emit(
+            cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY, CIK_SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW, 0));
+         radeon_emit(cs, src_address);
+         radeon_emit(cs, src_address >> 32);
+         radeon_emit(cs, srcx | (srcy << 16));
+         radeon_emit(cs, srcz | (src_pitch_tile_max << 16));
+         radeon_emit(cs, src_slice_tile_max);
+         radeon_emit(cs, encode_tile_info(sctx, ssrc, src_level, true));
+         radeon_emit(cs, dst_address);
+         radeon_emit(cs, dst_address >> 32);
+         radeon_emit(cs, dstx | (dsty << 16));
+         radeon_emit(cs, dstz | (dst_pitch_tile_max << 16));
+         radeon_emit(cs, dst_slice_tile_max);
+         radeon_emit(cs, encode_tile_info(sctx, sdst, dst_level, false));
+         if (sctx->chip_class == GFX7) {
+            radeon_emit(cs, copy_width_aligned | (copy_height_aligned << 16));
+            radeon_emit(cs, copy_depth);
+         } else {
+            radeon_emit(cs, (copy_width_aligned - 8) | ((copy_height_aligned - 8) << 16));
+            radeon_emit(cs, (copy_depth - 1));
+         }
+         return true;
+      }
+   }
+
+   return false;
 }
 
-static void cik_sdma_copy(struct pipe_context *ctx,
-                         struct pipe_resource *dst,
-                         unsigned dst_level,
-                         unsigned dstx, unsigned dsty, unsigned dstz,
-                         struct pipe_resource *src,
-                         unsigned src_level,
-                         const struct pipe_box *src_box)
+static void cik_sdma_copy(struct pipe_context *ctx, struct pipe_resource *dst, unsigned dst_level,
+                          unsigned dstx, unsigned dsty, unsigned dstz, struct pipe_resource *src,
+                          unsigned src_level, const struct pipe_box *src_box)
 {
-       struct si_context *sctx = (struct si_context *)ctx;
-
-       assert(src->target != PIPE_BUFFER);
-
-       if (!sctx->sdma_cs ||
-           src->flags & PIPE_RESOURCE_FLAG_SPARSE ||
-           dst->flags & PIPE_RESOURCE_FLAG_SPARSE)
-               goto fallback;
-
-       /* SDMA causes corruption. See:
-        *   https://bugs.freedesktop.org/show_bug.cgi?id=110575
-        *   https://bugs.freedesktop.org/show_bug.cgi?id=110635
-        *
-        * Keep SDMA enabled on APUs.
-        */
-       if (sctx->screen->debug_flags & DBG(FORCE_SDMA) ||
-           (!sctx->screen->info.has_dedicated_vram &&
-            !(sctx->screen->debug_flags & DBG(NO_SDMA_COPY_IMAGE)))) {
-               if ((sctx->chip_class == GFX7 || sctx->chip_class == GFX8) &&
-                   cik_sdma_copy_texture(sctx, dst, dst_level, dstx, dsty, dstz,
-                                         src, src_level, src_box))
-                       return;
-               else if (sctx->chip_class == GFX9 &&
-                        si_sdma_v4_copy_texture(sctx, dst, dst_level, dstx, dsty, dstz,
-                                                src, src_level, src_box))
-                       return;
-       }
+   struct si_context *sctx = (struct si_context *)ctx;
+
+   assert(src->target != PIPE_BUFFER);
+
+   if (!sctx->sdma_cs || src->flags & PIPE_RESOURCE_FLAG_SPARSE ||
+       dst->flags & PIPE_RESOURCE_FLAG_SPARSE)
+      goto fallback;
+
+   /* SDMA causes corruption. See:
+    *   https://bugs.freedesktop.org/show_bug.cgi?id=110575
+    *   https://bugs.freedesktop.org/show_bug.cgi?id=110635
+    *
+    * Keep SDMA enabled on APUs.
+    */
+   if (sctx->screen->debug_flags & DBG(FORCE_SDMA) ||
+       (!sctx->screen->info.has_dedicated_vram &&
+        !(sctx->screen->debug_flags & DBG(NO_SDMA_COPY_IMAGE)))) {
+      if ((sctx->chip_class == GFX7 || sctx->chip_class == GFX8) &&
+          cik_sdma_copy_texture(sctx, dst, dst_level, dstx, dsty, dstz, src, src_level, src_box))
+         return;
+      else if (sctx->chip_class == GFX9 && si_sdma_v4_copy_texture(sctx, dst, dst_level, dstx, dsty,
+                                                                   dstz, src, src_level, src_box))
+         return;
+   }
 
 fallback:
-       si_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
-                               src, src_level, src_box);
+   si_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz, src, src_level, src_box);
 }
 
 void cik_init_sdma_functions(struct si_context *sctx)
 {
-       sctx->dma_copy = cik_sdma_copy;
+   sctx->dma_copy = cik_sdma_copy;
 }
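
The tiled->tiled sub-window path above only takes the SDMA fast path when every dimension fits the packet's bitfields. Below is a minimal standalone sketch of the bitfield-limit part of that check, using illustrative names and the same limits as the code above (pitch_tile_max in 11 bits, slice_tile_max in 22 bits, copy extents in 14/11 bits); it is not part of the patch and omits the extra GFX7-specific and alignment conditions.

#include <stdbool.h>
#include <stdio.h>

/* Standalone sketch (illustrative names only): the bitfield-limit part of the
 * tiled->tiled sub-window check. Pitches are in pixels; the packet stores the
 * pitch in 8-pixel units minus one and the slice pitch in 64-pixel (8x8 tile)
 * units minus one.
 */
static bool t2t_dims_fit_packet(unsigned src_pitch, unsigned dst_pitch,
                                unsigned src_slice_pitch, unsigned dst_slice_pitch,
                                unsigned copy_width, unsigned copy_height, unsigned copy_depth)
{
   unsigned src_pitch_tile_max = src_pitch / 8 - 1;
   unsigned dst_pitch_tile_max = dst_pitch / 8 - 1;
   unsigned src_slice_tile_max = src_slice_pitch / 64 - 1;
   unsigned dst_slice_tile_max = dst_slice_pitch / 64 - 1;

   return src_pitch_tile_max < (1 << 11) && dst_pitch_tile_max < (1 << 11) &&
          src_slice_tile_max < (1 << 22) && dst_slice_tile_max < (1 << 22) &&
          copy_width <= (1 << 14) && copy_height <= (1 << 14) && copy_depth <= (1 << 11);
}

int main(void)
{
   /* A full-surface copy of a 1024x1024 layer easily fits the packet fields. */
   printf("%d\n", t2t_dims_fit_packet(1024, 1024, 1024 * 1024, 1024 * 1024, 1024, 1024, 1));
   return 0;
}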
index 59b3d0a6b49b0657a05e8ccf9eacb437791e2151..1570f2860531af96d9a15c11df27c4936d9631b9 100644 (file)
@@ -1,18 +1,18 @@
 // DriConf options specific to radeonsi
 DRI_CONF_SECTION_PERFORMANCE
-    DRI_CONF_ADAPTIVE_SYNC("true")
-    DRI_CONF_RADEONSI_ASSUME_NO_Z_FIGHTS("false")
-    DRI_CONF_RADEONSI_COMMUTATIVE_BLEND_ADD("false")
-    DRI_CONF_RADEONSI_ZERO_ALL_VRAM_ALLOCS("false")
+DRI_CONF_ADAPTIVE_SYNC("true")
+DRI_CONF_RADEONSI_ASSUME_NO_Z_FIGHTS("false")
+DRI_CONF_RADEONSI_COMMUTATIVE_BLEND_ADD("false")
+DRI_CONF_RADEONSI_ZERO_ALL_VRAM_ALLOCS("false")
 DRI_CONF_SECTION_END
 
 DRI_CONF_SECTION_DEBUG
 
 //= BEGIN VERBATIM
-#define OPT_BOOL(name, dflt, description) \
-       DRI_CONF_OPT_BEGIN_B(radeonsi_##name, #dflt) \
-               DRI_CONF_DESC(en, description) \
-       DRI_CONF_OPT_END
+#define OPT_BOOL(name, dflt, description)                                                          \
+   DRI_CONF_OPT_BEGIN_B(radeonsi_##name, #dflt)                                                    \
+   DRI_CONF_DESC(en, description)                                                                  \
+   DRI_CONF_OPT_END
 
 #include "radeonsi/si_debug_options.h"
 //= END VERBATIM
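
The OPT_BOOL wrapper above is an X-macro: si_debug_options.h carries the option list, and each includer redefines OPT_BOOL to expand that list into the shape it needs. A minimal sketch of the pattern follows; the option names and the EXAMPLE_OPTIONS list are hypothetical stand-ins, not the real contents of si_debug_options.h.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical option list standing in for si_debug_options.h. */
#define EXAMPLE_OPTIONS(OPT_BOOL)                                  \
   OPT_BOOL(example_sync_compile, false, "Sync compile example")   \
   OPT_BOOL(example_dump_shaders, false, "Dump shaders example")

/* Expansion 1: declare one bool field per option. */
#define OPT_BOOL_FIELD(name, dflt, desc) bool name;
struct example_debug_options {
   EXAMPLE_OPTIONS(OPT_BOOL_FIELD)
};
#undef OPT_BOOL_FIELD

/* Expansion 2: print each option name, default, and description. */
#define OPT_BOOL_PRINT(name, dflt, desc) printf("%-24s default=%d  %s\n", #name, dflt, desc);

int main(void)
{
   EXAMPLE_OPTIONS(OPT_BOOL_PRINT)
   return 0;
}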
index c0a0bc8ce57fe70edfa7ccd9c8950f196b5dfdf2..aedf5090eed1b9ac7151c86d3fd543c51587155f 100644 (file)
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <stddef.h>
-
 #include "si_pipe.h"
 #include "si_query.h"
+#include "sid.h"
 #include "util/u_memory.h"
 #include "util/u_suballoc.h"
-#include "sid.h"
+
+#include <stddef.h>
 
 /**
  * The query buffer is written to by ESGS NGG shaders with statistics about
  * without additional GPU cost.
  */
 struct gfx10_sh_query_buffer {
-       struct list_head list;
-       struct si_resource *buf;
-       unsigned refcount;
+   struct list_head list;
+   struct si_resource *buf;
+   unsigned refcount;
 
-       /* Offset into the buffer in bytes; points at the first un-emitted entry. */
-       unsigned head;
+   /* Offset into the buffer in bytes; points at the first un-emitted entry. */
+   unsigned head;
 };
 
 /* Memory layout of the query buffer. Must be kept in sync with shaders
@@ -55,469 +55,454 @@ struct gfx10_sh_query_buffer {
  * of all those values unconditionally.
  */
 struct gfx10_sh_query_buffer_mem {
-       struct {
-               uint64_t generated_primitives_start_dummy;
-               uint64_t emitted_primitives_start_dummy;
-               uint64_t generated_primitives;
-               uint64_t emitted_primitives;
-       } stream[4];
-       uint32_t fence; /* bottom-of-pipe fence: set to ~0 when draws have finished */
-       uint32_t pad[31];
+   struct {
+      uint64_t generated_primitives_start_dummy;
+      uint64_t emitted_primitives_start_dummy;
+      uint64_t generated_primitives;
+      uint64_t emitted_primitives;
+   } stream[4];
+   uint32_t fence; /* bottom-of-pipe fence: set to ~0 when draws have finished */
+   uint32_t pad[31];
 };
 
 /* Shader-based queries. */
 struct gfx10_sh_query {
-       struct si_query b;
+   struct si_query b;
 
-       struct gfx10_sh_query_buffer *first;
-       struct gfx10_sh_query_buffer *last;
-       unsigned first_begin;
-       unsigned last_end;
+   struct gfx10_sh_query_buffer *first;
+   struct gfx10_sh_query_buffer *last;
+   unsigned first_begin;
+   unsigned last_end;
 
-       unsigned stream;
+   unsigned stream;
 };
 
 static void emit_shader_query(struct si_context *sctx)
 {
-       assert(!list_is_empty(&sctx->shader_query_buffers));
+   assert(!list_is_empty(&sctx->shader_query_buffers));
 
-       struct gfx10_sh_query_buffer *qbuf = list_last_entry(&sctx->shader_query_buffers,
-                                                            struct gfx10_sh_query_buffer, list);
-       qbuf->head += sizeof(struct gfx10_sh_query_buffer_mem);
+   struct gfx10_sh_query_buffer *qbuf =
+      list_last_entry(&sctx->shader_query_buffers, struct gfx10_sh_query_buffer, list);
+   qbuf->head += sizeof(struct gfx10_sh_query_buffer_mem);
 }
 
 static void gfx10_release_query_buffers(struct si_context *sctx,
-                                       struct gfx10_sh_query_buffer *first,
-                                       struct gfx10_sh_query_buffer *last)
+                                        struct gfx10_sh_query_buffer *first,
+                                        struct gfx10_sh_query_buffer *last)
 {
-       while (first) {
-               struct gfx10_sh_query_buffer *qbuf = first;
-               if (first != last)
-                       first = LIST_ENTRY(struct gfx10_sh_query_buffer, qbuf->list.next, list);
-               else
-                       first = NULL;
-
-               qbuf->refcount--;
-               if (qbuf->refcount)
-                       continue;
-
-               if (qbuf->list.next == &sctx->shader_query_buffers)
-                       continue; /* keep the most recent buffer; it may not be full yet */
-               if (qbuf->list.prev == &sctx->shader_query_buffers)
-                       continue; /* keep the oldest buffer for recycling */
-
-               list_del(&qbuf->list);
-               si_resource_reference(&qbuf->buf, NULL);
-               FREE(qbuf);
-       }
+   while (first) {
+      struct gfx10_sh_query_buffer *qbuf = first;
+      if (first != last)
+         first = LIST_ENTRY(struct gfx10_sh_query_buffer, qbuf->list.next, list);
+      else
+         first = NULL;
+
+      qbuf->refcount--;
+      if (qbuf->refcount)
+         continue;
+
+      if (qbuf->list.next == &sctx->shader_query_buffers)
+         continue; /* keep the most recent buffer; it may not be full yet */
+      if (qbuf->list.prev == &sctx->shader_query_buffers)
+         continue; /* keep the oldest buffer for recycling */
+
+      list_del(&qbuf->list);
+      si_resource_reference(&qbuf->buf, NULL);
+      FREE(qbuf);
+   }
 }
 
 static bool gfx10_alloc_query_buffer(struct si_context *sctx)
 {
-       if (si_is_atom_dirty(sctx, &sctx->atoms.s.shader_query))
-               return true;
-
-       struct gfx10_sh_query_buffer *qbuf = NULL;
-
-       if (!list_is_empty(&sctx->shader_query_buffers)) {
-               qbuf = list_last_entry(&sctx->shader_query_buffers,
-                                      struct gfx10_sh_query_buffer, list);
-               if (qbuf->head + sizeof(struct gfx10_sh_query_buffer_mem) <= qbuf->buf->b.b.width0)
-                       goto success;
-
-               qbuf = list_first_entry(&sctx->shader_query_buffers,
-                                       struct gfx10_sh_query_buffer, list);
-               if (!qbuf->refcount &&
-                   !si_rings_is_buffer_referenced(sctx, qbuf->buf->buf, RADEON_USAGE_READWRITE) &&
-                   sctx->ws->buffer_wait(qbuf->buf->buf, 0, RADEON_USAGE_READWRITE)) {
-                       /* Can immediately re-use the oldest buffer */
-                       list_del(&qbuf->list);
-               } else {
-                       qbuf = NULL;
-               }
-       }
-
-       if (!qbuf) {
-               qbuf = CALLOC_STRUCT(gfx10_sh_query_buffer);
-               if (unlikely(!qbuf))
-                       return false;
-
-               struct si_screen *screen = sctx->screen;
-               unsigned buf_size = MAX2(sizeof(struct gfx10_sh_query_buffer_mem),
-                                        screen->info.min_alloc_size);
-               qbuf->buf = si_resource(
-                       pipe_buffer_create(&screen->b, 0, PIPE_USAGE_STAGING, buf_size));
-               if (unlikely(!qbuf->buf)) {
-                       FREE(qbuf);
-                       return false;
-               }
-       }
-
-       /* The buffer is currently unused by the GPU. Initialize it.
-        *
-        * We need to set the high bit of all the primitive counters for
-        * compatibility with the SET_PREDICATION packet.
-        */
-       uint64_t *results = sctx->ws->buffer_map(qbuf->buf->buf, NULL,
-                                                PIPE_TRANSFER_WRITE |
-                                                PIPE_TRANSFER_UNSYNCHRONIZED);
-       assert(results);
-
-       for (unsigned i = 0, e = qbuf->buf->b.b.width0 / sizeof(struct gfx10_sh_query_buffer_mem);
-            i < e; ++i) {
-               for (unsigned j = 0; j < 16; ++j)
-                       results[32 * i + j] = (uint64_t)1 << 63;
-               results[32 * i + 16] = 0;
-       }
-
-       list_addtail(&qbuf->list, &sctx->shader_query_buffers);
-       qbuf->head = 0;
-       qbuf->refcount = sctx->num_active_shader_queries;
+   if (si_is_atom_dirty(sctx, &sctx->atoms.s.shader_query))
+      return true;
+
+   struct gfx10_sh_query_buffer *qbuf = NULL;
+
+   if (!list_is_empty(&sctx->shader_query_buffers)) {
+      qbuf = list_last_entry(&sctx->shader_query_buffers, struct gfx10_sh_query_buffer, list);
+      if (qbuf->head + sizeof(struct gfx10_sh_query_buffer_mem) <= qbuf->buf->b.b.width0)
+         goto success;
+
+      qbuf = list_first_entry(&sctx->shader_query_buffers, struct gfx10_sh_query_buffer, list);
+      if (!qbuf->refcount &&
+          !si_rings_is_buffer_referenced(sctx, qbuf->buf->buf, RADEON_USAGE_READWRITE) &&
+          sctx->ws->buffer_wait(qbuf->buf->buf, 0, RADEON_USAGE_READWRITE)) {
+         /* Can immediately re-use the oldest buffer */
+         list_del(&qbuf->list);
+      } else {
+         qbuf = NULL;
+      }
+   }
+
+   if (!qbuf) {
+      qbuf = CALLOC_STRUCT(gfx10_sh_query_buffer);
+      if (unlikely(!qbuf))
+         return false;
+
+      struct si_screen *screen = sctx->screen;
+      unsigned buf_size =
+         MAX2(sizeof(struct gfx10_sh_query_buffer_mem), screen->info.min_alloc_size);
+      qbuf->buf = si_resource(pipe_buffer_create(&screen->b, 0, PIPE_USAGE_STAGING, buf_size));
+      if (unlikely(!qbuf->buf)) {
+         FREE(qbuf);
+         return false;
+      }
+   }
+
+   /* The buffer is currently unused by the GPU. Initialize it.
+    *
+    * We need to set the high bit of all the primitive counters for
+    * compatibility with the SET_PREDICATION packet.
+    */
+   uint64_t *results = sctx->ws->buffer_map(qbuf->buf->buf, NULL,
+                                            PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED);
+   assert(results);
+
+   for (unsigned i = 0, e = qbuf->buf->b.b.width0 / sizeof(struct gfx10_sh_query_buffer_mem); i < e;
+        ++i) {
+      for (unsigned j = 0; j < 16; ++j)
+         results[32 * i + j] = (uint64_t)1 << 63;
+      results[32 * i + 16] = 0;
+   }
+
+   list_addtail(&qbuf->list, &sctx->shader_query_buffers);
+   qbuf->head = 0;
+   qbuf->refcount = sctx->num_active_shader_queries;
 
 success:;
-       struct pipe_shader_buffer sbuf;
-       sbuf.buffer = &qbuf->buf->b.b;
-       sbuf.buffer_offset = qbuf->head;
-       sbuf.buffer_size = sizeof(struct gfx10_sh_query_buffer_mem);
-       si_set_rw_shader_buffer(sctx, GFX10_GS_QUERY_BUF, &sbuf);
-       sctx->current_vs_state |= S_VS_STATE_STREAMOUT_QUERY_ENABLED(1);
-
-       si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_query);
-       return true;
+   struct pipe_shader_buffer sbuf;
+   sbuf.buffer = &qbuf->buf->b.b;
+   sbuf.buffer_offset = qbuf->head;
+   sbuf.buffer_size = sizeof(struct gfx10_sh_query_buffer_mem);
+   si_set_rw_shader_buffer(sctx, GFX10_GS_QUERY_BUF, &sbuf);
+   sctx->current_vs_state |= S_VS_STATE_STREAMOUT_QUERY_ENABLED(1);
+
+   si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_query);
+   return true;
 }
 
 static void gfx10_sh_query_destroy(struct si_context *sctx, struct si_query *rquery)
 {
-       struct gfx10_sh_query *query = (struct gfx10_sh_query *)rquery;
-       gfx10_release_query_buffers(sctx, query->first, query->last);
-       FREE(query);
+   struct gfx10_sh_query *query = (struct gfx10_sh_query *)rquery;
+   gfx10_release_query_buffers(sctx, query->first, query->last);
+   FREE(query);
 }
 
 static bool gfx10_sh_query_begin(struct si_context *sctx, struct si_query *rquery)
 {
-       struct gfx10_sh_query *query = (struct gfx10_sh_query *)rquery;
+   struct gfx10_sh_query *query = (struct gfx10_sh_query *)rquery;
 
-       gfx10_release_query_buffers(sctx, query->first, query->last);
-       query->first = query->last = NULL;
+   gfx10_release_query_buffers(sctx, query->first, query->last);
+   query->first = query->last = NULL;
 
-       if (unlikely(!gfx10_alloc_query_buffer(sctx)))
-               return false;
+   if (unlikely(!gfx10_alloc_query_buffer(sctx)))
+      return false;
 
-       query->first = list_last_entry(&sctx->shader_query_buffers,
-                                      struct gfx10_sh_query_buffer, list);
-       query->first_begin = query->first->head;
+   query->first = list_last_entry(&sctx->shader_query_buffers, struct gfx10_sh_query_buffer, list);
+   query->first_begin = query->first->head;
 
-       sctx->num_active_shader_queries++;
-       query->first->refcount++;
+   sctx->num_active_shader_queries++;
+   query->first->refcount++;
 
-       return true;
+   return true;
 }
 
 static bool gfx10_sh_query_end(struct si_context *sctx, struct si_query *rquery)
 {
-       struct gfx10_sh_query *query = (struct gfx10_sh_query *)rquery;
-
-       if (unlikely(!query->first))
-               return false; /* earlier out of memory error */
-
-       query->last = list_last_entry(&sctx->shader_query_buffers,
-                                     struct gfx10_sh_query_buffer, list);
-       query->last_end = query->last->head;
-
-       /* Signal the fence of the previous chunk */
-       if (query->last_end != 0) {
-               uint64_t fence_va = query->last->buf->gpu_address;
-               fence_va += query->last_end - sizeof(struct gfx10_sh_query_buffer_mem);
-               fence_va += offsetof(struct gfx10_sh_query_buffer_mem, fence);
-               si_cp_release_mem(sctx, sctx->gfx_cs,
-                                 V_028A90_BOTTOM_OF_PIPE_TS, 0,
-                                 EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
-                                 EOP_DATA_SEL_VALUE_32BIT,
-                                 query->last->buf, fence_va, 0xffffffff,
-                                 PIPE_QUERY_GPU_FINISHED);
-       }
-
-       sctx->num_active_shader_queries--;
-
-       if (sctx->num_active_shader_queries > 0) {
-               gfx10_alloc_query_buffer(sctx);
-       } else {
-               si_set_rw_shader_buffer(sctx, GFX10_GS_QUERY_BUF, NULL);
-               sctx->current_vs_state &= C_VS_STATE_STREAMOUT_QUERY_ENABLED;
-
-               /* If a query_begin is followed by a query_end without a draw
-                * in-between, we need to clear the atom to ensure that the
-                * next query_begin will re-initialize the shader buffer. */
-               si_set_atom_dirty(sctx, &sctx->atoms.s.shader_query, false);
-       }
-
-       return true;
+   struct gfx10_sh_query *query = (struct gfx10_sh_query *)rquery;
+
+   if (unlikely(!query->first))
+      return false; /* earlier out of memory error */
+
+   query->last = list_last_entry(&sctx->shader_query_buffers, struct gfx10_sh_query_buffer, list);
+   query->last_end = query->last->head;
+
+   /* Signal the fence of the previous chunk */
+   if (query->last_end != 0) {
+      uint64_t fence_va = query->last->buf->gpu_address;
+      fence_va += query->last_end - sizeof(struct gfx10_sh_query_buffer_mem);
+      fence_va += offsetof(struct gfx10_sh_query_buffer_mem, fence);
+      si_cp_release_mem(sctx, sctx->gfx_cs, V_028A90_BOTTOM_OF_PIPE_TS, 0, EOP_DST_SEL_MEM,
+                        EOP_INT_SEL_NONE, EOP_DATA_SEL_VALUE_32BIT, query->last->buf, fence_va,
+                        0xffffffff, PIPE_QUERY_GPU_FINISHED);
+   }
+
+   sctx->num_active_shader_queries--;
+
+   if (sctx->num_active_shader_queries > 0) {
+      gfx10_alloc_query_buffer(sctx);
+   } else {
+      si_set_rw_shader_buffer(sctx, GFX10_GS_QUERY_BUF, NULL);
+      sctx->current_vs_state &= C_VS_STATE_STREAMOUT_QUERY_ENABLED;
+
+      /* If a query_begin is followed by a query_end without a draw
+       * in-between, we need to clear the atom to ensure that the
+       * next query_begin will re-initialize the shader buffer. */
+      si_set_atom_dirty(sctx, &sctx->atoms.s.shader_query, false);
+   }
+
+   return true;
 }
 
 static void gfx10_sh_query_add_result(struct gfx10_sh_query *query,
-                                     struct gfx10_sh_query_buffer_mem *qmem,
-                                     union pipe_query_result *result)
+                                      struct gfx10_sh_query_buffer_mem *qmem,
+                                      union pipe_query_result *result)
 {
-       static const uint64_t mask = ((uint64_t)1 << 63) - 1;
-
-       switch (query->b.type) {
-       case PIPE_QUERY_PRIMITIVES_EMITTED:
-               result->u64 += qmem->stream[query->stream].emitted_primitives & mask;
-               break;
-       case PIPE_QUERY_PRIMITIVES_GENERATED:
-               result->u64 += qmem->stream[query->stream].generated_primitives & mask;
-               break;
-       case PIPE_QUERY_SO_STATISTICS:
-               result->so_statistics.num_primitives_written +=
-                       qmem->stream[query->stream].emitted_primitives & mask;
-               result->so_statistics.primitives_storage_needed +=
-                       qmem->stream[query->stream].generated_primitives & mask;
-               break;
-       case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
-               result->b |= qmem->stream[query->stream].emitted_primitives !=
-                            qmem->stream[query->stream].generated_primitives;
-               break;
-       case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
-               for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
-                       result->b |= qmem->stream[query->stream].emitted_primitives !=
-                                    qmem->stream[query->stream].generated_primitives;
-               }
-               break;
-       default:
-               assert(0);
-       }
+   static const uint64_t mask = ((uint64_t)1 << 63) - 1;
+
+   switch (query->b.type) {
+   case PIPE_QUERY_PRIMITIVES_EMITTED:
+      result->u64 += qmem->stream[query->stream].emitted_primitives & mask;
+      break;
+   case PIPE_QUERY_PRIMITIVES_GENERATED:
+      result->u64 += qmem->stream[query->stream].generated_primitives & mask;
+      break;
+   case PIPE_QUERY_SO_STATISTICS:
+      result->so_statistics.num_primitives_written +=
+         qmem->stream[query->stream].emitted_primitives & mask;
+      result->so_statistics.primitives_storage_needed +=
+         qmem->stream[query->stream].generated_primitives & mask;
+      break;
+   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
+      result->b |= qmem->stream[query->stream].emitted_primitives !=
+                   qmem->stream[query->stream].generated_primitives;
+      break;
+   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
+      for (unsigned stream = 0; stream < SI_MAX_STREAMS; ++stream) {
+         result->b |= qmem->stream[query->stream].emitted_primitives !=
+                      qmem->stream[query->stream].generated_primitives;
+      }
+      break;
+   default:
+      assert(0);
+   }
 }
 
-static bool gfx10_sh_query_get_result(struct si_context *sctx, struct si_query *rquery,
-                                     bool wait, union pipe_query_result *result)
+static bool gfx10_sh_query_get_result(struct si_context *sctx, struct si_query *rquery, bool wait,
+                                      union pipe_query_result *result)
 {
-       struct gfx10_sh_query *query = (struct gfx10_sh_query *)rquery;
+   struct gfx10_sh_query *query = (struct gfx10_sh_query *)rquery;
 
-       util_query_clear_result(result, query->b.type);
+   util_query_clear_result(result, query->b.type);
 
-       if (unlikely(!query->first))
-               return false; /* earlier out of memory error */
-       assert(query->last);
+   if (unlikely(!query->first))
+      return false; /* earlier out of memory error */
+   assert(query->last);
 
-       for (struct gfx10_sh_query_buffer *qbuf = query->last;;
-            qbuf = LIST_ENTRY(struct gfx10_sh_query_buffer, qbuf->list.prev, list)) {
-               unsigned usage = PIPE_TRANSFER_READ |
-                                (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
-               void *map;
+   for (struct gfx10_sh_query_buffer *qbuf = query->last;;
+        qbuf = LIST_ENTRY(struct gfx10_sh_query_buffer, qbuf->list.prev, list)) {
+      unsigned usage = PIPE_TRANSFER_READ | (wait ? 0 : PIPE_TRANSFER_DONTBLOCK);
+      void *map;
 
-               if (rquery->b.flushed)
-                       map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
-               else
-                       map = si_buffer_map_sync_with_rings(sctx, qbuf->buf, usage);
+      if (rquery->b.flushed)
+         map = sctx->ws->buffer_map(qbuf->buf->buf, NULL, usage);
+      else
+         map = si_buffer_map_sync_with_rings(sctx, qbuf->buf, usage);
 
-               if (!map)
-                       return false;
+      if (!map)
+         return false;
 
-               unsigned results_begin = 0;
-               unsigned results_end = qbuf->head;
-               if (qbuf == query->first)
-                       results_begin = query->first_begin;
-               if (qbuf == query->last)
-                       results_end = query->last_end;
+      unsigned results_begin = 0;
+      unsigned results_end = qbuf->head;
+      if (qbuf == query->first)
+         results_begin = query->first_begin;
+      if (qbuf == query->last)
+         results_end = query->last_end;
 
-               while (results_begin != results_end) {
-                       struct gfx10_sh_query_buffer_mem *qmem = map + results_begin;
-                       results_begin += sizeof(*qmem);
+      while (results_begin != results_end) {
+         struct gfx10_sh_query_buffer_mem *qmem = map + results_begin;
+         results_begin += sizeof(*qmem);
 
-                       gfx10_sh_query_add_result(query, qmem, result);
-               }
+         gfx10_sh_query_add_result(query, qmem, result);
+      }
 
-               if (qbuf == query->first)
-                       break;
-       }
+      if (qbuf == query->first)
+         break;
+   }
 
-       return true;
+   return true;
 }
 
-static void gfx10_sh_query_get_result_resource(struct si_context *sctx,
-                                              struct si_query *rquery,
-                                              bool wait,
-                                              enum pipe_query_value_type result_type,
-                                              int index,
-                                              struct pipe_resource *resource,
-                                              unsigned offset)
+static void gfx10_sh_query_get_result_resource(struct si_context *sctx, struct si_query *rquery,
+                                               bool wait, enum pipe_query_value_type result_type,
+                                               int index, struct pipe_resource *resource,
+                                               unsigned offset)
 {
-       struct gfx10_sh_query *query = (struct gfx10_sh_query *)rquery;
-       struct si_qbo_state saved_state = {};
-       struct pipe_resource *tmp_buffer = NULL;
-       unsigned tmp_buffer_offset = 0;
-
-       if (!sctx->sh_query_result_shader) {
-               sctx->sh_query_result_shader = gfx10_create_sh_query_result_cs(sctx);
-               if (!sctx->sh_query_result_shader)
-                       return;
-       }
-
-       if (query->first != query->last) {
-               u_suballocator_alloc(sctx->allocator_zeroed_memory, 16, 16,
-                                    &tmp_buffer_offset, &tmp_buffer);
-               if (!tmp_buffer)
-                       return;
-       }
-
-       si_save_qbo_state(sctx, &saved_state);
-
-       /* Pre-fill the constants configuring the shader behavior. */
-       struct {
-               uint32_t config;
-               uint32_t offset;
-               uint32_t chain;
-               uint32_t result_count;
-       } consts;
-       struct pipe_constant_buffer constant_buffer = {};
-
-       if (index >= 0) {
-               switch (query->b.type) {
-               case PIPE_QUERY_PRIMITIVES_GENERATED:
-                       consts.offset = sizeof(uint32_t) * query->stream;
-                       consts.config = 0;
-                       break;
-               case PIPE_QUERY_PRIMITIVES_EMITTED:
-                       consts.offset = sizeof(uint32_t) * (4 + query->stream);
-                       consts.config = 0;
-                       break;
-               case PIPE_QUERY_SO_STATISTICS:
-                       consts.offset = sizeof(uint32_t) * (4 * index + query->stream);
-                       consts.config = 0;
-                       break;
-               case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
-                       consts.offset = sizeof(uint32_t) * query->stream;
-                       consts.config = 2;
-                       break;
-               case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
-                       consts.offset = 0;
-                       consts.config = 3;
-                       break;
-               default: unreachable("bad query type");
-               }
-       } else {
-               /* Check result availability. */
-               consts.offset = 0;
-               consts.config = 1;
-       }
-
-       if (result_type == PIPE_QUERY_TYPE_I64 || result_type == PIPE_QUERY_TYPE_U64)
-               consts.config |= 8;
-
-       constant_buffer.buffer_size = sizeof(consts);
-       constant_buffer.user_buffer = &consts;
-
-       /* Pre-fill the SSBOs and grid. */
-       struct pipe_shader_buffer ssbo[3];
-       struct pipe_grid_info grid = {};
-
-       ssbo[1].buffer = tmp_buffer;
-       ssbo[1].buffer_offset = tmp_buffer_offset;
-       ssbo[1].buffer_size = 16;
-
-       ssbo[2] = ssbo[1];
-
-       sctx->b.bind_compute_state(&sctx->b, sctx->sh_query_result_shader);
-
-       grid.block[0] = 1;
-       grid.block[1] = 1;
-       grid.block[2] = 1;
-       grid.grid[0] = 1;
-       grid.grid[1] = 1;
-       grid.grid[2] = 1;
-
-       struct gfx10_sh_query_buffer *qbuf = query->first;
-       for (;;) {
-               unsigned begin = qbuf == query->first ? query->first_begin : 0;
-               unsigned end = qbuf == query->last ? query->last_end : qbuf->buf->b.b.width0;
-               if (!end)
-                       continue;
-
-               ssbo[0].buffer = &qbuf->buf->b.b;
-               ssbo[0].buffer_offset = begin;
-               ssbo[0].buffer_size = end - begin;
-
-               consts.result_count = (end - begin) / sizeof(struct gfx10_sh_query_buffer_mem);
-               consts.chain = 0;
-               if (qbuf != query->first)
-                       consts.chain |= 1;
-               if (qbuf != query->last)
-                       consts.chain |= 2;
-
-               if (qbuf == query->last) {
-                       ssbo[2].buffer = resource;
-                       ssbo[2].buffer_offset = offset;
-                       ssbo[2].buffer_size = 8;
-               }
-
-               sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
-               sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo, 0x6);
-
-               if (wait) {
-                       uint64_t va;
-
-                       /* Wait for result availability. Wait only for readiness
-                        * of the last entry, since the fence writes should be
-                        * serialized in the CP.
-                        */
-                       va = qbuf->buf->gpu_address;
-                       va += end - sizeof(struct gfx10_sh_query_buffer_mem);
-                       va += offsetof(struct gfx10_sh_query_buffer_mem, fence);
-
-                       si_cp_wait_mem(sctx, sctx->gfx_cs, va, 0x00000001, 0x00000001, 0);
-               }
-
-               sctx->b.launch_grid(&sctx->b, &grid);
-               sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
-
-               if (qbuf == query->last)
-                       break;
-               qbuf = LIST_ENTRY(struct gfx10_sh_query_buffer, qbuf->list.next, list);
-       }
-
-       si_restore_qbo_state(sctx, &saved_state);
-       pipe_resource_reference(&tmp_buffer, NULL);
+   struct gfx10_sh_query *query = (struct gfx10_sh_query *)rquery;
+   struct si_qbo_state saved_state = {};
+   struct pipe_resource *tmp_buffer = NULL;
+   unsigned tmp_buffer_offset = 0;
+
+   if (!sctx->sh_query_result_shader) {
+      sctx->sh_query_result_shader = gfx10_create_sh_query_result_cs(sctx);
+      if (!sctx->sh_query_result_shader)
+         return;
+   }
+
+   if (query->first != query->last) {
+      u_suballocator_alloc(sctx->allocator_zeroed_memory, 16, 16, &tmp_buffer_offset, &tmp_buffer);
+      if (!tmp_buffer)
+         return;
+   }
+
+   si_save_qbo_state(sctx, &saved_state);
+
+   /* Pre-fill the constants configuring the shader behavior. */
+   struct {
+      uint32_t config;
+      uint32_t offset;
+      uint32_t chain;
+      uint32_t result_count;
+   } consts;
+   struct pipe_constant_buffer constant_buffer = {};
+
+   if (index >= 0) {
+      switch (query->b.type) {
+      case PIPE_QUERY_PRIMITIVES_GENERATED:
+         consts.offset = sizeof(uint32_t) * query->stream;
+         consts.config = 0;
+         break;
+      case PIPE_QUERY_PRIMITIVES_EMITTED:
+         consts.offset = sizeof(uint32_t) * (4 + query->stream);
+         consts.config = 0;
+         break;
+      case PIPE_QUERY_SO_STATISTICS:
+         consts.offset = sizeof(uint32_t) * (4 * index + query->stream);
+         consts.config = 0;
+         break;
+      case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
+         consts.offset = sizeof(uint32_t) * query->stream;
+         consts.config = 2;
+         break;
+      case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
+         consts.offset = 0;
+         consts.config = 3;
+         break;
+      default:
+         unreachable("bad query type");
+      }
+   } else {
+      /* Check result availability. */
+      consts.offset = 0;
+      consts.config = 1;
+   }
+
+   if (result_type == PIPE_QUERY_TYPE_I64 || result_type == PIPE_QUERY_TYPE_U64)
+      consts.config |= 8;
+
+   constant_buffer.buffer_size = sizeof(consts);
+   constant_buffer.user_buffer = &consts;
+
+   /* Pre-fill the SSBOs and grid. */
+   struct pipe_shader_buffer ssbo[3];
+   struct pipe_grid_info grid = {};
+
+   ssbo[1].buffer = tmp_buffer;
+   ssbo[1].buffer_offset = tmp_buffer_offset;
+   ssbo[1].buffer_size = 16;
+
+   ssbo[2] = ssbo[1];
+
+   sctx->b.bind_compute_state(&sctx->b, sctx->sh_query_result_shader);
+
+   grid.block[0] = 1;
+   grid.block[1] = 1;
+   grid.block[2] = 1;
+   grid.grid[0] = 1;
+   grid.grid[1] = 1;
+   grid.grid[2] = 1;
+
+   struct gfx10_sh_query_buffer *qbuf = query->first;
+   for (;;) {
+      unsigned begin = qbuf == query->first ? query->first_begin : 0;
+      unsigned end = qbuf == query->last ? query->last_end : qbuf->buf->b.b.width0;
+      if (!end)
+         continue;
+
+      ssbo[0].buffer = &qbuf->buf->b.b;
+      ssbo[0].buffer_offset = begin;
+      ssbo[0].buffer_size = end - begin;
+
+      consts.result_count = (end - begin) / sizeof(struct gfx10_sh_query_buffer_mem);
+      consts.chain = 0;
+      if (qbuf != query->first)
+         consts.chain |= 1;
+      if (qbuf != query->last)
+         consts.chain |= 2;
+
+      if (qbuf == query->last) {
+         ssbo[2].buffer = resource;
+         ssbo[2].buffer_offset = offset;
+         ssbo[2].buffer_size = 8;
+      }
+
+      sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
+      sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo, 0x6);
+
+      if (wait) {
+         uint64_t va;
+
+         /* Wait for result availability. Wait only for readiness
+          * of the last entry, since the fence writes should be
+          * serialized in the CP.
+          */
+         va = qbuf->buf->gpu_address;
+         va += end - sizeof(struct gfx10_sh_query_buffer_mem);
+         va += offsetof(struct gfx10_sh_query_buffer_mem, fence);
+
+         si_cp_wait_mem(sctx, sctx->gfx_cs, va, 0x00000001, 0x00000001, 0);
+      }
+
+      sctx->b.launch_grid(&sctx->b, &grid);
+      sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
+
+      if (qbuf == query->last)
+         break;
+      qbuf = LIST_ENTRY(struct gfx10_sh_query_buffer, qbuf->list.next, list);
+   }
+
+   si_restore_qbo_state(sctx, &saved_state);
+   pipe_resource_reference(&tmp_buffer, NULL);
 }
 
 static const struct si_query_ops gfx10_sh_query_ops = {
-       .destroy = gfx10_sh_query_destroy,
-       .begin = gfx10_sh_query_begin,
-       .end = gfx10_sh_query_end,
-       .get_result = gfx10_sh_query_get_result,
-       .get_result_resource = gfx10_sh_query_get_result_resource,
+   .destroy = gfx10_sh_query_destroy,
+   .begin = gfx10_sh_query_begin,
+   .end = gfx10_sh_query_end,
+   .get_result = gfx10_sh_query_get_result,
+   .get_result_resource = gfx10_sh_query_get_result_resource,
 };
 
-struct pipe_query *gfx10_sh_query_create(struct si_screen *screen,
-                                        enum pipe_query_type query_type,
-                                        unsigned index)
+struct pipe_query *gfx10_sh_query_create(struct si_screen *screen, enum pipe_query_type query_type,
+                                         unsigned index)
 {
-       struct gfx10_sh_query *query = CALLOC_STRUCT(gfx10_sh_query);
-       if (unlikely(!query))
-               return NULL;
+   struct gfx10_sh_query *query = CALLOC_STRUCT(gfx10_sh_query);
+   if (unlikely(!query))
+      return NULL;
 
-       query->b.ops = &gfx10_sh_query_ops;
-       query->b.type = query_type;
-       query->stream = index;
+   query->b.ops = &gfx10_sh_query_ops;
+   query->b.type = query_type;
+   query->stream = index;
 
-       return (struct pipe_query *)query;
+   return (struct pipe_query *)query;
 }
 
 void gfx10_init_query(struct si_context *sctx)
 {
-       list_inithead(&sctx->shader_query_buffers);
-       sctx->atoms.s.shader_query.emit = emit_shader_query;
+   list_inithead(&sctx->shader_query_buffers);
+   sctx->atoms.s.shader_query.emit = emit_shader_query;
 }
 
 void gfx10_destroy_query(struct si_context *sctx)
 {
-       while (!list_is_empty(&sctx->shader_query_buffers)) {
-               struct gfx10_sh_query_buffer *qbuf =
-                       list_first_entry(&sctx->shader_query_buffers,
-                                        struct gfx10_sh_query_buffer, list);
-               list_del(&qbuf->list);
-
-               assert(!qbuf->refcount);
-               si_resource_reference(&qbuf->buf, NULL);
-               FREE(qbuf);
-       }
+   while (!list_is_empty(&sctx->shader_query_buffers)) {
+      struct gfx10_sh_query_buffer *qbuf =
+         list_first_entry(&sctx->shader_query_buffers, struct gfx10_sh_query_buffer, list);
+      list_del(&qbuf->list);
+
+      assert(!qbuf->refcount);
+      si_resource_reference(&qbuf->buf, NULL);
+      FREE(qbuf);
+   }
 }
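
The read-back path above walks every gfx10_sh_query_buffer_mem entry between first_begin and last_end and accumulates the counters with the top (predication) bit masked off, exactly as gfx10_sh_query_add_result does. A standalone sketch of that accumulation for PIPE_QUERY_PRIMITIVES_EMITTED follows; the struct is copied here purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Local copy of the query buffer entry layout, for illustration only. */
struct sh_query_mem_entry {
   struct {
      uint64_t generated_primitives_start_dummy;
      uint64_t emitted_primitives_start_dummy;
      uint64_t generated_primitives;
      uint64_t emitted_primitives;
   } stream[4];
   uint32_t fence;
   uint32_t pad[31];
};

static uint64_t add_emitted_prims(uint64_t acc, const struct sh_query_mem_entry *qmem,
                                  unsigned stream)
{
   /* The high bit is pre-set for SET_PREDICATION compatibility; strip it. */
   const uint64_t mask = ((uint64_t)1 << 63) - 1;
   return acc + (qmem->stream[stream].emitted_primitives & mask);
}

int main(void)
{
   struct sh_query_mem_entry e = {0};
   e.stream[0].emitted_primitives = ((uint64_t)1 << 63) | 42; /* as the shader would write it */
   printf("%llu\n", (unsigned long long)add_emitted_prims(0, &e, 0)); /* prints 42 */
   return 0;
}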
index 63439733507e2dfe3349b4089c6a180f2ceb3476..06eba4a1f61f2bcab2613771833aa6e373b5c427 100644 (file)
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include "ac_llvm_cull.h"
 #include "si_pipe.h"
 #include "si_shader_internal.h"
-
 #include "sid.h"
-
 #include "util/u_memory.h"
 #include "util/u_prim.h"
-#include "ac_llvm_cull.h"
 
 static LLVMValueRef get_wave_id_in_tg(struct si_shader_context *ctx)
 {
-       return si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
+   return si_unpack_param(ctx, ctx->merged_wave_info, 24, 4);
 }
 
 static LLVMValueRef get_tgsize(struct si_shader_context *ctx)
 {
-       return si_unpack_param(ctx, ctx->merged_wave_info, 28, 4);
+   return si_unpack_param(ctx, ctx->merged_wave_info, 28, 4);
 }
 
 static LLVMValueRef get_thread_id_in_tg(struct si_shader_context *ctx)
 {
-       LLVMBuilderRef builder = ctx->ac.builder;
-       LLVMValueRef tmp;
-       tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
-                          LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
-       return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
+   LLVMBuilderRef builder = ctx->ac.builder;
+   LLVMValueRef tmp;
+   tmp = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
+                      LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, false), "");
+   return LLVMBuildAdd(builder, tmp, ac_get_thread_id(&ctx->ac), "");
 }
 
 static LLVMValueRef ngg_get_vtx_cnt(struct si_shader_context *ctx)
 {
-       return si_unpack_param(ctx, ctx->gs_tg_info, 12, 9);
+   return si_unpack_param(ctx, ctx->gs_tg_info, 12, 9);
 }
 
 static LLVMValueRef ngg_get_prim_cnt(struct si_shader_context *ctx)
 {
-       return si_unpack_param(ctx, ctx->gs_tg_info, 22, 9);
+   return si_unpack_param(ctx, ctx->gs_tg_info, 22, 9);
 }
 
 static LLVMValueRef ngg_get_ordered_id(struct si_shader_context *ctx)
 {
-       return si_unpack_param(ctx, ctx->gs_tg_info, 0, 12);
+   return si_unpack_param(ctx, ctx->gs_tg_info, 0, 12);
 }
 
 static LLVMValueRef ngg_get_query_buf(struct si_shader_context *ctx)
 {
-       LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
+   LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
 
-       return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
-                                    LLVMConstInt(ctx->ac.i32, GFX10_GS_QUERY_BUF, false));
+   return ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
+                                LLVMConstInt(ctx->ac.i32, GFX10_GS_QUERY_BUF, false));
 }
 
 static LLVMValueRef ngg_get_initial_edgeflag(struct si_shader_context *ctx, unsigned index)
 {
-       if (ctx->type == PIPE_SHADER_VERTEX) {
-               LLVMValueRef tmp;
-               tmp = LLVMBuildLShr(ctx->ac.builder,
-                                   ac_get_arg(&ctx->ac, ctx->args.gs_invocation_id),
-                                   LLVMConstInt(ctx->ac.i32, 8 + index, false), "");
-               return LLVMBuildTrunc(ctx->ac.builder, tmp, ctx->ac.i1, "");
-       }
-       return ctx->ac.i1false;
+   if (ctx->type == PIPE_SHADER_VERTEX) {
+      LLVMValueRef tmp;
+      tmp = LLVMBuildLShr(ctx->ac.builder, ac_get_arg(&ctx->ac, ctx->args.gs_invocation_id),
+                          LLVMConstInt(ctx->ac.i32, 8 + index, false), "");
+      return LLVMBuildTrunc(ctx->ac.builder, tmp, ctx->ac.i1, "");
+   }
+   return ctx->ac.i1false;
 }
 
 /**
  * Return the number of vertices as a constant in \p num_vertices,
  * and return a more precise value as LLVMValueRef from the function.
  */
-static LLVMValueRef ngg_get_vertices_per_prim(struct si_shader_context *ctx,
-                                             unsigned *num_vertices)
+static LLVMValueRef ngg_get_vertices_per_prim(struct si_shader_context *ctx, unsigned *num_vertices)
 {
-       const struct si_shader_info *info = &ctx->shader->selector->info;
-
-       if (ctx->type == PIPE_SHADER_VERTEX) {
-               if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
-                       /* Blits always use axis-aligned rectangles with 3 vertices. */
-                       *num_vertices = 3;
-                       return LLVMConstInt(ctx->ac.i32, 3, 0);
-               } else {
-                       /* We always build up all three indices for the prim export
-                        * independent of the primitive type. The additional garbage
-                        * data shouldn't hurt. This number doesn't matter with
-                        * NGG passthrough.
-                        */
-                       *num_vertices = 3;
-
-                       /* Extract OUTPRIM field. */
-                       LLVMValueRef num = si_unpack_param(ctx, ctx->vs_state_bits, 2, 2);
-                       return LLVMBuildAdd(ctx->ac.builder, num, ctx->ac.i32_1, "");
-               }
-       } else {
-               assert(ctx->type == PIPE_SHADER_TESS_EVAL);
-
-               if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
-                       *num_vertices = 1;
-               else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
-                       *num_vertices = 2;
-               else
-                       *num_vertices = 3;
-
-               return LLVMConstInt(ctx->ac.i32, *num_vertices, false);
-       }
+   const struct si_shader_info *info = &ctx->shader->selector->info;
+
+   if (ctx->type == PIPE_SHADER_VERTEX) {
+      if (info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
+         /* Blits always use axis-aligned rectangles with 3 vertices. */
+         *num_vertices = 3;
+         return LLVMConstInt(ctx->ac.i32, 3, 0);
+      } else {
+         /* We always build up all three indices for the prim export
+          * independent of the primitive type. The additional garbage
+          * data shouldn't hurt. This number doesn't matter with
+          * NGG passthrough.
+          */
+         *num_vertices = 3;
+
+         /* Extract OUTPRIM field. */
+         LLVMValueRef num = si_unpack_param(ctx, ctx->vs_state_bits, 2, 2);
+         return LLVMBuildAdd(ctx->ac.builder, num, ctx->ac.i32_1, "");
+      }
+   } else {
+      assert(ctx->type == PIPE_SHADER_TESS_EVAL);
+
+      if (info->properties[TGSI_PROPERTY_TES_POINT_MODE])
+         *num_vertices = 1;
+      else if (info->properties[TGSI_PROPERTY_TES_PRIM_MODE] == PIPE_PRIM_LINES)
+         *num_vertices = 2;
+      else
+         *num_vertices = 3;
+
+      return LLVMConstInt(ctx->ac.i32, *num_vertices, false);
+   }
 }
 
 bool gfx10_ngg_export_prim_early(struct si_shader *shader)
 {
-       struct si_shader_selector *sel = shader->selector;
+   struct si_shader_selector *sel = shader->selector;
 
-       assert(shader->key.as_ngg && !shader->key.as_es);
+   assert(shader->key.as_ngg && !shader->key.as_es);
 
-       return sel->type != PIPE_SHADER_GEOMETRY &&
-              !sel->info.writes_edgeflag;
+   return sel->type != PIPE_SHADER_GEOMETRY && !sel->info.writes_edgeflag;
 }
 
 void gfx10_ngg_build_sendmsg_gs_alloc_req(struct si_shader_context *ctx)
 {
-       ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
-                                     ngg_get_vtx_cnt(ctx),
-                                     ngg_get_prim_cnt(ctx));
+   ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx), ngg_get_vtx_cnt(ctx),
+                                 ngg_get_prim_cnt(ctx));
 }
 
-void gfx10_ngg_build_export_prim(struct si_shader_context *ctx,
-                                LLVMValueRef user_edgeflags[3],
-                                LLVMValueRef prim_passthrough)
+void gfx10_ngg_build_export_prim(struct si_shader_context *ctx, LLVMValueRef user_edgeflags[3],
+                                 LLVMValueRef prim_passthrough)
 {
-       LLVMBuilderRef builder = ctx->ac.builder;
-
-       if (gfx10_is_ngg_passthrough(ctx->shader) ||
-           ctx->shader->key.opt.ngg_culling) {
-               ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 6001);
-               {
-                       struct ac_ngg_prim prim = {};
-
-                       if (prim_passthrough)
-                               prim.passthrough = prim_passthrough;
-                       else
-                               prim.passthrough = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset);
-
-                       /* This is only used with NGG culling, which returns the NGG
-                        * passthrough prim export encoding.
-                        */
-                       if (ctx->shader->selector->info.writes_edgeflag) {
-                               unsigned all_bits_no_edgeflags = ~SI_NGG_PRIM_EDGE_FLAG_BITS;
-                               LLVMValueRef edgeflags = LLVMConstInt(ctx->ac.i32, all_bits_no_edgeflags, 0);
-
-                               unsigned num_vertices;
-                               ngg_get_vertices_per_prim(ctx, &num_vertices);
-
-                               for (unsigned i = 0; i < num_vertices; i++) {
-                                       unsigned shift = 9 + i*10;
-                                       LLVMValueRef edge;
-
-                                       edge = LLVMBuildLoad(builder, user_edgeflags[i], "");
-                                       edge = LLVMBuildZExt(builder, edge, ctx->ac.i32, "");
-                                       edge = LLVMBuildShl(builder, edge, LLVMConstInt(ctx->ac.i32, shift, 0), "");
-                                       edgeflags = LLVMBuildOr(builder, edgeflags, edge, "");
-                               }
-                               prim.passthrough = LLVMBuildAnd(builder, prim.passthrough, edgeflags, "");
-                       }
-
-                       ac_build_export_prim(&ctx->ac, &prim);
-               }
-               ac_build_endif(&ctx->ac, 6001);
-               return;
-       }
-
-       ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 6001);
-       {
-               struct ac_ngg_prim prim = {};
-
-               ngg_get_vertices_per_prim(ctx, &prim.num_vertices);
-
-               prim.isnull = ctx->ac.i1false;
-               prim.index[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16);
-               prim.index[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16);
-               prim.index[2] = si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16);
-
-               for (unsigned i = 0; i < prim.num_vertices; ++i) {
-                       prim.edgeflag[i] = ngg_get_initial_edgeflag(ctx, i);
-
-                       if (ctx->shader->selector->info.writes_edgeflag) {
-                               LLVMValueRef edge;
-
-                               edge = LLVMBuildLoad(ctx->ac.builder, user_edgeflags[i], "");
-                               edge = LLVMBuildAnd(ctx->ac.builder, prim.edgeflag[i], edge, "");
-                               prim.edgeflag[i] = edge;
-                       }
-               }
-
-               ac_build_export_prim(&ctx->ac, &prim);
-       }
-       ac_build_endif(&ctx->ac, 6001);
+   LLVMBuilderRef builder = ctx->ac.builder;
+
+   if (gfx10_is_ngg_passthrough(ctx->shader) || ctx->shader->key.opt.ngg_culling) {
+      ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 6001);
+      {
+         struct ac_ngg_prim prim = {};
+
+         if (prim_passthrough)
+            prim.passthrough = prim_passthrough;
+         else
+            prim.passthrough = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset);
+
+         /* This is only used with NGG culling, which returns the NGG
+          * passthrough prim export encoding.
+          */
+         if (ctx->shader->selector->info.writes_edgeflag) {
+            unsigned all_bits_no_edgeflags = ~SI_NGG_PRIM_EDGE_FLAG_BITS;
+            LLVMValueRef edgeflags = LLVMConstInt(ctx->ac.i32, all_bits_no_edgeflags, 0);
+
+            unsigned num_vertices;
+            ngg_get_vertices_per_prim(ctx, &num_vertices);
+
+            for (unsigned i = 0; i < num_vertices; i++) {
+               unsigned shift = 9 + i * 10;
+               LLVMValueRef edge;
+
+               edge = LLVMBuildLoad(builder, user_edgeflags[i], "");
+               edge = LLVMBuildZExt(builder, edge, ctx->ac.i32, "");
+               edge = LLVMBuildShl(builder, edge, LLVMConstInt(ctx->ac.i32, shift, 0), "");
+               edgeflags = LLVMBuildOr(builder, edgeflags, edge, "");
+            }
+            prim.passthrough = LLVMBuildAnd(builder, prim.passthrough, edgeflags, "");
+         }
+
+         ac_build_export_prim(&ctx->ac, &prim);
+      }
+      ac_build_endif(&ctx->ac, 6001);
+      return;
+   }
+
+   ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 6001);
+   {
+      struct ac_ngg_prim prim = {};
+
+      ngg_get_vertices_per_prim(ctx, &prim.num_vertices);
+
+      prim.isnull = ctx->ac.i1false;
+      prim.index[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16);
+      prim.index[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16);
+      prim.index[2] = si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16);
+
+      for (unsigned i = 0; i < prim.num_vertices; ++i) {
+         prim.edgeflag[i] = ngg_get_initial_edgeflag(ctx, i);
+
+         if (ctx->shader->selector->info.writes_edgeflag) {
+            LLVMValueRef edge;
+
+            edge = LLVMBuildLoad(ctx->ac.builder, user_edgeflags[i], "");
+            edge = LLVMBuildAnd(ctx->ac.builder, prim.edgeflag[i], edge, "");
+            prim.edgeflag[i] = edge;
+         }
+      }
+
+      ac_build_export_prim(&ctx->ac, &prim);
+   }
+   ac_build_endif(&ctx->ac, 6001);
 }
 
-static void build_streamout_vertex(struct si_shader_context *ctx,
-                                  LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
-                                  unsigned stream, LLVMValueRef offset_vtx,
-                                  LLVMValueRef vertexptr)
+static void build_streamout_vertex(struct si_shader_context *ctx, LLVMValueRef *so_buffer,
+                                   LLVMValueRef *wg_offset_dw, unsigned stream,
+                                   LLVMValueRef offset_vtx, LLVMValueRef vertexptr)
 {
-       struct si_shader_info *info = &ctx->shader->selector->info;
-       struct pipe_stream_output_info *so = &ctx->shader->selector->so;
-       LLVMBuilderRef builder = ctx->ac.builder;
-       LLVMValueRef offset[4] = {};
-       LLVMValueRef tmp;
-
-       for (unsigned buffer = 0; buffer < 4; ++buffer) {
-               if (!wg_offset_dw[buffer])
-                       continue;
-
-               tmp = LLVMBuildMul(builder, offset_vtx,
-                                  LLVMConstInt(ctx->ac.i32, so->stride[buffer], false), "");
-               tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
-               offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 2, false), "");
-       }
-
-       for (unsigned i = 0; i < so->num_outputs; ++i) {
-               if (so->output[i].stream != stream)
-                       continue;
-
-               unsigned reg = so->output[i].register_index;
-               struct si_shader_output_values out;
-               out.semantic_name = info->output_semantic_name[reg];
-               out.semantic_index = info->output_semantic_index[reg];
-
-               for (unsigned comp = 0; comp < 4; comp++) {
-                       tmp = ac_build_gep0(&ctx->ac, vertexptr,
-                                           LLVMConstInt(ctx->ac.i32, 4 * reg + comp, false));
-                       out.values[comp] = LLVMBuildLoad(builder, tmp, "");
-                       out.vertex_stream[comp] =
-                               (info->output_streams[reg] >> (2 * comp)) & 3;
-               }
-
-               si_llvm_streamout_store_output(ctx, so_buffer, offset, &so->output[i], &out);
-       }
+   struct si_shader_info *info = &ctx->shader->selector->info;
+   struct pipe_stream_output_info *so = &ctx->shader->selector->so;
+   LLVMBuilderRef builder = ctx->ac.builder;
+   LLVMValueRef offset[4] = {};
+   LLVMValueRef tmp;
+
+   for (unsigned buffer = 0; buffer < 4; ++buffer) {
+      if (!wg_offset_dw[buffer])
+         continue;
+
+      tmp = LLVMBuildMul(builder, offset_vtx, LLVMConstInt(ctx->ac.i32, so->stride[buffer], false),
+                         "");
+      tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
+      offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 2, false), "");
+   }
+
+   for (unsigned i = 0; i < so->num_outputs; ++i) {
+      if (so->output[i].stream != stream)
+         continue;
+
+      unsigned reg = so->output[i].register_index;
+      struct si_shader_output_values out;
+      out.semantic_name = info->output_semantic_name[reg];
+      out.semantic_index = info->output_semantic_index[reg];
+
+      for (unsigned comp = 0; comp < 4; comp++) {
+         tmp = ac_build_gep0(&ctx->ac, vertexptr, LLVMConstInt(ctx->ac.i32, 4 * reg + comp, false));
+         out.values[comp] = LLVMBuildLoad(builder, tmp, "");
+         out.vertex_stream[comp] = (info->output_streams[reg] >> (2 * comp)) & 3;
+      }
+
+      si_llvm_streamout_store_output(ctx, so_buffer, offset, &so->output[i], &out);
+   }
 }
 
 struct ngg_streamout {
-       LLVMValueRef num_vertices;
+   LLVMValueRef num_vertices;
 
-       /* per-thread data */
-       LLVMValueRef prim_enable[4]; /* i1 per stream */
-       LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */
+   /* per-thread data */
+   LLVMValueRef prim_enable[4]; /* i1 per stream */
+   LLVMValueRef vertices[3];    /* [N x i32] addrspace(LDS)* */
 
-       /* Output */
-       LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
+   /* Output */
+   LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
 };
 
 /**
@@ -276,427 +265,405 @@ struct ngg_streamout {
  *
  * Clobbers gs_ngg_scratch[8:].
  */
-static void build_streamout(struct si_shader_context *ctx,
-                           struct ngg_streamout *nggso)
+static void build_streamout(struct si_shader_context *ctx, struct ngg_streamout *nggso)
 {
-       struct si_shader_info *info = &ctx->shader->selector->info;
-       struct pipe_stream_output_info *so = &ctx->shader->selector->so;
-       LLVMBuilderRef builder = ctx->ac.builder;
-       LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
-       LLVMValueRef tid = get_thread_id_in_tg(ctx);
-       LLVMValueRef tmp, tmp2;
-       LLVMValueRef i32_2 = LLVMConstInt(ctx->ac.i32, 2, false);
-       LLVMValueRef i32_4 = LLVMConstInt(ctx->ac.i32, 4, false);
-       LLVMValueRef i32_8 = LLVMConstInt(ctx->ac.i32, 8, false);
-       LLVMValueRef so_buffer[4] = {};
-       unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
-                                       (nggso->vertices[2] ? 1 : 0);
-       LLVMValueRef prim_stride_dw[4] = {};
-       LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->ac.i32);
-       int stream_for_buffer[4] = { -1, -1, -1, -1 };
-       unsigned bufmask_for_stream[4] = {};
-       bool isgs = ctx->type == PIPE_SHADER_GEOMETRY;
-       unsigned scratch_emit_base = isgs ? 4 : 0;
-       LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->ac.i32_0;
-       unsigned scratch_offset_base = isgs ? 8 : 4;
-       LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;
-
-       ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);
-
-       /* Determine the mapping of streamout buffers to vertex streams. */
-       for (unsigned i = 0; i < so->num_outputs; ++i) {
-               unsigned buf = so->output[i].output_buffer;
-               unsigned stream = so->output[i].stream;
-               assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
-               stream_for_buffer[buf] = stream;
-               bufmask_for_stream[stream] |= 1 << buf;
-       }
-
-       for (unsigned buffer = 0; buffer < 4; ++buffer) {
-               if (stream_for_buffer[buffer] == -1)
-                       continue;
-
-               assert(so->stride[buffer]);
-
-               tmp = LLVMConstInt(ctx->ac.i32, so->stride[buffer], false);
-               prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
-               prim_stride_dw_vgpr = ac_build_writelane(
-                       &ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
-                       LLVMConstInt(ctx->ac.i32, buffer, false));
-
-               so_buffer[buffer] = ac_build_load_to_sgpr(
-                       &ctx->ac, buf_ptr,
-                       LLVMConstInt(ctx->ac.i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
-       }
-
-       tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
-       ac_build_ifcc(&ctx->ac, tmp, 5200);
-       {
-               LLVMTypeRef gdsptr = LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS);
-               LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->ac.i32_0, gdsptr, "");
-
-               /* Advance the streamout offsets in GDS. */
-               LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
-               LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
-
-               tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
-               ac_build_ifcc(&ctx->ac, tmp, 5210);
-               {
-                       if (isgs) {
-                               tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
-                               tmp = LLVMBuildLoad(builder, tmp, "");
-                       } else {
-                               tmp = ac_build_writelane(&ctx->ac, ctx->ac.i32_0,
-                                               ngg_get_prim_cnt(ctx), ctx->ac.i32_0);
-                       }
-                       LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);
-
-                       unsigned swizzle[4];
-                       int unused_stream = -1;
-                       for (unsigned stream = 0; stream < 4; ++stream) {
-                               if (!info->num_stream_output_components[stream]) {
-                                       unused_stream = stream;
-                                       break;
-                               }
-                       }
-                       for (unsigned buffer = 0; buffer < 4; ++buffer) {
-                               if (stream_for_buffer[buffer] >= 0) {
-                                       swizzle[buffer] = stream_for_buffer[buffer];
-                               } else {
-                                       assert(unused_stream >= 0);
-                                       swizzle[buffer] = unused_stream;
-                               }
-                       }
-
-                       tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
-                               swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
-                       tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
-
-                       LLVMValueRef args[] = {
-                               LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
-                               tmp,
-                               ctx->ac.i32_0, // ordering
-                               ctx->ac.i32_0, // scope
-                               ctx->ac.i1false, // isVolatile
-                               LLVMConstInt(ctx->ac.i32, 4 << 24, false), // OA index
-                               ctx->ac.i1true, // wave release
-                               ctx->ac.i1true, // wave done
-                       };
-                       tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
-                                                ctx->ac.i32, args, ARRAY_SIZE(args), 0);
-
-                       /* Keep offsets in a VGPR for quick retrieval via readlane by
-                        * the first wave for bounds checking, and also store in LDS
-                        * for retrieval by all waves later. */
-                       LLVMBuildStore(builder, tmp, offsets_vgpr);
-
-                       tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
-                                           scratch_offset_basev, "");
-                       tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
-                       LLVMBuildStore(builder, tmp, tmp2);
-               }
-               ac_build_endif(&ctx->ac, 5210);
-
-               /* Determine the max emit per buffer. This is done via the SALU, in part
-                * because LLVM can't generate divide-by-multiply if we try to do this
-                * via VALU with one lane per buffer.
-                */
-               LLVMValueRef max_emit[4] = {};
-               for (unsigned buffer = 0; buffer < 4; ++buffer) {
-                       if (stream_for_buffer[buffer] == -1)
-                               continue;
-
-                       LLVMValueRef bufsize_dw =
-                               LLVMBuildLShr(builder,
-                                       LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
-                                       i32_2, "");
-
-                       tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
-                       LLVMValueRef offset_dw =
-                               ac_build_readlane(&ctx->ac, tmp,
-                                               LLVMConstInt(ctx->ac.i32, buffer, false));
-
-                       tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
-                       tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");
-
-                       tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
-                       max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->ac.i32_0, tmp, "");
-               }
-
-               /* Determine the number of emitted primitives per stream and fixup the
-                * GDS counter if necessary.
-                *
-                * This is complicated by the fact that a single stream can emit to
-                * multiple buffers (but luckily not vice versa).
-                */
-               LLVMValueRef emit_vgpr = ctx->ac.i32_0;
-
-               for (unsigned stream = 0; stream < 4; ++stream) {
-                       if (!info->num_stream_output_components[stream])
-                               continue;
-
-                       tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
-                       LLVMValueRef generated =
-                               ac_build_readlane(&ctx->ac, tmp,
-                                                 LLVMConstInt(ctx->ac.i32, stream, false));
-
-                       LLVMValueRef emit = generated;
-                       for (unsigned buffer = 0; buffer < 4; ++buffer) {
-                               if (stream_for_buffer[buffer] == stream)
-                                       emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
-                       }
-
-                       emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
-                                                      LLVMConstInt(ctx->ac.i32, stream, false));
-
-                       /* Fixup the offset using a plain GDS atomic if we overflowed. */
-                       tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
-                       ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
-                       tmp = LLVMBuildLShr(builder,
-                                           LLVMConstInt(ctx->ac.i32, bufmask_for_stream[stream], false),
-                                           ac_get_thread_id(&ctx->ac), "");
-                       tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
-                       ac_build_ifcc(&ctx->ac, tmp, 5222);
-                       {
-                               tmp = LLVMBuildSub(builder, generated, emit, "");
-                               tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
-                               tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
-                               LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
-                                                  LLVMAtomicOrderingMonotonic, false);
-                       }
-                       ac_build_endif(&ctx->ac, 5222);
-                       ac_build_endif(&ctx->ac, 5221);
-               }
-
-               tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
-               ac_build_ifcc(&ctx->ac, tmp, 5225);
-               {
-                       tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
-                                          scratch_emit_basev, "");
-                       tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
-                       LLVMBuildStore(builder, emit_vgpr, tmp);
-               }
-               ac_build_endif(&ctx->ac, 5225);
-       }
-       ac_build_endif(&ctx->ac, 5200);
-
-       /* Determine the workgroup-relative per-thread / primitive offset into
-        * the streamout buffers */
-       struct ac_wg_scan primemit_scan[4] = {};
-
-       if (isgs) {
-               for (unsigned stream = 0; stream < 4; ++stream) {
-                       if (!info->num_stream_output_components[stream])
-                               continue;
-
-                       primemit_scan[stream].enable_exclusive = true;
-                       primemit_scan[stream].op = nir_op_iadd;
-                       primemit_scan[stream].src = nggso->prim_enable[stream];
-                       primemit_scan[stream].scratch =
-                               ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
-                                       LLVMConstInt(ctx->ac.i32, 12 + 8 * stream, false));
-                       primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
-                       primemit_scan[stream].numwaves = get_tgsize(ctx);
-                       primemit_scan[stream].maxwaves = 8;
-                       ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
-               }
-       }
-
-       ac_build_s_barrier(&ctx->ac);
-
-       /* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
-       LLVMValueRef wgoffset_dw[4] = {};
-
-       {
-               LLVMValueRef scratch_vgpr;
-
-               tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
-               scratch_vgpr = LLVMBuildLoad(builder, tmp, "");
-
-               for (unsigned buffer = 0; buffer < 4; ++buffer) {
-                       if (stream_for_buffer[buffer] >= 0) {
-                               wgoffset_dw[buffer] = ac_build_readlane(
-                                       &ctx->ac, scratch_vgpr,
-                                       LLVMConstInt(ctx->ac.i32, scratch_offset_base + buffer, false));
-                       }
-               }
-
-               for (unsigned stream = 0; stream < 4; ++stream) {
-                       if (info->num_stream_output_components[stream]) {
-                               nggso->emit[stream] = ac_build_readlane(
-                                       &ctx->ac, scratch_vgpr,
-                                       LLVMConstInt(ctx->ac.i32, scratch_emit_base + stream, false));
-                       }
-               }
-       }
-
-       /* Write out primitive data */
-       for (unsigned stream = 0; stream < 4; ++stream) {
-               if (!info->num_stream_output_components[stream])
-                       continue;
-
-               if (isgs) {
-                       ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
-               } else {
-                       primemit_scan[stream].result_exclusive = tid;
-               }
-
-               tmp = LLVMBuildICmp(builder, LLVMIntULT,
-                                   primemit_scan[stream].result_exclusive,
-                                   nggso->emit[stream], "");
-               tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
-               ac_build_ifcc(&ctx->ac, tmp, 5240);
-               {
-                       LLVMValueRef offset_vtx =
-                               LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
-                                            nggso->num_vertices, "");
-
-                       for (unsigned i = 0; i < max_num_vertices; ++i) {
-                               tmp = LLVMBuildICmp(builder, LLVMIntULT,
-                                                   LLVMConstInt(ctx->ac.i32, i, false),
-                                                   nggso->num_vertices, "");
-                               ac_build_ifcc(&ctx->ac, tmp, 5241);
-                               build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
-                                                      stream, offset_vtx, nggso->vertices[i]);
-                               ac_build_endif(&ctx->ac, 5241);
-                               offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->ac.i32_1, "");
-                       }
-               }
-               ac_build_endif(&ctx->ac, 5240);
-       }
+   struct si_shader_info *info = &ctx->shader->selector->info;
+   struct pipe_stream_output_info *so = &ctx->shader->selector->so;
+   LLVMBuilderRef builder = ctx->ac.builder;
+   LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->rw_buffers);
+   LLVMValueRef tid = get_thread_id_in_tg(ctx);
+   LLVMValueRef tmp, tmp2;
+   LLVMValueRef i32_2 = LLVMConstInt(ctx->ac.i32, 2, false);
+   LLVMValueRef i32_4 = LLVMConstInt(ctx->ac.i32, 4, false);
+   LLVMValueRef i32_8 = LLVMConstInt(ctx->ac.i32, 8, false);
+   LLVMValueRef so_buffer[4] = {};
+   unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) + (nggso->vertices[2] ? 1 : 0);
+   LLVMValueRef prim_stride_dw[4] = {};
+   LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->ac.i32);
+   int stream_for_buffer[4] = {-1, -1, -1, -1};
+   unsigned bufmask_for_stream[4] = {};
+   bool isgs = ctx->type == PIPE_SHADER_GEOMETRY;
+   unsigned scratch_emit_base = isgs ? 4 : 0;
+   LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->ac.i32_0;
+   unsigned scratch_offset_base = isgs ? 8 : 4;
+   LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;
+
+   ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);
+
+   /* Determine the mapping of streamout buffers to vertex streams. */
+   for (unsigned i = 0; i < so->num_outputs; ++i) {
+      unsigned buf = so->output[i].output_buffer;
+      unsigned stream = so->output[i].stream;
+      assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
+      stream_for_buffer[buf] = stream;
+      bufmask_for_stream[stream] |= 1 << buf;
+   }
+
+   for (unsigned buffer = 0; buffer < 4; ++buffer) {
+      if (stream_for_buffer[buffer] == -1)
+         continue;
+
+      assert(so->stride[buffer]);
+
+      tmp = LLVMConstInt(ctx->ac.i32, so->stride[buffer], false);
+      prim_stride_dw[buffer] = LLVMBuildMul(builder, tmp, nggso->num_vertices, "");
+      prim_stride_dw_vgpr =
+         ac_build_writelane(&ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
+                            LLVMConstInt(ctx->ac.i32, buffer, false));
+
+      so_buffer[buffer] = ac_build_load_to_sgpr(
+         &ctx->ac, buf_ptr, LLVMConstInt(ctx->ac.i32, SI_VS_STREAMOUT_BUF0 + buffer, false));
+   }
+
+   tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
+   ac_build_ifcc(&ctx->ac, tmp, 5200);
+   {
+      LLVMTypeRef gdsptr = LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS);
+      LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->ac.i32_0, gdsptr, "");
+
+      /* Advance the streamout offsets in GDS. */
+      LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
+      LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
+
+      tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
+      ac_build_ifcc(&ctx->ac, tmp, 5210);
+      {
+         if (isgs) {
+            tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
+            tmp = LLVMBuildLoad(builder, tmp, "");
+         } else {
+            tmp = ac_build_writelane(&ctx->ac, ctx->ac.i32_0, ngg_get_prim_cnt(ctx), ctx->ac.i32_0);
+         }
+         LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);
+
+         unsigned swizzle[4];
+         int unused_stream = -1;
+         for (unsigned stream = 0; stream < 4; ++stream) {
+            if (!info->num_stream_output_components[stream]) {
+               unused_stream = stream;
+               break;
+            }
+         }
+         for (unsigned buffer = 0; buffer < 4; ++buffer) {
+            if (stream_for_buffer[buffer] >= 0) {
+               swizzle[buffer] = stream_for_buffer[buffer];
+            } else {
+               assert(unused_stream >= 0);
+               swizzle[buffer] = unused_stream;
+            }
+         }
+
+         tmp = ac_build_quad_swizzle(&ctx->ac, tmp, swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
+         tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
+
+         LLVMValueRef args[] = {
+            LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
+            tmp,
+            ctx->ac.i32_0,                             // ordering
+            ctx->ac.i32_0,                             // scope
+            ctx->ac.i1false,                           // isVolatile
+            LLVMConstInt(ctx->ac.i32, 4 << 24, false), // OA index
+            ctx->ac.i1true,                            // wave release
+            ctx->ac.i1true,                            // wave done
+         };
+         tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add", ctx->ac.i32, args,
+                                  ARRAY_SIZE(args), 0);
+
+         /* Keep offsets in a VGPR for quick retrieval via readlane by
+          * the first wave for bounds checking, and also store in LDS
+          * for retrieval by all waves later. */
+         LLVMBuildStore(builder, tmp, offsets_vgpr);
+
+         tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac), scratch_offset_basev, "");
+         tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
+         LLVMBuildStore(builder, tmp, tmp2);
+      }
+      ac_build_endif(&ctx->ac, 5210);
+
+      /* Determine the max emit per buffer. This is done via the SALU, in part
+       * because LLVM can't generate divide-by-multiply if we try to do this
+       * via VALU with one lane per buffer.
+       */
+      LLVMValueRef max_emit[4] = {};
+      for (unsigned buffer = 0; buffer < 4; ++buffer) {
+         if (stream_for_buffer[buffer] == -1)
+            continue;
+
+         LLVMValueRef bufsize_dw = LLVMBuildLShr(
+            builder, LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""), i32_2, "");
+
+         tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
+         LLVMValueRef offset_dw =
+            ac_build_readlane(&ctx->ac, tmp, LLVMConstInt(ctx->ac.i32, buffer, false));
+
+         tmp = LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
+         tmp = LLVMBuildUDiv(builder, tmp, prim_stride_dw[buffer], "");
+
+         tmp2 = LLVMBuildICmp(builder, LLVMIntULT, bufsize_dw, offset_dw, "");
+         max_emit[buffer] = LLVMBuildSelect(builder, tmp2, ctx->ac.i32_0, tmp, "");
+      }
+
+      /* Determine the number of emitted primitives per stream and fixup the
+       * GDS counter if necessary.
+       *
+       * This is complicated by the fact that a single stream can emit to
+       * multiple buffers (but luckily not vice versa).
+       */
+      LLVMValueRef emit_vgpr = ctx->ac.i32_0;
+
+      for (unsigned stream = 0; stream < 4; ++stream) {
+         if (!info->num_stream_output_components[stream])
+            continue;
+
+         tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
+         LLVMValueRef generated =
+            ac_build_readlane(&ctx->ac, tmp, LLVMConstInt(ctx->ac.i32, stream, false));
+
+         LLVMValueRef emit = generated;
+         for (unsigned buffer = 0; buffer < 4; ++buffer) {
+            if (stream_for_buffer[buffer] == stream)
+               emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
+         }
+
+         emit_vgpr =
+            ac_build_writelane(&ctx->ac, emit_vgpr, emit, LLVMConstInt(ctx->ac.i32, stream, false));
+
+         /* Fixup the offset using a plain GDS atomic if we overflowed. */
+         tmp = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
+         ac_build_ifcc(&ctx->ac, tmp, 5221); /* scalar branch */
+         tmp = LLVMBuildLShr(builder, LLVMConstInt(ctx->ac.i32, bufmask_for_stream[stream], false),
+                             ac_get_thread_id(&ctx->ac), "");
+         tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
+         ac_build_ifcc(&ctx->ac, tmp, 5222);
+         {
+            tmp = LLVMBuildSub(builder, generated, emit, "");
+            tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
+            tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
+            LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
+                               LLVMAtomicOrderingMonotonic, false);
+         }
+         ac_build_endif(&ctx->ac, 5222);
+         ac_build_endif(&ctx->ac, 5221);
+      }
+
+      tmp = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
+      ac_build_ifcc(&ctx->ac, tmp, 5225);
+      {
+         tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac), scratch_emit_basev, "");
+         tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
+         LLVMBuildStore(builder, emit_vgpr, tmp);
+      }
+      ac_build_endif(&ctx->ac, 5225);
+   }
+   ac_build_endif(&ctx->ac, 5200);
+
+   /* Determine the workgroup-relative per-thread / primitive offset into
+    * the streamout buffers */
+   struct ac_wg_scan primemit_scan[4] = {};
+
+   if (isgs) {
+      for (unsigned stream = 0; stream < 4; ++stream) {
+         if (!info->num_stream_output_components[stream])
+            continue;
+
+         primemit_scan[stream].enable_exclusive = true;
+         primemit_scan[stream].op = nir_op_iadd;
+         primemit_scan[stream].src = nggso->prim_enable[stream];
+         primemit_scan[stream].scratch = ac_build_gep0(
+            &ctx->ac, ctx->gs_ngg_scratch, LLVMConstInt(ctx->ac.i32, 12 + 8 * stream, false));
+         primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
+         primemit_scan[stream].numwaves = get_tgsize(ctx);
+         primemit_scan[stream].maxwaves = 8;
+         ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
+      }
+   }
+
+   ac_build_s_barrier(&ctx->ac);
+
+   /* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
+   LLVMValueRef wgoffset_dw[4] = {};
+
+   {
+      LLVMValueRef scratch_vgpr;
+
+      tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
+      scratch_vgpr = LLVMBuildLoad(builder, tmp, "");
+
+      for (unsigned buffer = 0; buffer < 4; ++buffer) {
+         if (stream_for_buffer[buffer] >= 0) {
+            wgoffset_dw[buffer] =
+               ac_build_readlane(&ctx->ac, scratch_vgpr,
+                                 LLVMConstInt(ctx->ac.i32, scratch_offset_base + buffer, false));
+         }
+      }
+
+      for (unsigned stream = 0; stream < 4; ++stream) {
+         if (info->num_stream_output_components[stream]) {
+            nggso->emit[stream] =
+               ac_build_readlane(&ctx->ac, scratch_vgpr,
+                                 LLVMConstInt(ctx->ac.i32, scratch_emit_base + stream, false));
+         }
+      }
+   }
+
+   /* Write out primitive data */
+   for (unsigned stream = 0; stream < 4; ++stream) {
+      if (!info->num_stream_output_components[stream])
+         continue;
+
+      if (isgs) {
+         ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
+      } else {
+         primemit_scan[stream].result_exclusive = tid;
+      }
+
+      tmp = LLVMBuildICmp(builder, LLVMIntULT, primemit_scan[stream].result_exclusive,
+                          nggso->emit[stream], "");
+      tmp = LLVMBuildAnd(builder, tmp, nggso->prim_enable[stream], "");
+      ac_build_ifcc(&ctx->ac, tmp, 5240);
+      {
+         LLVMValueRef offset_vtx =
+            LLVMBuildMul(builder, primemit_scan[stream].result_exclusive, nggso->num_vertices, "");
+
+         for (unsigned i = 0; i < max_num_vertices; ++i) {
+            tmp = LLVMBuildICmp(builder, LLVMIntULT, LLVMConstInt(ctx->ac.i32, i, false),
+                                nggso->num_vertices, "");
+            ac_build_ifcc(&ctx->ac, tmp, 5241);
+            build_streamout_vertex(ctx, so_buffer, wgoffset_dw, stream, offset_vtx,
+                                   nggso->vertices[i]);
+            ac_build_endif(&ctx->ac, 5241);
+            offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->ac.i32_1, "");
+         }
+      }
+      ac_build_endif(&ctx->ac, 5240);
+   }
 }
 
 /* LDS layout of ES vertex data for NGG culling. */
-enum {
-       /* Byte 0: Boolean ES thread accepted (unculled) flag, and later the old
-        *         ES thread ID. After vertex compaction, compacted ES threads
-        *         store the old thread ID here to copy input VGPRs from uncompacted
-        *         ES threads.
-        * Byte 1: New ES thread ID, loaded by GS to prepare the prim export value.
-        * Byte 2: TES rel patch ID
-        * Byte 3: Unused
-        */
-       lds_byte0_accept_flag = 0,
-       lds_byte0_old_thread_id = 0,
-       lds_byte1_new_thread_id,
-       lds_byte2_tes_rel_patch_id,
-       lds_byte3_unused,
-
-       lds_packed_data = 0, /* lds_byteN_... */
-
-       lds_pos_x,
-       lds_pos_y,
-       lds_pos_z,
-       lds_pos_w,
-       lds_pos_x_div_w,
-       lds_pos_y_div_w,
-       /* If VS: */
-       lds_vertex_id,
-       lds_instance_id, /* optional */
-       /* If TES: */
-       lds_tes_u = lds_vertex_id,
-       lds_tes_v = lds_instance_id,
-       lds_tes_patch_id, /* optional */
+enum
+{
+   /* Byte 0: Boolean ES thread accepted (unculled) flag, and later the old
+    *         ES thread ID. After vertex compaction, compacted ES threads
+    *         store the old thread ID here to copy input VGPRs from uncompacted
+    *         ES threads.
+    * Byte 1: New ES thread ID, loaded by GS to prepare the prim export value.
+    * Byte 2: TES rel patch ID
+    * Byte 3: Unused
+    */
+   lds_byte0_accept_flag = 0,
+   lds_byte0_old_thread_id = 0,
+   lds_byte1_new_thread_id,
+   lds_byte2_tes_rel_patch_id,
+   lds_byte3_unused,
+
+   lds_packed_data = 0, /* lds_byteN_... */
+
+   lds_pos_x,
+   lds_pos_y,
+   lds_pos_z,
+   lds_pos_w,
+   lds_pos_x_div_w,
+   lds_pos_y_div_w,
+   /* If VS: */
+   lds_vertex_id,
+   lds_instance_id, /* optional */
+   /* If TES: */
+   lds_tes_u = lds_vertex_id,
+   lds_tes_v = lds_instance_id,
+   lds_tes_patch_id, /* optional */
 };
 
-static LLVMValueRef si_build_gep_i8(struct si_shader_context *ctx,
-                                   LLVMValueRef ptr, unsigned byte_index)
+static LLVMValueRef si_build_gep_i8(struct si_shader_context *ctx, LLVMValueRef ptr,
+                                    unsigned byte_index)
 {
-       assert(byte_index < 4);
-       LLVMTypeRef pi8 = LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_LDS);
-       LLVMValueRef index = LLVMConstInt(ctx->ac.i32, byte_index, 0);
+   assert(byte_index < 4);
+   LLVMTypeRef pi8 = LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_LDS);
+   LLVMValueRef index = LLVMConstInt(ctx->ac.i32, byte_index, 0);
 
-       return LLVMBuildGEP(ctx->ac.builder,
-                           LLVMBuildPointerCast(ctx->ac.builder, ptr, pi8, ""),
-                           &index, 1, "");
+   return LLVMBuildGEP(ctx->ac.builder, LLVMBuildPointerCast(ctx->ac.builder, ptr, pi8, ""), &index,
+                       1, "");
 }
 
 static unsigned ngg_nogs_vertex_size(struct si_shader *shader)
 {
-       unsigned lds_vertex_size = 0;
-
-       /* The edgeflag is always stored in the last element that's also
-        * used for padding to reduce LDS bank conflicts. */
-       if (shader->selector->so.num_outputs)
-               lds_vertex_size = 4 * shader->selector->info.num_outputs + 1;
-       if (shader->selector->info.writes_edgeflag)
-               lds_vertex_size = MAX2(lds_vertex_size, 1);
-
-       /* LDS size for passing data from GS to ES.
-        * GS stores Primitive IDs into LDS at the address corresponding
-        * to the ES thread of the provoking vertex. All ES threads
-        * load and export PrimitiveID for their thread.
-        */
-       if (shader->selector->type == PIPE_SHADER_VERTEX &&
-           shader->key.mono.u.vs_export_prim_id)
-               lds_vertex_size = MAX2(lds_vertex_size, 1);
-
-       if (shader->key.opt.ngg_culling) {
-               if (shader->selector->type == PIPE_SHADER_VERTEX) {
-                       STATIC_ASSERT(lds_instance_id + 1 == 9);
-                       lds_vertex_size = MAX2(lds_vertex_size, 9);
-               } else {
-                       assert(shader->selector->type == PIPE_SHADER_TESS_EVAL);
-
-                       if (shader->selector->info.uses_primid ||
-                           shader->key.mono.u.vs_export_prim_id) {
-                               STATIC_ASSERT(lds_tes_patch_id + 2 == 11);
-                               lds_vertex_size = MAX2(lds_vertex_size, 11);
-                       } else {
-                               STATIC_ASSERT(lds_tes_v + 1 == 9);
-                               lds_vertex_size = MAX2(lds_vertex_size, 9);
-                       }
-               }
-       }
-
-       return lds_vertex_size;
+   unsigned lds_vertex_size = 0;
+
+   /* The edgeflag is always stored in the last element that's also
+    * used for padding to reduce LDS bank conflicts. */
+   if (shader->selector->so.num_outputs)
+      lds_vertex_size = 4 * shader->selector->info.num_outputs + 1;
+   if (shader->selector->info.writes_edgeflag)
+      lds_vertex_size = MAX2(lds_vertex_size, 1);
+
+   /* LDS size for passing data from GS to ES.
+    * GS stores Primitive IDs into LDS at the address corresponding
+    * to the ES thread of the provoking vertex. All ES threads
+    * load and export PrimitiveID for their thread.
+    */
+   if (shader->selector->type == PIPE_SHADER_VERTEX && shader->key.mono.u.vs_export_prim_id)
+      lds_vertex_size = MAX2(lds_vertex_size, 1);
+
+   if (shader->key.opt.ngg_culling) {
+      if (shader->selector->type == PIPE_SHADER_VERTEX) {
+         STATIC_ASSERT(lds_instance_id + 1 == 9);
+         lds_vertex_size = MAX2(lds_vertex_size, 9);
+      } else {
+         assert(shader->selector->type == PIPE_SHADER_TESS_EVAL);
+
+         if (shader->selector->info.uses_primid || shader->key.mono.u.vs_export_prim_id) {
+            STATIC_ASSERT(lds_tes_patch_id + 2 == 11);
+            lds_vertex_size = MAX2(lds_vertex_size, 11);
+         } else {
+            STATIC_ASSERT(lds_tes_v + 1 == 9);
+            lds_vertex_size = MAX2(lds_vertex_size, 9);
+         }
+      }
+   }
+
+   return lds_vertex_size;
 }
 
 /**
  * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
  * for the vertex outputs.
  */
-static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx,
-                                       LLVMValueRef vtxid)
+static LLVMValueRef ngg_nogs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vtxid)
 {
-       /* The extra dword is used to avoid LDS bank conflicts. */
-       unsigned vertex_size = ngg_nogs_vertex_size(ctx->shader);
-       LLVMTypeRef ai32 = LLVMArrayType(ctx->ac.i32, vertex_size);
-       LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
-       LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
-       return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
+   /* The extra dword is used to avoid LDS bank conflicts. */
+   unsigned vertex_size = ngg_nogs_vertex_size(ctx->shader);
+   LLVMTypeRef ai32 = LLVMArrayType(ctx->ac.i32, vertex_size);
+   LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
+   LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
+   return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
 }
 
-static LLVMValueRef si_insert_input_v4i32(struct si_shader_context *ctx,
-                                         LLVMValueRef ret, struct ac_arg param,
-                                         unsigned return_index)
+static LLVMValueRef si_insert_input_v4i32(struct si_shader_context *ctx, LLVMValueRef ret,
+                                          struct ac_arg param, unsigned return_index)
 {
-       LLVMValueRef v = ac_get_arg(&ctx->ac, param);
-
-       for (unsigned i = 0; i < 4; i++) {
-               ret = LLVMBuildInsertValue(ctx->ac.builder, ret,
-                                          ac_llvm_extract_elem(&ctx->ac, v, i),
-                                          return_index + i, "");
-       }
-       return ret;
+   LLVMValueRef v = ac_get_arg(&ctx->ac, param);
+
+   for (unsigned i = 0; i < 4; i++) {
+      ret = LLVMBuildInsertValue(ctx->ac.builder, ret, ac_llvm_extract_elem(&ctx->ac, v, i),
+                                 return_index + i, "");
+   }
+   return ret;
 }
 
-static void load_bitmasks_2x64(struct si_shader_context *ctx,
-                              LLVMValueRef lds_ptr, unsigned dw_offset,
-                              LLVMValueRef mask[2], LLVMValueRef *total_bitcount)
+static void load_bitmasks_2x64(struct si_shader_context *ctx, LLVMValueRef lds_ptr,
+                               unsigned dw_offset, LLVMValueRef mask[2],
+                               LLVMValueRef *total_bitcount)
 {
-       LLVMBuilderRef builder = ctx->ac.builder;
-       LLVMValueRef ptr64 = LLVMBuildPointerCast(builder, lds_ptr,
-                                                 LLVMPointerType(LLVMArrayType(ctx->ac.i64, 2),
-                                                                 AC_ADDR_SPACE_LDS), "");
-       for (unsigned i = 0; i < 2; i++) {
-               LLVMValueRef index = LLVMConstInt(ctx->ac.i32, dw_offset / 2 + i, 0);
-               mask[i] = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ptr64, index), "");
-       }
-
-       /* We get better code if we don't use the 128-bit bitcount. */
-       *total_bitcount = LLVMBuildAdd(builder, ac_build_bit_count(&ctx->ac, mask[0]),
-                                      ac_build_bit_count(&ctx->ac, mask[1]), "");
+   LLVMBuilderRef builder = ctx->ac.builder;
+   LLVMValueRef ptr64 = LLVMBuildPointerCast(
+      builder, lds_ptr, LLVMPointerType(LLVMArrayType(ctx->ac.i64, 2), AC_ADDR_SPACE_LDS), "");
+   for (unsigned i = 0; i < 2; i++) {
+      LLVMValueRef index = LLVMConstInt(ctx->ac.i32, dw_offset / 2 + i, 0);
+      mask[i] = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ptr64, index), "");
+   }
+
+   /* We get better code if we don't use the 128-bit bitcount. */
+   *total_bitcount = LLVMBuildAdd(builder, ac_build_bit_count(&ctx->ac, mask[0]),
+                                  ac_build_bit_count(&ctx->ac, mask[1]), "");
 }
 
 /**
@@ -711,38 +678,33 @@ static void load_bitmasks_2x64(struct si_shader_context *ctx,
  * \param wave_info_num_bits the bit size of thread count field in merged_wave_info
  * \param wave_info_shift    the bit offset of the thread count field in merged_wave_info
  */
-static void update_thread_counts(struct si_shader_context *ctx,
-                                LLVMValueRef *new_num_threads,
-                                LLVMValueRef *tg_info,
-                                unsigned tg_info_num_bits,
-                                unsigned tg_info_shift,
-                                LLVMValueRef *wave_info,
-                                unsigned wave_info_num_bits,
-                                unsigned wave_info_shift)
+static void update_thread_counts(struct si_shader_context *ctx, LLVMValueRef *new_num_threads,
+                                 LLVMValueRef *tg_info, unsigned tg_info_num_bits,
+                                 unsigned tg_info_shift, LLVMValueRef *wave_info,
+                                 unsigned wave_info_num_bits, unsigned wave_info_shift)
 {
-       LLVMBuilderRef builder = ctx->ac.builder;
-
-       /* Update the total thread count. */
-       unsigned tg_info_mask = ~(u_bit_consecutive(0, tg_info_num_bits) << tg_info_shift);
-       *tg_info = LLVMBuildAnd(builder, *tg_info,
-                               LLVMConstInt(ctx->ac.i32, tg_info_mask, 0), "");
-       *tg_info = LLVMBuildOr(builder, *tg_info,
-                              LLVMBuildShl(builder, *new_num_threads,
-                                           LLVMConstInt(ctx->ac.i32, tg_info_shift, 0), ""), "");
-
-       /* Update the per-wave thread count. */
-       LLVMValueRef prev_threads = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
-                                                LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0), "");
-       *new_num_threads = LLVMBuildSub(builder, *new_num_threads, prev_threads, "");
-       *new_num_threads = ac_build_imax(&ctx->ac, *new_num_threads, ctx->ac.i32_0);
-       *new_num_threads = ac_build_imin(&ctx->ac, *new_num_threads,
-                                       LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0));
-       unsigned wave_info_mask = ~(u_bit_consecutive(0, wave_info_num_bits) << wave_info_shift);
-       *wave_info = LLVMBuildAnd(builder, *wave_info,
-                                 LLVMConstInt(ctx->ac.i32, wave_info_mask, 0), "");
-       *wave_info = LLVMBuildOr(builder, *wave_info,
-                                LLVMBuildShl(builder, *new_num_threads,
-                                             LLVMConstInt(ctx->ac.i32, wave_info_shift, 0), ""), "");
+   LLVMBuilderRef builder = ctx->ac.builder;
+
+   /* Update the total thread count. */
+   unsigned tg_info_mask = ~(u_bit_consecutive(0, tg_info_num_bits) << tg_info_shift);
+   *tg_info = LLVMBuildAnd(builder, *tg_info, LLVMConstInt(ctx->ac.i32, tg_info_mask, 0), "");
+   *tg_info = LLVMBuildOr(
+      builder, *tg_info,
+      LLVMBuildShl(builder, *new_num_threads, LLVMConstInt(ctx->ac.i32, tg_info_shift, 0), ""), "");
+
+   /* Update the per-wave thread count. */
+   LLVMValueRef prev_threads = LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
+                                            LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0), "");
+   *new_num_threads = LLVMBuildSub(builder, *new_num_threads, prev_threads, "");
+   *new_num_threads = ac_build_imax(&ctx->ac, *new_num_threads, ctx->ac.i32_0);
+   *new_num_threads =
+      ac_build_imin(&ctx->ac, *new_num_threads, LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0));
+   unsigned wave_info_mask = ~(u_bit_consecutive(0, wave_info_num_bits) << wave_info_shift);
+   *wave_info = LLVMBuildAnd(builder, *wave_info, LLVMConstInt(ctx->ac.i32, wave_info_mask, 0), "");
+   *wave_info = LLVMBuildOr(
+      builder, *wave_info,
+      LLVMBuildShl(builder, *new_num_threads, LLVMConstInt(ctx->ac.i32, wave_info_shift, 0), ""),
+      "");
 }
 
 /**
@@ -751,759 +713,719 @@ static void update_thread_counts(struct si_shader_context *ctx,
  * Also return the position, which is passed to the shader as an input,
  * so that we don't compute it twice.
  */
-void gfx10_emit_ngg_culling_epilogue_4x_wave32(struct ac_shader_abi *abi,
-                                              unsigned max_outputs,
-                                              LLVMValueRef *addrs)
+void gfx10_emit_ngg_culling_epilogue_4x_wave32(struct ac_shader_abi *abi, unsigned max_outputs,
+                                               LLVMValueRef *addrs)
 {
-       struct si_shader_context *ctx = si_shader_context_from_abi(abi);
-       struct si_shader *shader = ctx->shader;
-       struct si_shader_selector *sel = shader->selector;
-       struct si_shader_info *info = &sel->info;
-       LLVMBuilderRef builder = ctx->ac.builder;
-
-       assert(shader->key.opt.ngg_culling);
-       assert(shader->key.as_ngg);
-       assert(sel->type == PIPE_SHADER_VERTEX ||
-              (sel->type == PIPE_SHADER_TESS_EVAL && !shader->key.as_es));
-
-       LLVMValueRef position[4] = {};
-       for (unsigned i = 0; i < info->num_outputs; i++) {
-               switch (info->output_semantic_name[i]) {
-               case TGSI_SEMANTIC_POSITION:
-                       for (unsigned j = 0; j < 4; j++) {
-                               position[j] = LLVMBuildLoad(ctx->ac.builder,
-                                                           addrs[4 * i + j], "");
-                       }
-                       break;
-               }
-       }
-       assert(position[0]);
-
-       /* Store Position.XYZW into LDS. */
-       LLVMValueRef es_vtxptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
-       for (unsigned chan = 0; chan < 4; chan++) {
-               LLVMBuildStore(builder, ac_to_integer(&ctx->ac, position[chan]),
-                               ac_build_gep0(&ctx->ac, es_vtxptr,
-                                             LLVMConstInt(ctx->ac.i32, lds_pos_x + chan, 0)));
-       }
-       /* Store Position.XY / W into LDS. */
-       for (unsigned chan = 0; chan < 2; chan++) {
-               LLVMValueRef val = ac_build_fdiv(&ctx->ac, position[chan], position[3]);
-               LLVMBuildStore(builder, ac_to_integer(&ctx->ac, val),
-                               ac_build_gep0(&ctx->ac, es_vtxptr,
-                                             LLVMConstInt(ctx->ac.i32, lds_pos_x_div_w + chan, 0)));
-       }
-
-       /* Store VertexID and InstanceID. ES threads will have to load them
-        * from LDS after vertex compaction and use them instead of their own
-        * system values.
-        */
-       bool uses_instance_id = false;
-       bool uses_tes_prim_id = false;
-       LLVMValueRef packed_data = ctx->ac.i32_0;
-
-       if (ctx->type == PIPE_SHADER_VERTEX) {
-               uses_instance_id = sel->info.uses_instanceid ||
-                                  shader->key.part.vs.prolog.instance_divisor_is_one ||
-                                  shader->key.part.vs.prolog.instance_divisor_is_fetched;
-
-               LLVMBuildStore(builder, ctx->abi.vertex_id,
-                              ac_build_gep0(&ctx->ac, es_vtxptr,
-                                            LLVMConstInt(ctx->ac.i32, lds_vertex_id, 0)));
-               if (uses_instance_id) {
-                       LLVMBuildStore(builder, ctx->abi.instance_id,
-                                      ac_build_gep0(&ctx->ac, es_vtxptr,
-                                                    LLVMConstInt(ctx->ac.i32, lds_instance_id, 0)));
-               }
-       } else {
-               uses_tes_prim_id = sel->info.uses_primid ||
-                                  shader->key.mono.u.vs_export_prim_id;
-
-               assert(ctx->type == PIPE_SHADER_TESS_EVAL);
-               LLVMBuildStore(builder, ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, ctx->tes_u)),
-                              ac_build_gep0(&ctx->ac, es_vtxptr,
-                                            LLVMConstInt(ctx->ac.i32, lds_tes_u, 0)));
-               LLVMBuildStore(builder, ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, ctx->tes_v)),
-                              ac_build_gep0(&ctx->ac, es_vtxptr,
-                                            LLVMConstInt(ctx->ac.i32, lds_tes_v, 0)));
-               packed_data = LLVMBuildShl(builder, ac_get_arg(&ctx->ac, ctx->tes_rel_patch_id),
-                                          LLVMConstInt(ctx->ac.i32, lds_byte2_tes_rel_patch_id * 8, 0), "");
-               if (uses_tes_prim_id) {
-                       LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.tes_patch_id),
-                                      ac_build_gep0(&ctx->ac, es_vtxptr,
-                                                    LLVMConstInt(ctx->ac.i32, lds_tes_patch_id, 0)));
-               }
-       }
-       /* Initialize the packed data. */
-       LLVMBuildStore(builder, packed_data,
-                      ac_build_gep0(&ctx->ac, es_vtxptr,
-                                    LLVMConstInt(ctx->ac.i32, lds_packed_data, 0)));
-       ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
-
-       LLVMValueRef tid = ac_get_thread_id(&ctx->ac);
-
-       /* Initialize the last 3 gs_ngg_scratch dwords to 0, because we may have less
-        * than 4 waves, but we always read all 4 values. This is where the thread
-        * bitmasks of unculled threads will be stored.
-        *
-        * gs_ngg_scratch layout: esmask[0..3]
-        */
-       ac_build_ifcc(&ctx->ac,
-                     LLVMBuildICmp(builder, LLVMIntULT, get_thread_id_in_tg(ctx),
-                                   LLVMConstInt(ctx->ac.i32, 3, 0), ""), 16101);
-       {
-               LLVMValueRef index = LLVMBuildAdd(builder, tid, ctx->ac.i32_1, "");
-               LLVMBuildStore(builder, ctx->ac.i32_0,
-                              ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, index));
-       }
-       ac_build_endif(&ctx->ac, 16101);
-       ac_build_s_barrier(&ctx->ac);
-
-       /* The hardware requires that there are no holes between unculled vertices,
-        * which means we have to pack ES threads, i.e. reduce the ES thread count
-        * and move ES input VGPRs to lower threads. The upside is that varyings
-        * are only fetched and computed for unculled vertices.
-        *
-        * Vertex compaction in GS threads:
-        *
-        * Part 1: Compute the surviving vertex mask in GS threads:
-        * - Compute 4 32-bit surviving vertex masks in LDS. (max 4 waves)
-        *   - In GS, notify ES threads whether the vertex survived.
-        *   - Barrier
-        *   - ES threads will create the mask and store it in LDS.
-        * - Barrier
-        * - Each GS thread loads the vertex masks from LDS.
-        *
-        * Part 2: Compact ES threads in GS threads:
-        * - Compute the prefix sum for all 3 vertices from the masks. These are the new
-        *   thread IDs for each vertex within the primitive.
-        * - Write the value of the old thread ID into the LDS address of the new thread ID.
-        *   The ES thread will load the old thread ID and use it to load the position, VertexID,
-        *   and InstanceID.
-        * - Update vertex indices and null flag in the GS input VGPRs.
-        * - Barrier
-        *
-        * Part 3: Update inputs GPRs
-        * - For all waves, update per-wave thread counts in input SGPRs.
-        * - In ES threads, update the ES input VGPRs (VertexID, InstanceID, TES inputs).
-        */
-
-       LLVMValueRef vtxindex[3];
-       if (shader->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_ALL) {
-               /* For the GS fast launch, the VS prologs simply puts the Vertex IDs
-                * into these VGPRs.
-                */
-               vtxindex[0] = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset);
-               vtxindex[1] = ac_get_arg(&ctx->ac, ctx->gs_vtx23_offset);
-               vtxindex[2] = ac_get_arg(&ctx->ac, ctx->gs_vtx45_offset);
-       } else {
-               vtxindex[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16);
-               vtxindex[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16);
-               vtxindex[2] = si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16);
-       };
-       LLVMValueRef gs_vtxptr[] = {
-               ngg_nogs_vertex_ptr(ctx, vtxindex[0]),
-               ngg_nogs_vertex_ptr(ctx, vtxindex[1]),
-               ngg_nogs_vertex_ptr(ctx, vtxindex[2]),
-       };
-       es_vtxptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
-
-       LLVMValueRef gs_accepted = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
-
-       /* Do culling in GS threads. */
-       ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 16002);
-       {
-               /* Load positions. */
-               LLVMValueRef pos[3][4] = {};
-               for (unsigned vtx = 0; vtx < 3; vtx++) {
-                       for (unsigned chan = 0; chan < 4; chan++) {
-                               unsigned index;
-                               if (chan == 0 || chan == 1)
-                                       index = lds_pos_x_div_w + chan;
-                               else if (chan == 3)
-                                       index = lds_pos_w;
-                               else
-                                       continue;
-
-                               LLVMValueRef addr = ac_build_gep0(&ctx->ac, gs_vtxptr[vtx],
-                                                                 LLVMConstInt(ctx->ac.i32, index, 0));
-                               pos[vtx][chan] = LLVMBuildLoad(builder, addr, "");
-                               pos[vtx][chan] = ac_to_float(&ctx->ac, pos[vtx][chan]);
-                       }
-               }
-
-               /* Load the viewport state for small prim culling. */
-               LLVMValueRef vp = ac_build_load_invariant(&ctx->ac,
-                                                         ac_get_arg(&ctx->ac, ctx->small_prim_cull_info),
-                                                         ctx->ac.i32_0);
-               vp = LLVMBuildBitCast(builder, vp, ctx->ac.v4f32, "");
-               LLVMValueRef vp_scale[2], vp_translate[2];
-               vp_scale[0] = ac_llvm_extract_elem(&ctx->ac, vp, 0);
-               vp_scale[1] = ac_llvm_extract_elem(&ctx->ac, vp, 1);
-               vp_translate[0] = ac_llvm_extract_elem(&ctx->ac, vp, 2);
-               vp_translate[1] = ac_llvm_extract_elem(&ctx->ac, vp, 3);
-
-               /* Get the small prim filter precision. */
-               LLVMValueRef small_prim_precision = si_unpack_param(ctx, ctx->vs_state_bits, 7, 4);
-               small_prim_precision = LLVMBuildOr(builder, small_prim_precision,
-                                                  LLVMConstInt(ctx->ac.i32, 0x70, 0), "");
-               small_prim_precision = LLVMBuildShl(builder, small_prim_precision,
-                                                   LLVMConstInt(ctx->ac.i32, 23, 0), "");
-               small_prim_precision = LLVMBuildBitCast(builder, small_prim_precision, ctx->ac.f32, "");
-
-               /* Execute culling code. */
-               struct ac_cull_options options = {};
-               options.cull_front = shader->key.opt.ngg_culling & SI_NGG_CULL_FRONT_FACE;
-               options.cull_back = shader->key.opt.ngg_culling & SI_NGG_CULL_BACK_FACE;
-               options.cull_view_xy = shader->key.opt.ngg_culling & SI_NGG_CULL_VIEW_SMALLPRIMS;
-               options.cull_small_prims = options.cull_view_xy;
-               options.cull_zero_area = options.cull_front || options.cull_back;
-               options.cull_w = true;
-
-               /* Tell ES threads whether their vertex survived. */
-               ac_build_ifcc(&ctx->ac, ac_cull_triangle(&ctx->ac, pos, ctx->ac.i1true,
-                                                        vp_scale, vp_translate,
-                                                        small_prim_precision, &options), 16003);
-               {
-                       LLVMBuildStore(builder, ctx->ac.i32_1, gs_accepted);
-                       for (unsigned vtx = 0; vtx < 3; vtx++) {
-                               LLVMBuildStore(builder, ctx->ac.i8_1,
-                                              si_build_gep_i8(ctx, gs_vtxptr[vtx], lds_byte0_accept_flag));
-                       }
-               }
-               ac_build_endif(&ctx->ac, 16003);
-       }
-       ac_build_endif(&ctx->ac, 16002);
-       ac_build_s_barrier(&ctx->ac);
-
-       gs_accepted = LLVMBuildLoad(builder, gs_accepted, "");
-
-       LLVMValueRef es_accepted = ac_build_alloca(&ctx->ac, ctx->ac.i1, "");
-
-       /* Convert the per-vertex flag to a thread bitmask in ES threads and store it in LDS. */
-       ac_build_ifcc(&ctx->ac, si_is_es_thread(ctx), 16007);
-       {
-               LLVMValueRef es_accepted_flag =
-                       LLVMBuildLoad(builder,
-                                     si_build_gep_i8(ctx, es_vtxptr, lds_byte0_accept_flag), "");
-
-               LLVMValueRef es_accepted_bool = LLVMBuildICmp(builder, LLVMIntNE,
-                                                             es_accepted_flag, ctx->ac.i8_0, "");
-               LLVMValueRef es_mask = ac_get_i1_sgpr_mask(&ctx->ac, es_accepted_bool);
-
-               LLVMBuildStore(builder, es_accepted_bool, es_accepted);
-
-               ac_build_ifcc(&ctx->ac, LLVMBuildICmp(builder, LLVMIntEQ,
-                                                     tid, ctx->ac.i32_0, ""), 16008);
-               {
-                       LLVMBuildStore(builder, es_mask,
-                                      ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
-                                                    get_wave_id_in_tg(ctx)));
-               }
-               ac_build_endif(&ctx->ac, 16008);
-       }
-       ac_build_endif(&ctx->ac, 16007);
-       ac_build_s_barrier(&ctx->ac);
-
-       /* Load the vertex masks and compute the new ES thread count. */
-       LLVMValueRef es_mask[2], new_num_es_threads, kill_wave;
-       load_bitmasks_2x64(ctx, ctx->gs_ngg_scratch, 0, es_mask, &new_num_es_threads);
-       new_num_es_threads = ac_build_readlane_no_opt_barrier(&ctx->ac, new_num_es_threads, NULL);
-
-       /* ES threads compute their prefix sum, which is the new ES thread ID.
-        * Then they write the value of the old thread ID into the LDS address
-        * of the new thread ID. It will be used to load input VGPRs from
-        * the old thread's LDS location.
-        */
-       ac_build_ifcc(&ctx->ac, LLVMBuildLoad(builder, es_accepted, ""), 16009);
-       {
-               LLVMValueRef old_id = get_thread_id_in_tg(ctx);
-               LLVMValueRef new_id = ac_prefix_bitcount_2x64(&ctx->ac, es_mask, old_id);
-
-               LLVMBuildStore(builder, LLVMBuildTrunc(builder, old_id, ctx->ac.i8, ""),
-                              si_build_gep_i8(ctx, ngg_nogs_vertex_ptr(ctx, new_id),
-                                              lds_byte0_old_thread_id));
-               LLVMBuildStore(builder, LLVMBuildTrunc(builder, new_id, ctx->ac.i8, ""),
-                              si_build_gep_i8(ctx, es_vtxptr, lds_byte1_new_thread_id));
-       }
-       ac_build_endif(&ctx->ac, 16009);
-
-       /* Kill waves that have inactive threads. */
-       kill_wave = LLVMBuildICmp(builder, LLVMIntULE,
-                                 ac_build_imax(&ctx->ac, new_num_es_threads, ngg_get_prim_cnt(ctx)),
-                                 LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
-                                              LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0), ""), "");
-       ac_build_ifcc(&ctx->ac, kill_wave, 19202);
-       {
-               /* If we are killing wave 0, send the message that there are no primitives
-                * in this threadgroup.
-                */
-               ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
-                                             ctx->ac.i32_0, ctx->ac.i32_0);
-               ac_build_s_endpgm(&ctx->ac);
-       }
-       ac_build_endif(&ctx->ac, 19202);
-       ac_build_s_barrier(&ctx->ac);
-
-       /* Send the final vertex and primitive counts. */
-       ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
-                                     new_num_es_threads, ngg_get_prim_cnt(ctx));
-
-       /* Update thread counts in SGPRs. */
-       LLVMValueRef new_gs_tg_info = ac_get_arg(&ctx->ac, ctx->gs_tg_info);
-       LLVMValueRef new_merged_wave_info = ac_get_arg(&ctx->ac, ctx->merged_wave_info);
-
-       /* This also converts the thread count from the total count to the per-wave count. */
-       update_thread_counts(ctx, &new_num_es_threads, &new_gs_tg_info, 9, 12,
-                            &new_merged_wave_info, 8, 0);
-
-       /* Update vertex indices in VGPR0 (same format as NGG passthrough). */
-       LLVMValueRef new_vgpr0 = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
-
-       /* Set the null flag at the beginning (culled), and then
-        * overwrite it for accepted primitives.
-        */
-       LLVMBuildStore(builder, LLVMConstInt(ctx->ac.i32, 1u << 31, 0), new_vgpr0);
-
-       /* Get vertex indices after vertex compaction. */
-       ac_build_ifcc(&ctx->ac, LLVMBuildTrunc(builder, gs_accepted, ctx->ac.i1, ""), 16011);
-       {
-               struct ac_ngg_prim prim = {};
-               prim.num_vertices = 3;
-               prim.isnull = ctx->ac.i1false;
-
-               for (unsigned vtx = 0; vtx < 3; vtx++) {
-                       prim.index[vtx] =
-                               LLVMBuildLoad(builder,
-                                             si_build_gep_i8(ctx, gs_vtxptr[vtx],
-                                                             lds_byte1_new_thread_id), "");
-                       prim.index[vtx] = LLVMBuildZExt(builder, prim.index[vtx], ctx->ac.i32, "");
-                       prim.edgeflag[vtx] = ngg_get_initial_edgeflag(ctx, vtx);
-               }
-
-               /* Set the new GS input VGPR. */
-               LLVMBuildStore(builder, ac_pack_prim_export(&ctx->ac, &prim), new_vgpr0);
-       }
-       ac_build_endif(&ctx->ac, 16011);
-
-       if (gfx10_ngg_export_prim_early(shader))
-               gfx10_ngg_build_export_prim(ctx, NULL, LLVMBuildLoad(builder, new_vgpr0, ""));
-
-       /* Set the new ES input VGPRs. */
-       LLVMValueRef es_data[4];
-       LLVMValueRef old_thread_id = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
-
-       for (unsigned i = 0; i < 4; i++)
-               es_data[i] = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
-
-       ac_build_ifcc(&ctx->ac, LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, tid,
-                                             new_num_es_threads, ""), 16012);
-       {
-               LLVMValueRef old_id, old_es_vtxptr, tmp;
-
-               /* Load ES input VGPRs from the ES thread before compaction. */
-               old_id = LLVMBuildLoad(builder,
-                                      si_build_gep_i8(ctx, es_vtxptr, lds_byte0_old_thread_id), "");
-               old_id = LLVMBuildZExt(builder, old_id, ctx->ac.i32, "");
-
-               LLVMBuildStore(builder, old_id, old_thread_id);
-               old_es_vtxptr = ngg_nogs_vertex_ptr(ctx, old_id);
-
-               for (unsigned i = 0; i < 2; i++) {
-                       tmp = LLVMBuildLoad(builder,
-                                           ac_build_gep0(&ctx->ac, old_es_vtxptr,
-                                                         LLVMConstInt(ctx->ac.i32, lds_vertex_id + i, 0)), "");
-                       LLVMBuildStore(builder, tmp, es_data[i]);
-               }
-
-               if (ctx->type == PIPE_SHADER_TESS_EVAL) {
-                       tmp = LLVMBuildLoad(builder,
-                                           si_build_gep_i8(ctx, old_es_vtxptr,
-                                                           lds_byte2_tes_rel_patch_id), "");
-                       tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
-                       LLVMBuildStore(builder, tmp, es_data[2]);
-
-                       if (uses_tes_prim_id) {
-                               tmp = LLVMBuildLoad(builder,
-                                                   ac_build_gep0(&ctx->ac, old_es_vtxptr,
-                                                                 LLVMConstInt(ctx->ac.i32, lds_tes_patch_id, 0)), "");
-                               LLVMBuildStore(builder, tmp, es_data[3]);
-                       }
-               }
-       }
-       ac_build_endif(&ctx->ac, 16012);
-
-       /* Return values for the main function. */
-       LLVMValueRef ret = ctx->return_value;
-       LLVMValueRef val;
-
-       ret = LLVMBuildInsertValue(ctx->ac.builder, ret, new_gs_tg_info, 2, "");
-       ret = LLVMBuildInsertValue(ctx->ac.builder, ret, new_merged_wave_info, 3, "");
-       if (ctx->type == PIPE_SHADER_TESS_EVAL)
-               ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 4);
-
-       ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers,
-                                 8 + SI_SGPR_RW_BUFFERS);
-       ret = si_insert_input_ptr(ctx, ret,
-                                 ctx->bindless_samplers_and_images,
-                                 8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
-       ret = si_insert_input_ptr(ctx, ret,
-                                 ctx->const_and_shader_buffers,
-                                 8 + SI_SGPR_CONST_AND_SHADER_BUFFERS);
-       ret = si_insert_input_ptr(ctx, ret,
-                                 ctx->samplers_and_images,
-                                 8 + SI_SGPR_SAMPLERS_AND_IMAGES);
-       ret = si_insert_input_ptr(ctx, ret, ctx->vs_state_bits,
-                                 8 + SI_SGPR_VS_STATE_BITS);
-
-       if (ctx->type == PIPE_SHADER_VERTEX) {
-               ret = si_insert_input_ptr(ctx, ret, ctx->args.base_vertex,
-                                         8 + SI_SGPR_BASE_VERTEX);
-               ret = si_insert_input_ptr(ctx, ret, ctx->args.start_instance,
-                                         8 + SI_SGPR_START_INSTANCE);
-               ret = si_insert_input_ptr(ctx, ret, ctx->args.draw_id,
-                                         8 + SI_SGPR_DRAWID);
-               ret = si_insert_input_ptr(ctx, ret, ctx->vertex_buffers,
-                                         8 + SI_VS_NUM_USER_SGPR);
-
-               for (unsigned i = 0; i < shader->selector->num_vbos_in_user_sgprs; i++) {
-                       ret = si_insert_input_v4i32(ctx, ret, ctx->vb_descriptors[i],
-                                                   8 + SI_SGPR_VS_VB_DESCRIPTOR_FIRST + i * 4);
-               }
-       } else {
-               assert(ctx->type == PIPE_SHADER_TESS_EVAL);
-               ret = si_insert_input_ptr(ctx, ret, ctx->tcs_offchip_layout,
-                                         8 + SI_SGPR_TES_OFFCHIP_LAYOUT);
-               ret = si_insert_input_ptr(ctx, ret, ctx->tes_offchip_addr,
-                                         8 + SI_SGPR_TES_OFFCHIP_ADDR);
-       }
-
-       unsigned vgpr;
-       if (ctx->type == PIPE_SHADER_VERTEX) {
-               if (shader->selector->num_vbos_in_user_sgprs) {
-                       vgpr = 8 + SI_SGPR_VS_VB_DESCRIPTOR_FIRST +
-                              shader->selector->num_vbos_in_user_sgprs * 4;
-               } else {
-                       vgpr = 8 + GFX9_VSGS_NUM_USER_SGPR + 1;
-               }
-       } else {
-               vgpr = 8 + GFX9_TESGS_NUM_USER_SGPR;
-       }
-
-       val = LLVMBuildLoad(builder, new_vgpr0, "");
-       ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val),
-                                  vgpr++, "");
-       vgpr++; /* gs_vtx23_offset */
-
-       ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_prim_id, vgpr++);
-       ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_invocation_id, vgpr++);
-       vgpr++; /* gs_vtx45_offset */
-
-       if (ctx->type == PIPE_SHADER_VERTEX) {
-               val = LLVMBuildLoad(builder, es_data[0], "");
-               ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val),
-                                          vgpr++, ""); /* VGPR5 - VertexID */
-               vgpr += 2;
-               if (uses_instance_id) {
-                       val = LLVMBuildLoad(builder, es_data[1], "");
-                       ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val),
-                                                  vgpr++, ""); /* VGPR8 - InstanceID */
-               } else {
-                       vgpr++;
-               }
-       } else {
-               assert(ctx->type == PIPE_SHADER_TESS_EVAL);
-               unsigned num_vgprs = uses_tes_prim_id ? 4 : 3;
-               for (unsigned i = 0; i < num_vgprs; i++) {
-                       val = LLVMBuildLoad(builder, es_data[i], "");
-                       ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val),
-                                                  vgpr++, "");
-               }
-               if (num_vgprs == 3)
-                       vgpr++;
-       }
-       /* Return the old thread ID. */
-       val = LLVMBuildLoad(builder, old_thread_id, "");
-       ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++, "");
-
-       /* These two also use LDS. */
-       if (sel->info.writes_edgeflag ||
-           (ctx->type == PIPE_SHADER_VERTEX && shader->key.mono.u.vs_export_prim_id))
-               ac_build_s_barrier(&ctx->ac);
-
-       ctx->return_value = ret;
+   struct si_shader_context *ctx = si_shader_context_from_abi(abi);
+   struct si_shader *shader = ctx->shader;
+   struct si_shader_selector *sel = shader->selector;
+   struct si_shader_info *info = &sel->info;
+   LLVMBuilderRef builder = ctx->ac.builder;
+
+   assert(shader->key.opt.ngg_culling);
+   assert(shader->key.as_ngg);
+   assert(sel->type == PIPE_SHADER_VERTEX ||
+          (sel->type == PIPE_SHADER_TESS_EVAL && !shader->key.as_es));
+
+   LLVMValueRef position[4] = {};
+   for (unsigned i = 0; i < info->num_outputs; i++) {
+      switch (info->output_semantic_name[i]) {
+      case TGSI_SEMANTIC_POSITION:
+         for (unsigned j = 0; j < 4; j++) {
+            position[j] = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + j], "");
+         }
+         break;
+      }
+   }
+   assert(position[0]);
+
+   /* Store Position.XYZW into LDS. */
+   LLVMValueRef es_vtxptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
+   for (unsigned chan = 0; chan < 4; chan++) {
+      LLVMBuildStore(
+         builder, ac_to_integer(&ctx->ac, position[chan]),
+         ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_pos_x + chan, 0)));
+   }
+   /* Store Position.XY / W into LDS. */
+   for (unsigned chan = 0; chan < 2; chan++) {
+      LLVMValueRef val = ac_build_fdiv(&ctx->ac, position[chan], position[3]);
+      LLVMBuildStore(
+         builder, ac_to_integer(&ctx->ac, val),
+         ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_pos_x_div_w + chan, 0)));
+   }
+
+   /* Store VertexID and InstanceID. ES threads will have to load them
+    * from LDS after vertex compaction and use them instead of their own
+    * system values.
+    */
+   bool uses_instance_id = false;
+   bool uses_tes_prim_id = false;
+   LLVMValueRef packed_data = ctx->ac.i32_0;
+
+   if (ctx->type == PIPE_SHADER_VERTEX) {
+      uses_instance_id = sel->info.uses_instanceid ||
+                         shader->key.part.vs.prolog.instance_divisor_is_one ||
+                         shader->key.part.vs.prolog.instance_divisor_is_fetched;
+
+      LLVMBuildStore(
+         builder, ctx->abi.vertex_id,
+         ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_vertex_id, 0)));
+      if (uses_instance_id) {
+         LLVMBuildStore(
+            builder, ctx->abi.instance_id,
+            ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_instance_id, 0)));
+      }
+   } else {
+      uses_tes_prim_id = sel->info.uses_primid || shader->key.mono.u.vs_export_prim_id;
+
+      assert(ctx->type == PIPE_SHADER_TESS_EVAL);
+      LLVMBuildStore(builder, ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, ctx->tes_u)),
+                     ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_tes_u, 0)));
+      LLVMBuildStore(builder, ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, ctx->tes_v)),
+                     ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_tes_v, 0)));
+      packed_data = LLVMBuildShl(builder, ac_get_arg(&ctx->ac, ctx->tes_rel_patch_id),
+                                 LLVMConstInt(ctx->ac.i32, lds_byte2_tes_rel_patch_id * 8, 0), "");
+      if (uses_tes_prim_id) {
+         LLVMBuildStore(
+            builder, ac_get_arg(&ctx->ac, ctx->args.tes_patch_id),
+            ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_tes_patch_id, 0)));
+      }
+   }
+   /* Initialize the packed data. */
+   LLVMBuildStore(
+      builder, packed_data,
+      ac_build_gep0(&ctx->ac, es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_packed_data, 0)));
+   ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
+
+   LLVMValueRef tid = ac_get_thread_id(&ctx->ac);
+
+   /* Initialize the last 3 gs_ngg_scratch dwords to 0, because we may have less
+    * than 4 waves, but we always read all 4 values. This is where the thread
+    * bitmasks of unculled threads will be stored.
+    *
+    * gs_ngg_scratch layout: esmask[0..3]
+    */
+   ac_build_ifcc(&ctx->ac,
+                 LLVMBuildICmp(builder, LLVMIntULT, get_thread_id_in_tg(ctx),
+                               LLVMConstInt(ctx->ac.i32, 3, 0), ""),
+                 16101);
+   {
+      LLVMValueRef index = LLVMBuildAdd(builder, tid, ctx->ac.i32_1, "");
+      LLVMBuildStore(builder, ctx->ac.i32_0, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, index));
+   }
+   ac_build_endif(&ctx->ac, 16101);
+   ac_build_s_barrier(&ctx->ac);
+
+   /* The hardware requires that there are no holes between unculled vertices,
+    * which means we have to pack ES threads, i.e. reduce the ES thread count
+    * and move ES input VGPRs to lower threads. The upside is that varyings
+    * are only fetched and computed for unculled vertices.
+    *
+    * Vertex compaction in GS threads:
+    *
+    * Part 1: Compute the surviving vertex mask in GS threads:
+    * - Compute 4 32-bit surviving vertex masks in LDS. (max 4 waves)
+    *   - In GS, notify ES threads whether the vertex survived.
+    *   - Barrier
+    *   - ES threads will create the mask and store it in LDS.
+    * - Barrier
+    * - Each GS thread loads the vertex masks from LDS.
+    *
+    * Part 2: Compact ES threads in GS threads:
+    * - Compute the prefix sum for all 3 vertices from the masks. These are the new
+    *   thread IDs for each vertex within the primitive.
+    * - Write the value of the old thread ID into the LDS address of the new thread ID.
+    *   The ES thread will load the old thread ID and use it to load the position, VertexID,
+    *   and InstanceID.
+    * - Update vertex indices and null flag in the GS input VGPRs.
+    * - Barrier
+    *
+    * Part 3: Update input GPRs
+    * - For all waves, update per-wave thread counts in input SGPRs.
+    * - In ES threads, update the ES input VGPRs (VertexID, InstanceID, TES inputs).
+    */
+
+   LLVMValueRef vtxindex[3];
+   if (shader->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_ALL) {
+      /* For the GS fast launch, the VS prolog simply puts the Vertex IDs
+       * into these VGPRs.
+       */
+      vtxindex[0] = ac_get_arg(&ctx->ac, ctx->gs_vtx01_offset);
+      vtxindex[1] = ac_get_arg(&ctx->ac, ctx->gs_vtx23_offset);
+      vtxindex[2] = ac_get_arg(&ctx->ac, ctx->gs_vtx45_offset);
+   } else {
+      vtxindex[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16);
+      vtxindex[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16);
+      vtxindex[2] = si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16);
+   };
+   LLVMValueRef gs_vtxptr[] = {
+      ngg_nogs_vertex_ptr(ctx, vtxindex[0]),
+      ngg_nogs_vertex_ptr(ctx, vtxindex[1]),
+      ngg_nogs_vertex_ptr(ctx, vtxindex[2]),
+   };
+   es_vtxptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
+
+   LLVMValueRef gs_accepted = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
+
+   /* Do culling in GS threads. */
+   ac_build_ifcc(&ctx->ac, si_is_gs_thread(ctx), 16002);
+   {
+      /* Load positions. */
+      LLVMValueRef pos[3][4] = {};
+      for (unsigned vtx = 0; vtx < 3; vtx++) {
+         for (unsigned chan = 0; chan < 4; chan++) {
+            unsigned index;
+            if (chan == 0 || chan == 1)
+               index = lds_pos_x_div_w + chan;
+            else if (chan == 3)
+               index = lds_pos_w;
+            else
+               continue;
+
+            LLVMValueRef addr =
+               ac_build_gep0(&ctx->ac, gs_vtxptr[vtx], LLVMConstInt(ctx->ac.i32, index, 0));
+            pos[vtx][chan] = LLVMBuildLoad(builder, addr, "");
+            pos[vtx][chan] = ac_to_float(&ctx->ac, pos[vtx][chan]);
+         }
+      }
+
+      /* Load the viewport state for small prim culling. */
+      LLVMValueRef vp = ac_build_load_invariant(
+         &ctx->ac, ac_get_arg(&ctx->ac, ctx->small_prim_cull_info), ctx->ac.i32_0);
+      vp = LLVMBuildBitCast(builder, vp, ctx->ac.v4f32, "");
+      LLVMValueRef vp_scale[2], vp_translate[2];
+      vp_scale[0] = ac_llvm_extract_elem(&ctx->ac, vp, 0);
+      vp_scale[1] = ac_llvm_extract_elem(&ctx->ac, vp, 1);
+      vp_translate[0] = ac_llvm_extract_elem(&ctx->ac, vp, 2);
+      vp_translate[1] = ac_llvm_extract_elem(&ctx->ac, vp, 3);
+
+      /* Get the small prim filter precision. */
+      LLVMValueRef small_prim_precision = si_unpack_param(ctx, ctx->vs_state_bits, 7, 4);
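+      /* The 4-bit value is expanded into the float 2^(value - 15) by constructing the
+       * IEEE-754 bit pattern directly: (0x70 | value) << 23 sets the exponent field to
+       * 112 + value with a zero mantissa, which bitcasts to 2^(112 + value - 127).
+       */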
+      small_prim_precision =
+         LLVMBuildOr(builder, small_prim_precision, LLVMConstInt(ctx->ac.i32, 0x70, 0), "");
+      small_prim_precision =
+         LLVMBuildShl(builder, small_prim_precision, LLVMConstInt(ctx->ac.i32, 23, 0), "");
+      small_prim_precision = LLVMBuildBitCast(builder, small_prim_precision, ctx->ac.f32, "");
+
+      /* Execute culling code. */
+      struct ac_cull_options options = {};
+      options.cull_front = shader->key.opt.ngg_culling & SI_NGG_CULL_FRONT_FACE;
+      options.cull_back = shader->key.opt.ngg_culling & SI_NGG_CULL_BACK_FACE;
+      options.cull_view_xy = shader->key.opt.ngg_culling & SI_NGG_CULL_VIEW_SMALLPRIMS;
+      options.cull_small_prims = options.cull_view_xy;
+      options.cull_zero_area = options.cull_front || options.cull_back;
+      options.cull_w = true;
+
+      /* Tell ES threads whether their vertex survived. */
+      ac_build_ifcc(&ctx->ac,
+                    ac_cull_triangle(&ctx->ac, pos, ctx->ac.i1true, vp_scale, vp_translate,
+                                     small_prim_precision, &options),
+                    16003);
+      {
+         LLVMBuildStore(builder, ctx->ac.i32_1, gs_accepted);
+         for (unsigned vtx = 0; vtx < 3; vtx++) {
+            LLVMBuildStore(builder, ctx->ac.i8_1,
+                           si_build_gep_i8(ctx, gs_vtxptr[vtx], lds_byte0_accept_flag));
+         }
+      }
+      ac_build_endif(&ctx->ac, 16003);
+   }
+   ac_build_endif(&ctx->ac, 16002);
+   ac_build_s_barrier(&ctx->ac);
+
+   gs_accepted = LLVMBuildLoad(builder, gs_accepted, "");
+
+   LLVMValueRef es_accepted = ac_build_alloca(&ctx->ac, ctx->ac.i1, "");
+
+   /* Convert the per-vertex flag to a thread bitmask in ES threads and store it in LDS. */
+   ac_build_ifcc(&ctx->ac, si_is_es_thread(ctx), 16007);
+   {
+      LLVMValueRef es_accepted_flag =
+         LLVMBuildLoad(builder, si_build_gep_i8(ctx, es_vtxptr, lds_byte0_accept_flag), "");
+
+      LLVMValueRef es_accepted_bool =
+         LLVMBuildICmp(builder, LLVMIntNE, es_accepted_flag, ctx->ac.i8_0, "");
+      LLVMValueRef es_mask = ac_get_i1_sgpr_mask(&ctx->ac, es_accepted_bool);
+
+      LLVMBuildStore(builder, es_accepted_bool, es_accepted);
+
+      ac_build_ifcc(&ctx->ac, LLVMBuildICmp(builder, LLVMIntEQ, tid, ctx->ac.i32_0, ""), 16008);
+      {
+         LLVMBuildStore(builder, es_mask,
+                        ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, get_wave_id_in_tg(ctx)));
+      }
+      ac_build_endif(&ctx->ac, 16008);
+   }
+   ac_build_endif(&ctx->ac, 16007);
+   ac_build_s_barrier(&ctx->ac);
+
+   /* Load the vertex masks and compute the new ES thread count. */
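+   /* The thread count is then read back into a scalar (SGPR) value with a readlane,
+    * so that every lane sees the same count.
+    */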
+   LLVMValueRef es_mask[2], new_num_es_threads, kill_wave;
+   load_bitmasks_2x64(ctx, ctx->gs_ngg_scratch, 0, es_mask, &new_num_es_threads);
+   new_num_es_threads = ac_build_readlane_no_opt_barrier(&ctx->ac, new_num_es_threads, NULL);
+
+   /* ES threads compute their prefix sum, which is the new ES thread ID.
+    * Then they write the value of the old thread ID into the LDS address
+    * of the new thread ID. It will be used to load input VGPRs from
+    * the old thread's LDS location.
+    */
+   ac_build_ifcc(&ctx->ac, LLVMBuildLoad(builder, es_accepted, ""), 16009);
+   {
+      LLVMValueRef old_id = get_thread_id_in_tg(ctx);
+      LLVMValueRef new_id = ac_prefix_bitcount_2x64(&ctx->ac, es_mask, old_id);
+
+      LLVMBuildStore(
+         builder, LLVMBuildTrunc(builder, old_id, ctx->ac.i8, ""),
+         si_build_gep_i8(ctx, ngg_nogs_vertex_ptr(ctx, new_id), lds_byte0_old_thread_id));
+      LLVMBuildStore(builder, LLVMBuildTrunc(builder, new_id, ctx->ac.i8, ""),
+                     si_build_gep_i8(ctx, es_vtxptr, lds_byte1_new_thread_id));
+   }
+   ac_build_endif(&ctx->ac, 16009);
+
+   /* Kill waves that have inactive threads. */
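+   /* A wave is dead when its first thread index (wave_id * wave_size) is greater than
+    * or equal to both the compacted ES thread count and the primitive count, i.e. none
+    * of its threads has any ES or GS work left.
+    */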
+   kill_wave = LLVMBuildICmp(builder, LLVMIntULE,
+                             ac_build_imax(&ctx->ac, new_num_es_threads, ngg_get_prim_cnt(ctx)),
+                             LLVMBuildMul(builder, get_wave_id_in_tg(ctx),
+                                          LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0), ""),
+                             "");
+   ac_build_ifcc(&ctx->ac, kill_wave, 19202);
+   {
+      /* If we are killing wave 0, send the message that there are no primitives
+       * in this threadgroup.
+       */
+      ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx), ctx->ac.i32_0, ctx->ac.i32_0);
+      ac_build_s_endpgm(&ctx->ac);
+   }
+   ac_build_endif(&ctx->ac, 19202);
+   ac_build_s_barrier(&ctx->ac);
+
+   /* Send the final vertex and primitive counts. */
+   ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx), new_num_es_threads,
+                                 ngg_get_prim_cnt(ctx));
+
+   /* Update thread counts in SGPRs. */
+   LLVMValueRef new_gs_tg_info = ac_get_arg(&ctx->ac, ctx->gs_tg_info);
+   LLVMValueRef new_merged_wave_info = ac_get_arg(&ctx->ac, ctx->merged_wave_info);
+
+   /* This also converts the thread count from the total count to the per-wave count. */
+   update_thread_counts(ctx, &new_num_es_threads, &new_gs_tg_info, 9, 12, &new_merged_wave_info, 8,
+                        0);
+
+   /* Update vertex indices in VGPR0 (same format as NGG passthrough). */
+   LLVMValueRef new_vgpr0 = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
+
+   /* Set the null flag at the beginning (culled), and then
+    * overwrite it for accepted primitives.
+    */
+   LLVMBuildStore(builder, LLVMConstInt(ctx->ac.i32, 1u << 31, 0), new_vgpr0);
+
+   /* Get vertex indices after vertex compaction. */
+   ac_build_ifcc(&ctx->ac, LLVMBuildTrunc(builder, gs_accepted, ctx->ac.i1, ""), 16011);
+   {
+      struct ac_ngg_prim prim = {};
+      prim.num_vertices = 3;
+      prim.isnull = ctx->ac.i1false;
+
+      for (unsigned vtx = 0; vtx < 3; vtx++) {
+         prim.index[vtx] = LLVMBuildLoad(
+            builder, si_build_gep_i8(ctx, gs_vtxptr[vtx], lds_byte1_new_thread_id), "");
+         prim.index[vtx] = LLVMBuildZExt(builder, prim.index[vtx], ctx->ac.i32, "");
+         prim.edgeflag[vtx] = ngg_get_initial_edgeflag(ctx, vtx);
+      }
+
+      /* Set the new GS input VGPR. */
+      LLVMBuildStore(builder, ac_pack_prim_export(&ctx->ac, &prim), new_vgpr0);
+   }
+   ac_build_endif(&ctx->ac, 16011);
+
+   if (gfx10_ngg_export_prim_early(shader))
+      gfx10_ngg_build_export_prim(ctx, NULL, LLVMBuildLoad(builder, new_vgpr0, ""));
+
+   /* Set the new ES input VGPRs. */
+   LLVMValueRef es_data[4];
+   LLVMValueRef old_thread_id = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
+
+   for (unsigned i = 0; i < 4; i++)
+      es_data[i] = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
+
+   ac_build_ifcc(&ctx->ac, LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, tid, new_num_es_threads, ""),
+                 16012);
+   {
+      LLVMValueRef old_id, old_es_vtxptr, tmp;
+
+      /* Load ES input VGPRs from the ES thread before compaction. */
+      old_id = LLVMBuildLoad(builder, si_build_gep_i8(ctx, es_vtxptr, lds_byte0_old_thread_id), "");
+      old_id = LLVMBuildZExt(builder, old_id, ctx->ac.i32, "");
+
+      LLVMBuildStore(builder, old_id, old_thread_id);
+      old_es_vtxptr = ngg_nogs_vertex_ptr(ctx, old_id);
+
+      for (unsigned i = 0; i < 2; i++) {
+         tmp = LLVMBuildLoad(
+            builder,
+            ac_build_gep0(&ctx->ac, old_es_vtxptr, LLVMConstInt(ctx->ac.i32, lds_vertex_id + i, 0)),
+            "");
+         LLVMBuildStore(builder, tmp, es_data[i]);
+      }
+
+      if (ctx->type == PIPE_SHADER_TESS_EVAL) {
+         tmp = LLVMBuildLoad(builder,
+                             si_build_gep_i8(ctx, old_es_vtxptr, lds_byte2_tes_rel_patch_id), "");
+         tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
+         LLVMBuildStore(builder, tmp, es_data[2]);
+
+         if (uses_tes_prim_id) {
+            tmp = LLVMBuildLoad(builder,
+                                ac_build_gep0(&ctx->ac, old_es_vtxptr,
+                                              LLVMConstInt(ctx->ac.i32, lds_tes_patch_id, 0)),
+                                "");
+            LLVMBuildStore(builder, tmp, es_data[3]);
+         }
+      }
+   }
+   ac_build_endif(&ctx->ac, 16012);
+
+   /* Return values for the main function. */
+   LLVMValueRef ret = ctx->return_value;
+   LLVMValueRef val;
+
+   ret = LLVMBuildInsertValue(ctx->ac.builder, ret, new_gs_tg_info, 2, "");
+   ret = LLVMBuildInsertValue(ctx->ac.builder, ret, new_merged_wave_info, 3, "");
+   if (ctx->type == PIPE_SHADER_TESS_EVAL)
+      ret = si_insert_input_ret(ctx, ret, ctx->tcs_offchip_offset, 4);
+
+   ret = si_insert_input_ptr(ctx, ret, ctx->rw_buffers, 8 + SI_SGPR_RW_BUFFERS);
+   ret = si_insert_input_ptr(ctx, ret, ctx->bindless_samplers_and_images,
+                             8 + SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES);
+   ret = si_insert_input_ptr(ctx, ret, ctx->const_and_shader_buffers,
+                             8 + SI_SGPR_CONST_AND_SHADER_BUFFERS);
+   ret = si_insert_input_ptr(ctx, ret, ctx->samplers_and_images, 8 + SI_SGPR_SAMPLERS_AND_IMAGES);
+   ret = si_insert_input_ptr(ctx, ret, ctx->vs_state_bits, 8 + SI_SGPR_VS_STATE_BITS);
+
+   if (ctx->type == PIPE_SHADER_VERTEX) {
+      ret = si_insert_input_ptr(ctx, ret, ctx->args.base_vertex, 8 + SI_SGPR_BASE_VERTEX);
+      ret = si_insert_input_ptr(ctx, ret, ctx->args.start_instance, 8 + SI_SGPR_START_INSTANCE);
+      ret = si_insert_input_ptr(ctx, ret, ctx->args.draw_id, 8 + SI_SGPR_DRAWID);
+      ret = si_insert_input_ptr(ctx, ret, ctx->vertex_buffers, 8 + SI_VS_NUM_USER_SGPR);
+
+      for (unsigned i = 0; i < shader->selector->num_vbos_in_user_sgprs; i++) {
+         ret = si_insert_input_v4i32(ctx, ret, ctx->vb_descriptors[i],
+                                     8 + SI_SGPR_VS_VB_DESCRIPTOR_FIRST + i * 4);
+      }
+   } else {
+      assert(ctx->type == PIPE_SHADER_TESS_EVAL);
+      ret = si_insert_input_ptr(ctx, ret, ctx->tcs_offchip_layout, 8 + SI_SGPR_TES_OFFCHIP_LAYOUT);
+      ret = si_insert_input_ptr(ctx, ret, ctx->tes_offchip_addr, 8 + SI_SGPR_TES_OFFCHIP_ADDR);
+   }
+
+   unsigned vgpr;
+   if (ctx->type == PIPE_SHADER_VERTEX) {
+      if (shader->selector->num_vbos_in_user_sgprs) {
+         vgpr = 8 + SI_SGPR_VS_VB_DESCRIPTOR_FIRST + shader->selector->num_vbos_in_user_sgprs * 4;
+      } else {
+         vgpr = 8 + GFX9_VSGS_NUM_USER_SGPR + 1;
+      }
+   } else {
+      vgpr = 8 + GFX9_TESGS_NUM_USER_SGPR;
+   }
+
+   val = LLVMBuildLoad(builder, new_vgpr0, "");
+   ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++, "");
+   vgpr++; /* gs_vtx23_offset */
+
+   ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_prim_id, vgpr++);
+   ret = si_insert_input_ret_float(ctx, ret, ctx->args.gs_invocation_id, vgpr++);
+   vgpr++; /* gs_vtx45_offset */
+
+   if (ctx->type == PIPE_SHADER_VERTEX) {
+      val = LLVMBuildLoad(builder, es_data[0], "");
+      ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++,
+                                 ""); /* VGPR5 - VertexID */
+      vgpr += 2;
+      if (uses_instance_id) {
+         val = LLVMBuildLoad(builder, es_data[1], "");
+         ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++,
+                                    ""); /* VGPR8 - InstanceID */
+      } else {
+         vgpr++;
+      }
+   } else {
+      assert(ctx->type == PIPE_SHADER_TESS_EVAL);
+      unsigned num_vgprs = uses_tes_prim_id ? 4 : 3;
+      for (unsigned i = 0; i < num_vgprs; i++) {
+         val = LLVMBuildLoad(builder, es_data[i], "");
+         ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++, "");
+      }
+      if (num_vgprs == 3)
+         vgpr++;
+   }
+   /* Return the old thread ID. */
+   val = LLVMBuildLoad(builder, old_thread_id, "");
+   ret = LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, val), vgpr++, "");
+
+   /* These two also use LDS. */
+   if (sel->info.writes_edgeflag ||
+       (ctx->type == PIPE_SHADER_VERTEX && shader->key.mono.u.vs_export_prim_id))
+      ac_build_s_barrier(&ctx->ac);
+
+   ctx->return_value = ret;
 }
 
 /**
  * Emit the epilogue of an API VS or TES shader compiled as ESGS shader.
  */
-void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi,
-                            unsigned max_outputs,
-                            LLVMValueRef *addrs)
+void gfx10_emit_ngg_epilogue(struct ac_shader_abi *abi, unsigned max_outputs, LLVMValueRef *addrs)
 {
-       struct si_shader_context *ctx = si_shader_context_from_abi(abi);
-       struct si_shader_selector *sel = ctx->shader->selector;
-       struct si_shader_info *info = &sel->info;
-       struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
-       LLVMBuilderRef builder = ctx->ac.builder;
-       LLVMValueRef tmp, tmp2;
-
-       assert(!ctx->shader->is_gs_copy_shader);
-       assert(info->num_outputs <= max_outputs);
-
-       LLVMValueRef vertex_ptr = NULL;
-
-       if (sel->so.num_outputs || sel->info.writes_edgeflag)
-               vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
-
-       for (unsigned i = 0; i < info->num_outputs; i++) {
-               outputs[i].semantic_name = info->output_semantic_name[i];
-               outputs[i].semantic_index = info->output_semantic_index[i];
-
-               for (unsigned j = 0; j < 4; j++) {
-                       outputs[i].vertex_stream[j] =
-                               (info->output_streams[i] >> (2 * j)) & 3;
-
-                       /* TODO: we may store more outputs than streamout needs,
-                        * but streamout performance isn't that important.
-                        */
-                       if (sel->so.num_outputs) {
-                               tmp = ac_build_gep0(&ctx->ac, vertex_ptr,
-                                       LLVMConstInt(ctx->ac.i32, 4 * i + j, false));
-                               tmp2 = LLVMBuildLoad(builder, addrs[4 * i + j], "");
-                               tmp2 = ac_to_integer(&ctx->ac, tmp2);
-                               LLVMBuildStore(builder, tmp2, tmp);
-                       }
-               }
-
-               /* Store the edgeflag at the end (if streamout is enabled) */
-               if (info->output_semantic_name[i] == TGSI_SEMANTIC_EDGEFLAG &&
-                   sel->info.writes_edgeflag) {
-                       LLVMValueRef edgeflag = LLVMBuildLoad(builder, addrs[4 * i], "");
-                       /* The output is a float, but the hw expects a 1-bit integer. */
-                       edgeflag = LLVMBuildFPToUI(ctx->ac.builder, edgeflag, ctx->ac.i32, "");
-                       edgeflag = ac_build_umin(&ctx->ac, edgeflag, ctx->ac.i32_1);
-
-                       tmp = LLVMConstInt(ctx->ac.i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
-                       tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
-                       LLVMBuildStore(builder, edgeflag, tmp);
-               }
-       }
-
-       bool unterminated_es_if_block =
-               !sel->so.num_outputs &&
-               !sel->info.writes_edgeflag &&
-               !ctx->screen->use_ngg_streamout && /* no query buffer */
-               (ctx->type != PIPE_SHADER_VERTEX ||
-                !ctx->shader->key.mono.u.vs_export_prim_id);
-
-       if (!unterminated_es_if_block)
-               ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
-
-       LLVMValueRef is_gs_thread = si_is_gs_thread(ctx);
-       LLVMValueRef is_es_thread = si_is_es_thread(ctx);
-       LLVMValueRef vtxindex[3];
-
-       if (ctx->shader->key.opt.ngg_culling) {
-               vtxindex[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 9);
-               vtxindex[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 10, 9);
-               vtxindex[2] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 20, 9);
-       } else {
-               vtxindex[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16);
-               vtxindex[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16);
-               vtxindex[2] = si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16);
-       }
-
-       /* Determine the number of vertices per primitive. */
-       unsigned num_vertices;
-       LLVMValueRef num_vertices_val = ngg_get_vertices_per_prim(ctx, &num_vertices);
-
-       /* Streamout */
-       LLVMValueRef emitted_prims = NULL;
-
-       if (sel->so.num_outputs) {
-               assert(!unterminated_es_if_block);
-
-               struct ngg_streamout nggso = {};
-               nggso.num_vertices = num_vertices_val;
-               nggso.prim_enable[0] = is_gs_thread;
-
-               for (unsigned i = 0; i < num_vertices; ++i)
-                       nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
-
-               build_streamout(ctx, &nggso);
-               emitted_prims = nggso.emit[0];
-       }
-
-       LLVMValueRef user_edgeflags[3] = {};
-
-       if (sel->info.writes_edgeflag) {
-               assert(!unterminated_es_if_block);
-
-               /* Streamout already inserted the barrier, so don't insert it again. */
-               if (!sel->so.num_outputs)
-                       ac_build_s_barrier(&ctx->ac);
-
-               ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
-               /* Load edge flags from ES threads and store them into VGPRs in GS threads. */
-               for (unsigned i = 0; i < num_vertices; i++) {
-                       tmp = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
-                       tmp2 = LLVMConstInt(ctx->ac.i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
-                       tmp = ac_build_gep0(&ctx->ac, tmp, tmp2);
-                       tmp = LLVMBuildLoad(builder, tmp, "");
-                       tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
-
-                       user_edgeflags[i] = ac_build_alloca_undef(&ctx->ac, ctx->ac.i1, "");
-                       LLVMBuildStore(builder, tmp, user_edgeflags[i]);
-               }
-               ac_build_endif(&ctx->ac, 5400);
-       }
-
-       /* Copy Primitive IDs from GS threads to the LDS address corresponding
-        * to the ES thread of the provoking vertex.
-        */
-       if (ctx->type == PIPE_SHADER_VERTEX &&
-           ctx->shader->key.mono.u.vs_export_prim_id) {
-               assert(!unterminated_es_if_block);
-
-               /* Streamout and edge flags use LDS. Make it idle, so that we can reuse it. */
-               if (sel->so.num_outputs || sel->info.writes_edgeflag)
-                       ac_build_s_barrier(&ctx->ac);
-
-               ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
-               /* Extract the PROVOKING_VTX_INDEX field. */
-               LLVMValueRef provoking_vtx_in_prim =
-                       si_unpack_param(ctx, ctx->vs_state_bits, 4, 2);
-
-               /* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
-               LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
-               LLVMValueRef provoking_vtx_index =
-                       LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");
-               LLVMValueRef vertex_ptr = ngg_nogs_vertex_ptr(ctx, provoking_vtx_index);
-
-               LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.gs_prim_id),
-                              ac_build_gep0(&ctx->ac, vertex_ptr, ctx->ac.i32_0));
-               ac_build_endif(&ctx->ac, 5400);
-       }
-
-       /* Update query buffer */
-       if (ctx->screen->use_ngg_streamout &&
-           !info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
-               assert(!unterminated_es_if_block);
-
-               tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
-               tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
-               ac_build_ifcc(&ctx->ac, tmp, 5029); /* if (STREAMOUT_QUERY_ENABLED) */
-               tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
-               ac_build_ifcc(&ctx->ac, tmp, 5030);
-               tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
-                                   sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
-               ac_build_ifcc(&ctx->ac, tmp, 5031);
-               {
-                       LLVMValueRef args[] = {
-                               ngg_get_prim_cnt(ctx),
-                               ngg_get_query_buf(ctx),
-                               LLVMConstInt(ctx->ac.i32, 16, false), /* offset of stream[0].generated_primitives */
-                               ctx->ac.i32_0, /* soffset */
-                               ctx->ac.i32_0, /* cachepolicy */
-                       };
-
-                       if (sel->so.num_outputs) {
-                               args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->ac.i32_1);
-                               args[2] = ac_build_writelane(&ctx->ac, args[2],
-                                               LLVMConstInt(ctx->ac.i32, 24, false), ctx->ac.i32_1);
-                       }
-
-                       /* TODO: should this be 64-bit atomics? */
-                       ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
-                                          ctx->ac.i32, args, 5, 0);
-               }
-               ac_build_endif(&ctx->ac, 5031);
-               ac_build_endif(&ctx->ac, 5030);
-               ac_build_endif(&ctx->ac, 5029);
-       }
-
-       /* Build the primitive export. */
-       if (!gfx10_ngg_export_prim_early(ctx->shader)) {
-               assert(!unterminated_es_if_block);
-               gfx10_ngg_build_export_prim(ctx, user_edgeflags, NULL);
-       }
-
-       /* Export per-vertex data (positions and parameters). */
-       if (!unterminated_es_if_block)
-               ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
-       {
-               unsigned i;
-
-               /* Unconditionally (re-)load the values for proper SSA form. */
-               for (i = 0; i < info->num_outputs; i++) {
-                       /* If the NGG cull shader part computed the position, don't
-                        * use the position from the current shader part. Instead,
-                        * load it from LDS.
-                        */
-                       if (info->output_semantic_name[i] == TGSI_SEMANTIC_POSITION &&
-                           ctx->shader->key.opt.ngg_culling) {
-                               vertex_ptr = ngg_nogs_vertex_ptr(ctx,
-                                               ac_get_arg(&ctx->ac, ctx->ngg_old_thread_id));
-
-                               for (unsigned j = 0; j < 4; j++) {
-                                       tmp = LLVMConstInt(ctx->ac.i32, lds_pos_x + j, 0);
-                                       tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
-                                       tmp = LLVMBuildLoad(builder, tmp, "");
-                                       outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
-                               }
-                       } else {
-                               for (unsigned j = 0; j < 4; j++) {
-                                       outputs[i].values[j] =
-                                               LLVMBuildLoad(builder,
-                                                             addrs[4 * i + j], "");
-                               }
-                       }
-               }
-
-               if (ctx->shader->key.mono.u.vs_export_prim_id) {
-                       outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
-                       outputs[i].semantic_index = 0;
-
-                       if (ctx->type == PIPE_SHADER_VERTEX) {
-                               /* Wait for GS stores to finish. */
-                               ac_build_s_barrier(&ctx->ac);
-
-                               tmp = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
-                               tmp = ac_build_gep0(&ctx->ac, tmp, ctx->ac.i32_0);
-                               outputs[i].values[0] = LLVMBuildLoad(builder, tmp, "");
-                       } else {
-                               assert(ctx->type == PIPE_SHADER_TESS_EVAL);
-                               outputs[i].values[0] = si_get_primitive_id(ctx, 0);
-                       }
-
-                       outputs[i].values[0] = ac_to_float(&ctx->ac, outputs[i].values[0]);
-                       for (unsigned j = 1; j < 4; j++)
-                               outputs[i].values[j] = LLVMGetUndef(ctx->ac.f32);
-
-                       memset(outputs[i].vertex_stream, 0,
-                              sizeof(outputs[i].vertex_stream));
-                       i++;
-               }
-
-               si_llvm_build_vs_exports(ctx, outputs, i);
-       }
-       ac_build_endif(&ctx->ac, 6002);
+   struct si_shader_context *ctx = si_shader_context_from_abi(abi);
+   struct si_shader_selector *sel = ctx->shader->selector;
+   struct si_shader_info *info = &sel->info;
+   struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
+   LLVMBuilderRef builder = ctx->ac.builder;
+   LLVMValueRef tmp, tmp2;
+
+   assert(!ctx->shader->is_gs_copy_shader);
+   assert(info->num_outputs <= max_outputs);
+
+   LLVMValueRef vertex_ptr = NULL;
+
+   if (sel->so.num_outputs || sel->info.writes_edgeflag)
+      vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
+
+   for (unsigned i = 0; i < info->num_outputs; i++) {
+      outputs[i].semantic_name = info->output_semantic_name[i];
+      outputs[i].semantic_index = info->output_semantic_index[i];
+
+      for (unsigned j = 0; j < 4; j++) {
+         outputs[i].vertex_stream[j] = (info->output_streams[i] >> (2 * j)) & 3;
+
+         /* TODO: we may store more outputs than streamout needs,
+          * but streamout performance isn't that important.
+          */
+         if (sel->so.num_outputs) {
+            tmp = ac_build_gep0(&ctx->ac, vertex_ptr, LLVMConstInt(ctx->ac.i32, 4 * i + j, false));
+            tmp2 = LLVMBuildLoad(builder, addrs[4 * i + j], "");
+            tmp2 = ac_to_integer(&ctx->ac, tmp2);
+            LLVMBuildStore(builder, tmp2, tmp);
+         }
+      }
+
+      /* Store the edgeflag at the end (if streamout is enabled) */
+      if (info->output_semantic_name[i] == TGSI_SEMANTIC_EDGEFLAG && sel->info.writes_edgeflag) {
+         LLVMValueRef edgeflag = LLVMBuildLoad(builder, addrs[4 * i], "");
+         /* The output is a float, but the hw expects a 1-bit integer. */
+         edgeflag = LLVMBuildFPToUI(ctx->ac.builder, edgeflag, ctx->ac.i32, "");
+         edgeflag = ac_build_umin(&ctx->ac, edgeflag, ctx->ac.i32_1);
+
+         tmp = LLVMConstInt(ctx->ac.i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
+         tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
+         LLVMBuildStore(builder, edgeflag, tmp);
+      }
+   }
+
+   bool unterminated_es_if_block =
+      !sel->so.num_outputs && !sel->info.writes_edgeflag &&
+      !ctx->screen->use_ngg_streamout && /* no query buffer */
+      (ctx->type != PIPE_SHADER_VERTEX || !ctx->shader->key.mono.u.vs_export_prim_id);
+
+   if (!unterminated_es_if_block)
+      ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
+
+   LLVMValueRef is_gs_thread = si_is_gs_thread(ctx);
+   LLVMValueRef is_es_thread = si_is_es_thread(ctx);
+   LLVMValueRef vtxindex[3];
+
+   if (ctx->shader->key.opt.ngg_culling) {
+      vtxindex[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 9);
+      vtxindex[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 10, 9);
+      vtxindex[2] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 20, 9);
+   } else {
+      vtxindex[0] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 0, 16);
+      vtxindex[1] = si_unpack_param(ctx, ctx->gs_vtx01_offset, 16, 16);
+      vtxindex[2] = si_unpack_param(ctx, ctx->gs_vtx23_offset, 0, 16);
+   }
+
+   /* Determine the number of vertices per primitive. */
+   unsigned num_vertices;
+   LLVMValueRef num_vertices_val = ngg_get_vertices_per_prim(ctx, &num_vertices);
+
+   /* Streamout */
+   LLVMValueRef emitted_prims = NULL;
+
+   if (sel->so.num_outputs) {
+      assert(!unterminated_es_if_block);
+
+      struct ngg_streamout nggso = {};
+      nggso.num_vertices = num_vertices_val;
+      nggso.prim_enable[0] = is_gs_thread;
+
+      for (unsigned i = 0; i < num_vertices; ++i)
+         nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
+
+      build_streamout(ctx, &nggso);
+      emitted_prims = nggso.emit[0];
+   }
+
+   LLVMValueRef user_edgeflags[3] = {};
+
+   if (sel->info.writes_edgeflag) {
+      assert(!unterminated_es_if_block);
+
+      /* Streamout already inserted the barrier, so don't insert it again. */
+      if (!sel->so.num_outputs)
+         ac_build_s_barrier(&ctx->ac);
+
+      ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
+      /* Load edge flags from ES threads and store them into VGPRs in GS threads. */
+      for (unsigned i = 0; i < num_vertices; i++) {
+         tmp = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
+         tmp2 = LLVMConstInt(ctx->ac.i32, ngg_nogs_vertex_size(ctx->shader) - 1, 0);
+         tmp = ac_build_gep0(&ctx->ac, tmp, tmp2);
+         tmp = LLVMBuildLoad(builder, tmp, "");
+         tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
+
+         user_edgeflags[i] = ac_build_alloca_undef(&ctx->ac, ctx->ac.i1, "");
+         LLVMBuildStore(builder, tmp, user_edgeflags[i]);
+      }
+      ac_build_endif(&ctx->ac, 5400);
+   }
+
+   /* Copy Primitive IDs from GS threads to the LDS address corresponding
+    * to the ES thread of the provoking vertex.
+    */
+   if (ctx->type == PIPE_SHADER_VERTEX && ctx->shader->key.mono.u.vs_export_prim_id) {
+      assert(!unterminated_es_if_block);
+
+      /* Streamout and edge flags use LDS. Make it idle, so that we can reuse it. */
+      if (sel->so.num_outputs || sel->info.writes_edgeflag)
+         ac_build_s_barrier(&ctx->ac);
+
+      ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
+      /* Extract the PROVOKING_VTX_INDEX field. */
+      LLVMValueRef provoking_vtx_in_prim = si_unpack_param(ctx, ctx->vs_state_bits, 4, 2);
+
+      /* provoking_vtx_index = vtxindex[provoking_vtx_in_prim]; */
+      LLVMValueRef indices = ac_build_gather_values(&ctx->ac, vtxindex, 3);
+      LLVMValueRef provoking_vtx_index =
+         LLVMBuildExtractElement(builder, indices, provoking_vtx_in_prim, "");
+      LLVMValueRef vertex_ptr = ngg_nogs_vertex_ptr(ctx, provoking_vtx_index);
+
+      LLVMBuildStore(builder, ac_get_arg(&ctx->ac, ctx->args.gs_prim_id),
+                     ac_build_gep0(&ctx->ac, vertex_ptr, ctx->ac.i32_0));
+      ac_build_endif(&ctx->ac, 5400);
+   }
+
+   /* Update query buffer */
+   if (ctx->screen->use_ngg_streamout && !info->properties[TGSI_PROPERTY_VS_BLIT_SGPRS_AMD]) {
+      assert(!unterminated_es_if_block);
+
+      tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
+      tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
+      ac_build_ifcc(&ctx->ac, tmp, 5029); /* if (STREAMOUT_QUERY_ENABLED) */
+      tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
+      ac_build_ifcc(&ctx->ac, tmp, 5030);
+      tmp = LLVMBuildICmp(builder, LLVMIntULE, ac_get_thread_id(&ctx->ac),
+                          sel->so.num_outputs ? ctx->ac.i32_1 : ctx->ac.i32_0, "");
+      ac_build_ifcc(&ctx->ac, tmp, 5031);
+      {
+         LLVMValueRef args[] = {
+            ngg_get_prim_cnt(ctx),
+            ngg_get_query_buf(ctx),
+            LLVMConstInt(ctx->ac.i32, 16, false), /* offset of stream[0].generated_primitives */
+            ctx->ac.i32_0,                        /* soffset */
+            ctx->ac.i32_0,                        /* cachepolicy */
+         };
+
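+         /* Lane 0 adds the generated primitive count at offset 16. With streamout
+          * enabled, the writelanes below make lane 1 also add the number of primitives
+          * emitted to stream 0 at offset 24.
+          */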
+         if (sel->so.num_outputs) {
+            args[0] = ac_build_writelane(&ctx->ac, args[0], emitted_prims, ctx->ac.i32_1);
+            args[2] = ac_build_writelane(&ctx->ac, args[2], LLVMConstInt(ctx->ac.i32, 24, false),
+                                         ctx->ac.i32_1);
+         }
+
+         /* TODO: should this be 64-bit atomics? */
+         ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32", ctx->ac.i32, args, 5,
+                            0);
+      }
+      ac_build_endif(&ctx->ac, 5031);
+      ac_build_endif(&ctx->ac, 5030);
+      ac_build_endif(&ctx->ac, 5029);
+   }
+
+   /* Build the primitive export. */
+   if (!gfx10_ngg_export_prim_early(ctx->shader)) {
+      assert(!unterminated_es_if_block);
+      gfx10_ngg_build_export_prim(ctx, user_edgeflags, NULL);
+   }
+
+   /* Export per-vertex data (positions and parameters). */
+   if (!unterminated_es_if_block)
+      ac_build_ifcc(&ctx->ac, is_es_thread, 6002);
+   {
+      unsigned i;
+
+      /* Unconditionally (re-)load the values for proper SSA form. */
+      for (i = 0; i < info->num_outputs; i++) {
+         /* If the NGG cull shader part computed the position, don't
+          * use the position from the current shader part. Instead,
+          * load it from LDS.
+          */
+         if (info->output_semantic_name[i] == TGSI_SEMANTIC_POSITION &&
+             ctx->shader->key.opt.ngg_culling) {
+            vertex_ptr = ngg_nogs_vertex_ptr(ctx, ac_get_arg(&ctx->ac, ctx->ngg_old_thread_id));
+
+            for (unsigned j = 0; j < 4; j++) {
+               tmp = LLVMConstInt(ctx->ac.i32, lds_pos_x + j, 0);
+               tmp = ac_build_gep0(&ctx->ac, vertex_ptr, tmp);
+               tmp = LLVMBuildLoad(builder, tmp, "");
+               outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
+            }
+         } else {
+            for (unsigned j = 0; j < 4; j++) {
+               outputs[i].values[j] = LLVMBuildLoad(builder, addrs[4 * i + j], "");
+            }
+         }
+      }
+
+      if (ctx->shader->key.mono.u.vs_export_prim_id) {
+         outputs[i].semantic_name = TGSI_SEMANTIC_PRIMID;
+         outputs[i].semantic_index = 0;
+
+         if (ctx->type == PIPE_SHADER_VERTEX) {
+            /* Wait for GS stores to finish. */
+            ac_build_s_barrier(&ctx->ac);
+
+            tmp = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
+            tmp = ac_build_gep0(&ctx->ac, tmp, ctx->ac.i32_0);
+            outputs[i].values[0] = LLVMBuildLoad(builder, tmp, "");
+         } else {
+            assert(ctx->type == PIPE_SHADER_TESS_EVAL);
+            outputs[i].values[0] = si_get_primitive_id(ctx, 0);
+         }
+
+         outputs[i].values[0] = ac_to_float(&ctx->ac, outputs[i].values[0]);
+         for (unsigned j = 1; j < 4; j++)
+            outputs[i].values[j] = LLVMGetUndef(ctx->ac.f32);
+
+         memset(outputs[i].vertex_stream, 0, sizeof(outputs[i].vertex_stream));
+         i++;
+      }
+
+      si_llvm_build_vs_exports(ctx, outputs, i);
+   }
+   ac_build_endif(&ctx->ac, 6002);
 }
 
-static LLVMValueRef
-ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
+static LLVMValueRef ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
 {
-       const struct si_shader_selector *sel = ctx->shader->selector;
-       const struct si_shader_info *info = &sel->info;
-
-       LLVMTypeRef elements[2] = {
-               LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
-               LLVMArrayType(ctx->ac.i8, 4),
-       };
-       LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
-       type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
-       return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
+   const struct si_shader_selector *sel = ctx->shader->selector;
+   const struct si_shader_info *info = &sel->info;
+
+   LLVMTypeRef elements[2] = {
+      LLVMArrayType(ctx->ac.i32, 4 * info->num_outputs),
+      LLVMArrayType(ctx->ac.i8, 4),
+   };
+   LLVMTypeRef type = LLVMStructTypeInContext(ctx->ac.context, elements, 2, false);
+   type = LLVMPointerType(LLVMArrayType(type, 0), AC_ADDR_SPACE_LDS);
+   return LLVMBuildBitCast(ctx->ac.builder, ctx->gs_ngg_emit, type, "");
 }
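
For reference, the per-vertex LDS element type built above (an LDS pointer to {[4 * num_outputs x i32], [4 x i8]})
corresponds to roughly the following plain-C layout. This is only an illustrative sketch: the real storage
lives in the LDS address space, NUM_OUTPUTS stands in for the shader's info->num_outputs, and the struct
and field names are made up for illustration.

#include <stdint.h>

#define NUM_OUTPUTS 4 /* example value; per-shader in the driver */

struct ngg_gs_vertex {
   uint32_t outputs[4 * NUM_OUTPUTS]; /* [N x i32]: x/y/z/w of every GS output */
   uint8_t primflags[4];              /* [4 x i8]: one primitive-flag byte per vertex stream */
};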
 
 /**
@@ -1536,452 +1458,424 @@ ngg_gs_get_vertex_storage(struct si_shader_context *ctx)
  *
  * \return an LDS pointer to type {[N x i32], [4 x i8]}
  */
-static LLVMValueRef
-ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
+static LLVMValueRef ngg_gs_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef vertexidx)
 {
-       struct si_shader_selector *sel = ctx->shader->selector;
-       LLVMBuilderRef builder = ctx->ac.builder;
-       LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);
-
-       /* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
-       unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
-       if (write_stride_2exp) {
-               LLVMValueRef row =
-                       LLVMBuildLShr(builder, vertexidx,
-                                     LLVMConstInt(ctx->ac.i32, 5, false), "");
-               LLVMValueRef swizzle =
-                       LLVMBuildAnd(builder, row,
-                                    LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1,
-                                                 false), "");
-               vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
-       }
-
-       return ac_build_gep0(&ctx->ac, storage, vertexidx);
+   struct si_shader_selector *sel = ctx->shader->selector;
+   LLVMBuilderRef builder = ctx->ac.builder;
+   LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);
+
+   /* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
+   unsigned write_stride_2exp = ffs(sel->gs_max_out_vertices) - 1;
+   if (write_stride_2exp) {
+      LLVMValueRef row = LLVMBuildLShr(builder, vertexidx, LLVMConstInt(ctx->ac.i32, 5, false), "");
+      LLVMValueRef swizzle = LLVMBuildAnd(
+         builder, row, LLVMConstInt(ctx->ac.i32, (1u << write_stride_2exp) - 1, false), "");
+      vertexidx = LLVMBuildXor(builder, vertexidx, swizzle, "");
+   }
+
+   return ac_build_gep0(&ctx->ac, storage, vertexidx);
 }
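
The swizzle above can be read as plain integer arithmetic. The hypothetical helper below mirrors it on a
host-side unsigned index (in the shader, vertexidx is an LLVM i32 and these operations are IR instructions):

#include <strings.h> /* ffs() */

/* Mirror of the swizzle in ngg_gs_vertex_ptr(): XOR the low bits of the
 * "row" (vertexidx / 32) into the index, so that consecutive rows start in
 * different LDS banks when gs_max_out_vertices has a power-of-two factor. */
static unsigned swizzle_vertex_index(unsigned vertexidx, unsigned gs_max_out_vertices)
{
   unsigned write_stride_2exp = ffs(gs_max_out_vertices) - 1;

   if (write_stride_2exp) {
      unsigned row = vertexidx >> 5;
      unsigned swizzle = row & ((1u << write_stride_2exp) - 1);
      vertexidx ^= swizzle;
   }
   return vertexidx;
}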
 
-static LLVMValueRef
-ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
-                      LLVMValueRef emitidx)
+static LLVMValueRef ngg_gs_emit_vertex_ptr(struct si_shader_context *ctx, LLVMValueRef gsthread,
+                                           LLVMValueRef emitidx)
 {
-       struct si_shader_selector *sel = ctx->shader->selector;
-       LLVMBuilderRef builder = ctx->ac.builder;
-       LLVMValueRef tmp;
-
-       tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
-       tmp = LLVMBuildMul(builder, tmp, gsthread, "");
-       const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
-       return ngg_gs_vertex_ptr(ctx, vertexidx);
+   struct si_shader_selector *sel = ctx->shader->selector;
+   LLVMBuilderRef builder = ctx->ac.builder;
+   LLVMValueRef tmp;
+
+   tmp = LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false);
+   tmp = LLVMBuildMul(builder, tmp, gsthread, "");
+   const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
+   return ngg_gs_vertex_ptr(ctx, vertexidx);
 }
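
In other words, the flat index is vertexidx = gsthread * gs_max_out_vertices + emitidx, which is then
bank-swizzled by ngg_gs_vertex_ptr() above before being turned into an LDS pointer.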
 
-static LLVMValueRef
-ngg_gs_get_emit_output_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
-                          unsigned out_idx)
+static LLVMValueRef ngg_gs_get_emit_output_ptr(struct si_shader_context *ctx,
+                                               LLVMValueRef vertexptr, unsigned out_idx)
 {
-       LLVMValueRef gep_idx[3] = {
-               ctx->ac.i32_0, /* implied C-style array */
-               ctx->ac.i32_0, /* first struct entry */
-               LLVMConstInt(ctx->ac.i32, out_idx, false),
-       };
-       return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
+   LLVMValueRef gep_idx[3] = {
+      ctx->ac.i32_0, /* implied C-style array */
+      ctx->ac.i32_0, /* first struct entry */
+      LLVMConstInt(ctx->ac.i32, out_idx, false),
+   };
+   return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
 }
 
-static LLVMValueRef
-ngg_gs_get_emit_primflag_ptr(struct si_shader_context *ctx, LLVMValueRef vertexptr,
-                            unsigned stream)
+static LLVMValueRef ngg_gs_get_emit_primflag_ptr(struct si_shader_context *ctx,
+                                                 LLVMValueRef vertexptr, unsigned stream)
 {
-       LLVMValueRef gep_idx[3] = {
-               ctx->ac.i32_0, /* implied C-style array */
-               ctx->ac.i32_1, /* second struct entry */
-               LLVMConstInt(ctx->ac.i32, stream, false),
-       };
-       return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
+   LLVMValueRef gep_idx[3] = {
+      ctx->ac.i32_0, /* implied C-style array */
+      ctx->ac.i32_1, /* second struct entry */
+      LLVMConstInt(ctx->ac.i32, stream, false),
+   };
+   return LLVMBuildGEP(ctx->ac.builder, vertexptr, gep_idx, 3, "");
 }
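
In terms of the layout sketch above, the GEP indices {0, 0, out_idx} in ngg_gs_get_emit_output_ptr()
address vertex->outputs[out_idx], while the {0, 1, stream} indices in ngg_gs_get_emit_primflag_ptr()
address vertex->primflags[stream].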
 
-void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx,
-                             unsigned stream,
-                             LLVMValueRef *addrs)
+void gfx10_ngg_gs_emit_vertex(struct si_shader_context *ctx, unsigned stream, LLVMValueRef *addrs)
 {
-       const struct si_shader_selector *sel = ctx->shader->selector;
-       const struct si_shader_info *info = &sel->info;
-       LLVMBuilderRef builder = ctx->ac.builder;
-       LLVMValueRef tmp;
-       const LLVMValueRef vertexidx =
-               LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
-
-       /* If this thread has already emitted the declared maximum number of
-        * vertices, skip the write: excessive vertex emissions are not
-        * supposed to have any effect.
-        */
-       const LLVMValueRef can_emit =
-               LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
-                             LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
-
-       tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
-       tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
-       LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);
-
-       ac_build_ifcc(&ctx->ac, can_emit, 9001);
-
-       const LLVMValueRef vertexptr =
-               ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
-       unsigned out_idx = 0;
-       for (unsigned i = 0; i < info->num_outputs; i++) {
-               for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
-                       if (!(info->output_usagemask[i] & (1 << chan)) ||
-                           ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
-                               continue;
-
-                       LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
-                       out_val = ac_to_integer(&ctx->ac, out_val);
-                       LLVMBuildStore(builder, out_val,
-                                      ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx));
-               }
-       }
-       assert(out_idx * 4 == sel->gsvs_vertex_size);
-
-       /* Determine and store whether this vertex completed a primitive. */
-       const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");
-
-       tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
-       const LLVMValueRef iscompleteprim =
-               LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");
-
-       /* Since the geometry shader emits triangle strips, we need to
-        * track which primitive is odd and swap vertex indices to get
-        * the correct vertex order.
-        */
-       LLVMValueRef is_odd = ctx->ac.i1false;
-       if (stream == 0 && u_vertices_per_prim(sel->gs_output_prim) == 3) {
-               tmp = LLVMBuildAnd(builder, curverts, ctx->ac.i32_1, "");
-               is_odd = LLVMBuildICmp(builder, LLVMIntEQ, tmp, ctx->ac.i32_1, "");
-       }
-
-       tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
-       LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);
-
-       /* The per-vertex primitive flag encoding:
-        *   bit 0: whether this vertex finishes a primitive
-        *   bit 1: whether the primitive is odd (if we are emitting triangle strips)
-        */
-       tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
-       tmp = LLVMBuildOr(builder, tmp,
-                         LLVMBuildShl(builder,
-                                      LLVMBuildZExt(builder, is_odd, ctx->ac.i8, ""),
-                                      ctx->ac.i8_1, ""), "");
-       LLVMBuildStore(builder, tmp, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream));
-
-       tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
-       tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
-       LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);
-
-       ac_build_endif(&ctx->ac, 9001);
+   const struct si_shader_selector *sel = ctx->shader->selector;
+   const struct si_shader_info *info = &sel->info;
+   LLVMBuilderRef builder = ctx->ac.builder;
+   LLVMValueRef tmp;
+   const LLVMValueRef vertexidx = LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
+
+   /* If this thread has already emitted the declared maximum number of
+    * vertices, skip the write: excessive vertex emissions are not
+    * supposed to have any effect.
+    */
+   const LLVMValueRef can_emit =
+      LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
+                    LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
+
+   tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
+   tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
+   LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);
+
+   ac_build_ifcc(&ctx->ac, can_emit, 9001);
+
+   const LLVMValueRef vertexptr = ngg_gs_emit_vertex_ptr(ctx, get_thread_id_in_tg(ctx), vertexidx);
+   unsigned out_idx = 0;
+   for (unsigned i = 0; i < info->num_outputs; i++) {
+      for (unsigned chan = 0; chan < 4; chan++, out_idx++) {
+         if (!(info->output_usagemask[i] & (1 << chan)) ||
+             ((info->output_streams[i] >> (2 * chan)) & 3) != stream)
+            continue;
+
+         LLVMValueRef out_val = LLVMBuildLoad(builder, addrs[4 * i + chan], "");
+         out_val = ac_to_integer(&ctx->ac, out_val);
+         LLVMBuildStore(builder, out_val, ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx));
+      }
+   }
+   assert(out_idx * 4 == sel->gsvs_vertex_size);
+
+   /* Determine and store whether this vertex completed a primitive. */
+   const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");
+
+   tmp = LLVMConstInt(ctx->ac.i32, u_vertices_per_prim(sel->gs_output_prim) - 1, false);
+   const LLVMValueRef iscompleteprim = LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");
+
+   /* Since the geometry shader emits triangle strips, we need to
+    * track which primitive is odd and swap vertex indices to get
+    * the correct vertex order.
+    */
+   LLVMValueRef is_odd = ctx->ac.i1false;
+   if (stream == 0 && u_vertices_per_prim(sel->gs_output_prim) == 3) {
+      tmp = LLVMBuildAnd(builder, curverts, ctx->ac.i32_1, "");
+      is_odd = LLVMBuildICmp(builder, LLVMIntEQ, tmp, ctx->ac.i32_1, "");
+   }
+
+   tmp = LLVMBuildAdd(builder, curverts, ctx->ac.i32_1, "");
+   LLVMBuildStore(builder, tmp, ctx->gs_curprim_verts[stream]);
+
+   /* The per-vertex primitive flag encoding:
+    *   bit 0: whether this vertex finishes a primitive
+    *   bit 1: whether the primitive is odd (if we are emitting triangle strips)
+    */
+   tmp = LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i8, "");
+   tmp = LLVMBuildOr(
+      builder, tmp,
+      LLVMBuildShl(builder, LLVMBuildZExt(builder, is_odd, ctx->ac.i8, ""), ctx->ac.i8_1, ""), "");
+   LLVMBuildStore(builder, tmp, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream));
+
+   tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
+   tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
+   LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);
+
+   ac_build_endif(&ctx->ac, 9001);
 }
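
The two flag bits stored per emitted vertex pack and unpack as in this small standalone illustration
(hypothetical helpers, not driver code):

#include <stdbool.h>
#include <stdint.h>

/* bit 0: this vertex finishes a primitive
 * bit 1: the finished primitive is odd (relevant for triangle strips) */
static uint8_t encode_primflag(bool finishes_prim, bool is_odd)
{
   return (uint8_t)((finishes_prim ? 1u : 0u) | ((is_odd ? 1u : 0u) << 1));
}

static bool primflag_finishes_prim(uint8_t flag)
{
   return flag & 1;
}

static bool primflag_is_odd(uint8_t flag)
{
   return (flag >> 1) & 1;
}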
 
 void gfx10_ngg_gs_emit_prologue(struct si_shader_context *ctx)
 {
-       /* Zero out the part of LDS scratch that is used to accumulate the
-        * per-stream generated primitive count.
-        */
-       LLVMBuilderRef builder = ctx->ac.builder;
-       LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
-       LLVMValueRef tid = get_thread_id_in_tg(ctx);
-       LLVMValueRef tmp;
-
-       tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->ac.i32, 4, false), "");
-       ac_build_ifcc(&ctx->ac, tmp, 5090);
-       {
-               LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
-               LLVMBuildStore(builder, ctx->ac.i32_0, ptr);
-       }
-       ac_build_endif(&ctx->ac, 5090);
-
-       ac_build_s_barrier(&ctx->ac);
+   /* Zero out the part of LDS scratch that is used to accumulate the
+    * per-stream generated primitive count.
+    */
+   LLVMBuilderRef builder = ctx->ac.builder;
+   LLVMValueRef scratchptr = ctx->gs_ngg_scratch;
+   LLVMValueRef tid = get_thread_id_in_tg(ctx);
+   LLVMValueRef tmp;
+
+   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, LLVMConstInt(ctx->ac.i32, 4, false), "");
+   ac_build_ifcc(&ctx->ac, tmp, 5090);
+   {
+      LLVMValueRef ptr = ac_build_gep0(&ctx->ac, scratchptr, tid);
+      LLVMBuildStore(builder, ctx->ac.i32_0, ptr);
+   }
+   ac_build_endif(&ctx->ac, 5090);
+
+   ac_build_s_barrier(&ctx->ac);
 }
 
 void gfx10_ngg_gs_emit_epilogue(struct si_shader_context *ctx)
 {
-       const struct si_shader_selector *sel = ctx->shader->selector;
-       const struct si_shader_info *info = &sel->info;
-       const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
-       LLVMBuilderRef builder = ctx->ac.builder;
-       LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
-       LLVMValueRef tmp, tmp2;
-
-       /* Zero out remaining (non-emitted) primitive flags.
-        *
-        * Note: Alternatively, we could pass the relevant gs_next_vertex to
-        *       the emit threads via LDS. This is likely worse in the expected
-        *       typical case where each GS thread emits the full set of
-        *       vertices.
-        */
-       for (unsigned stream = 0; stream < 4; ++stream) {
-               if (!info->num_stream_output_components[stream])
-                       continue;
-
-               const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);
-
-               ac_build_bgnloop(&ctx->ac, 5100);
-
-               const LLVMValueRef vertexidx =
-                       LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
-               tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
-                       LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
-               ac_build_ifcc(&ctx->ac, tmp, 5101);
-               ac_build_break(&ctx->ac);
-               ac_build_endif(&ctx->ac, 5101);
-
-               tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
-               LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);
-
-               tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
-               LLVMBuildStore(builder, i8_0, ngg_gs_get_emit_primflag_ptr(ctx, tmp, stream));
-
-               ac_build_endloop(&ctx->ac, 5100);
-       }
-
-       /* Accumulate generated primitives counts across the entire threadgroup. */
-       for (unsigned stream = 0; stream < 4; ++stream) {
-               if (!info->num_stream_output_components[stream])
-                       continue;
-
-               LLVMValueRef numprims =
-                       LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
-               numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);
-
-               tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->ac.i32_0, "");
-               ac_build_ifcc(&ctx->ac, tmp, 5105);
-               {
-                       LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
-                                          ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
-                                                        LLVMConstInt(ctx->ac.i32, stream, false)),
-                                          numprims, LLVMAtomicOrderingMonotonic, false);
-               }
-               ac_build_endif(&ctx->ac, 5105);
-       }
-
-       ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
-
-       ac_build_s_barrier(&ctx->ac);
-
-       const LLVMValueRef tid = get_thread_id_in_tg(ctx);
-       LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);
-
-       /* Streamout */
-       if (sel->so.num_outputs) {
-               struct ngg_streamout nggso = {};
-
-               nggso.num_vertices = LLVMConstInt(ctx->ac.i32, verts_per_prim, false);
-
-               LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
-               for (unsigned stream = 0; stream < 4; ++stream) {
-                       if (!info->num_stream_output_components[stream])
-                               continue;
-
-                       tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream), "");
-                       tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
-                       tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
-                       nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
-               }
-
-               for (unsigned i = 0; i < verts_per_prim; ++i) {
-                       tmp = LLVMBuildSub(builder, tid,
-                                          LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
-                       tmp = ngg_gs_vertex_ptr(ctx, tmp);
-                       nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->ac.i32_0);
-               }
-
-               build_streamout(ctx, &nggso);
-       }
-
-       /* Write shader query data. */
-       if (ctx->screen->use_ngg_streamout) {
-               tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
-               tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
-               ac_build_ifcc(&ctx->ac, tmp, 5109); /* if (STREAMOUT_QUERY_ENABLED) */
-               unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
-               tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
-                                   LLVMConstInt(ctx->ac.i32, num_query_comps, false), "");
-               ac_build_ifcc(&ctx->ac, tmp, 5110);
-               {
-                       LLVMValueRef offset;
-                       tmp = tid;
-                       if (sel->so.num_outputs)
-                               tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->ac.i32, 3, false), "");
-                       offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->ac.i32, 32, false), "");
-                       if (sel->so.num_outputs) {
-                               tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->ac.i32, 2, false), "");
-                               tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->ac.i32, 8, false), "");
-                               offset = LLVMBuildAdd(builder, offset, tmp, "");
-                       }
-
-                       tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
-                       LLVMValueRef args[] = {
-                               tmp,
-                               ngg_get_query_buf(ctx),
-                               offset,
-                               LLVMConstInt(ctx->ac.i32, 16, false), /* soffset */
-                               ctx->ac.i32_0, /* cachepolicy */
-                       };
-                       ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32",
-                                          ctx->ac.i32, args, 5, 0);
-               }
-               ac_build_endif(&ctx->ac, 5110);
-               ac_build_endif(&ctx->ac, 5109);
-       }
-
-       /* Determine vertex liveness. */
-       LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");
-
-       tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
-       ac_build_ifcc(&ctx->ac, tmp, 5120);
-       {
-               for (unsigned i = 0; i < verts_per_prim; ++i) {
-                       const LLVMValueRef primidx =
-                               LLVMBuildAdd(builder, tid,
-                                            LLVMConstInt(ctx->ac.i32, i, false), "");
-
-                       if (i > 0) {
-                               tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
-                               ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
-                       }
-
-                       /* Load primitive liveness */
-                       tmp = ngg_gs_vertex_ptr(ctx, primidx);
-                       tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
-                       const LLVMValueRef primlive =
-                               LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
-
-                       tmp = LLVMBuildLoad(builder, vertliveptr, "");
-                       tmp = LLVMBuildOr(builder, tmp, primlive, ""),
-                       LLVMBuildStore(builder, tmp, vertliveptr);
-
-                       if (i > 0)
-                               ac_build_endif(&ctx->ac, 5121 + i);
-               }
-       }
-       ac_build_endif(&ctx->ac, 5120);
-
-       /* Inclusive scan addition across the current wave. */
-       LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
-       struct ac_wg_scan vertlive_scan = {};
-       vertlive_scan.op = nir_op_iadd;
-       vertlive_scan.enable_reduce = true;
-       vertlive_scan.enable_exclusive = true;
-       vertlive_scan.src = vertlive;
-       vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->ac.i32_0);
-       vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
-       vertlive_scan.numwaves = get_tgsize(ctx);
-       vertlive_scan.maxwaves = 8;
-
-       ac_build_wg_scan(&ctx->ac, &vertlive_scan);
-
-       /* Skip all exports (including index exports) when possible. At least on
-        * early gfx10 revisions this is also to avoid hangs.
-        */
-       LLVMValueRef have_exports =
-               LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
-       num_emit_threads =
-               LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");
-
-       /* Allocate export space. Send this message as early as possible, to
-        * hide the latency of the SQ <-> SPI roundtrip.
-        *
-        * Note: We could consider compacting primitives for export as well.
-        *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
-        *       prim data per clock and skips null primitives at no additional
-        *       cost. So compacting primitives can only be beneficial when
-        *       there are 4 or more contiguous null primitives in the export
-        *       (in the common case of single-dword prim exports).
-        */
-       ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx),
-                                     vertlive_scan.result_reduce, num_emit_threads);
-
-       /* Setup the reverse vertex compaction permutation. We re-use stream 1
-        * of the primitive liveness flags, relying on the fact that each
-        * threadgroup can have at most 256 threads. */
-       ac_build_ifcc(&ctx->ac, vertlive, 5130);
-       {
-               tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
-               tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
-               LLVMBuildStore(builder, tmp2, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1));
-       }
-       ac_build_endif(&ctx->ac, 5130);
-
-       ac_build_s_barrier(&ctx->ac);
-
-       /* Export primitive data */
-       tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
-       ac_build_ifcc(&ctx->ac, tmp, 5140);
-       {
-               LLVMValueRef flags;
-               struct ac_ngg_prim prim = {};
-               prim.num_vertices = verts_per_prim;
-
-               tmp = ngg_gs_vertex_ptr(ctx, tid);
-               flags = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
-               prim.isnull = LLVMBuildNot(builder, LLVMBuildTrunc(builder, flags, ctx->ac.i1, ""), "");
-
-               for (unsigned i = 0; i < verts_per_prim; ++i) {
-                       prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
-                               LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
-                       prim.edgeflag[i] = ctx->ac.i1false;
-               }
-
-               /* Geometry shaders output triangle strips, but NGG expects triangles. */
-               if (verts_per_prim == 3) {
-                       LLVMValueRef is_odd = LLVMBuildLShr(builder, flags, ctx->ac.i8_1, "");
-                       is_odd = LLVMBuildTrunc(builder, is_odd, ctx->ac.i1, "");
-                       LLVMValueRef flatshade_first =
-                               LLVMBuildICmp(builder, LLVMIntEQ,
-                                             si_unpack_param(ctx, ctx->vs_state_bits, 4, 2),
-                                             ctx->ac.i32_0, "");
-
-                       ac_build_triangle_strip_indices_to_triangle(&ctx->ac, is_odd,
-                                                                   flatshade_first,
-                                                                   prim.index);
-               }
-
-               ac_build_export_prim(&ctx->ac, &prim);
-       }
-       ac_build_endif(&ctx->ac, 5140);
-
-       /* Export position and parameter data */
-       tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
-       ac_build_ifcc(&ctx->ac, tmp, 5145);
-       {
-               struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
-
-               tmp = ngg_gs_vertex_ptr(ctx, tid);
-               tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1), "");
-               tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
-               const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);
-
-               unsigned out_idx = 0;
-               for (unsigned i = 0; i < info->num_outputs; i++) {
-                       outputs[i].semantic_name = info->output_semantic_name[i];
-                       outputs[i].semantic_index = info->output_semantic_index[i];
-
-                       for (unsigned j = 0; j < 4; j++, out_idx++) {
-                               tmp = ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx);
-                               tmp = LLVMBuildLoad(builder, tmp, "");
-                               outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
-                               outputs[i].vertex_stream[j] =
-                                       (info->output_streams[i] >> (2 * j)) & 3;
-                       }
-               }
-
-               si_llvm_build_vs_exports(ctx, outputs, info->num_outputs);
-       }
-       ac_build_endif(&ctx->ac, 5145);
+   const struct si_shader_selector *sel = ctx->shader->selector;
+   const struct si_shader_info *info = &sel->info;
+   const unsigned verts_per_prim = u_vertices_per_prim(sel->gs_output_prim);
+   LLVMBuilderRef builder = ctx->ac.builder;
+   LLVMValueRef i8_0 = LLVMConstInt(ctx->ac.i8, 0, false);
+   LLVMValueRef tmp, tmp2;
+
+   /* Zero out remaining (non-emitted) primitive flags.
+    *
+    * Note: Alternatively, we could pass the relevant gs_next_vertex to
+    *       the emit threads via LDS. This is likely worse in the expected
+    *       typical case where each GS thread emits the full set of
+    *       vertices.
+    */
+   for (unsigned stream = 0; stream < 4; ++stream) {
+      if (!info->num_stream_output_components[stream])
+         continue;
+
+      const LLVMValueRef gsthread = get_thread_id_in_tg(ctx);
+
+      ac_build_bgnloop(&ctx->ac, 5100);
+
+      const LLVMValueRef vertexidx = LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
+      tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
+                          LLVMConstInt(ctx->ac.i32, sel->gs_max_out_vertices, false), "");
+      ac_build_ifcc(&ctx->ac, tmp, 5101);
+      ac_build_break(&ctx->ac);
+      ac_build_endif(&ctx->ac, 5101);
+
+      tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
+      LLVMBuildStore(builder, tmp, ctx->gs_next_vertex[stream]);
+
+      tmp = ngg_gs_emit_vertex_ptr(ctx, gsthread, vertexidx);
+      LLVMBuildStore(builder, i8_0, ngg_gs_get_emit_primflag_ptr(ctx, tmp, stream));
+
+      ac_build_endloop(&ctx->ac, 5100);
+   }
+
+   /* Accumulate generated primitives counts across the entire threadgroup. */
+   for (unsigned stream = 0; stream < 4; ++stream) {
+      if (!info->num_stream_output_components[stream])
+         continue;
+
+      LLVMValueRef numprims = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
+      numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);
+
+      tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->ac.i32_0, "");
+      ac_build_ifcc(&ctx->ac, tmp, 5105);
+      {
+         LLVMBuildAtomicRMW(
+            builder, LLVMAtomicRMWBinOpAdd,
+            ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, LLVMConstInt(ctx->ac.i32, stream, false)),
+            numprims, LLVMAtomicOrderingMonotonic, false);
+      }
+      ac_build_endif(&ctx->ac, 5105);
+   }
+
+   ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
+
+   ac_build_s_barrier(&ctx->ac);
+
+   const LLVMValueRef tid = get_thread_id_in_tg(ctx);
+   LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);
+
+   /* Streamout */
+   if (sel->so.num_outputs) {
+      struct ngg_streamout nggso = {};
+
+      nggso.num_vertices = LLVMConstInt(ctx->ac.i32, verts_per_prim, false);
+
+      LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
+      for (unsigned stream = 0; stream < 4; ++stream) {
+         if (!info->num_stream_output_components[stream])
+            continue;
+
+         tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, vertexptr, stream), "");
+         tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
+         tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
+         nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
+      }
+
+      for (unsigned i = 0; i < verts_per_prim; ++i) {
+         tmp = LLVMBuildSub(builder, tid, LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false),
+                            "");
+         tmp = ngg_gs_vertex_ptr(ctx, tmp);
+         nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->ac.i32_0);
+      }
+
+      build_streamout(ctx, &nggso);
+   }
+
+   /* Write shader query data. */
+   if (ctx->screen->use_ngg_streamout) {
+      tmp = si_unpack_param(ctx, ctx->vs_state_bits, 6, 1);
+      tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
+      ac_build_ifcc(&ctx->ac, tmp, 5109); /* if (STREAMOUT_QUERY_ENABLED) */
+      unsigned num_query_comps = sel->so.num_outputs ? 8 : 4;
+      tmp = LLVMBuildICmp(builder, LLVMIntULT, tid,
+                          LLVMConstInt(ctx->ac.i32, num_query_comps, false), "");
+      ac_build_ifcc(&ctx->ac, tmp, 5110);
+      {
+         LLVMValueRef offset;
+         tmp = tid;
+         if (sel->so.num_outputs)
+            tmp = LLVMBuildAnd(builder, tmp, LLVMConstInt(ctx->ac.i32, 3, false), "");
+         offset = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->ac.i32, 32, false), "");
+         if (sel->so.num_outputs) {
+            tmp = LLVMBuildLShr(builder, tid, LLVMConstInt(ctx->ac.i32, 2, false), "");
+            tmp = LLVMBuildNUWMul(builder, tmp, LLVMConstInt(ctx->ac.i32, 8, false), "");
+            offset = LLVMBuildAdd(builder, offset, tmp, "");
+         }
+
+         tmp = LLVMBuildLoad(builder, ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid), "");
+         LLVMValueRef args[] = {
+            tmp,           ngg_get_query_buf(ctx),
+            offset,        LLVMConstInt(ctx->ac.i32, 16, false), /* soffset */
+            ctx->ac.i32_0,                                       /* cachepolicy */
+         };
+         ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.raw.buffer.atomic.add.i32", ctx->ac.i32, args, 5,
+                            0);
+      }
+      ac_build_endif(&ctx->ac, 5110);
+      ac_build_endif(&ctx->ac, 5109);
+   }
+
+   /* Determine vertex liveness. */
+   LLVMValueRef vertliveptr = ac_build_alloca(&ctx->ac, ctx->ac.i1, "vertexlive");
+
+   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
+   ac_build_ifcc(&ctx->ac, tmp, 5120);
+   {
+      for (unsigned i = 0; i < verts_per_prim; ++i) {
+         const LLVMValueRef primidx =
+            LLVMBuildAdd(builder, tid, LLVMConstInt(ctx->ac.i32, i, false), "");
+
+         if (i > 0) {
+            tmp = LLVMBuildICmp(builder, LLVMIntULT, primidx, num_emit_threads, "");
+            ac_build_ifcc(&ctx->ac, tmp, 5121 + i);
+         }
+
+         /* Load primitive liveness */
+         tmp = ngg_gs_vertex_ptr(ctx, primidx);
+         tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
+         const LLVMValueRef primlive = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
+
+         tmp = LLVMBuildLoad(builder, vertliveptr, "");
+         tmp = LLVMBuildOr(builder, tmp, primlive, "");
+         LLVMBuildStore(builder, tmp, vertliveptr);
+
+         if (i > 0)
+            ac_build_endif(&ctx->ac, 5121 + i);
+      }
+   }
+   ac_build_endif(&ctx->ac, 5120);
+
+   /* Inclusive scan addition across the current wave. */
+   LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
+   struct ac_wg_scan vertlive_scan = {};
+   vertlive_scan.op = nir_op_iadd;
+   vertlive_scan.enable_reduce = true;
+   vertlive_scan.enable_exclusive = true;
+   vertlive_scan.src = vertlive;
+   vertlive_scan.scratch = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ctx->ac.i32_0);
+   vertlive_scan.waveidx = get_wave_id_in_tg(ctx);
+   vertlive_scan.numwaves = get_tgsize(ctx);
+   vertlive_scan.maxwaves = 8;
+
+   ac_build_wg_scan(&ctx->ac, &vertlive_scan);
+
+   /* Skip all exports (including index exports) when possible. At least on
+    * early gfx10 revisions this is also needed to avoid hangs.
+    */
+   LLVMValueRef have_exports =
+      LLVMBuildICmp(builder, LLVMIntNE, vertlive_scan.result_reduce, ctx->ac.i32_0, "");
+   num_emit_threads = LLVMBuildSelect(builder, have_exports, num_emit_threads, ctx->ac.i32_0, "");
+
+   /* Allocate export space. Send this message as early as possible, to
+    * hide the latency of the SQ <-> SPI roundtrip.
+    *
+    * Note: We could consider compacting primitives for export as well.
+    *       PA processes 1 non-null prim / clock, but it fetches 4 DW of
+    *       prim data per clock and skips null primitives at no additional
+    *       cost. So compacting primitives can only be beneficial when
+    *       there are 4 or more contiguous null primitives in the export
+    *       (in the common case of single-dword prim exports).
+    */
+   ac_build_sendmsg_gs_alloc_req(&ctx->ac, get_wave_id_in_tg(ctx), vertlive_scan.result_reduce,
+                                 num_emit_threads);
+
+   /* Set up the reverse vertex compaction permutation. We re-use stream 1
+    * of the primitive liveness flags, relying on the fact that each
+    * threadgroup can have at most 256 threads. */
+   ac_build_ifcc(&ctx->ac, vertlive, 5130);
+   {
+      tmp = ngg_gs_vertex_ptr(ctx, vertlive_scan.result_exclusive);
+      tmp2 = LLVMBuildTrunc(builder, tid, ctx->ac.i8, "");
+      LLVMBuildStore(builder, tmp2, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1));
+   }
+   ac_build_endif(&ctx->ac, 5130);
+
+   ac_build_s_barrier(&ctx->ac);
+
+   /* Export primitive data */
+   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
+   ac_build_ifcc(&ctx->ac, tmp, 5140);
+   {
+      LLVMValueRef flags;
+      struct ac_ngg_prim prim = {};
+      prim.num_vertices = verts_per_prim;
+
+      tmp = ngg_gs_vertex_ptr(ctx, tid);
+      flags = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 0), "");
+      prim.isnull = LLVMBuildNot(builder, LLVMBuildTrunc(builder, flags, ctx->ac.i1, ""), "");
+
+      for (unsigned i = 0; i < verts_per_prim; ++i) {
+         prim.index[i] = LLVMBuildSub(builder, vertlive_scan.result_exclusive,
+                                      LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
+         prim.edgeflag[i] = ctx->ac.i1false;
+      }
+
+      /* Geometry shaders output triangle strips, but NGG expects triangles. */
+      if (verts_per_prim == 3) {
+         LLVMValueRef is_odd = LLVMBuildLShr(builder, flags, ctx->ac.i8_1, "");
+         is_odd = LLVMBuildTrunc(builder, is_odd, ctx->ac.i1, "");
+         LLVMValueRef flatshade_first = LLVMBuildICmp(
+            builder, LLVMIntEQ, si_unpack_param(ctx, ctx->vs_state_bits, 4, 2), ctx->ac.i32_0, "");
+
+         ac_build_triangle_strip_indices_to_triangle(&ctx->ac, is_odd, flatshade_first, prim.index);
+      }
+
+      ac_build_export_prim(&ctx->ac, &prim);
+   }
+   ac_build_endif(&ctx->ac, 5140);
+
+   /* Export position and parameter data */
+   tmp = LLVMBuildICmp(builder, LLVMIntULT, tid, vertlive_scan.result_reduce, "");
+   ac_build_ifcc(&ctx->ac, tmp, 5145);
+   {
+      struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
+
+      tmp = ngg_gs_vertex_ptr(ctx, tid);
+      tmp = LLVMBuildLoad(builder, ngg_gs_get_emit_primflag_ptr(ctx, tmp, 1), "");
+      tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
+      const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);
+
+      unsigned out_idx = 0;
+      for (unsigned i = 0; i < info->num_outputs; i++) {
+         outputs[i].semantic_name = info->output_semantic_name[i];
+         outputs[i].semantic_index = info->output_semantic_index[i];
+
+         for (unsigned j = 0; j < 4; j++, out_idx++) {
+            tmp = ngg_gs_get_emit_output_ptr(ctx, vertexptr, out_idx);
+            tmp = LLVMBuildLoad(builder, tmp, "");
+            outputs[i].values[j] = ac_to_float(&ctx->ac, tmp);
+            outputs[i].vertex_stream[j] = (info->output_streams[i] >> (2 * j)) & 3;
+         }
+      }
+
+      si_llvm_build_vs_exports(ctx, outputs, info->num_outputs);
+   }
+   ac_build_endif(&ctx->ac, 5145);
 }
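
The liveness scan and compaction above (exclusive prefix sum of vertlive, then storing the original
thread id in the stream-1 flag byte of the compacted slot) behaves like the sequential sketch below.
The helper is hypothetical and assumes plain host-side arrays; the "at most 256 threads per threadgroup"
property is what lets the id fit in a byte.

#include <stdbool.h>
#include <stdint.h>

/* result_exclusive for thread t is the number of live vertices before t;
 * remap[result_exclusive] = t lets the export threads find the vertex they
 * must re-load. Returns the total live count (result_reduce). */
static unsigned compact_live_vertices(const bool *vertlive, uint8_t *remap, unsigned num_threads)
{
   unsigned exclusive = 0;

   for (unsigned t = 0; t < num_threads; t++) {
      if (vertlive[t])
         remap[exclusive++] = (uint8_t)t;
   }
   return exclusive;
}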
 
 static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
-                                    unsigned min_verts_per_prim, bool use_adjacency)
+                                     unsigned min_verts_per_prim, bool use_adjacency)
 {
-       unsigned max_reuse = max_esverts - min_verts_per_prim;
-       if (use_adjacency)
-               max_reuse /= 2;
-       *max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
+   unsigned max_reuse = max_esverts - min_verts_per_prim;
+   if (use_adjacency)
+      max_reuse /= 2;
+   *max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
 }
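
As a quick numeric check of the clamp: with max_esverts = 128 and triangles (min_verts_per_prim = 3),
max_reuse is 125, so at most 126 primitives fit even with full vertex reuse; with adjacency the reuse
budget is halved to 62 and max_gsprims is clamped to 63.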
 
 /**
@@ -1992,172 +1886,165 @@ static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts
  */
 void gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
 {
-       const struct si_shader_selector *gs_sel = shader->selector;
-       const struct si_shader_selector *es_sel =
-               shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
-       const enum pipe_shader_type gs_type = gs_sel->type;
-       const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
-       const unsigned input_prim = si_get_input_prim(gs_sel);
-       const bool use_adjacency = input_prim >= PIPE_PRIM_LINES_ADJACENCY &&
-                                  input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
-       const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
-       const unsigned min_verts_per_prim =
-               gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;
-
-       /* All these are in dwords: */
-       /* We can't allow using the whole LDS, because GS waves compete with
-        * other shader stages for LDS space.
-        *
-        * TODO: We should really take the shader's internal LDS use into
-        *       account. The linker will fail if the size is greater than
-        *       8K dwords.
-        */
-       const unsigned max_lds_size = 8 * 1024 - 768;
-       const unsigned target_lds_size = max_lds_size;
-       unsigned esvert_lds_size = 0;
-       unsigned gsprim_lds_size = 0;
-
-       /* All these are per subgroup: */
-       bool max_vert_out_per_gs_instance = false;
-       unsigned max_gsprims_base = 128; /* default prim group size clamp */
-       unsigned max_esverts_base = 128;
-
-       if (shader->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_TRI_LIST) {
-               max_gsprims_base = 128 / 3;
-               max_esverts_base = max_gsprims_base * 3;
-       } else if (shader->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_TRI_STRIP) {
-               max_gsprims_base = 126;
-               max_esverts_base = 128;
-       }
-
-       /* Hardware has the following non-natural restrictions on the value
-        * of GE_CNTL.VERT_GRP_SIZE based on based on the primitive type of
-        * the draw:
-        *  - at most 252 for any line input primitive type
-        *  - at most 251 for any quad input primitive type
-        *  - at most 251 for triangle strips with adjacency (this happens to
-        *    be the natural limit for triangle *lists* with adjacency)
-        */
-       max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);
-
-       if (gs_type == PIPE_SHADER_GEOMETRY) {
-               unsigned max_out_verts_per_gsprim =
-                       gs_sel->gs_max_out_vertices * gs_num_invocations;
-
-               if (max_out_verts_per_gsprim <= 256) {
-                       if (max_out_verts_per_gsprim) {
-                               max_gsprims_base = MIN2(max_gsprims_base,
-                                                       256 / max_out_verts_per_gsprim);
-                       }
-               } else {
-                       /* Use special multi-cycling mode in which each GS
-                        * instance gets its own subgroup. Does not work with
-                        * tessellation. */
-                       max_vert_out_per_gs_instance = true;
-                       max_gsprims_base = 1;
-                       max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
-               }
-
-               esvert_lds_size = es_sel->esgs_itemsize / 4;
-               gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
-       } else {
-               /* VS and TES. */
-               /* LDS size for passing data from ES to GS. */
-               esvert_lds_size = ngg_nogs_vertex_size(shader);
-       }
-
-       unsigned max_gsprims = max_gsprims_base;
-       unsigned max_esverts = max_esverts_base;
-
-       if (esvert_lds_size)
-               max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
-       if (gsprim_lds_size)
-               max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);
-
-       max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
-       clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
-       assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
-
-       if (esvert_lds_size || gsprim_lds_size) {
-               /* Now that we have a rough proportionality between esverts
-                * and gsprims based on the primitive type, scale both of them
-                * down simultaneously based on required LDS space.
-                *
-                * We could be smarter about this if we knew how much vertex
-                * reuse to expect.
-                */
-               unsigned lds_total = max_esverts * esvert_lds_size +
-                                    max_gsprims * gsprim_lds_size;
-               if (lds_total > target_lds_size) {
-                       max_esverts = max_esverts * target_lds_size / lds_total;
-                       max_gsprims = max_gsprims * target_lds_size / lds_total;
-
-                       max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
-                       clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
-                                                min_verts_per_prim, use_adjacency);
-                       assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
-               }
-       }
-
-       /* Round up towards full wave sizes for better ALU utilization. */
-       if (!max_vert_out_per_gs_instance) {
-               const unsigned wavesize = gs_sel->screen->ge_wave_size;
-               unsigned orig_max_esverts;
-               unsigned orig_max_gsprims;
-               do {
-                       orig_max_esverts = max_esverts;
-                       orig_max_gsprims = max_gsprims;
-
-                       max_esverts = align(max_esverts, wavesize);
-                       max_esverts = MIN2(max_esverts, max_esverts_base);
-                       if (esvert_lds_size)
-                               max_esverts = MIN2(max_esverts,
-                                                  (max_lds_size - max_gsprims * gsprim_lds_size) /
-                                                  esvert_lds_size);
-                       max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
-
-                       max_gsprims = align(max_gsprims, wavesize);
-                       max_gsprims = MIN2(max_gsprims, max_gsprims_base);
-                       if (gsprim_lds_size)
-                               max_gsprims = MIN2(max_gsprims,
-                                                  (max_lds_size - max_esverts * esvert_lds_size) /
-                                                  gsprim_lds_size);
-                       clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
-                                                min_verts_per_prim, use_adjacency);
-                       assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
-               } while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
-       }
-
-       /* Hardware restriction: minimum value of max_esverts */
-       max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);
-
-       unsigned max_out_vertices =
-               max_vert_out_per_gs_instance ? gs_sel->gs_max_out_vertices :
-               gs_type == PIPE_SHADER_GEOMETRY ?
-               max_gsprims * gs_num_invocations * gs_sel->gs_max_out_vertices :
-               max_esverts;
-       assert(max_out_vertices <= 256);
-
-       unsigned prim_amp_factor = 1;
-       if (gs_type == PIPE_SHADER_GEOMETRY) {
-               /* Number of output primitives per GS input primitive after
-                * GS instancing. */
-               prim_amp_factor = gs_sel->gs_max_out_vertices;
-       }
-
-       /* The GE only checks against the maximum number of ES verts after
-        * allocating a full GS primitive. So we need to ensure that whenever
-        * this check passes, there is enough space for a full primitive without
-        * vertex reuse.
-        */
-       shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
-       shader->ngg.max_gsprims = max_gsprims;
-       shader->ngg.max_out_verts = max_out_vertices;
-       shader->ngg.prim_amp_factor = prim_amp_factor;
-       shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;
-
-       shader->gs_info.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
-       shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;
-
-       assert(shader->ngg.hw_max_esverts >= 24); /* HW limitation */
+   const struct si_shader_selector *gs_sel = shader->selector;
+   const struct si_shader_selector *es_sel =
+      shader->previous_stage_sel ? shader->previous_stage_sel : gs_sel;
+   const enum pipe_shader_type gs_type = gs_sel->type;
+   const unsigned gs_num_invocations = MAX2(gs_sel->gs_num_invocations, 1);
+   const unsigned input_prim = si_get_input_prim(gs_sel);
+   const bool use_adjacency =
+      input_prim >= PIPE_PRIM_LINES_ADJACENCY && input_prim <= PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
+   const unsigned max_verts_per_prim = u_vertices_per_prim(input_prim);
+   const unsigned min_verts_per_prim = gs_type == PIPE_SHADER_GEOMETRY ? max_verts_per_prim : 1;
+
+   /* All these are in dwords: */
+   /* We can't allow using the whole LDS, because GS waves compete with
+    * other shader stages for LDS space.
+    *
+    * TODO: We should really take the shader's internal LDS use into
+    *       account. The linker will fail if the size is greater than
+    *       8K dwords.
+    */
+   const unsigned max_lds_size = 8 * 1024 - 768;
+   const unsigned target_lds_size = max_lds_size;
+   unsigned esvert_lds_size = 0;
+   unsigned gsprim_lds_size = 0;
+
+   /* All these are per subgroup: */
+   bool max_vert_out_per_gs_instance = false;
+   unsigned max_gsprims_base = 128; /* default prim group size clamp */
+   unsigned max_esverts_base = 128;
+
+   if (shader->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_TRI_LIST) {
+      max_gsprims_base = 128 / 3;
+      max_esverts_base = max_gsprims_base * 3;
+   } else if (shader->key.opt.ngg_culling & SI_NGG_CULL_GS_FAST_LAUNCH_TRI_STRIP) {
+      max_gsprims_base = 126;
+      max_esverts_base = 128;
+   }
+
+   /* Hardware has the following non-natural restrictions on the value
+    * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of
+    * the draw:
+    *  - at most 252 for any line input primitive type
+    *  - at most 251 for any quad input primitive type
+    *  - at most 251 for triangle strips with adjacency (this happens to
+    *    be the natural limit for triangle *lists* with adjacency)
+    */
+   max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);
+
+   if (gs_type == PIPE_SHADER_GEOMETRY) {
+      unsigned max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices * gs_num_invocations;
+
+      if (max_out_verts_per_gsprim <= 256) {
+         if (max_out_verts_per_gsprim) {
+            max_gsprims_base = MIN2(max_gsprims_base, 256 / max_out_verts_per_gsprim);
+         }
+      } else {
+         /* Use special multi-cycling mode in which each GS
+          * instance gets its own subgroup. Does not work with
+          * tessellation. */
+         max_vert_out_per_gs_instance = true;
+         max_gsprims_base = 1;
+         max_out_verts_per_gsprim = gs_sel->gs_max_out_vertices;
+      }
+
+      esvert_lds_size = es_sel->esgs_itemsize / 4;
+      gsprim_lds_size = (gs_sel->gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
+   } else {
+      /* VS and TES. */
+      /* LDS size for passing data from ES to GS. */
+      esvert_lds_size = ngg_nogs_vertex_size(shader);
+   }
+
+   unsigned max_gsprims = max_gsprims_base;
+   unsigned max_esverts = max_esverts_base;
+
+   if (esvert_lds_size)
+      max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
+   if (gsprim_lds_size)
+      max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);
+
+   max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
+   clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
+   assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
+
+   if (esvert_lds_size || gsprim_lds_size) {
+      /* Now that we have a rough proportionality between esverts
+       * and gsprims based on the primitive type, scale both of them
+       * down simultaneously based on required LDS space.
+       *
+       * We could be smarter about this if we knew how much vertex
+       * reuse to expect.
+       */
+      unsigned lds_total = max_esverts * esvert_lds_size + max_gsprims * gsprim_lds_size;
+      if (lds_total > target_lds_size) {
+         max_esverts = max_esverts * target_lds_size / lds_total;
+         max_gsprims = max_gsprims * target_lds_size / lds_total;
+
+         max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
+         clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
+         assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
+      }
+   }
+
+   /* Round up towards full wave sizes for better ALU utilization. */
+   if (!max_vert_out_per_gs_instance) {
+      const unsigned wavesize = gs_sel->screen->ge_wave_size;
+      unsigned orig_max_esverts;
+      unsigned orig_max_gsprims;
+      do {
+         orig_max_esverts = max_esverts;
+         orig_max_gsprims = max_gsprims;
+
+         max_esverts = align(max_esverts, wavesize);
+         max_esverts = MIN2(max_esverts, max_esverts_base);
+         if (esvert_lds_size)
+            max_esverts =
+               MIN2(max_esverts, (max_lds_size - max_gsprims * gsprim_lds_size) / esvert_lds_size);
+         max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
+
+         max_gsprims = align(max_gsprims, wavesize);
+         max_gsprims = MIN2(max_gsprims, max_gsprims_base);
+         if (gsprim_lds_size)
+            max_gsprims =
+               MIN2(max_gsprims, (max_lds_size - max_esverts * esvert_lds_size) / gsprim_lds_size);
+         clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, use_adjacency);
+         assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
+      } while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
+   }
+
+   /* Hardware restriction: minimum value of max_esverts */
+   max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);
+
+   unsigned max_out_vertices =
+      max_vert_out_per_gs_instance
+         ? gs_sel->gs_max_out_vertices
+         : gs_type == PIPE_SHADER_GEOMETRY
+              ? max_gsprims * gs_num_invocations * gs_sel->gs_max_out_vertices
+              : max_esverts;
+   assert(max_out_vertices <= 256);
+
+   unsigned prim_amp_factor = 1;
+   if (gs_type == PIPE_SHADER_GEOMETRY) {
+      /* Number of output primitives per GS input primitive after
+       * GS instancing. */
+      prim_amp_factor = gs_sel->gs_max_out_vertices;
+   }
+
+   /* The GE only checks against the maximum number of ES verts after
+    * allocating a full GS primitive. So we need to ensure that whenever
+    * this check passes, there is enough space for a full primitive without
+    * vertex reuse.
+    */
+   shader->ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
+   shader->ngg.max_gsprims = max_gsprims;
+   shader->ngg.max_out_verts = max_out_vertices;
+   shader->ngg.prim_amp_factor = prim_amp_factor;
+   shader->ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;
+
+   shader->gs_info.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
+   shader->ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;
+
+   assert(shader->ngg.hw_max_esverts >= 24); /* HW limitation */
 }
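
The proportional LDS scaling in the middle of the function shrinks both limits by the same ratio; a
hypothetical standalone mirror of just that step (sizes and the budget in dwords, with the driver
re-clamping and re-checking the invariants afterwards):

/* Shrink esverts and gsprims together when their combined LDS footprint
 * exceeds the budget, mirroring gfx10_ngg_calculate_subgroup_info(). */
static void scale_to_lds_budget(unsigned *max_esverts, unsigned *max_gsprims,
                                unsigned esvert_lds_size, unsigned gsprim_lds_size,
                                unsigned target_lds_size)
{
   unsigned lds_total = *max_esverts * esvert_lds_size + *max_gsprims * gsprim_lds_size;

   if (lds_total > target_lds_size) {
      *max_esverts = *max_esverts * target_lds_size / lds_total;
      *max_gsprims = *max_gsprims * target_lds_size / lds_total;
   }
}

For example (made-up sizes), 128 ES vertices of 16 dwords plus 128 GS primitives of 80 dwords total
12288 dwords; scaling to the 7424-dword budget (8 * 1024 - 768) yields 77 of each before the
subsequent re-clamping.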
index e662de1612745a769c3ec044a70fd400c1b99af9..ab69c7e4ddda318b902068324cd2db32072b2e8c 100644 (file)
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include "si_pipe.h"
 #include "si_compute.h"
+#include "si_pipe.h"
 #include "util/format/u_format.h"
 #include "util/u_log.h"
 #include "util/u_surface.h"
 
-enum {
-       SI_COPY          = SI_SAVE_FRAMEBUFFER | SI_SAVE_TEXTURES |
-                          SI_SAVE_FRAGMENT_STATE | SI_DISABLE_RENDER_COND,
+enum
+{
+   SI_COPY =
+      SI_SAVE_FRAMEBUFFER | SI_SAVE_TEXTURES | SI_SAVE_FRAGMENT_STATE | SI_DISABLE_RENDER_COND,
 
-       SI_BLIT          = SI_SAVE_FRAMEBUFFER | SI_SAVE_TEXTURES |
-                          SI_SAVE_FRAGMENT_STATE,
+   SI_BLIT = SI_SAVE_FRAMEBUFFER | SI_SAVE_TEXTURES | SI_SAVE_FRAGMENT_STATE,
 
-       SI_DECOMPRESS    = SI_SAVE_FRAMEBUFFER | SI_SAVE_FRAGMENT_STATE |
-                          SI_DISABLE_RENDER_COND,
+   SI_DECOMPRESS = SI_SAVE_FRAMEBUFFER | SI_SAVE_FRAGMENT_STATE | SI_DISABLE_RENDER_COND,
 
-       SI_COLOR_RESOLVE = SI_SAVE_FRAMEBUFFER | SI_SAVE_FRAGMENT_STATE
+   SI_COLOR_RESOLVE = SI_SAVE_FRAMEBUFFER | SI_SAVE_FRAGMENT_STATE
 };
 
 void si_blitter_begin(struct si_context *sctx, enum si_blitter_op op)
 {
-       util_blitter_save_vertex_shader(sctx->blitter, sctx->vs_shader.cso);
-       util_blitter_save_tessctrl_shader(sctx->blitter, sctx->tcs_shader.cso);
-       util_blitter_save_tesseval_shader(sctx->blitter, sctx->tes_shader.cso);
-       util_blitter_save_geometry_shader(sctx->blitter, sctx->gs_shader.cso);
-       util_blitter_save_so_targets(sctx->blitter, sctx->streamout.num_targets,
-                                    (struct pipe_stream_output_target**)sctx->streamout.targets);
-       util_blitter_save_rasterizer(sctx->blitter, sctx->queued.named.rasterizer);
-
-       if (op & SI_SAVE_FRAGMENT_STATE) {
-               util_blitter_save_blend(sctx->blitter, sctx->queued.named.blend);
-               util_blitter_save_depth_stencil_alpha(sctx->blitter, sctx->queued.named.dsa);
-               util_blitter_save_stencil_ref(sctx->blitter, &sctx->stencil_ref.state);
-               util_blitter_save_fragment_shader(sctx->blitter, sctx->ps_shader.cso);
-               util_blitter_save_sample_mask(sctx->blitter, sctx->sample_mask);
-               util_blitter_save_scissor(sctx->blitter, &sctx->scissors[0]);
-               util_blitter_save_window_rectangles(sctx->blitter,
-                                                   sctx->window_rectangles_include,
-                                                   sctx->num_window_rectangles,
-                                                   sctx->window_rectangles);
-       }
-
-       if (op & SI_SAVE_FRAMEBUFFER)
-               util_blitter_save_framebuffer(sctx->blitter, &sctx->framebuffer.state);
-
-       if (op & SI_SAVE_TEXTURES) {
-               util_blitter_save_fragment_sampler_states(
-                       sctx->blitter, 2,
-                       (void**)sctx->samplers[PIPE_SHADER_FRAGMENT].sampler_states);
-
-               util_blitter_save_fragment_sampler_views(sctx->blitter, 2,
-                       sctx->samplers[PIPE_SHADER_FRAGMENT].views);
-       }
-
-       if (op & SI_DISABLE_RENDER_COND)
-               sctx->render_cond_force_off = true;
-
-       if (sctx->screen->dpbb_allowed) {
-               sctx->dpbb_force_off = true;
-               si_mark_atom_dirty(sctx, &sctx->atoms.s.dpbb_state);
-       }
+   util_blitter_save_vertex_shader(sctx->blitter, sctx->vs_shader.cso);
+   util_blitter_save_tessctrl_shader(sctx->blitter, sctx->tcs_shader.cso);
+   util_blitter_save_tesseval_shader(sctx->blitter, sctx->tes_shader.cso);
+   util_blitter_save_geometry_shader(sctx->blitter, sctx->gs_shader.cso);
+   util_blitter_save_so_targets(sctx->blitter, sctx->streamout.num_targets,
+                                (struct pipe_stream_output_target **)sctx->streamout.targets);
+   util_blitter_save_rasterizer(sctx->blitter, sctx->queued.named.rasterizer);
+
+   if (op & SI_SAVE_FRAGMENT_STATE) {
+      util_blitter_save_blend(sctx->blitter, sctx->queued.named.blend);
+      util_blitter_save_depth_stencil_alpha(sctx->blitter, sctx->queued.named.dsa);
+      util_blitter_save_stencil_ref(sctx->blitter, &sctx->stencil_ref.state);
+      util_blitter_save_fragment_shader(sctx->blitter, sctx->ps_shader.cso);
+      util_blitter_save_sample_mask(sctx->blitter, sctx->sample_mask);
+      util_blitter_save_scissor(sctx->blitter, &sctx->scissors[0]);
+      util_blitter_save_window_rectangles(sctx->blitter, sctx->window_rectangles_include,
+                                          sctx->num_window_rectangles, sctx->window_rectangles);
+   }
+
+   if (op & SI_SAVE_FRAMEBUFFER)
+      util_blitter_save_framebuffer(sctx->blitter, &sctx->framebuffer.state);
+
+   if (op & SI_SAVE_TEXTURES) {
+      util_blitter_save_fragment_sampler_states(
+         sctx->blitter, 2, (void **)sctx->samplers[PIPE_SHADER_FRAGMENT].sampler_states);
+
+      util_blitter_save_fragment_sampler_views(sctx->blitter, 2,
+                                               sctx->samplers[PIPE_SHADER_FRAGMENT].views);
+   }
+
+   if (op & SI_DISABLE_RENDER_COND)
+      sctx->render_cond_force_off = true;
+
+   if (sctx->screen->dpbb_allowed) {
+      sctx->dpbb_force_off = true;
+      si_mark_atom_dirty(sctx, &sctx->atoms.s.dpbb_state);
+   }
 }
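
si_blitter_begin() keys everything off the bit flags in the enum at the top of this file: each SI_* operation is just an OR of "what to save" bits, and the function tests each bit individually. A minimal standalone sketch of that pattern follows; the names and messages here are illustrative, not driver API.

#include <stdio.h>

enum
{
   SAVE_FRAMEBUFFER    = 1 << 0,
   SAVE_TEXTURES       = 1 << 1,
   SAVE_FRAGMENT_STATE = 1 << 2,
   DISABLE_RENDER_COND = 1 << 3,

   OP_COPY = SAVE_FRAMEBUFFER | SAVE_TEXTURES | SAVE_FRAGMENT_STATE | DISABLE_RENDER_COND,
   OP_BLIT = SAVE_FRAMEBUFFER | SAVE_TEXTURES | SAVE_FRAGMENT_STATE,
};

static void begin(unsigned op)
{
   if (op & SAVE_FRAMEBUFFER)
      puts("save framebuffer state");
   if (op & SAVE_TEXTURES)
      puts("save fragment sampler states and views");
   if (op & SAVE_FRAGMENT_STATE)
      puts("save blend/DSA/stencil ref/scissor");
   if (op & DISABLE_RENDER_COND)
      puts("force the render condition off");
}

int main(void)
{
   begin(OP_COPY); /* a copy saves everything and ignores the render condition */
   begin(OP_BLIT); /* a blit keeps the render condition active */
   return 0;
}
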
 
 void si_blitter_end(struct si_context *sctx)
 {
-       if (sctx->screen->dpbb_allowed) {
-               sctx->dpbb_force_off = false;
-               si_mark_atom_dirty(sctx, &sctx->atoms.s.dpbb_state);
-       }
-
-       sctx->render_cond_force_off = false;
-
-       /* Restore shader pointers because the VS blit shader changed all
-        * non-global VS user SGPRs. */
-       sctx->shader_pointers_dirty |= SI_DESCS_SHADER_MASK(VERTEX);
-       sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
-       sctx->vertex_buffer_user_sgprs_dirty = sctx->num_vertex_elements > 0;
-       si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
+   if (sctx->screen->dpbb_allowed) {
+      sctx->dpbb_force_off = false;
+      si_mark_atom_dirty(sctx, &sctx->atoms.s.dpbb_state);
+   }
+
+   sctx->render_cond_force_off = false;
+
+   /* Restore shader pointers because the VS blit shader changed all
+    * non-global VS user SGPRs. */
+   sctx->shader_pointers_dirty |= SI_DESCS_SHADER_MASK(VERTEX);
+   sctx->vertex_buffer_pointer_dirty = sctx->vb_descriptors_buffer != NULL;
+   sctx->vertex_buffer_user_sgprs_dirty = sctx->num_vertex_elements > 0;
+   si_mark_atom_dirty(sctx, &sctx->atoms.s.shader_pointers);
 }
 
 static unsigned u_max_sample(struct pipe_resource *r)
 {
-       return r->nr_samples ? r->nr_samples - 1 : 0;
+   return r->nr_samples ? r->nr_samples - 1 : 0;
 }
 
-static unsigned
-si_blit_dbcb_copy(struct si_context *sctx,
-                 struct si_texture *src,
-                 struct si_texture *dst,
-                 unsigned planes, unsigned level_mask,
-                 unsigned first_layer, unsigned last_layer,
-                 unsigned first_sample, unsigned last_sample)
+static unsigned si_blit_dbcb_copy(struct si_context *sctx, struct si_texture *src,
+                                  struct si_texture *dst, unsigned planes, unsigned level_mask,
+                                  unsigned first_layer, unsigned last_layer, unsigned first_sample,
+                                  unsigned last_sample)
 {
-       struct pipe_surface surf_tmpl = {{0}};
-       unsigned layer, sample, checked_last_layer, max_layer;
-       unsigned fully_copied_levels = 0;
+   struct pipe_surface surf_tmpl = {{0}};
+   unsigned layer, sample, checked_last_layer, max_layer;
+   unsigned fully_copied_levels = 0;
 
-       if (planes & PIPE_MASK_Z)
-               sctx->dbcb_depth_copy_enabled = true;
-       if (planes & PIPE_MASK_S)
-               sctx->dbcb_stencil_copy_enabled = true;
-       si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
+   if (planes & PIPE_MASK_Z)
+      sctx->dbcb_depth_copy_enabled = true;
+   if (planes & PIPE_MASK_S)
+      sctx->dbcb_stencil_copy_enabled = true;
+   si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
 
-       assert(sctx->dbcb_depth_copy_enabled || sctx->dbcb_stencil_copy_enabled);
+   assert(sctx->dbcb_depth_copy_enabled || sctx->dbcb_stencil_copy_enabled);
 
-       sctx->decompression_enabled = true;
+   sctx->decompression_enabled = true;
 
-       while (level_mask) {
-               unsigned level = u_bit_scan(&level_mask);
+   while (level_mask) {
+      unsigned level = u_bit_scan(&level_mask);
 
-               /* The smaller the mipmap level, the less layers there are
-                * as far as 3D textures are concerned. */
-               max_layer = util_max_layer(&src->buffer.b.b, level);
-               checked_last_layer = MIN2(last_layer, max_layer);
+      /* The smaller the mipmap level, the less layers there are
+       * as far as 3D textures are concerned. */
+      max_layer = util_max_layer(&src->buffer.b.b, level);
+      checked_last_layer = MIN2(last_layer, max_layer);
 
-               surf_tmpl.u.tex.level = level;
+      surf_tmpl.u.tex.level = level;
 
-               for (layer = first_layer; layer <= checked_last_layer; layer++) {
-                       struct pipe_surface *zsurf, *cbsurf;
+      for (layer = first_layer; layer <= checked_last_layer; layer++) {
+         struct pipe_surface *zsurf, *cbsurf;
 
-                       surf_tmpl.format = src->buffer.b.b.format;
-                       surf_tmpl.u.tex.first_layer = layer;
-                       surf_tmpl.u.tex.last_layer = layer;
+         surf_tmpl.format = src->buffer.b.b.format;
+         surf_tmpl.u.tex.first_layer = layer;
+         surf_tmpl.u.tex.last_layer = layer;
 
-                       zsurf = sctx->b.create_surface(&sctx->b, &src->buffer.b.b, &surf_tmpl);
+         zsurf = sctx->b.create_surface(&sctx->b, &src->buffer.b.b, &surf_tmpl);
 
-                       surf_tmpl.format = dst->buffer.b.b.format;
-                       cbsurf = sctx->b.create_surface(&sctx->b, &dst->buffer.b.b, &surf_tmpl);
+         surf_tmpl.format = dst->buffer.b.b.format;
+         cbsurf = sctx->b.create_surface(&sctx->b, &dst->buffer.b.b, &surf_tmpl);
 
-                       for (sample = first_sample; sample <= last_sample; sample++) {
-                               if (sample != sctx->dbcb_copy_sample) {
-                                       sctx->dbcb_copy_sample = sample;
-                                       si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
-                               }
+         for (sample = first_sample; sample <= last_sample; sample++) {
+            if (sample != sctx->dbcb_copy_sample) {
+               sctx->dbcb_copy_sample = sample;
+               si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
+            }
 
-                               si_blitter_begin(sctx, SI_DECOMPRESS);
-                               util_blitter_custom_depth_stencil(sctx->blitter, zsurf, cbsurf, 1 << sample,
-                                                                 sctx->custom_dsa_flush, 1.0f);
-                               si_blitter_end(sctx);
-                       }
+            si_blitter_begin(sctx, SI_DECOMPRESS);
+            util_blitter_custom_depth_stencil(sctx->blitter, zsurf, cbsurf, 1 << sample,
+                                              sctx->custom_dsa_flush, 1.0f);
+            si_blitter_end(sctx);
+         }
 
-                       pipe_surface_reference(&zsurf, NULL);
-                       pipe_surface_reference(&cbsurf, NULL);
-               }
+         pipe_surface_reference(&zsurf, NULL);
+         pipe_surface_reference(&cbsurf, NULL);
+      }
 
-               if (first_layer == 0 && last_layer >= max_layer &&
-                   first_sample == 0 && last_sample >= u_max_sample(&src->buffer.b.b))
-                       fully_copied_levels |= 1u << level;
-       }
+      if (first_layer == 0 && last_layer >= max_layer && first_sample == 0 &&
+          last_sample >= u_max_sample(&src->buffer.b.b))
+         fully_copied_levels |= 1u << level;
+   }
 
-       sctx->decompression_enabled = false;
-       sctx->dbcb_depth_copy_enabled = false;
-       sctx->dbcb_stencil_copy_enabled = false;
-       si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
+   sctx->decompression_enabled = false;
+   sctx->dbcb_depth_copy_enabled = false;
+   sctx->dbcb_stencil_copy_enabled = false;
+   si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
 
-       return fully_copied_levels;
+   return fully_copied_levels;
 }
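
The level loop above and the mask update at its end follow a common radeonsi pattern: pop one set bit at a time with u_bit_scan(), record which levels were handled completely, then clear only those from the texture's dirty mask. Below is a standalone sketch of that bookkeeping; the mask values are invented and a local helper stands in for Mesa's u_bit_scan() so the example compiles on its own.

#include <stdio.h>

static unsigned bit_scan(unsigned *mask)
{
   unsigned i = __builtin_ctz(*mask); /* index of the lowest set bit */
   *mask &= *mask - 1;                /* clear it, like u_bit_scan() */
   return i;
}

int main(void)
{
   unsigned dirty_level_mask = 0x2d;  /* levels 0, 2, 3, 5 need work (made up) */
   unsigned level_mask = dirty_level_mask;
   unsigned fully_copied_levels = 0;

   while (level_mask) {
      unsigned level = bit_scan(&level_mask);

      /* ... copy/decompress every layer and sample of this level ... */
      int all_layers_done = 1;        /* pretend the whole range was covered */
      if (all_layers_done)
         fully_copied_levels |= 1u << level;
   }

   dirty_level_mask &= ~fully_copied_levels; /* mirrors the dirty_level_mask update */
   printf("remaining dirty levels: 0x%x\n", dirty_level_mask);
   return 0;
}
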
 
 /* Helper function for si_blit_decompress_zs_in_place.
  */
-static void
-si_blit_decompress_zs_planes_in_place(struct si_context *sctx,
-                                     struct si_texture *texture,
-                                     unsigned planes, unsigned level_mask,
-                                     unsigned first_layer, unsigned last_layer)
+static void si_blit_decompress_zs_planes_in_place(struct si_context *sctx,
+                                                  struct si_texture *texture, unsigned planes,
+                                                  unsigned level_mask, unsigned first_layer,
+                                                  unsigned last_layer)
 {
-       struct pipe_surface *zsurf, surf_tmpl = {{0}};
-       unsigned layer, max_layer, checked_last_layer;
-       unsigned fully_decompressed_mask = 0;
+   struct pipe_surface *zsurf, surf_tmpl = {{0}};
+   unsigned layer, max_layer, checked_last_layer;
+   unsigned fully_decompressed_mask = 0;
 
-       if (!level_mask)
-               return;
+   if (!level_mask)
+      return;
 
-       if (planes & PIPE_MASK_S)
-               sctx->db_flush_stencil_inplace = true;
-       if (planes & PIPE_MASK_Z)
-               sctx->db_flush_depth_inplace = true;
-       si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
+   if (planes & PIPE_MASK_S)
+      sctx->db_flush_stencil_inplace = true;
+   if (planes & PIPE_MASK_Z)
+      sctx->db_flush_depth_inplace = true;
+   si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
 
-       surf_tmpl.format = texture->buffer.b.b.format;
+   surf_tmpl.format = texture->buffer.b.b.format;
 
-       sctx->decompression_enabled = true;
+   sctx->decompression_enabled = true;
 
-       while (level_mask) {
-               unsigned level = u_bit_scan(&level_mask);
+   while (level_mask) {
+      unsigned level = u_bit_scan(&level_mask);
 
-               surf_tmpl.u.tex.level = level;
+      surf_tmpl.u.tex.level = level;
 
-               /* The smaller the mipmap level, the less layers there are
-                * as far as 3D textures are concerned. */
-               max_layer = util_max_layer(&texture->buffer.b.b, level);
-               checked_last_layer = MIN2(last_layer, max_layer);
+      /* The smaller the mipmap level, the less layers there are
+       * as far as 3D textures are concerned. */
+      max_layer = util_max_layer(&texture->buffer.b.b, level);
+      checked_last_layer = MIN2(last_layer, max_layer);
 
-               for (layer = first_layer; layer <= checked_last_layer; layer++) {
-                       surf_tmpl.u.tex.first_layer = layer;
-                       surf_tmpl.u.tex.last_layer = layer;
+      for (layer = first_layer; layer <= checked_last_layer; layer++) {
+         surf_tmpl.u.tex.first_layer = layer;
+         surf_tmpl.u.tex.last_layer = layer;
 
-                       zsurf = sctx->b.create_surface(&sctx->b, &texture->buffer.b.b, &surf_tmpl);
+         zsurf = sctx->b.create_surface(&sctx->b, &texture->buffer.b.b, &surf_tmpl);
 
-                       si_blitter_begin(sctx, SI_DECOMPRESS);
-                       util_blitter_custom_depth_stencil(sctx->blitter, zsurf, NULL, ~0,
-                                                         sctx->custom_dsa_flush,
-                                                         1.0f);
-                       si_blitter_end(sctx);
+         si_blitter_begin(sctx, SI_DECOMPRESS);
+         util_blitter_custom_depth_stencil(sctx->blitter, zsurf, NULL, ~0, sctx->custom_dsa_flush,
+                                           1.0f);
+         si_blitter_end(sctx);
 
-                       pipe_surface_reference(&zsurf, NULL);
-               }
+         pipe_surface_reference(&zsurf, NULL);
+      }
 
-               /* The texture will always be dirty if some layers aren't flushed.
-                * I don't think this case occurs often though. */
-               if (first_layer == 0 && last_layer >= max_layer) {
-                       fully_decompressed_mask |= 1u << level;
-               }
-       }
+      /* The texture will always be dirty if some layers aren't flushed.
+       * I don't think this case occurs often though. */
+      if (first_layer == 0 && last_layer >= max_layer) {
+         fully_decompressed_mask |= 1u << level;
+      }
+   }
 
-       if (planes & PIPE_MASK_Z)
-               texture->dirty_level_mask &= ~fully_decompressed_mask;
-       if (planes & PIPE_MASK_S)
-               texture->stencil_dirty_level_mask &= ~fully_decompressed_mask;
+   if (planes & PIPE_MASK_Z)
+      texture->dirty_level_mask &= ~fully_decompressed_mask;
+   if (planes & PIPE_MASK_S)
+      texture->stencil_dirty_level_mask &= ~fully_decompressed_mask;
 
-       sctx->decompression_enabled = false;
-       sctx->db_flush_depth_inplace = false;
-       sctx->db_flush_stencil_inplace = false;
-       si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
+   sctx->decompression_enabled = false;
+   sctx->db_flush_depth_inplace = false;
+   sctx->db_flush_stencil_inplace = false;
+   si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
 }
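
Both decompress helpers re-clamp the caller's layer range per mip level because a 3D texture has fewer slices in its smaller mips; util_max_layer() supplies that per-level maximum. The standalone sketch below shows the clamping with an invented depth value; minify() here mimics the usual u_minify() behaviour.

#include <stdio.h>

#define MAX2(a, b) ((a) > (b) ? (a) : (b))
#define MIN2(a, b) ((a) < (b) ? (a) : (b))

static unsigned minify(unsigned value, unsigned level)
{
   return MAX2(value >> level, 1);
}

int main(void)
{
   const unsigned depth = 32;   /* slices of a hypothetical 3D texture */
   unsigned last_layer = 31;    /* caller-requested range */

   for (unsigned level = 0; level < 6; level++) {
      unsigned max_layer = minify(depth, level) - 1;
      unsigned checked_last_layer = MIN2(last_layer, max_layer);
      printf("level %u: layers 0..%u\n", level, checked_last_layer);
   }
   return 0;
}
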
 
 /* Helper function of si_flush_depth_texture: decompress the given levels
  * of Z and/or S planes in place.
  */
-static void
-si_blit_decompress_zs_in_place(struct si_context *sctx,
-                              struct si_texture *texture,
-                              unsigned levels_z, unsigned levels_s,
-                              unsigned first_layer, unsigned last_layer)
+static void si_blit_decompress_zs_in_place(struct si_context *sctx, struct si_texture *texture,
+                                           unsigned levels_z, unsigned levels_s,
+                                           unsigned first_layer, unsigned last_layer)
 {
-       unsigned both = levels_z & levels_s;
-
-       /* First, do combined Z & S decompresses for levels that need it. */
-       if (both) {
-               si_blit_decompress_zs_planes_in_place(
-                               sctx, texture, PIPE_MASK_Z | PIPE_MASK_S,
-                               both,
-                               first_layer, last_layer);
-               levels_z &= ~both;
-               levels_s &= ~both;
-       }
-
-       /* Now do separate Z and S decompresses. */
-       if (levels_z) {
-               si_blit_decompress_zs_planes_in_place(
-                               sctx, texture, PIPE_MASK_Z,
-                               levels_z,
-                               first_layer, last_layer);
-       }
-
-       if (levels_s) {
-               si_blit_decompress_zs_planes_in_place(
-                               sctx, texture, PIPE_MASK_S,
-                               levels_s,
-                               first_layer, last_layer);
-       }
+   unsigned both = levels_z & levels_s;
+
+   /* First, do combined Z & S decompresses for levels that need it. */
+   if (both) {
+      si_blit_decompress_zs_planes_in_place(sctx, texture, PIPE_MASK_Z | PIPE_MASK_S, both,
+                                            first_layer, last_layer);
+      levels_z &= ~both;
+      levels_s &= ~both;
+   }
+
+   /* Now do separate Z and S decompresses. */
+   if (levels_z) {
+      si_blit_decompress_zs_planes_in_place(sctx, texture, PIPE_MASK_Z, levels_z, first_layer,
+                                            last_layer);
+   }
+
+   if (levels_s) {
+      si_blit_decompress_zs_planes_in_place(sctx, texture, PIPE_MASK_S, levels_s, first_layer,
+                                            last_layer);
+   }
 }
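
si_blit_decompress_zs_in_place() splits the work with plain mask arithmetic: levels dirty in both planes get one combined pass, and the leftovers get Z-only and S-only passes. A tiny standalone illustration with invented masks:

#include <stdio.h>

int main(void)
{
   unsigned levels_z = 0x0f;           /* made-up dirty-Z level mask */
   unsigned levels_s = 0x3c;           /* made-up dirty-S level mask */
   unsigned both = levels_z & levels_s;

   printf("combined Z+S pass on levels 0x%x\n", both);
   levels_z &= ~both;
   levels_s &= ~both;
   printf("Z-only pass on 0x%x, S-only pass on 0x%x\n", levels_z, levels_s);
   return 0;
}
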
 
-static void
-si_decompress_depth(struct si_context *sctx,
-                   struct si_texture *tex,
-                   unsigned required_planes,
-                   unsigned first_level, unsigned last_level,
-                   unsigned first_layer, unsigned last_layer)
+static void si_decompress_depth(struct si_context *sctx, struct si_texture *tex,
+                                unsigned required_planes, unsigned first_level, unsigned last_level,
+                                unsigned first_layer, unsigned last_layer)
 {
-       unsigned inplace_planes = 0;
-       unsigned copy_planes = 0;
-       unsigned level_mask = u_bit_consecutive(first_level, last_level - first_level + 1);
-       unsigned levels_z = 0;
-       unsigned levels_s = 0;
-
-       if (required_planes & PIPE_MASK_Z) {
-               levels_z = level_mask & tex->dirty_level_mask;
-
-               if (levels_z) {
-                       if (si_can_sample_zs(tex, false))
-                               inplace_planes |= PIPE_MASK_Z;
-                       else
-                               copy_planes |= PIPE_MASK_Z;
-               }
-       }
-       if (required_planes & PIPE_MASK_S) {
-               levels_s = level_mask & tex->stencil_dirty_level_mask;
-
-               if (levels_s) {
-                       if (si_can_sample_zs(tex, true))
-                               inplace_planes |= PIPE_MASK_S;
-                       else
-                               copy_planes |= PIPE_MASK_S;
-               }
-       }
-
-       if (unlikely(sctx->log))
-               u_log_printf(sctx->log,
-                            "\n------------------------------------------------\n"
-                            "Decompress Depth (levels %u - %u, levels Z: 0x%x S: 0x%x)\n\n",
-                            first_level, last_level, levels_z, levels_s);
-
-       /* We may have to allocate the flushed texture here when called from
-        * si_decompress_subresource.
-        */
-       if (copy_planes &&
-           (tex->flushed_depth_texture ||
-            si_init_flushed_depth_texture(&sctx->b, &tex->buffer.b.b))) {
-               struct si_texture *dst = tex->flushed_depth_texture;
-               unsigned fully_copied_levels;
-               unsigned levels = 0;
-
-               assert(tex->flushed_depth_texture);
-
-               if (util_format_is_depth_and_stencil(dst->buffer.b.b.format))
-                       copy_planes = PIPE_MASK_Z | PIPE_MASK_S;
-
-               if (copy_planes & PIPE_MASK_Z) {
-                       levels |= levels_z;
-                       levels_z = 0;
-               }
-               if (copy_planes & PIPE_MASK_S) {
-                       levels |= levels_s;
-                       levels_s = 0;
-               }
-
-               fully_copied_levels = si_blit_dbcb_copy(
-                       sctx, tex, dst, copy_planes, levels,
-                       first_layer, last_layer,
-                       0, u_max_sample(&tex->buffer.b.b));
-
-               if (copy_planes & PIPE_MASK_Z)
-                       tex->dirty_level_mask &= ~fully_copied_levels;
-               if (copy_planes & PIPE_MASK_S)
-                       tex->stencil_dirty_level_mask &= ~fully_copied_levels;
-       }
-
-       if (inplace_planes) {
-               bool has_htile = si_htile_enabled(tex, first_level, inplace_planes);
-               bool tc_compat_htile = vi_tc_compat_htile_enabled(tex, first_level,
-                                                                 inplace_planes);
-
-               /* Don't decompress if there is no HTILE or when HTILE is
-                * TC-compatible. */
-               if (has_htile && !tc_compat_htile) {
-                       si_blit_decompress_zs_in_place(
-                                               sctx, tex,
-                                               levels_z, levels_s,
-                                               first_layer, last_layer);
-               } else {
-                       /* This is only a cache flush.
-                        *
-                        * Only clear the mask that we are flushing, because
-                        * si_make_DB_shader_coherent() treats different levels
-                        * and depth and stencil differently.
-                        */
-                       if (inplace_planes & PIPE_MASK_Z)
-                               tex->dirty_level_mask &= ~levels_z;
-                       if (inplace_planes & PIPE_MASK_S)
-                               tex->stencil_dirty_level_mask &= ~levels_s;
-               }
-
-               /* Only in-place decompression needs to flush DB caches, or
-                * when we don't decompress but TC-compatible planes are dirty.
-                */
-               si_make_DB_shader_coherent(sctx, tex->buffer.b.b.nr_samples,
-                                          inplace_planes & PIPE_MASK_S,
-                                          tc_compat_htile);
-       }
-       /* set_framebuffer_state takes care of coherency for single-sample.
-        * The DB->CB copy uses CB for the final writes.
-        */
-       if (copy_planes && tex->buffer.b.b.nr_samples > 1)
-               si_make_CB_shader_coherent(sctx, tex->buffer.b.b.nr_samples,
-                                          false, true /* no DCC */);
+   unsigned inplace_planes = 0;
+   unsigned copy_planes = 0;
+   unsigned level_mask = u_bit_consecutive(first_level, last_level - first_level + 1);
+   unsigned levels_z = 0;
+   unsigned levels_s = 0;
+
+   if (required_planes & PIPE_MASK_Z) {
+      levels_z = level_mask & tex->dirty_level_mask;
+
+      if (levels_z) {
+         if (si_can_sample_zs(tex, false))
+            inplace_planes |= PIPE_MASK_Z;
+         else
+            copy_planes |= PIPE_MASK_Z;
+      }
+   }
+   if (required_planes & PIPE_MASK_S) {
+      levels_s = level_mask & tex->stencil_dirty_level_mask;
+
+      if (levels_s) {
+         if (si_can_sample_zs(tex, true))
+            inplace_planes |= PIPE_MASK_S;
+         else
+            copy_planes |= PIPE_MASK_S;
+      }
+   }
+
+   if (unlikely(sctx->log))
+      u_log_printf(sctx->log,
+                   "\n------------------------------------------------\n"
+                   "Decompress Depth (levels %u - %u, levels Z: 0x%x S: 0x%x)\n\n",
+                   first_level, last_level, levels_z, levels_s);
+
+   /* We may have to allocate the flushed texture here when called from
+    * si_decompress_subresource.
+    */
+   if (copy_planes &&
+       (tex->flushed_depth_texture || si_init_flushed_depth_texture(&sctx->b, &tex->buffer.b.b))) {
+      struct si_texture *dst = tex->flushed_depth_texture;
+      unsigned fully_copied_levels;
+      unsigned levels = 0;
+
+      assert(tex->flushed_depth_texture);
+
+      if (util_format_is_depth_and_stencil(dst->buffer.b.b.format))
+         copy_planes = PIPE_MASK_Z | PIPE_MASK_S;
+
+      if (copy_planes & PIPE_MASK_Z) {
+         levels |= levels_z;
+         levels_z = 0;
+      }
+      if (copy_planes & PIPE_MASK_S) {
+         levels |= levels_s;
+         levels_s = 0;
+      }
+
+      fully_copied_levels = si_blit_dbcb_copy(sctx, tex, dst, copy_planes, levels, first_layer,
+                                              last_layer, 0, u_max_sample(&tex->buffer.b.b));
+
+      if (copy_planes & PIPE_MASK_Z)
+         tex->dirty_level_mask &= ~fully_copied_levels;
+      if (copy_planes & PIPE_MASK_S)
+         tex->stencil_dirty_level_mask &= ~fully_copied_levels;
+   }
+
+   if (inplace_planes) {
+      bool has_htile = si_htile_enabled(tex, first_level, inplace_planes);
+      bool tc_compat_htile = vi_tc_compat_htile_enabled(tex, first_level, inplace_planes);
+
+      /* Don't decompress if there is no HTILE or when HTILE is
+       * TC-compatible. */
+      if (has_htile && !tc_compat_htile) {
+         si_blit_decompress_zs_in_place(sctx, tex, levels_z, levels_s, first_layer, last_layer);
+      } else {
+         /* This is only a cache flush.
+          *
+          * Only clear the mask that we are flushing, because
+          * si_make_DB_shader_coherent() treats different levels
+          * and depth and stencil differently.
+          */
+         if (inplace_planes & PIPE_MASK_Z)
+            tex->dirty_level_mask &= ~levels_z;
+         if (inplace_planes & PIPE_MASK_S)
+            tex->stencil_dirty_level_mask &= ~levels_s;
+      }
+
+      /* Only in-place decompression needs to flush DB caches, or
+       * when we don't decompress but TC-compatible planes are dirty.
+       */
+      si_make_DB_shader_coherent(sctx, tex->buffer.b.b.nr_samples, inplace_planes & PIPE_MASK_S,
+                                 tc_compat_htile);
+   }
+   /* set_framebuffer_state takes care of coherency for single-sample.
+    * The DB->CB copy uses CB for the final writes.
+    */
+   if (copy_planes && tex->buffer.b.b.nr_samples > 1)
+      si_make_CB_shader_coherent(sctx, tex->buffer.b.b.nr_samples, false, true /* no DCC */);
 }
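
The level_mask at the top of si_decompress_depth() is built with u_bit_consecutive(first_level, count); for counts below 32 that is simply a shifted run of ones, as the standalone sketch below shows (values invented).

#include <stdio.h>

static unsigned bit_consecutive(unsigned start, unsigned count)
{
   /* Equivalent to Mesa's u_bit_consecutive() for count < 32. */
   return ((1u << count) - 1) << start;
}

int main(void)
{
   unsigned first_level = 2, last_level = 5;
   unsigned level_mask = bit_consecutive(first_level, last_level - first_level + 1);

   printf("levels %u..%u -> mask 0x%x\n", first_level, last_level, level_mask); /* 0x3c */
   return 0;
}
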
 
-static void
-si_decompress_sampler_depth_textures(struct si_context *sctx,
-                                    struct si_samplers *textures)
+static void si_decompress_sampler_depth_textures(struct si_context *sctx,
+                                                 struct si_samplers *textures)
 {
-       unsigned i;
-       unsigned mask = textures->needs_depth_decompress_mask;
+   unsigned i;
+   unsigned mask = textures->needs_depth_decompress_mask;
 
-       while (mask) {
-               struct pipe_sampler_view *view;
-               struct si_sampler_view *sview;
-               struct si_texture *tex;
+   while (mask) {
+      struct pipe_sampler_view *view;
+      struct si_sampler_view *sview;
+      struct si_texture *tex;
 
-               i = u_bit_scan(&mask);
+      i = u_bit_scan(&mask);
 
-               view = textures->views[i];
-               assert(view);
-               sview = (struct si_sampler_view*)view;
+      view = textures->views[i];
+      assert(view);
+      sview = (struct si_sampler_view *)view;
 
-               tex = (struct si_texture *)view->texture;
-               assert(tex->db_compatible);
+      tex = (struct si_texture *)view->texture;
+      assert(tex->db_compatible);
 
-               si_decompress_depth(sctx, tex,
-                                   sview->is_stencil_sampler ? PIPE_MASK_S : PIPE_MASK_Z,
-                                   view->u.tex.first_level, view->u.tex.last_level,
-                                   0, util_max_layer(&tex->buffer.b.b, view->u.tex.first_level));
-       }
+      si_decompress_depth(sctx, tex, sview->is_stencil_sampler ? PIPE_MASK_S : PIPE_MASK_Z,
+                          view->u.tex.first_level, view->u.tex.last_level, 0,
+                          util_max_layer(&tex->buffer.b.b, view->u.tex.first_level));
+   }
 }
 
-static void si_blit_decompress_color(struct si_context *sctx,
-                                    struct si_texture *tex,
-                                    unsigned first_level, unsigned last_level,
-                                    unsigned first_layer, unsigned last_layer,
-                                    bool need_dcc_decompress,
-                                    bool need_fmask_expand)
+static void si_blit_decompress_color(struct si_context *sctx, struct si_texture *tex,
+                                     unsigned first_level, unsigned last_level,
+                                     unsigned first_layer, unsigned last_layer,
+                                     bool need_dcc_decompress, bool need_fmask_expand)
 {
-       void* custom_blend;
-       unsigned layer, checked_last_layer, max_layer;
-       unsigned level_mask =
-               u_bit_consecutive(first_level, last_level - first_level + 1);
-
-       if (!need_dcc_decompress)
-               level_mask &= tex->dirty_level_mask;
-       if (!level_mask)
-               goto expand_fmask;
-
-       if (unlikely(sctx->log))
-               u_log_printf(sctx->log,
-                            "\n------------------------------------------------\n"
-                            "Decompress Color (levels %u - %u, mask 0x%x)\n\n",
-                            first_level, last_level, level_mask);
-
-       if (need_dcc_decompress) {
-               custom_blend = sctx->custom_blend_dcc_decompress;
-
-               assert(tex->surface.dcc_offset);
-
-               /* disable levels without DCC */
-               for (int i = first_level; i <= last_level; i++) {
-                       if (!vi_dcc_enabled(tex, i))
-                               level_mask &= ~(1 << i);
-               }
-       } else if (tex->surface.fmask_size) {
-               custom_blend = sctx->custom_blend_fmask_decompress;
-       } else {
-               custom_blend = sctx->custom_blend_eliminate_fastclear;
-       }
-
-       sctx->decompression_enabled = true;
-
-       while (level_mask) {
-               unsigned level = u_bit_scan(&level_mask);
-
-               /* The smaller the mipmap level, the less layers there are
-                * as far as 3D textures are concerned. */
-               max_layer = util_max_layer(&tex->buffer.b.b, level);
-               checked_last_layer = MIN2(last_layer, max_layer);
-
-               for (layer = first_layer; layer <= checked_last_layer; layer++) {
-                       struct pipe_surface *cbsurf, surf_tmpl;
-
-                       surf_tmpl.format = tex->buffer.b.b.format;
-                       surf_tmpl.u.tex.level = level;
-                       surf_tmpl.u.tex.first_layer = layer;
-                       surf_tmpl.u.tex.last_layer = layer;
-                       cbsurf = sctx->b.create_surface(&sctx->b, &tex->buffer.b.b, &surf_tmpl);
-
-                       /* Required before and after FMASK and DCC_DECOMPRESS. */
-                       if (custom_blend == sctx->custom_blend_fmask_decompress ||
-                           custom_blend == sctx->custom_blend_dcc_decompress)
-                               sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_CB;
-
-                       si_blitter_begin(sctx, SI_DECOMPRESS);
-