radv: hardcode the number of waves for the GFX6 LS-HS bug
[mesa.git] / src / amd / vulkan / radv_nir_to_llvm.c
index 1c1633a51e77486e391b88bd4f606c163939f76c..5b4ccc60e7237ea19d5b0d9089d27c3fad55684f 100644 (file)
@@ -48,7 +48,8 @@
 struct radv_shader_context {
        struct ac_llvm_context ac;
        const struct radv_nir_compiler_options *options;
-       struct radv_shader_variant_info *shader_info;
+       struct radv_shader_info *shader_info;
+       const struct nir_shader *shader;
        struct ac_shader_abi abi;
 
        unsigned max_workgroup_size;
@@ -104,18 +105,9 @@ struct radv_shader_context {
        LLVMValueRef gs_generated_prims[4];
        LLVMValueRef gs_ngg_emit;
        LLVMValueRef gs_ngg_scratch;
-       unsigned gs_max_out_vertices;
-       unsigned gs_output_prim;
 
-       unsigned tes_primitive_mode;
-
-       uint32_t tcs_patch_outputs_read;
-       uint64_t tcs_outputs_read;
-       uint32_t tcs_vertices_per_patch;
        uint32_t tcs_num_inputs;
        uint32_t tcs_num_patches;
-       uint32_t max_gsvs_emit_size;
-       uint32_t gsvs_vertex_size;
 
        LLVMValueRef vertexptr; /* GFX10 only */
 };
@@ -159,13 +151,13 @@ static unsigned
 get_tcs_num_patches(struct radv_shader_context *ctx)
 {
        unsigned num_tcs_input_cp = ctx->options->key.tcs.input_vertices;
-       unsigned num_tcs_output_cp = ctx->tcs_vertices_per_patch;
+       unsigned num_tcs_output_cp = ctx->shader->info.tess.tcs_vertices_out;
        uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
        uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
-       uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
-       uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
+       uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
+       uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->tcs.patch_outputs_written);
        uint32_t output_vertex_size = num_tcs_outputs * 16;
-       uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
+       uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
        uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
        unsigned num_patches;
        unsigned hardware_lds_size;
@@ -198,7 +190,7 @@ get_tcs_num_patches(struct radv_shader_context *ctx)
 
        /* GFX6 bug workaround - limit LS-HS threadgroups to only one wave. */
        if (ctx->options->chip_class == GFX6) {
-               unsigned one_wave = ctx->options->wave_size / MAX2(num_tcs_input_cp, num_tcs_output_cp);
+               unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
                num_patches = MIN2(num_patches, one_wave);
        }
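        /* Editor's note, illustrative example only (numbers are hypothetical):
         * GFX6 always runs LS-HS with 64-lane waves, so with e.g. 3 input and
         * 4 output control points per patch the clamp above limits the
         * threadgroup to 64 / MAX2(3, 4) = 16 patches, i.e. a single wave. */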
        return num_patches;
@@ -217,9 +209,9 @@ calculate_tess_lds_size(struct radv_shader_context *ctx)
        unsigned num_patches;
        unsigned lds_size;
 
-       num_tcs_output_cp = ctx->tcs_vertices_per_patch;
-       num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
-       num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
+       num_tcs_output_cp = ctx->shader->info.tess.tcs_vertices_out;
+       num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
+       num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->tcs.patch_outputs_written);
 
        input_vertex_size = ctx->tcs_num_inputs * 16;
        output_vertex_size = num_tcs_outputs * 16;
@@ -270,10 +262,10 @@ get_tcs_in_patch_stride(struct radv_shader_context *ctx)
 static LLVMValueRef
 get_tcs_out_patch_stride(struct radv_shader_context *ctx)
 {
-       uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
-       uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->info.tcs.patch_outputs_written);
+       uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
+       uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->shader_info->tcs.patch_outputs_written);
        uint32_t output_vertex_size = num_tcs_outputs * 16;
-       uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
+       uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
        uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
        output_patch_size /= 4;
        return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
@@ -282,7 +274,7 @@ get_tcs_out_patch_stride(struct radv_shader_context *ctx)
 static LLVMValueRef
 get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
 {
-       uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
+       uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
        uint32_t output_vertex_size = num_tcs_outputs * 16;
        output_vertex_size /= 4;
        return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
@@ -310,9 +302,9 @@ get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
        uint32_t input_patch_size = ctx->options->key.tcs.input_vertices * input_vertex_size;
        uint32_t output_patch0_offset = input_patch_size;
 
-       uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
+       uint32_t num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
        uint32_t output_vertex_size = num_tcs_outputs * 16;
-       uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
+       uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
        unsigned num_patches = ctx->tcs_num_patches;
 
        output_patch0_offset *= num_patches;
@@ -517,17 +509,17 @@ static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
 {
        switch (stage) {
        case MESA_SHADER_VERTEX:
-               if (ctx->shader_info->info.needs_multiview_view_index ||
+               if (ctx->shader_info->needs_multiview_view_index ||
                    (!ctx->options->key.vs_common_out.as_es && !ctx->options->key.vs_common_out.as_ls && ctx->options->key.has_multiview_view_index))
                        return true;
                break;
        case MESA_SHADER_TESS_EVAL:
-               if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.vs_common_out.as_es && ctx->options->key.has_multiview_view_index))
+               if (ctx->shader_info->needs_multiview_view_index || (!ctx->options->key.vs_common_out.as_es && ctx->options->key.has_multiview_view_index))
                        return true;
                break;
        case MESA_SHADER_GEOMETRY:
        case MESA_SHADER_TESS_CTRL:
-               if (ctx->shader_info->info.needs_multiview_view_index)
+               if (ctx->shader_info->needs_multiview_view_index)
                        return true;
                break;
        default:
@@ -541,9 +533,9 @@ count_vs_user_sgprs(struct radv_shader_context *ctx)
 {
        uint8_t count = 0;
 
-       if (ctx->shader_info->info.vs.has_vertex_buffers)
+       if (ctx->shader_info->vs.has_vertex_buffers)
                count++;
-       count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;
+       count += ctx->shader_info->vs.needs_draw_id ? 3 : 2;
 
        return count;
 }
@@ -554,42 +546,42 @@ static void allocate_inline_push_consts(struct radv_shader_context *ctx,
        uint8_t remaining_sgprs = user_sgpr_info->remaining_sgprs;
 
        /* Only supported if shaders use push constants. */
-       if (ctx->shader_info->info.min_push_constant_used == UINT8_MAX)
+       if (ctx->shader_info->min_push_constant_used == UINT8_MAX)
                return;
 
        /* Only supported if shaders don't have indirect push constants. */
-       if (ctx->shader_info->info.has_indirect_push_constants)
+       if (ctx->shader_info->has_indirect_push_constants)
                return;
 
        /* Only supported for 32-bit push constants. */
-       if (!ctx->shader_info->info.has_only_32bit_push_constants)
+       if (!ctx->shader_info->has_only_32bit_push_constants)
                return;
 
        uint8_t num_push_consts =
-               (ctx->shader_info->info.max_push_constant_used -
-                ctx->shader_info->info.min_push_constant_used) / 4;
+               (ctx->shader_info->max_push_constant_used -
+                ctx->shader_info->min_push_constant_used) / 4;
 
        /* Check if the number of user SGPRs is large enough. */
        if (num_push_consts < remaining_sgprs) {
-               ctx->shader_info->info.num_inline_push_consts = num_push_consts;
+               ctx->shader_info->num_inline_push_consts = num_push_consts;
        } else {
-               ctx->shader_info->info.num_inline_push_consts = remaining_sgprs;
+               ctx->shader_info->num_inline_push_consts = remaining_sgprs;
        }
 
        /* Clamp to the maximum number of allowed inlined push constants. */
-       if (ctx->shader_info->info.num_inline_push_consts > AC_MAX_INLINE_PUSH_CONSTS)
-               ctx->shader_info->info.num_inline_push_consts = AC_MAX_INLINE_PUSH_CONSTS;
+       if (ctx->shader_info->num_inline_push_consts > AC_MAX_INLINE_PUSH_CONSTS)
+               ctx->shader_info->num_inline_push_consts = AC_MAX_INLINE_PUSH_CONSTS;
 
-       if (ctx->shader_info->info.num_inline_push_consts == num_push_consts &&
-           !ctx->shader_info->info.loads_dynamic_offsets) {
+       if (ctx->shader_info->num_inline_push_consts == num_push_consts &&
+           !ctx->shader_info->loads_dynamic_offsets) {
                /* Disable the default push constants path if all constants are
                 * inlined and if shaders don't use dynamic descriptors.
                 */
-               ctx->shader_info->info.loads_push_constants = false;
+               ctx->shader_info->loads_push_constants = false;
        }
 
-       ctx->shader_info->info.base_inline_push_consts =
-               ctx->shader_info->info.min_push_constant_used / 4;
+       ctx->shader_info->base_inline_push_consts =
+               ctx->shader_info->min_push_constant_used / 4;
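+
+       /* Editor's note, illustrative numbers only: with
+        * min_push_constant_used = 16, max_push_constant_used = 32 and enough
+        * spare user SGPRs, (32 - 16) / 4 = 4 dwords are inlined starting at
+        * dword 16 / 4 = 4, and the push-constant pointer is dropped when the
+        * shader also loads no dynamic offsets.
+        */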
 }
 
 static void allocate_user_sgprs(struct radv_shader_context *ctx,
@@ -612,7 +604,7 @@ static void allocate_user_sgprs(struct radv_shader_context *ctx,
                user_sgpr_info->need_ring_offsets = true;
 
        if (stage == MESA_SHADER_FRAGMENT &&
-           ctx->shader_info->info.ps.needs_sample_positions)
+           ctx->shader_info->ps.needs_sample_positions)
                user_sgpr_info->need_ring_offsets = true;
 
        /* 2 user sgprs will nearly always be allocated for scratch/rings */
@@ -622,11 +614,11 @@ static void allocate_user_sgprs(struct radv_shader_context *ctx,
 
        switch (stage) {
        case MESA_SHADER_COMPUTE:
-               if (ctx->shader_info->info.cs.uses_grid_size)
+               if (ctx->shader_info->cs.uses_grid_size)
                        user_sgpr_count += 3;
                break;
        case MESA_SHADER_FRAGMENT:
-               user_sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
+               user_sgpr_count += ctx->shader_info->ps.needs_sample_positions;
                break;
        case MESA_SHADER_VERTEX:
                if (!ctx->is_gs_copy_shader)
@@ -654,16 +646,16 @@ static void allocate_user_sgprs(struct radv_shader_context *ctx,
        if (needs_view_index)
                user_sgpr_count++;
 
-       if (ctx->shader_info->info.loads_push_constants)
+       if (ctx->shader_info->loads_push_constants)
                user_sgpr_count++;
 
-       if (ctx->streamout_buffers)
+       if (ctx->shader_info->so.num_outputs)
                user_sgpr_count++;
 
        uint32_t available_sgprs = ctx->options->chip_class >= GFX9 && stage != MESA_SHADER_COMPUTE ? 32 : 16;
        uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
        uint32_t num_desc_set =
-               util_bitcount(ctx->shader_info->info.desc_set_used_mask);
+               util_bitcount(ctx->shader_info->desc_set_used_mask);
 
        if (remaining_sgprs < num_desc_set) {
                user_sgpr_info->indirect_all_descriptor_sets = true;
@@ -685,7 +677,7 @@ declare_global_input_sgprs(struct radv_shader_context *ctx,
 
        /* 1 for each descriptor set */
        if (!user_sgpr_info->indirect_all_descriptor_sets) {
-               uint32_t mask = ctx->shader_info->info.desc_set_used_mask;
+               uint32_t mask = ctx->shader_info->desc_set_used_mask;
 
                while (mask) {
                        int i = u_bit_scan(&mask);
@@ -697,19 +689,19 @@ declare_global_input_sgprs(struct radv_shader_context *ctx,
                        desc_sets);
        }
 
-       if (ctx->shader_info->info.loads_push_constants) {
+       if (ctx->shader_info->loads_push_constants) {
                /* 1 for push constants and dynamic descriptors */
                add_arg(args, ARG_SGPR, type, &ctx->abi.push_constants);
        }
 
-       for (unsigned i = 0; i < ctx->shader_info->info.num_inline_push_consts; i++) {
+       for (unsigned i = 0; i < ctx->shader_info->num_inline_push_consts; i++) {
                add_arg(args, ARG_SGPR, ctx->ac.i32,
                        &ctx->abi.inline_push_consts[i]);
        }
-       ctx->abi.num_inline_push_consts = ctx->shader_info->info.num_inline_push_consts;
-       ctx->abi.base_inline_push_consts = ctx->shader_info->info.base_inline_push_consts;
+       ctx->abi.num_inline_push_consts = ctx->shader_info->num_inline_push_consts;
+       ctx->abi.base_inline_push_consts = ctx->shader_info->base_inline_push_consts;
 
-       if (ctx->shader_info->info.so.num_outputs) {
+       if (ctx->shader_info->so.num_outputs) {
                add_arg(args, ARG_SGPR,
                        ac_array_in_const32_addr_space(ctx->ac.v4i32),
                        &ctx->streamout_buffers);
@@ -726,14 +718,14 @@ declare_vs_specific_input_sgprs(struct radv_shader_context *ctx,
        if (!ctx->is_gs_copy_shader &&
            (stage == MESA_SHADER_VERTEX ||
             (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
-               if (ctx->shader_info->info.vs.has_vertex_buffers) {
+               if (ctx->shader_info->vs.has_vertex_buffers) {
                        add_arg(args, ARG_SGPR,
                                ac_array_in_const32_addr_space(ctx->ac.v4i32),
                                &ctx->vertex_buffers);
                }
                add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.base_vertex);
                add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.start_instance);
-               if (ctx->shader_info->info.vs.needs_draw_id) {
+               if (ctx->shader_info->vs.needs_draw_id) {
                        add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->abi.draw_id);
                }
        }
@@ -779,8 +771,11 @@ declare_streamout_sgprs(struct radv_shader_context *ctx, gl_shader_stage stage,
 {
        int i;
 
+       if (ctx->options->use_ngg_streamout)
+               return;
+
        /* Streamout SGPRs. */
-       if (ctx->shader_info->info.so.num_outputs) {
+       if (ctx->shader_info->so.num_outputs) {
                assert(stage == MESA_SHADER_VERTEX ||
                       stage == MESA_SHADER_TESS_EVAL);
 
@@ -796,7 +791,7 @@ declare_streamout_sgprs(struct radv_shader_context *ctx, gl_shader_stage stage,
 
        /* A streamout buffer offset is loaded if the stride is non-zero. */
        for (i = 0; i < 4; i++) {
-               if (!ctx->shader_info->info.so.strides[i])
+               if (!ctx->shader_info->so.strides[i])
                        continue;
 
                add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_offset[i]);
@@ -817,7 +812,7 @@ set_global_input_locs(struct radv_shader_context *ctx,
                      const struct user_sgpr_info *user_sgpr_info,
                      LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
 {
-       uint32_t mask = ctx->shader_info->info.desc_set_used_mask;
+       uint32_t mask = ctx->shader_info->desc_set_used_mask;
 
        if (!user_sgpr_info->indirect_all_descriptor_sets) {
                while (mask) {
@@ -841,13 +836,13 @@ set_global_input_locs(struct radv_shader_context *ctx,
                ctx->shader_info->need_indirect_descriptor_sets = true;
        }
 
-       if (ctx->shader_info->info.loads_push_constants) {
+       if (ctx->shader_info->loads_push_constants) {
                set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx);
        }
 
-       if (ctx->shader_info->info.num_inline_push_consts) {
+       if (ctx->shader_info->num_inline_push_consts) {
                set_loc_shader(ctx, AC_UD_INLINE_PUSH_CONSTANTS, user_sgpr_idx,
-                              ctx->shader_info->info.num_inline_push_consts);
+                              ctx->shader_info->num_inline_push_consts);
        }
 
        if (ctx->streamout_buffers) {
@@ -865,13 +860,13 @@ set_vs_specific_input_locs(struct radv_shader_context *ctx,
        if (!ctx->is_gs_copy_shader &&
            (stage == MESA_SHADER_VERTEX ||
             (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
-               if (ctx->shader_info->info.vs.has_vertex_buffers) {
+               if (ctx->shader_info->vs.has_vertex_buffers) {
                        set_loc_shader_ptr(ctx, AC_UD_VS_VERTEX_BUFFERS,
                                           user_sgpr_idx);
                }
 
                unsigned vs_num = 2;
-               if (ctx->shader_info->info.vs.needs_draw_id)
+               if (ctx->shader_info->vs.needs_draw_id)
                        vs_num++;
 
                set_loc_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE,
@@ -947,20 +942,20 @@ static void create_function(struct radv_shader_context *ctx,
                declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
                                           &desc_sets);
 
-               if (ctx->shader_info->info.cs.uses_grid_size) {
+               if (ctx->shader_info->cs.uses_grid_size) {
                        add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
                                &ctx->abi.num_work_groups);
                }
 
                for (int i = 0; i < 3; i++) {
                        ctx->abi.workgroup_ids[i] = NULL;
-                       if (ctx->shader_info->info.cs.uses_block_id[i]) {
+                       if (ctx->shader_info->cs.uses_block_id[i]) {
                                add_arg(&args, ARG_SGPR, ctx->ac.i32,
                                        &ctx->abi.workgroup_ids[i]);
                        }
                }
 
-               if (ctx->shader_info->info.cs.uses_local_invocation_idx)
+               if (ctx->shader_info->cs.uses_local_invocation_idx)
                        add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.tg_size);
                add_arg(&args, ARG_VGPR, ctx->ac.v3i32,
                        &ctx->abi.local_invocation_ids);
@@ -1195,7 +1190,7 @@ static void create_function(struct radv_shader_context *ctx,
 
        switch (stage) {
        case MESA_SHADER_COMPUTE:
-               if (ctx->shader_info->info.cs.uses_grid_size) {
+               if (ctx->shader_info->cs.uses_grid_size) {
                        set_loc_shader(ctx, AC_UD_CS_GRID_SIZE,
                                       &user_sgpr_idx, 3);
                }
@@ -1328,12 +1323,12 @@ static LLVMValueRef get_non_vertex_index_offset(struct radv_shader_context *ctx)
        uint32_t num_patches = ctx->tcs_num_patches;
        uint32_t num_tcs_outputs;
        if (ctx->stage == MESA_SHADER_TESS_CTRL)
-               num_tcs_outputs = util_last_bit64(ctx->shader_info->info.tcs.outputs_written);
+               num_tcs_outputs = util_last_bit64(ctx->shader_info->tcs.outputs_written);
        else
                num_tcs_outputs = ctx->options->key.tes.tcs_num_outputs;
 
        uint32_t output_vertex_size = num_tcs_outputs * 16;
-       uint32_t pervertex_output_patch_size = ctx->tcs_vertices_per_patch * output_vertex_size;
+       uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
 
        return LLVMConstInt(ctx->ac.i32, pervertex_output_patch_size * num_patches, false);
 }
@@ -1343,7 +1338,7 @@ static LLVMValueRef calc_param_stride(struct radv_shader_context *ctx,
 {
        LLVMValueRef param_stride;
        if (vertex_index)
-               param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch * ctx->tcs_num_patches, false);
+               param_stride = LLVMConstInt(ctx->ac.i32, ctx->shader->info.tess.tcs_vertices_out * ctx->tcs_num_patches, false);
        else
                param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_num_patches, false);
        return param_stride;
@@ -1356,7 +1351,7 @@ static LLVMValueRef get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
        LLVMValueRef base_addr;
        LLVMValueRef param_stride, constant16;
        LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
-       LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->tcs_vertices_per_patch, false);
+       LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->shader->info.tess.tcs_vertices_out, false);
        constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
        param_stride = calc_param_stride(ctx, vertex_index);
        if (vertex_index) {
@@ -1503,10 +1498,10 @@ store_tcs_output(struct ac_shader_abi *abi,
        bool store_lds = true;
 
        if (is_patch) {
-               if (!(ctx->tcs_patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
+               if (!(ctx->shader->info.patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
                        store_lds = false;
        } else {
-               if (!(ctx->tcs_outputs_read & (1ULL << location)))
+               if (!(ctx->shader->info.outputs_read & (1ULL << location)))
                        store_lds = false;
        }
 
@@ -1606,6 +1601,18 @@ load_tes_input(struct ac_shader_abi *abi,
        return result;
 }
 
+static LLVMValueRef
+radv_emit_fetch_64bit(struct radv_shader_context *ctx,
+                     LLVMTypeRef type, LLVMValueRef a, LLVMValueRef b)
+{
+       LLVMValueRef values[2] = {
+               ac_to_integer(&ctx->ac, a),
+               ac_to_integer(&ctx->ac, b),
+       };
+       LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, 2);
+       return LLVMBuildBitCast(ctx->ac.builder, result, type, "");
+}
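+
+/* Editor's note, usage sketch only: a 64-bit GS input is fetched as two
+ * consecutive dwords and recombined, roughly
+ *
+ *   lo = ac_lds_load(&ctx->ac, dw_addr);
+ *   hi = ac_lds_load(&ctx->ac, dw_addr + 1);   // next dword
+ *   value[i] = radv_emit_fetch_64bit(ctx, type, lo, hi);
+ *
+ * i.e. the two dwords are packed into a two-element vector and bitcast to
+ * the requested 64-bit type, as done in load_gs_input() below.
+ */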
+
 static LLVMValueRef
 load_gs_input(struct ac_shader_abi *abi,
              unsigned location,
@@ -1634,6 +1641,14 @@ load_gs_input(struct ac_shader_abi *abi,
                        dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
                                               LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
                        value[i] = ac_lds_load(&ctx->ac, dw_addr);
+
+                       if (ac_get_type_size(type) == 8) {
+                               dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
+                                                      LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index + 1, 0), "");
+                               LLVMValueRef tmp = ac_lds_load(&ctx->ac, dw_addr);
+
+                               value[i] = radv_emit_fetch_64bit(ctx, type, value[i], tmp);
+                       }
                } else {
                        LLVMValueRef soffset =
                                LLVMConstInt(ctx->ac.i32,
@@ -1645,6 +1660,21 @@ load_gs_input(struct ac_shader_abi *abi,
                                                        ctx->ac.i32_0,
                                                        vtx_offset, soffset,
                                                        0, ac_glc, true, false);
+
+                       if (ac_get_type_size(type) == 8) {
+                               soffset = LLVMConstInt(ctx->ac.i32,
+                                                      (param * 4 + i + const_index + 1) * 256,
+                                                      false);
+
+                               LLVMValueRef tmp =
+                                       ac_build_buffer_load(&ctx->ac,
+                                                            ctx->esgs_ring, 1,
+                                                            ctx->ac.i32_0,
+                                                            vtx_offset, soffset,
+                                                            0, ac_glc, true, false);
+
+                               value[i] = radv_emit_fetch_64bit(ctx, type, value[i], tmp);
+                       }
                }
 
                if (ac_get_type_size(type) == 2) {
@@ -1715,7 +1745,7 @@ static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
        struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
        uint8_t log2_ps_iter_samples;
 
-       if (ctx->shader_info->info.ps.force_persample) {
+       if (ctx->shader_info->ps.force_persample) {
                log2_ps_iter_samples =
                        util_logbase2(ctx->options->key.fs.num_samples);
        } else {
@@ -1766,19 +1796,23 @@ visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addr
                                       "");
 
        /* If this thread has already emitted the declared maximum number of
-        * vertices, kill it: excessive vertex emissions are not supposed to
-        * have any effect, and GS threads have no externally observable
-        * effects other than emitting vertices.
+        * vertices, don't emit any more: excessive vertex emissions are not
+        * supposed to have any effect.
         */
        can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
-                                LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
-       ac_build_kill_if_false(&ctx->ac, can_emit);
+                                LLVMConstInt(ctx->ac.i32, ctx->shader->info.gs.vertices_out, false), "");
+
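+       /* Editor's note: killing the excess threads is only safe when the GS
+        * has no memory side effects; a GS that writes memory must keep
+        * running past the skipped emit, so a regular branch (closed by
+        * ac_build_endif() below) is used instead.
+        */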
+       bool use_kill = !ctx->shader_info->gs.writes_memory;
+       if (use_kill)
+               ac_build_kill_if_false(&ctx->ac, can_emit);
+       else
+               ac_build_ifcc(&ctx->ac, can_emit, 6505);
 
        for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
                unsigned output_usage_mask =
-                       ctx->shader_info->info.gs.output_usage_mask[i];
+                       ctx->shader_info->gs.output_usage_mask[i];
                uint8_t output_stream =
-                       ctx->shader_info->info.gs.output_streams[i];
+                       ctx->shader_info->gs.output_streams[i];
                LLVMValueRef *out_ptr = &addrs[i * 4];
                int length = util_last_bit(output_usage_mask);
 
@@ -1794,7 +1828,7 @@ visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addr
                                                             out_ptr[j], "");
                        LLVMValueRef voffset =
                                LLVMConstInt(ctx->ac.i32, offset *
-                                            ctx->gs_max_out_vertices, false);
+                                            ctx->shader->info.gs.vertices_out, false);
 
                        offset++;
 
@@ -1819,6 +1853,9 @@ visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addr
        ac_build_sendmsg(&ctx->ac,
                         AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
                         ctx->gs_wave_id);
+
+       if (!use_kill)
+               ac_build_endif(&ctx->ac, 6505);
 }
 
 static void
@@ -1846,7 +1883,7 @@ load_tess_coord(struct ac_shader_abi *abi)
                ctx->ac.f32_0,
        };
 
-       if (ctx->tes_primitive_mode == GL_TRIANGLES)
+       if (ctx->shader->info.tess.primitive_mode == GL_TRIANGLES)
                coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
                                        LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");
 
@@ -2122,7 +2159,7 @@ handle_vs_input_decl(struct radv_shader_context *ctx,
        LLVMValueRef buffer_index;
        unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
        uint8_t input_usage_mask =
-               ctx->shader_info->info.vs.input_usage_mask[variable->data.location];
+               ctx->shader_info->vs.input_usage_mask[variable->data.location];
        unsigned num_input_channels = util_last_bit(input_usage_mask);
 
        variable->data.driver_location = variable->data.location * 4;
@@ -2283,27 +2320,6 @@ scan_shader_output_decl(struct radv_shader_context *ctx,
        }
 
        mask_attribs = ((1ull << attrib_count) - 1) << idx;
-       if (stage == MESA_SHADER_VERTEX ||
-           stage == MESA_SHADER_TESS_EVAL ||
-           stage == MESA_SHADER_GEOMETRY) {
-               if (idx == VARYING_SLOT_CLIP_DIST0) {
-                       if (stage == MESA_SHADER_VERTEX) {
-                               ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
-                               ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
-                               ctx->shader_info->vs.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
-                       }
-                       if (stage == MESA_SHADER_TESS_EVAL) {
-                               ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
-                               ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
-                               ctx->shader_info->tes.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
-                       }
-                       if (stage == MESA_SHADER_GEOMETRY) {
-                               ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
-                               ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
-                               ctx->shader_info->vs.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
-                       }
-               }
-       }
 
        ctx->output_mask |= mask_attribs;
 }
@@ -2585,7 +2601,7 @@ radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
                LLVMValueRef buf_ptr = ctx->streamout_buffers;
 
                for (i = 0; i < 4; i++) {
-                       uint16_t stride = ctx->shader_info->info.so.strides[i];
+                       uint16_t stride = ctx->shader_info->so.strides[i];
 
                        if (!stride)
                                continue;
@@ -2609,10 +2625,10 @@ radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
                }
 
                /* Write streamout data. */
-               for (i = 0; i < ctx->shader_info->info.so.num_outputs; i++) {
+               for (i = 0; i < ctx->shader_info->so.num_outputs; i++) {
                        struct radv_shader_output_values shader_out = {};
                        struct radv_stream_output *output =
-                               &ctx->shader_info->info.so.outputs[i];
+                               &ctx->shader_info->so.outputs[i];
 
                        if (stream != output->stream)
                                continue;
@@ -2815,19 +2831,8 @@ handle_vs_outputs_post(struct radv_shader_context *ctx,
               sizeof(outinfo->vs_output_param_offset));
        outinfo->pos_exports = 0;
 
-       if (ctx->output_mask & (1ull << VARYING_SLOT_PSIZ)) {
-               outinfo->writes_pointsize = true;
-       }
-
-       if (ctx->output_mask & (1ull << VARYING_SLOT_LAYER)) {
-               outinfo->writes_layer = true;
-       }
-
-       if (ctx->output_mask & (1ull << VARYING_SLOT_VIEWPORT)) {
-               outinfo->writes_viewport_index = true;
-       }
-
-       if (ctx->shader_info->info.so.num_outputs &&
+       if (!ctx->options->use_ngg_streamout &&
+           ctx->shader_info->so.num_outputs &&
            !ctx->is_gs_copy_shader) {
                /* The GS copy shader emission already emits streamout. */
                radv_emit_streamout(ctx, 0);
@@ -2847,14 +2852,14 @@ handle_vs_outputs_post(struct radv_shader_context *ctx,
                if (ctx->stage == MESA_SHADER_VERTEX &&
                    !ctx->is_gs_copy_shader) {
                        outputs[noutput].usage_mask =
-                               ctx->shader_info->info.vs.output_usage_mask[i];
+                               ctx->shader_info->vs.output_usage_mask[i];
                } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
                        outputs[noutput].usage_mask =
-                               ctx->shader_info->info.tes.output_usage_mask[i];
+                               ctx->shader_info->tes.output_usage_mask[i];
                } else {
                        assert(ctx->is_gs_copy_shader);
                        outputs[noutput].usage_mask =
-                               ctx->shader_info->info.gs.output_usage_mask[i];
+                               ctx->shader_info->gs.output_usage_mask[i];
                }
 
                for (unsigned j = 0; j < 4; j++) {
@@ -2867,8 +2872,6 @@ handle_vs_outputs_post(struct radv_shader_context *ctx,
 
        /* Export PrimitiveID. */
        if (export_prim_id) {
-               outinfo->export_prim_id = true;
-
                outputs[noutput].slot_name = VARYING_SLOT_PRIMITIVE_ID;
                outputs[noutput].slot_index = 0;
                outputs[noutput].usage_mask = 0x1;
@@ -2888,22 +2891,8 @@ handle_es_outputs_post(struct radv_shader_context *ctx,
                       struct radv_es_output_info *outinfo)
 {
        int j;
-       uint64_t max_output_written = 0;
        LLVMValueRef lds_base = NULL;
 
-       for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
-               int param_index;
-
-               if (!(ctx->output_mask & (1ull << i)))
-                       continue;
-
-               param_index = shader_io_get_unique_index(i);
-
-               max_output_written = MAX2(param_index, max_output_written);
-       }
-
-       outinfo->esgs_itemsize = (max_output_written + 1) * 16;
-
        if (ctx->ac.chip_class  >= GFX9) {
                unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
                LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
@@ -2927,11 +2916,11 @@ handle_es_outputs_post(struct radv_shader_context *ctx,
 
                if (ctx->stage == MESA_SHADER_VERTEX) {
                        output_usage_mask =
-                               ctx->shader_info->info.vs.output_usage_mask[i];
+                               ctx->shader_info->vs.output_usage_mask[i];
                } else {
                        assert(ctx->stage == MESA_SHADER_TESS_EVAL);
                        output_usage_mask =
-                               ctx->shader_info->info.tes.output_usage_mask[i];
+                               ctx->shader_info->tes.output_usage_mask[i];
                }
 
                param_index = shader_io_get_unique_index(i);
@@ -2973,7 +2962,7 @@ static void
 handle_ls_outputs_post(struct radv_shader_context *ctx)
 {
        LLVMValueRef vertex_id = ctx->rel_auto_id;
-       uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->info.vs.ls_outputs_written);
+       uint32_t num_tcs_inputs = util_last_bit64(ctx->shader_info->vs.ls_outputs_written);
        LLVMValueRef vertex_dw_stride = LLVMConstInt(ctx->ac.i32, num_tcs_inputs * 4, false);
        LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->ac.builder, vertex_id,
                                                 vertex_dw_stride, "");
@@ -3033,11 +3022,22 @@ static LLVMValueRef ngg_get_prim_cnt(struct radv_shader_context *ctx)
                            false);
 }
 
+static LLVMValueRef ngg_get_ordered_id(struct radv_shader_context *ctx)
+{
+       return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
+                           ctx->ac.i32_0,
+                           LLVMConstInt(ctx->ac.i32, 11, false),
+                           false);
+}
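+
+/* Editor's note (assumption about the gs_tg_info layout): the low bits
+ * extracted above (offset 0, width 11 here) carry the ordered threadgroup ID
+ * that build_streamout() below feeds to the llvm.amdgcn.ds.ordered.add GDS
+ * atomic, while ngg_get_prim_cnt() reads the primitive count from higher
+ * bits of the same SGPR.
+ */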
+
 static LLVMValueRef
 ngg_gs_get_vertex_storage(struct radv_shader_context *ctx)
 {
        unsigned num_outputs = util_bitcount64(ctx->output_mask);
 
+       if (ctx->options->key.has_multiview_view_index)
+               num_outputs++;
+
        LLVMTypeRef elements[2] = {
                LLVMArrayType(ctx->ac.i32, 4 * num_outputs),
                LLVMArrayType(ctx->ac.i8, 4),
@@ -3084,7 +3084,7 @@ ngg_gs_vertex_ptr(struct radv_shader_context *ctx, LLVMValueRef vertexidx)
        LLVMValueRef storage = ngg_gs_get_vertex_storage(ctx);
 
        /* gs_max_out_vertices = 2^(write_stride_2exp) * some odd number */
-       unsigned write_stride_2exp = ffs(ctx->gs_max_out_vertices) - 1;
+       unsigned write_stride_2exp = ffs(ctx->shader->info.gs.vertices_out) - 1;
        if (write_stride_2exp) {
                LLVMValueRef row =
                        LLVMBuildLShr(builder, vertexidx,
@@ -3106,7 +3106,7 @@ ngg_gs_emit_vertex_ptr(struct radv_shader_context *ctx, LLVMValueRef gsthread,
        LLVMBuilderRef builder = ctx->ac.builder;
        LLVMValueRef tmp;
 
-       tmp = LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false);
+       tmp = LLVMConstInt(ctx->ac.i32, ctx->shader->info.gs.vertices_out, false);
        tmp = LLVMBuildMul(builder, tmp, gsthread, "");
        const LLVMValueRef vertexidx = LLVMBuildAdd(builder, tmp, emitidx, "");
        return ngg_gs_vertex_ptr(ctx, vertexidx);
@@ -3175,11 +3175,511 @@ static void build_export_prim(struct radv_shader_context *ctx,
        ac_build_export(&ctx->ac, &args);
 }
 
+static struct radv_stream_output *
+radv_get_stream_output_by_loc(struct radv_streamout_info *so, unsigned location)
+{
+       for (unsigned i = 0; i < so->num_outputs; ++i) {
+               if (so->outputs[i].location == location)
+                       return &so->outputs[i];
+       }
+
+       return NULL;
+}
+
+static void build_streamout_vertex(struct radv_shader_context *ctx,
+                                  LLVMValueRef *so_buffer, LLVMValueRef *wg_offset_dw,
+                                  unsigned stream, LLVMValueRef offset_vtx,
+                                  LLVMValueRef vertexptr)
+{
+       struct radv_streamout_info *so = &ctx->shader_info->so;
+       LLVMBuilderRef builder = ctx->ac.builder;
+       LLVMValueRef offset[4] = {};
+       LLVMValueRef tmp;
+
+       for (unsigned buffer = 0; buffer < 4; ++buffer) {
+               if (!wg_offset_dw[buffer])
+                       continue;
+
+               tmp = LLVMBuildMul(builder, offset_vtx,
+                                  LLVMConstInt(ctx->ac.i32, so->strides[buffer], false), "");
+               tmp = LLVMBuildAdd(builder, wg_offset_dw[buffer], tmp, "");
+               offset[buffer] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 2, false), "");
+       }
+
+       if (ctx->stage == MESA_SHADER_GEOMETRY) {
+               struct radv_shader_output_values outputs[AC_LLVM_MAX_OUTPUTS];
+               unsigned noutput = 0;
+               unsigned out_idx = 0;
+
+               for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
+                       unsigned output_usage_mask =
+                               ctx->shader_info->gs.output_usage_mask[i];
+                       uint8_t output_stream =
+                               ctx->shader_info->gs.output_streams[i];
+
+                       if (!(ctx->output_mask & (1ull << i)) ||
+                           output_stream != stream)
+                               continue;
+
+                       outputs[noutput].slot_name = i;
+                       outputs[noutput].slot_index = i == VARYING_SLOT_CLIP_DIST1;
+                       outputs[noutput].usage_mask = output_usage_mask;
+
+                       int length = util_last_bit(output_usage_mask);
+
+                       for (unsigned j = 0; j < length; j++, out_idx++) {
+                               if (!(output_usage_mask & (1 << j)))
+                                       continue;
+
+                               tmp = ac_build_gep0(&ctx->ac, vertexptr,
+                                                   LLVMConstInt(ctx->ac.i32, out_idx, false));
+                               outputs[noutput].values[j] = LLVMBuildLoad(builder, tmp, "");
+                       }
+
+                       for (unsigned j = length; j < 4; j++)
+                               outputs[noutput].values[j] = LLVMGetUndef(ctx->ac.f32);
+
+                       noutput++;
+               }
+
+               for (unsigned i = 0; i < noutput; i++) {
+                       struct radv_stream_output *output =
+                               radv_get_stream_output_by_loc(so, outputs[i].slot_name);
+
+                       if (!output ||
+                           output->stream != stream)
+                               continue;
+
+                       struct radv_shader_output_values out = {};
+
+                       for (unsigned j = 0; j < 4; j++) {
+                               out.values[j] = outputs[i].values[j];
+                       }
+
+                       radv_emit_stream_output(ctx, so_buffer, offset, output, &out);
+               }
+       } else {
+               for (unsigned i = 0; i < so->num_outputs; ++i) {
+                       struct radv_stream_output *output =
+                               &ctx->shader_info->so.outputs[i];
+
+                       if (stream != output->stream)
+                               continue;
+
+                       struct radv_shader_output_values out = {};
+
+                       for (unsigned comp = 0; comp < 4; comp++) {
+                               if (!(output->component_mask & (1 << comp)))
+                                       continue;
+
+                               tmp = ac_build_gep0(&ctx->ac, vertexptr,
+                                                   LLVMConstInt(ctx->ac.i32, 4 * i + comp, false));
+                               out.values[comp] = LLVMBuildLoad(builder, tmp, "");
+                       }
+
+                       radv_emit_stream_output(ctx, so_buffer, offset, output, &out);
+               }
+       }
+}
+
+struct ngg_streamout {
+       LLVMValueRef num_vertices;
+
+       /* per-thread data */
+       LLVMValueRef prim_enable[4]; /* i1 per stream */
+       LLVMValueRef vertices[3]; /* [N x i32] addrspace(LDS)* */
+
+       /* Output */
+       LLVMValueRef emit[4]; /* per-stream emitted primitives (only valid for used streams) */
+};
+
+/**
+ * Build streamout logic.
+ *
+ * Implies a barrier.
+ *
+ * Writes number of emitted primitives to gs_ngg_scratch[4:7].
+ *
+ * Clobbers gs_ngg_scratch[8:].
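+ *
+ * Editor's summary of the flow (not part of the original comment): wave 0
+ * advances the global streamout buffer offsets in GDS with an ordered add,
+ * clamps each stream's emit count to the space left in its buffers and
+ * publishes the offsets/counts through gs_ngg_scratch; after the barrier,
+ * every thread whose primitive fits writes its vertices via
+ * build_streamout_vertex().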
+ */
+static void build_streamout(struct radv_shader_context *ctx,
+                           struct ngg_streamout *nggso)
+{
+       struct radv_streamout_info *so = &ctx->shader_info->so;
+       LLVMBuilderRef builder = ctx->ac.builder;
+       LLVMValueRef buf_ptr = ctx->streamout_buffers;
+       LLVMValueRef tid = get_thread_id_in_tg(ctx);
+       LLVMValueRef cond, tmp, tmp2;
+       LLVMValueRef i32_2 = LLVMConstInt(ctx->ac.i32, 2, false);
+       LLVMValueRef i32_4 = LLVMConstInt(ctx->ac.i32, 4, false);
+       LLVMValueRef i32_8 = LLVMConstInt(ctx->ac.i32, 8, false);
+       LLVMValueRef so_buffer[4] = {};
+       unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
+                                       (nggso->vertices[2] ? 1 : 0);
+       LLVMValueRef prim_stride_dw[4] = {};
+       LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->ac.i32);
+       int stream_for_buffer[4] = { -1, -1, -1, -1 };
+       unsigned bufmask_for_stream[4] = {};
+       bool isgs = ctx->stage == MESA_SHADER_GEOMETRY;
+       unsigned scratch_emit_base = isgs ? 4 : 0;
+       LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->ac.i32_0;
+       unsigned scratch_offset_base = isgs ? 8 : 4;
+       LLVMValueRef scratch_offset_basev = isgs ? i32_8 : i32_4;
+
+       ac_llvm_add_target_dep_function_attr(ctx->main_function,
+                                            "amdgpu-gds-size", 256);
+
+       /* Determine the mapping of streamout buffers to vertex streams. */
+       for (unsigned i = 0; i < so->num_outputs; ++i) {
+               unsigned buf = so->outputs[i].buffer;
+               unsigned stream = so->outputs[i].stream;
+               assert(stream_for_buffer[buf] < 0 || stream_for_buffer[buf] == stream);
+               stream_for_buffer[buf] = stream;
+               bufmask_for_stream[stream] |= 1 << buf;
+       }
+
+       for (unsigned buffer = 0; buffer < 4; ++buffer) {
+               if (stream_for_buffer[buffer] == -1)
+                       continue;
+
+               assert(so->strides[buffer]);
+
+               LLVMValueRef stride_for_buffer =
+                       LLVMConstInt(ctx->ac.i32, so->strides[buffer], false);
+               prim_stride_dw[buffer] =
+                       LLVMBuildMul(builder, stride_for_buffer,
+                                    nggso->num_vertices, "");
+               prim_stride_dw_vgpr = ac_build_writelane(
+                       &ctx->ac, prim_stride_dw_vgpr, prim_stride_dw[buffer],
+                       LLVMConstInt(ctx->ac.i32, buffer, false));
+
+               LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, buffer, false);
+               so_buffer[buffer] = ac_build_load_to_sgpr(&ctx->ac, buf_ptr,
+                                                         offset);
+       }
+
+       cond = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
+       ac_build_ifcc(&ctx->ac, cond, 5200);
+       {
+               LLVMTypeRef gdsptr = LLVMPointerType(ctx->ac.i32, AC_ADDR_SPACE_GDS);
+               LLVMValueRef gdsbase = LLVMBuildIntToPtr(builder, ctx->ac.i32_0, gdsptr, "");
+
+               /* Advance the streamout offsets in GDS. */
+               LLVMValueRef offsets_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
+               LLVMValueRef generated_by_stream_vgpr = ac_build_alloca_undef(&ctx->ac, ctx->ac.i32, "");
+
+               cond = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
+               ac_build_ifcc(&ctx->ac, cond, 5210);
+               {
+                       /* Fetch the number of generated primitives and store
+                        * it in GDS for later use.
+                        */
+                       if (isgs) {
+                               tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tid);
+                               tmp = LLVMBuildLoad(builder, tmp, "");
+                       } else {
+                               tmp = ac_build_writelane(&ctx->ac, ctx->ac.i32_0,
+                                               ngg_get_prim_cnt(ctx), ctx->ac.i32_0);
+                       }
+                       LLVMBuildStore(builder, tmp, generated_by_stream_vgpr);
+
+                       unsigned swizzle[4];
+                       int unused_stream = -1;
+                       for (unsigned stream = 0; stream < 4; ++stream) {
+                               if (!ctx->shader_info->gs.num_stream_output_components[stream]) {
+                                       unused_stream = stream;
+                                       break;
+                               }
+                       }
+                       for (unsigned buffer = 0; buffer < 4; ++buffer) {
+                               if (stream_for_buffer[buffer] >= 0) {
+                                       swizzle[buffer] = stream_for_buffer[buffer];
+                               } else {
+                                       assert(unused_stream >= 0);
+                                       swizzle[buffer] = unused_stream;
+                               }
+                       }
+
+                       tmp = ac_build_quad_swizzle(&ctx->ac, tmp,
+                               swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
+                       tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
+
+                       LLVMValueRef args[] = {
+                               LLVMBuildIntToPtr(builder, ngg_get_ordered_id(ctx), gdsptr, ""),
+                               tmp,
+                               ctx->ac.i32_0, // ordering
+                               ctx->ac.i32_0, // scope
+                               ctx->ac.i1false, // isVolatile
+                               LLVMConstInt(ctx->ac.i32, 4 << 24, false), // OA index
+                               ctx->ac.i1true, // wave release
+                               ctx->ac.i1true, // wave done
+                       };
+
+                       tmp = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.ds.ordered.add",
+                                                ctx->ac.i32, args, ARRAY_SIZE(args), 0);
+
+                       /* Keep offsets in a VGPR for quick retrieval via readlane by
+                        * the first wave for bounds checking, and also store in LDS
+                        * for retrieval by all waves later. */
+                       LLVMBuildStore(builder, tmp, offsets_vgpr);
+
+                       tmp2 = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
+                                           scratch_offset_basev, "");
+                       tmp2 = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp2);
+                       LLVMBuildStore(builder, tmp, tmp2);
+               }
+               ac_build_endif(&ctx->ac, 5210);
+
+               /* Determine the max emit per buffer. This is done via the SALU, in part
+                * because LLVM can't generate divide-by-multiply if we try to do this
+                * via VALU with one lane per buffer.
+                */
+               LLVMValueRef max_emit[4] = {};
+               for (unsigned buffer = 0; buffer < 4; ++buffer) {
+                       if (stream_for_buffer[buffer] == -1)
+                               continue;
+
+                       /* Compute the streamout buffer size in DWORD. */
+                       LLVMValueRef bufsize_dw =
+                               LLVMBuildLShr(builder,
+                                       LLVMBuildExtractElement(builder, so_buffer[buffer], i32_2, ""),
+                                       i32_2, "");
+
+                       /* Load the streamout buffer offset from GDS. */
+                       tmp = LLVMBuildLoad(builder, offsets_vgpr, "");
+                       LLVMValueRef offset_dw =
+                               ac_build_readlane(&ctx->ac, tmp,
+                                               LLVMConstInt(ctx->ac.i32, buffer, false));
+
+                       /* Compute the remaining size to emit. */
+                       LLVMValueRef remaining_dw =
+                               LLVMBuildSub(builder, bufsize_dw, offset_dw, "");
+                       tmp = LLVMBuildUDiv(builder, remaining_dw,
+                                           prim_stride_dw[buffer], "");
+
+                       cond = LLVMBuildICmp(builder, LLVMIntULT,
+                                            bufsize_dw, offset_dw, "");
+                       max_emit[buffer] = LLVMBuildSelect(builder, cond,
+                                                          ctx->ac.i32_0, tmp, "");
+               }
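+
+               /* Editor's worked example, illustrative numbers only: with a
+                * 4096-dword buffer, a current offset of 4000 dwords and a
+                * primitive stride of 24 dwords, max_emit = (4096 - 4000) / 24
+                * = 4 primitives; if the offset already exceeds the buffer
+                * size, the select above forces max_emit to 0.
+                */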
+
+               /* Determine the number of emitted primitives per stream and fixup the
+                * GDS counter if necessary.
+                *
+                * This is complicated by the fact that a single stream can emit to
+                * multiple buffers (but luckily not vice versa).
+                */
+               LLVMValueRef emit_vgpr = ctx->ac.i32_0;
+
+               for (unsigned stream = 0; stream < 4; ++stream) {
+                       if (!ctx->shader_info->gs.num_stream_output_components[stream])
+                               continue;
+
+                       /* Load the number of generated primitives from GDS and
+                        * determine that number for the given stream.
+                        */
+                       tmp = LLVMBuildLoad(builder, generated_by_stream_vgpr, "");
+                       LLVMValueRef generated =
+                               ac_build_readlane(&ctx->ac, tmp,
+                                                 LLVMConstInt(ctx->ac.i32, stream, false));
+
+                       /* Compute the number of emitted primitives. */
+                       LLVMValueRef emit = generated;
+                       for (unsigned buffer = 0; buffer < 4; ++buffer) {
+                               if (stream_for_buffer[buffer] == stream)
+                                       emit = ac_build_umin(&ctx->ac, emit, max_emit[buffer]);
+                       }
+
+                       /* Store the number of emitted primitives for that
+                        * stream.
+                        */
+                       emit_vgpr = ac_build_writelane(&ctx->ac, emit_vgpr, emit,
+                                                      LLVMConstInt(ctx->ac.i32, stream, false));
+
+                       /* Fixup the offset using a plain GDS atomic if we overflowed. */
+                       cond = LLVMBuildICmp(builder, LLVMIntULT, emit, generated, "");
+                       ac_build_ifcc(&ctx->ac, cond, 5221); /* scalar branch */
+                       tmp = LLVMBuildLShr(builder,
+                                           LLVMConstInt(ctx->ac.i32, bufmask_for_stream[stream], false),
+                                           ac_get_thread_id(&ctx->ac), "");
+                       tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
+                       ac_build_ifcc(&ctx->ac, tmp, 5222);
+                       {
+                               tmp = LLVMBuildSub(builder, generated, emit, "");
+                               tmp = LLVMBuildMul(builder, tmp, prim_stride_dw_vgpr, "");
+                               tmp2 = LLVMBuildGEP(builder, gdsbase, &tid, 1, "");
+                               LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpSub, tmp2, tmp,
+                                                  LLVMAtomicOrderingMonotonic, false);
+                       }
+                       ac_build_endif(&ctx->ac, 5222);
+                       ac_build_endif(&ctx->ac, 5221);
+               }
+
+               /* Store the number of emitted primitives to LDS for later use. */
+               cond = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), i32_4, "");
+               ac_build_ifcc(&ctx->ac, cond, 5225);
+               {
+                       tmp = LLVMBuildAdd(builder, ac_get_thread_id(&ctx->ac),
+                                          scratch_emit_basev, "");
+                       tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, tmp);
+                       LLVMBuildStore(builder, emit_vgpr, tmp);
+               }
+               ac_build_endif(&ctx->ac, 5225);
+       }
+       ac_build_endif(&ctx->ac, 5200);
+
+       /* Determine the workgroup-relative per-thread / primitive offset into
+        * the streamout buffers */
+       struct ac_wg_scan primemit_scan[4] = {};
+
+       if (isgs) {
+               for (unsigned stream = 0; stream < 4; ++stream) {
+                       if (!ctx->shader_info->gs.num_stream_output_components[stream])
+                               continue;
+
+                       primemit_scan[stream].enable_exclusive = true;
+                       primemit_scan[stream].op = nir_op_iadd;
+                       primemit_scan[stream].src = nggso->prim_enable[stream];
+                       primemit_scan[stream].scratch =
+                               ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
+                                       LLVMConstInt(ctx->ac.i32, 12 + 8 * stream, false));
+                       primemit_scan[stream].waveidx = get_wave_id_in_tg(ctx);
+                       primemit_scan[stream].numwaves = get_tgsize(ctx);
+                       primemit_scan[stream].maxwaves = 8;
+                       ac_build_wg_scan_top(&ctx->ac, &primemit_scan[stream]);
+               }
+       }
+
+       ac_build_s_barrier(&ctx->ac);
+
+       /* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
+       LLVMValueRef wgoffset_dw[4] = {};
+
+       {
+               LLVMValueRef scratch_vgpr;
+
+               tmp = ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch, ac_get_thread_id(&ctx->ac));
+               scratch_vgpr = LLVMBuildLoad(builder, tmp, "");
+
+               for (unsigned buffer = 0; buffer < 4; ++buffer) {
+                       if (stream_for_buffer[buffer] >= 0) {
+                               wgoffset_dw[buffer] = ac_build_readlane(
+                                       &ctx->ac, scratch_vgpr,
+                                       LLVMConstInt(ctx->ac.i32, scratch_offset_base + buffer, false));
+                       }
+               }
+
+               for (unsigned stream = 0; stream < 4; ++stream) {
+                       if (ctx->shader_info->gs.num_stream_output_components[stream]) {
+                               nggso->emit[stream] = ac_build_readlane(
+                                       &ctx->ac, scratch_vgpr,
+                                       LLVMConstInt(ctx->ac.i32, scratch_emit_base + stream, false));
+                       }
+               }
+       }
+
+       /* Write out primitive data */
+       for (unsigned stream = 0; stream < 4; ++stream) {
+               if (!ctx->shader_info->gs.num_stream_output_components[stream])
+                       continue;
+
+               if (isgs) {
+                       ac_build_wg_scan_bottom(&ctx->ac, &primemit_scan[stream]);
+               } else {
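+                       /* Without a GS, threads map 1:1 to primitives, so the
+                        * exclusive offset is simply the thread id. */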
+                       primemit_scan[stream].result_exclusive = tid;
+               }
+
+               cond = LLVMBuildICmp(builder, LLVMIntULT,
+                                   primemit_scan[stream].result_exclusive,
+                                   nggso->emit[stream], "");
+               cond = LLVMBuildAnd(builder, cond, nggso->prim_enable[stream], "");
+               ac_build_ifcc(&ctx->ac, cond, 5240);
+               {
+                       LLVMValueRef offset_vtx =
+                               LLVMBuildMul(builder, primemit_scan[stream].result_exclusive,
+                                            nggso->num_vertices, "");
+
+                       for (unsigned i = 0; i < max_num_vertices; ++i) {
+                               cond = LLVMBuildICmp(builder, LLVMIntULT,
+                                                   LLVMConstInt(ctx->ac.i32, i, false),
+                                                   nggso->num_vertices, "");
+                               ac_build_ifcc(&ctx->ac, cond, 5241);
+                               build_streamout_vertex(ctx, so_buffer, wgoffset_dw,
+                                                      stream, offset_vtx, nggso->vertices[i]);
+                               ac_build_endif(&ctx->ac, 5241);
+                               offset_vtx = LLVMBuildAdd(builder, offset_vtx, ctx->ac.i32_1, "");
+                       }
+               }
+               ac_build_endif(&ctx->ac, 5240);
+       }
+}
+
+static unsigned ngg_nogs_vertex_size(struct radv_shader_context *ctx)
+{
+       unsigned lds_vertex_size = 0;
+
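+       /* Four dwords per streamout output, plus one dword to keep the
+        * per-vertex stride odd and avoid LDS bank conflicts. */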
+       if (ctx->shader_info->so.num_outputs)
+               lds_vertex_size = 4 * ctx->shader_info->so.num_outputs + 1;
+
+       return lds_vertex_size;
+}
+
+/**
+ * Returns an `[N x i32] addrspace(LDS)*` pointing at contiguous LDS storage
+ * for the vertex outputs.
+ */
+static LLVMValueRef ngg_nogs_vertex_ptr(struct radv_shader_context *ctx,
+                                       LLVMValueRef vtxid)
+{
+       /* The extra dword is used to avoid LDS bank conflicts. */
+       unsigned vertex_size = ngg_nogs_vertex_size(ctx);
+       LLVMTypeRef ai32 = LLVMArrayType(ctx->ac.i32, vertex_size);
+       LLVMTypeRef pai32 = LLVMPointerType(ai32, AC_ADDR_SPACE_LDS);
+       LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, ctx->esgs_ring, pai32, "");
+       return LLVMBuildGEP(ctx->ac.builder, tmp, &vtxid, 1, "");
+}
+
+static void
+handle_ngg_outputs_post_1(struct radv_shader_context *ctx)
+{
+       struct radv_streamout_info *so = &ctx->shader_info->so;
+       LLVMBuilderRef builder = ctx->ac.builder;
+       LLVMValueRef vertex_ptr = NULL;
+       LLVMValueRef tmp, tmp2;
+
+       assert((ctx->stage == MESA_SHADER_VERTEX ||
+               ctx->stage == MESA_SHADER_TESS_EVAL) && !ctx->is_gs_copy_shader);
+
+       if (!ctx->shader_info->so.num_outputs)
+               return;
+
+       vertex_ptr = ngg_nogs_vertex_ptr(ctx, get_thread_id_in_tg(ctx));
+
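+       /* Copy this vertex's streamout outputs to LDS, where the streamout
+        * code reads them back when emitting primitives. */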
+       for (unsigned i = 0; i < so->num_outputs; ++i) {
+               struct radv_stream_output *output =
+                       &ctx->shader_info->so.outputs[i];
+
+               unsigned loc = output->location;
+
+               for (unsigned comp = 0; comp < 4; comp++) {
+                       if (!(output->component_mask & (1 << comp)))
+                               continue;
+
+                       tmp = ac_build_gep0(&ctx->ac, vertex_ptr,
+                                           LLVMConstInt(ctx->ac.i32, 4 * i + comp, false));
+                       tmp2 = LLVMBuildLoad(builder,
+                                            ctx->abi.outputs[4 * loc + comp], "");
+                       tmp2 = ac_to_integer(&ctx->ac, tmp2);
+                       LLVMBuildStore(builder, tmp2, tmp);
+               }
+       }
+}
+
 static void
-handle_ngg_outputs_post(struct radv_shader_context *ctx)
+handle_ngg_outputs_post_2(struct radv_shader_context *ctx)
 {
        LLVMBuilderRef builder = ctx->ac.builder;
-       unsigned num_vertices = 3;
        LLVMValueRef tmp;
 
        assert((ctx->stage == MESA_SHADER_VERTEX ||
@@ -3197,14 +3697,50 @@ handle_ngg_outputs_post(struct radv_shader_context *ctx)
                ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[2], 0, 16),
        };
 
-       /* TODO: streamout */
+       /* Determine the number of vertices per primitive. */
+       unsigned num_vertices;
+       LLVMValueRef num_vertices_val;
+
+       if (ctx->stage == MESA_SHADER_VERTEX) {
+               LLVMValueRef outprim_val =
+                       LLVMConstInt(ctx->ac.i32,
+                                    ctx->options->key.vs.outprim, false);
+               num_vertices_val = LLVMBuildAdd(builder, outprim_val,
+                                               ctx->ac.i32_1, "");
+               num_vertices = 3; /* TODO: optimize for points & lines */
+       } else {
+               assert(ctx->stage == MESA_SHADER_TESS_EVAL);
+
+               if (ctx->shader->info.tess.point_mode)
+                       num_vertices = 1;
+               else if (ctx->shader->info.tess.primitive_mode == GL_ISOLINES)
+                       num_vertices = 2;
+               else
+                       num_vertices = 3;
+
+               num_vertices_val = LLVMConstInt(ctx->ac.i32, num_vertices, false);
+       }
+
+       /* Streamout */
+       if (ctx->shader_info->so.num_outputs) {
+               struct ngg_streamout nggso = {};
+
+               nggso.num_vertices = num_vertices_val;
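+               /* Only stream 0 exists without a GS. */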
+               nggso.prim_enable[0] = is_gs_thread;
+
+               for (unsigned i = 0; i < num_vertices; ++i)
+                       nggso.vertices[i] = ngg_nogs_vertex_ptr(ctx, vtxindex[i]);
+
+               build_streamout(ctx, &nggso);
+       }
 
        /* Copy Primitive IDs from GS threads to the LDS address corresponding
         * to the ES thread of the provoking vertex.
         */
        if (ctx->stage == MESA_SHADER_VERTEX &&
            ctx->options->key.vs_common_out.export_prim_id) {
-               /* TODO: streamout */
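+               /* With streamout, wait until all waves are done reading the
+                * LDS vertex data before the primitive ID copy reuses it. */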
+               if (ctx->shader_info->so.num_outputs)
+                       ac_build_s_barrier(&ctx->ac);
 
                ac_build_ifcc(&ctx->ac, is_gs_thread, 5400);
                /* Extract the PROVOKING_VTX_INDEX field. */
@@ -3295,7 +3831,6 @@ handle_ngg_outputs_post(struct radv_shader_context *ctx)
                        radv_export_param(ctx, param_count, values, 0x1);
 
                        outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count++;
-                       outinfo->export_prim_id = true;
                        outinfo->param_exports = param_count;
                }
        }
@@ -3347,7 +3882,7 @@ static void gfx10_ngg_gs_emit_epilogue_1(struct radv_shader_context *ctx)
                unsigned num_components;
 
                num_components =
-                       ctx->shader_info->info.gs.num_stream_output_components[stream];
+                       ctx->shader_info->gs.num_stream_output_components[stream];
                if (!num_components)
                        continue;
 
@@ -3358,7 +3893,7 @@ static void gfx10_ngg_gs_emit_epilogue_1(struct radv_shader_context *ctx)
                const LLVMValueRef vertexidx =
                        LLVMBuildLoad(builder, ctx->gs_next_vertex[stream], "");
                tmp = LLVMBuildICmp(builder, LLVMIntUGE, vertexidx,
-                       LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
+                       LLVMConstInt(ctx->ac.i32, ctx->shader->info.gs.vertices_out, false), "");
                ac_build_ifcc(&ctx->ac, tmp, 5101);
                ac_build_break(&ctx->ac);
                ac_build_endif(&ctx->ac, 5101);
@@ -3377,11 +3912,35 @@ static void gfx10_ngg_gs_emit_epilogue_1(struct radv_shader_context *ctx)
 
                ac_build_endloop(&ctx->ac, 5100);
        }
+
+       /* Accumulate the generated primitive counts across the entire threadgroup. */
+       for (unsigned stream = 0; stream < 4; ++stream) {
+               unsigned num_components;
+
+               num_components =
+                       ctx->shader_info->gs.num_stream_output_components[stream];
+               if (!num_components)
+                       continue;
+
+               LLVMValueRef numprims =
+                       LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
+               numprims = ac_build_reduce(&ctx->ac, numprims, nir_op_iadd, ctx->ac.wave_size);
+
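+               /* Only the first lane of each wave adds its wave's total to
+                * the counter in LDS. */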
+               tmp = LLVMBuildICmp(builder, LLVMIntEQ, ac_get_thread_id(&ctx->ac), ctx->ac.i32_0, "");
+               ac_build_ifcc(&ctx->ac, tmp, 5105);
+               {
+                       LLVMBuildAtomicRMW(builder, LLVMAtomicRMWBinOpAdd,
+                                          ac_build_gep0(&ctx->ac, ctx->gs_ngg_scratch,
+                                                        LLVMConstInt(ctx->ac.i32, stream, false)),
+                                          numprims, LLVMAtomicOrderingMonotonic, false);
+               }
+               ac_build_endif(&ctx->ac, 5105);
+       }
 }
 
 static void gfx10_ngg_gs_emit_epilogue_2(struct radv_shader_context *ctx)
 {
-       const unsigned verts_per_prim = si_conv_gl_prim_to_vertices(ctx->gs_output_prim);
+       const unsigned verts_per_prim = si_conv_gl_prim_to_vertices(ctx->shader->info.gs.output_primitive);
        LLVMBuilderRef builder = ctx->ac.builder;
        LLVMValueRef tmp, tmp2;
 
@@ -3390,7 +3949,38 @@ static void gfx10_ngg_gs_emit_epilogue_2(struct radv_shader_context *ctx)
        const LLVMValueRef tid = get_thread_id_in_tg(ctx);
        LLVMValueRef num_emit_threads = ngg_get_prim_cnt(ctx);
 
-       /* TODO: streamout */
+       /* Streamout */
+       if (ctx->shader_info->so.num_outputs) {
+               struct ngg_streamout nggso = {};
+
+               nggso.num_vertices = LLVMConstInt(ctx->ac.i32, verts_per_prim, false);
+
+               LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tid);
+               for (unsigned stream = 0; stream < 4; ++stream) {
+                       if (!ctx->shader_info->gs.num_stream_output_components[stream])
+                               continue;
+
+                       LLVMValueRef gep_idx[3] = {
+                               ctx->ac.i32_0, /* implicit C-style array */
+                               ctx->ac.i32_1, /* second value of struct */
+                               LLVMConstInt(ctx->ac.i32, stream, false),
+                       };
+                       tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
+                       tmp = LLVMBuildLoad(builder, tmp, "");
+                       tmp = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
+                       tmp2 = LLVMBuildICmp(builder, LLVMIntULT, tid, num_emit_threads, "");
+                       nggso.prim_enable[stream] = LLVMBuildAnd(builder, tmp, tmp2, "");
+               }
+
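+               /* The primitive flag lives on the thread of its last vertex;
+                * the earlier vertices belong to the preceding threads. */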
+               for (unsigned i = 0; i < verts_per_prim; ++i) {
+                       tmp = LLVMBuildSub(builder, tid,
+                                          LLVMConstInt(ctx->ac.i32, verts_per_prim - i - 1, false), "");
+                       tmp = ngg_gs_vertex_ptr(ctx, tmp);
+                       nggso.vertices[i] = ac_build_gep0(&ctx->ac, tmp, ctx->ac.i32_0);
+               }
+
+               build_streamout(ctx, &nggso);
+       }
 
        /* TODO: culling */
 
@@ -3541,31 +4131,24 @@ static void gfx10_ngg_gs_emit_epilogue_2(struct radv_shader_context *ctx)
                tmp = LLVMBuildZExt(builder, tmp, ctx->ac.i32, "");
                const LLVMValueRef vertexptr = ngg_gs_vertex_ptr(ctx, tmp);
 
-               if (ctx->output_mask & (1ull << VARYING_SLOT_PSIZ)) {
-                       outinfo->writes_pointsize = true;
-               }
-
-               if (ctx->output_mask & (1ull << VARYING_SLOT_LAYER)) {
-                       outinfo->writes_layer = true;
-               }
-
-               if (ctx->output_mask & (1ull << VARYING_SLOT_VIEWPORT)) {
-                       outinfo->writes_viewport_index = true;
-               }
-
                unsigned out_idx = 0;
                gep_idx[1] = ctx->ac.i32_0;
                for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
+                       unsigned output_usage_mask =
+                               ctx->shader_info->gs.output_usage_mask[i];
+                       int length = util_last_bit(output_usage_mask);
+
                        if (!(ctx->output_mask & (1ull << i)))
                                continue;
 
                        outputs[noutput].slot_name = i;
                        outputs[noutput].slot_index = i == VARYING_SLOT_CLIP_DIST1;
-
-                       outputs[noutput].usage_mask = ctx->shader_info->info.gs.output_usage_mask[i];
-                       int length = util_last_bit(outputs[noutput].usage_mask);
+                       outputs[noutput].usage_mask = output_usage_mask;
 
                        for (unsigned j = 0; j < length; j++, out_idx++) {
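+                               /* Skip components the GS never writes. */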
+                               if (!(output_usage_mask & (1 << j)))
+                                       continue;
+
                                gep_idx[2] = LLVMConstInt(ctx->ac.i32, out_idx, false);
                                tmp = LLVMBuildGEP(builder, vertexptr, gep_idx, 3, "");
                                tmp = LLVMBuildLoad(builder, tmp, "");
@@ -3587,8 +4170,6 @@ static void gfx10_ngg_gs_emit_epilogue_2(struct radv_shader_context *ctx)
 
                /* Export ViewIndex. */
                if (export_view_index) {
-                       outinfo->writes_layer = true;
-
                        outputs[noutput].slot_name = VARYING_SLOT_LAYER;
                        outputs[noutput].slot_index = 0;
                        outputs[noutput].usage_mask = 0x1;
@@ -3620,8 +4201,8 @@ static void gfx10_ngg_gs_emit_vertex(struct radv_shader_context *ctx,
         */
        const LLVMValueRef can_emit =
                LLVMBuildICmp(builder, LLVMIntULT, vertexidx,
-                             LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
-       ac_build_kill_if_false(&ctx->ac, can_emit);
+                             LLVMConstInt(ctx->ac.i32, ctx->shader->info.gs.vertices_out, false), "");
+       ac_build_ifcc(&ctx->ac, can_emit, 9001);
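+       /* Threads past the vertex limit simply skip the stores below; the
+        * matching endif is at the end of the function. */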
 
        tmp = LLVMBuildAdd(builder, vertexidx, ctx->ac.i32_1, "");
        tmp = LLVMBuildSelect(builder, can_emit, tmp, vertexidx, "");
@@ -3632,9 +4213,9 @@ static void gfx10_ngg_gs_emit_vertex(struct radv_shader_context *ctx,
        unsigned out_idx = 0;
        for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
                unsigned output_usage_mask =
-                       ctx->shader_info->info.gs.output_usage_mask[i];
+                       ctx->shader_info->gs.output_usage_mask[i];
                uint8_t output_stream =
-                       ctx->shader_info->info.gs.output_streams[i];
+                       ctx->shader_info->gs.output_streams[i];
                LLVMValueRef *out_ptr = &addrs[i * 4];
                int length = util_last_bit(output_usage_mask);
 
@@ -3661,12 +4242,12 @@ static void gfx10_ngg_gs_emit_vertex(struct radv_shader_context *ctx,
                        LLVMBuildStore(builder, out_val, ptr);
                }
        }
-       assert(out_idx * 4 <= ctx->gsvs_vertex_size);
+       assert(out_idx * 4 <= ctx->shader_info->gs.gsvs_vertex_size);
 
        /* Determine and store whether this vertex completed a primitive. */
        const LLVMValueRef curverts = LLVMBuildLoad(builder, ctx->gs_curprim_verts[stream], "");
 
-       tmp = LLVMConstInt(ctx->ac.i32, si_conv_gl_prim_to_vertices(ctx->gs_output_prim) - 1, false);
+       tmp = LLVMConstInt(ctx->ac.i32, si_conv_gl_prim_to_vertices(ctx->shader->info.gs.output_primitive) - 1, false);
        const LLVMValueRef iscompleteprim =
                LLVMBuildICmp(builder, LLVMIntUGE, curverts, tmp, "");
 
@@ -3687,6 +4268,8 @@ static void gfx10_ngg_gs_emit_vertex(struct radv_shader_context *ctx,
        tmp = LLVMBuildLoad(builder, ctx->gs_generated_prims[stream], "");
        tmp = LLVMBuildAdd(builder, tmp, LLVMBuildZExt(builder, iscompleteprim, ctx->ac.i32, ""), "");
        LLVMBuildStore(builder, tmp, ctx->gs_generated_prims[stream]);
+
+       ac_build_endif(&ctx->ac, 9001);
 }
 
 static void
@@ -3893,15 +4476,15 @@ handle_fs_outputs_post(struct radv_shader_context *ctx)
        }
 
        /* Process depth, stencil, samplemask. */
-       if (ctx->shader_info->info.ps.writes_z) {
+       if (ctx->shader_info->ps.writes_z) {
                depth = ac_to_float(&ctx->ac,
                                    radv_load_output(ctx, FRAG_RESULT_DEPTH, 0));
        }
-       if (ctx->shader_info->info.ps.writes_stencil) {
+       if (ctx->shader_info->ps.writes_stencil) {
                stencil = ac_to_float(&ctx->ac,
                                      radv_load_output(ctx, FRAG_RESULT_STENCIL, 0));
        }
-       if (ctx->shader_info->info.ps.writes_sample_mask) {
+       if (ctx->shader_info->ps.writes_sample_mask) {
                samplemask = ac_to_float(&ctx->ac,
                                         radv_load_output(ctx, FRAG_RESULT_SAMPLE_MASK, 0));
        }
@@ -3910,9 +4493,9 @@ handle_fs_outputs_post(struct radv_shader_context *ctx)
         * exported.
         */
        if (index > 0 &&
-           !ctx->shader_info->info.ps.writes_z &&
-           !ctx->shader_info->info.ps.writes_stencil &&
-           !ctx->shader_info->info.ps.writes_sample_mask) {
+           !ctx->shader_info->ps.writes_z &&
+           !ctx->shader_info->ps.writes_stencil &&
+           !ctx->shader_info->ps.writes_sample_mask) {
                unsigned last = index - 1;
 
                color_args[last].valid_mask = 1; /* whether the EXEC mask is valid */
@@ -3956,7 +4539,7 @@ handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
                else if (ctx->options->key.vs_common_out.as_es)
                        handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
                else if (ctx->options->key.vs_common_out.as_ngg)
-                       break; /* handled outside of the shader body */
+                       handle_ngg_outputs_post_1(ctx);
                else
                        handle_vs_outputs_post(ctx, ctx->options->key.vs_common_out.export_prim_id,
                                               ctx->options->key.vs_common_out.export_clip_dists,
@@ -3975,7 +4558,7 @@ handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
                if (ctx->options->key.vs_common_out.as_es)
                        handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
                else if (ctx->options->key.vs_common_out.as_ngg)
-                       break; /* handled outside of the shader body */
+                       handle_ngg_outputs_post_1(ctx);
                else
                        handle_vs_outputs_post(ctx, ctx->options->key.vs_common_out.export_prim_id,
                                               ctx->options->key.vs_common_out.export_clip_dists,
@@ -4075,12 +4658,12 @@ ac_setup_rings(struct radv_shader_context *ctx)
                        LLVMValueRef ring, tmp;
 
                        num_components =
-                               ctx->shader_info->info.gs.num_stream_output_components[stream];
+                               ctx->shader_info->gs.num_stream_output_components[stream];
 
                        if (!num_components)
                                continue;
 
-                       stride = 4 * num_components * ctx->gs_max_out_vertices;
+                       stride = 4 * num_components * ctx->shader->info.gs.vertices_out;
 
                        /* Limit on the stride field for <= GFX7. */
                        assert(stride < (1 << 14));
@@ -4178,7 +4761,7 @@ static
 LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
                                        struct nir_shader *const *shaders,
                                        int shader_count,
-                                       struct radv_shader_variant_info *shader_info,
+                                       struct radv_shader_info *shader_info,
                                        const struct radv_nir_compiler_options *options)
 {
        struct radv_shader_context ctx = {0};
@@ -4186,20 +4769,18 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
        ctx.options = options;
        ctx.shader_info = shader_info;
 
-       enum ac_float_mode float_mode =
-               options->unsafe_math ? AC_FLOAT_MODE_UNSAFE_FP_MATH :
-                                      AC_FLOAT_MODE_DEFAULT;
+       enum ac_float_mode float_mode = AC_FLOAT_MODE_DEFAULT;
+
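+       /* Flushing FP32 denormals takes precedence over the unsafe math
+        * optimizations. */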
+       if (shader_info->float_controls_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32) {
+               float_mode = AC_FLOAT_MODE_DENORM_FLUSH_TO_ZERO;
+       } else if (options->unsafe_math) {
+               float_mode = AC_FLOAT_MODE_UNSAFE_FP_MATH;
+       }
 
        ac_llvm_context_init(&ctx.ac, ac_llvm, options->chip_class,
-                            options->family, float_mode, options->wave_size,
-                            options->wave_size);
+                            options->family, float_mode, options->wave_size, 64);
        ctx.context = ctx.ac.context;
 
-       radv_nir_shader_info_init(&shader_info->info);
-
-       for(int i = 0; i < shader_count; ++i)
-               radv_nir_shader_info_pass(shaders[i], options, &shader_info->info);
-
        for (i = 0; i < MAX_SETS; i++)
                shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
        for (i = 0; i < AC_UD_MAX_UD; i++)
@@ -4241,8 +4822,29 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
            shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
                ac_nir_fixup_ls_hs_input_vgprs(&ctx);
 
+       if (is_ngg) {
+               /* Declare scratch space base for streamout and vertex
+                * compaction. Whether space is actually allocated is
+                * determined during linking / PM4 creation.
+                *
+                * Add an extra dword per vertex to ensure an odd stride, which
+                * avoids bank conflicts for SoA accesses.
+                */
+               declare_esgs_ring(&ctx);
+
+               /* This is really only needed when streamout and / or vertex
+                * compaction is enabled.
+                */
+               LLVMTypeRef asi32 = LLVMArrayType(ctx.ac.i32, 8);
+               ctx.gs_ngg_scratch = LLVMAddGlobalInAddressSpace(ctx.ac.module,
+                       asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
+               LLVMSetInitializer(ctx.gs_ngg_scratch, LLVMGetUndef(asi32));
+               LLVMSetAlignment(ctx.gs_ngg_scratch, 4);
+       }
+
        for(int i = 0; i < shader_count; ++i) {
                ctx.stage = shaders[i]->info.stage;
+               ctx.shader = shaders[i];
                ctx.output_mask = 0;
 
                if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
@@ -4258,42 +4860,38 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
                                                ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
                                }
 
-                               /* TODO: streamout */
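+                               /* Streamout needs extra LDS scratch for the
+                                * per-buffer offsets, the per-stream emit
+                                * counts and the workgroup scans used by
+                                * build_streamout. */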
+                               unsigned scratch_size = 8;
+                               if (ctx.shader_info->so.num_outputs)
+                                       scratch_size = 44;
 
-                               LLVMTypeRef ai32 = LLVMArrayType(ctx.ac.i32, 8);
+                               LLVMTypeRef ai32 = LLVMArrayType(ctx.ac.i32, scratch_size);
                                ctx.gs_ngg_scratch =
                                        LLVMAddGlobalInAddressSpace(ctx.ac.module,
                                                                    ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
                                LLVMSetInitializer(ctx.gs_ngg_scratch, LLVMGetUndef(ai32));
                                LLVMSetAlignment(ctx.gs_ngg_scratch, 4);
 
-                               ctx.gs_ngg_emit = LLVMBuildIntToPtr(ctx.ac.builder, ctx.ac.i32_0,
-                                       LLVMPointerType(LLVMArrayType(ctx.ac.i32, 0), AC_ADDR_SPACE_LDS),
-                                       "ngg_emit");
+                               ctx.gs_ngg_emit = LLVMAddGlobalInAddressSpace(ctx.ac.module,
+                                       LLVMArrayType(ctx.ac.i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
+                               LLVMSetLinkage(ctx.gs_ngg_emit, LLVMExternalLinkage);
+                               LLVMSetAlignment(ctx.gs_ngg_emit, 4);
                        }
 
-                       ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
-                       ctx.gs_output_prim = shaders[i]->info.gs.output_primitive;
                        ctx.abi.load_inputs = load_gs_input;
                        ctx.abi.emit_primitive = visit_end_primitive;
                } else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
-                       ctx.tcs_outputs_read = shaders[i]->info.outputs_read;
-                       ctx.tcs_patch_outputs_read = shaders[i]->info.patch_outputs_read;
                        ctx.abi.load_tess_varyings = load_tcs_varyings;
                        ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
                        ctx.abi.store_tcs_outputs = store_tcs_output;
-                       ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
                        if (shader_count == 1)
                                ctx.tcs_num_inputs = ctx.options->key.tcs.num_inputs;
                        else
-                               ctx.tcs_num_inputs = util_last_bit64(shader_info->info.vs.ls_outputs_written);
+                               ctx.tcs_num_inputs = util_last_bit64(shader_info->vs.ls_outputs_written);
                        ctx.tcs_num_patches = get_tcs_num_patches(&ctx);
                } else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
-                       ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
                        ctx.abi.load_tess_varyings = load_tes_input;
                        ctx.abi.load_tess_coord = load_tess_coord;
                        ctx.abi.load_patch_vertices_in = load_patch_vertices_in;
-                       ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
                        ctx.tcs_num_patches = ctx.options->key.tes.num_patches;
                } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
                        ctx.abi.load_base_vertex = radv_load_base_vertex;
@@ -4345,14 +4943,6 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
                nir_foreach_variable(variable, &shaders[i]->outputs)
                        scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);
 
-               if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
-                       unsigned addclip = shaders[i]->info.clip_distance_array_size +
-                                       shaders[i]->info.cull_distance_array_size > 4;
-                       ctx.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
-                       ctx.max_gsvs_emit_size = ctx.gsvs_vertex_size *
-                               shaders[i]->info.gs.vertices_out;
-               }
-
                ac_setup_rings(&ctx);
 
                LLVMBasicBlockRef merge_block;
@@ -4389,16 +4979,13 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
                if (is_pre_gs_stage(shaders[i]->info.stage) &&
                    ctx.options->key.vs_common_out.as_ngg &&
                    i == shader_count - 1) {
-                       handle_ngg_outputs_post(&ctx);
+                       handle_ngg_outputs_post_2(&ctx);
                } else if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY &&
                           ctx.options->key.vs_common_out.as_ngg) {
                        gfx10_ngg_gs_emit_epilogue_2(&ctx);
                }
 
-               if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
-                       shader_info->gs.gsvs_vertex_size = ctx.gsvs_vertex_size;
-                       shader_info->gs.max_gsvs_emit_size = ctx.max_gsvs_emit_size;
-               } else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
+               if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
                        shader_info->tcs.num_patches = ctx.tcs_num_patches;
                        shader_info->tcs.lds_size = calculate_tess_lds_size(&ctx);
                }
@@ -4478,7 +5065,7 @@ static void ac_compile_llvm_module(struct ac_llvm_compiler *ac_llvm,
                fprintf(stderr, "\n");
        }
 
-       if (options->record_llvm_ir) {
+       if (options->record_ir) {
                char *llvm_ir = LLVMPrintModuleToString(llvm_module);
                llvm_ir_string = strdup(llvm_ir);
                LLVMDisposeMessage(llvm_ir);
@@ -4511,52 +5098,10 @@ static void ac_compile_llvm_module(struct ac_llvm_compiler *ac_llvm,
        free(elf_buffer);
 }
 
-static void
-ac_fill_shader_info(struct radv_shader_variant_info *shader_info, struct nir_shader *nir, const struct radv_nir_compiler_options *options)
-{
-        switch (nir->info.stage) {
-        case MESA_SHADER_COMPUTE:
-                for (int i = 0; i < 3; ++i)
-                        shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
-                break;
-        case MESA_SHADER_FRAGMENT:
-               shader_info->fs.can_discard = nir->info.fs.uses_discard;
-                shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
-                shader_info->fs.post_depth_coverage = nir->info.fs.post_depth_coverage;
-                break;
-        case MESA_SHADER_GEOMETRY:
-                shader_info->gs.vertices_in = nir->info.gs.vertices_in;
-                shader_info->gs.vertices_out = nir->info.gs.vertices_out;
-                shader_info->gs.output_prim = nir->info.gs.output_primitive;
-                shader_info->gs.invocations = nir->info.gs.invocations;
-                break;
-        case MESA_SHADER_TESS_EVAL:
-                shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
-                shader_info->tes.spacing = nir->info.tess.spacing;
-                shader_info->tes.ccw = nir->info.tess.ccw;
-                shader_info->tes.point_mode = nir->info.tess.point_mode;
-                shader_info->tes.as_es = options->key.vs_common_out.as_es;
-                shader_info->tes.export_prim_id = options->key.vs_common_out.export_prim_id;
-                shader_info->is_ngg = options->key.vs_common_out.as_ngg;
-                break;
-        case MESA_SHADER_TESS_CTRL:
-                shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
-                break;
-        case MESA_SHADER_VERTEX:
-                shader_info->vs.as_es = options->key.vs_common_out.as_es;
-                shader_info->vs.as_ls = options->key.vs_common_out.as_ls;
-                shader_info->vs.export_prim_id = options->key.vs_common_out.export_prim_id;
-                shader_info->is_ngg = options->key.vs_common_out.as_ngg;
-                break;
-        default:
-                break;
-        }
-}
-
 void
 radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
                        struct radv_shader_binary **rbinary,
-                       struct radv_shader_variant_info *shader_info,
+                       struct radv_shader_info *shader_info,
                        struct nir_shader *const *nir,
                        int nir_count,
                        const struct radv_nir_compiler_options *options)
@@ -4573,9 +5118,6 @@ radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
                                                    nir[nir_count - 1]->info.stage),
                               options);
 
-       for (int i = 0; i < nir_count; ++i)
-               ac_fill_shader_info(shader_info, nir[i], options);
-
        /* Determine the ES type (VS or TES) for the GS on GFX9. */
        if (options->chip_class >= GFX9) {
                if (nir_count == 2 &&
@@ -4583,7 +5125,7 @@ radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
                        shader_info->gs.es_type = nir[0]->info.stage;
                }
        }
-       shader_info->info.wave_size = options->wave_size;
+       shader_info->wave_size = options->wave_size;
 }
 
 static void
@@ -4595,7 +5137,8 @@ ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
        LLVMValueRef stream_id;
 
        /* Fetch the vertex stream ID. */
-       if (ctx->shader_info->info.so.num_outputs) {
+       if (!ctx->options->use_ngg_streamout &&
+           ctx->shader_info->so.num_outputs) {
                stream_id =
                        ac_unpack_param(&ctx->ac, ctx->streamout_config, 24, 2);
        } else {
@@ -4611,14 +5154,14 @@ ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
 
        for (unsigned stream = 0; stream < 4; stream++) {
                unsigned num_components =
-                       ctx->shader_info->info.gs.num_stream_output_components[stream];
+                       ctx->shader_info->gs.num_stream_output_components[stream];
                LLVMBasicBlockRef bb;
                unsigned offset;
 
-               if (!num_components)
+               if (stream > 0 && !num_components)
                        continue;
 
-               if (stream > 0 && !ctx->shader_info->info.so.num_outputs)
+               if (stream > 0 && !ctx->shader_info->so.num_outputs)
                        continue;
 
                bb = LLVMInsertBasicBlockInContext(ctx->ac.context, end_bb, "out");
@@ -4628,9 +5171,9 @@ ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
                offset = 0;
                for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
                        unsigned output_usage_mask =
-                               ctx->shader_info->info.gs.output_usage_mask[i];
+                               ctx->shader_info->gs.output_usage_mask[i];
                        unsigned output_stream =
-                               ctx->shader_info->info.gs.output_streams[i];
+                               ctx->shader_info->gs.output_streams[i];
                        int length = util_last_bit(output_usage_mask);
 
                        if (!(ctx->output_mask & (1ull << i)) ||
@@ -4645,7 +5188,7 @@ ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
 
                                soffset = LLVMConstInt(ctx->ac.i32,
                                                       offset *
-                                                      ctx->gs_max_out_vertices * 16 * 4, false);
+                                                      ctx->shader->info.gs.vertices_out * 16 * 4, false);
 
                                offset++;
 
@@ -4666,7 +5209,8 @@ ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
                        }
                }
 
-               if (ctx->shader_info->info.so.num_outputs)
+               if (!ctx->options->use_ngg_streamout &&
+                   ctx->shader_info->so.num_outputs)
                        radv_emit_streamout(ctx, stream);
 
                if (stream == 0) {
@@ -4684,7 +5228,7 @@ void
 radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
                            struct nir_shader *geom_shader,
                            struct radv_shader_binary **rbinary,
-                           struct radv_shader_variant_info *shader_info,
+                           struct radv_shader_info *shader_info,
                            const struct radv_nir_compiler_options *options)
 {
        struct radv_shader_context ctx = {0};
@@ -4701,12 +5245,10 @@ radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
 
        ctx.is_gs_copy_shader = true;
        ctx.stage = MESA_SHADER_VERTEX;
-
-       radv_nir_shader_info_pass(geom_shader, options, &shader_info->info);
+       ctx.shader = geom_shader;
 
        create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);
 
-       ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
        ac_setup_rings(&ctx);
 
        nir_foreach_variable(variable, &geom_shader->outputs) {