radv: Lower input attachments in NIR.
[mesa.git] / src / amd / vulkan / radv_nir_to_llvm.c
index 116419557be168d8bb86a209602e437bc70f59b5..323c7c5aecacf7dc836574182e21c635c0c3bbb2 100644
 #include <llvm-c/Core.h>
 #include <llvm-c/TargetMachine.h>
 #include <llvm-c/Transforms/Scalar.h>
-#if HAVE_LLVM >= 0x0700
 #include <llvm-c/Transforms/Utils.h>
-#endif
 
 #include "sid.h"
-#include "gfx9d.h"
 #include "ac_binary.h"
 #include "ac_llvm_util.h"
 #include "ac_llvm_build.h"
@@ -73,27 +70,41 @@ struct radv_shader_context {
        LLVMValueRef tes_u;
        LLVMValueRef tes_v;
 
+       /* HW GS */
+       /* On gfx10:
+        *  - bits 0..10: ordered_wave_id
+        *  - bits 12..20: number of vertices in group
+        *  - bits 22..30: number of primitives in group
+        */
+       LLVMValueRef gs_tg_info;
        LLVMValueRef gs2vs_offset;
        LLVMValueRef gs_wave_id;
        LLVMValueRef gs_vtx_offset[6];
 
        LLVMValueRef esgs_ring;
-       LLVMValueRef gsvs_ring;
+       LLVMValueRef gsvs_ring[4];
        LLVMValueRef hs_ring_tess_offchip;
        LLVMValueRef hs_ring_tess_factor;
 
        LLVMValueRef persp_sample, persp_center, persp_centroid;
        LLVMValueRef linear_sample, linear_center, linear_centroid;
 
+       /* Streamout */
+       LLVMValueRef streamout_buffers;
+       LLVMValueRef streamout_write_idx;
+       LLVMValueRef streamout_config;
+       LLVMValueRef streamout_offset[4];
+
        gl_shader_stage stage;
 
        LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];
+       uint64_t float16_shaded_mask;
 
        uint64_t input_mask;
        uint64_t output_mask;
 
        bool is_gs_copy_shader;
-       LLVMValueRef gs_next_vertex;
+       LLVMValueRef gs_next_vertex[4];
        unsigned gs_max_out_vertices;
 
        unsigned tes_primitive_mode;
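Note: the gs_tg_info comment above packs three GFX10 fields into a single SGPR. Below is a minimal host-side sketch of how those bit ranges decode; the field widths are inferred from the stated bit ranges and the helper names are made up for illustration (in the compiler the value is an LLVM IR SGPR, not a host integer).

        /* Illustrative decode of the gfx10 gs_tg_info layout described above. */
        static inline unsigned gs_tg_ordered_wave_id(uint32_t tg_info)
        {
                return tg_info & 0x7ff;          /* bits 0..10  */
        }

        static inline unsigned gs_tg_vertices_in_group(uint32_t tg_info)
        {
                return (tg_info >> 12) & 0x1ff;  /* bits 12..20 */
        }

        static inline unsigned gs_tg_primitives_in_group(uint32_t tg_info)
        {
                return (tg_info >> 22) & 0x1ff;  /* bits 22..30 */
        }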
@@ -250,7 +261,16 @@ get_tcs_num_patches(struct radv_shader_context *ctx)
        /* Make sure that the data fits in LDS. This assumes the shaders only
         * use LDS for the inputs and outputs.
         */
-       hardware_lds_size = ctx->options->chip_class >= CIK ? 65536 : 32768;
+       hardware_lds_size = 32768;
+
+       /* Looks like STONEY hangs if we use more than 32 KiB of LDS in a
+        * single threadgroup, even though more LDS is physically available.
+        *
+        * Test: dEQP-VK.tessellation.shader_input_output.barrier
+        */
+       if (ctx->options->chip_class >= GFX7 && ctx->options->family != CHIP_STONEY)
+               hardware_lds_size = 65536;
+
        num_patches = MIN2(num_patches, hardware_lds_size / (input_patch_size + output_patch_size));
        /* Make sure the output data fits in the offchip buffer */
        num_patches = MIN2(num_patches, (ctx->options->tess_offchip_block_dw_size * 4) / output_patch_size);
@@ -259,8 +279,8 @@ get_tcs_num_patches(struct radv_shader_context *ctx)
         */
        num_patches = MIN2(num_patches, 40);
 
-       /* SI bug workaround - limit LS-HS threadgroups to only one wave. */
-       if (ctx->options->chip_class == SI) {
+       /* GFX6 bug workaround - limit LS-HS threadgroups to only one wave. */
+       if (ctx->options->chip_class == GFX6) {
                unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
                num_patches = MIN2(num_patches, one_wave);
        }
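Note: the LDS clamp above is now per-chip rather than a flat GFX7 check. A worked example of the clamping chain with purely hypothetical patch sizes (the real input_patch_size and output_patch_size are computed earlier in get_tcs_num_patches):

        /* Hypothetical sizes in bytes, for illustration only. */
        unsigned input_patch_size  = 1024;
        unsigned output_patch_size = 1536;
        unsigned num_patches       = 64;
        unsigned hardware_lds_size = 65536;   /* GFX7+ and not STONEY, else 32768 */

        num_patches = MIN2(num_patches, hardware_lds_size /
                           (input_patch_size + output_patch_size));  /* 65536 / 2560 = 25 */
        /* ... followed by the off-chip buffer clamp, the fixed limit of 40
         * patches, and on GFX6 the one-wave-per-threadgroup workaround. */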
@@ -416,11 +436,10 @@ get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
                             patch0_patch_data_offset);
 }
 
-#define MAX_ARGS 23
+#define MAX_ARGS 64
 struct arg_info {
        LLVMTypeRef types[MAX_ARGS];
        LLVMValueRef *assign[MAX_ARGS];
-       unsigned array_params_mask;
        uint8_t count;
        uint8_t sgpr_count;
        uint8_t num_sgprs_used;
@@ -451,13 +470,6 @@ add_arg(struct arg_info *info, enum ac_arg_regfile regfile, LLVMTypeRef type,
        }
 }
 
-static inline void
-add_array_arg(struct arg_info *info, LLVMTypeRef type, LLVMValueRef *param_ptr)
-{
-       info->array_params_mask |= (1 << info->count);
-       add_arg(info, ARG_SGPR, type, param_ptr);
-}
-
 static void assign_arguments(LLVMValueRef main_function,
                             struct arg_info *info)
 {
@@ -496,10 +508,11 @@ create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
 
        LLVMSetFunctionCallConv(main_function, RADEON_LLVM_AMDGPU_CS);
        for (unsigned i = 0; i < args->sgpr_count; ++i) {
+               LLVMValueRef P = LLVMGetParam(main_function, i);
+
                ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);
 
-               if (args->array_params_mask & (1 << i)) {
-                       LLVMValueRef P = LLVMGetParam(main_function, i);
+               if (LLVMGetTypeKind(LLVMTypeOf(P)) == LLVMPointerTypeKind) {
                        ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_NOALIAS);
                        ac_add_attr_dereferenceable(P, UINT64_MAX);
                }
@@ -511,11 +524,8 @@ create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
                                                     options->address32_hi);
        }
 
-       if (max_workgroup_size) {
-               ac_llvm_add_target_dep_function_attr(main_function,
-                                                    "amdgpu-max-work-group-size",
-                                                    max_workgroup_size);
-       }
+       ac_llvm_set_workgroup_size(main_function, max_workgroup_size);
+
        if (options->unsafe_math) {
                /* These were copied from some LLVM test. */
                LLVMAddTargetDependentFunctionAttr(main_function,
@@ -539,13 +549,11 @@ create_llvm_function(LLVMContextRef ctx, LLVMModuleRef module,
 
 
 static void
-set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx, uint8_t num_sgprs,
-       uint32_t indirect_offset)
+set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx,
+       uint8_t num_sgprs)
 {
        ud_info->sgpr_idx = *sgpr_idx;
        ud_info->num_sgprs = num_sgprs;
-       ud_info->indirect = indirect_offset > 0;
-       ud_info->indirect_offset = indirect_offset;
        *sgpr_idx += num_sgprs;
 }
 
@@ -557,35 +565,34 @@ set_loc_shader(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
                &ctx->shader_info->user_sgprs_locs.shader_data[idx];
        assert(ud_info);
 
-       set_loc(ud_info, sgpr_idx, num_sgprs, 0);
+       set_loc(ud_info, sgpr_idx, num_sgprs);
 }
 
 static void
 set_loc_shader_ptr(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
 {
-       bool use_32bit_pointers = HAVE_32BIT_POINTERS &&
-                                 idx != AC_UD_SCRATCH_RING_OFFSETS;
+       bool use_32bit_pointers = idx != AC_UD_SCRATCH_RING_OFFSETS;
 
        set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
 }
 
 static void
-set_loc_desc(struct radv_shader_context *ctx, int idx,  uint8_t *sgpr_idx,
-            uint32_t indirect_offset)
+set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
 {
        struct radv_userdata_locations *locs =
                &ctx->shader_info->user_sgprs_locs;
        struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];
        assert(ud_info);
 
-       set_loc(ud_info, sgpr_idx, HAVE_32BIT_POINTERS ? 1 : 2, indirect_offset);
-       if (indirect_offset == 0)
-               locs->descriptor_sets_enabled |= 1 << idx;
+       set_loc(ud_info, sgpr_idx, 1);
+
+       locs->descriptor_sets_enabled |= 1 << idx;
 }
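Note: with indirect descriptor offsets gone and user pointers unconditionally 32-bit, user SGPR accounting becomes a simple running index. A sketch of how sgpr_idx could advance under these helpers; the particular sequence of entries is assumed, only the per-entry sizes come from the code above:

        uint8_t user_sgpr_idx = 0;

        /* Scratch ring offsets stay a 64-bit pointer -> 2 SGPRs. */
        set_loc_shader_ptr(ctx, AC_UD_SCRATCH_RING_OFFSETS, &user_sgpr_idx); /* 0..1 */

        /* Each used descriptor set is a 32-bit pointer -> 1 SGPR. */
        set_loc_desc(ctx, 0, &user_sgpr_idx);                                /* 2 */
        set_loc_desc(ctx, 1, &user_sgpr_idx);                                /* 3 */

        /* Push constants are a 32-bit pointer as well -> 1 SGPR. */
        set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, &user_sgpr_idx);       /* 4 */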
 
 struct user_sgpr_info {
        bool need_ring_offsets;
        bool indirect_all_descriptor_sets;
+       uint8_t remaining_sgprs;
 };
 
 static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
@@ -594,11 +601,11 @@ static bool needs_view_index_sgpr(struct radv_shader_context *ctx,
        switch (stage) {
        case MESA_SHADER_VERTEX:
                if (ctx->shader_info->info.needs_multiview_view_index ||
-                   (!ctx->options->key.vs.as_es && !ctx->options->key.vs.as_ls && ctx->options->key.has_multiview_view_index))
+                   (!ctx->options->key.vs.out.as_es && !ctx->options->key.vs.out.as_ls && ctx->options->key.has_multiview_view_index))
                        return true;
                break;
        case MESA_SHADER_TESS_EVAL:
-               if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.tes.as_es && ctx->options->key.has_multiview_view_index))
+               if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.tes.out.as_es && ctx->options->key.has_multiview_view_index))
                        return true;
                break;
        case MESA_SHADER_GEOMETRY:
@@ -618,12 +625,56 @@ count_vs_user_sgprs(struct radv_shader_context *ctx)
        uint8_t count = 0;
 
        if (ctx->shader_info->info.vs.has_vertex_buffers)
-               count += HAVE_32BIT_POINTERS ? 1 : 2;
+               count++;
        count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;
 
        return count;
 }
 
+static void allocate_inline_push_consts(struct radv_shader_context *ctx,
+                                       struct user_sgpr_info *user_sgpr_info)
+{
+       uint8_t remaining_sgprs = user_sgpr_info->remaining_sgprs;
+
+       /* Only supported if shaders use push constants. */
+       if (ctx->shader_info->info.min_push_constant_used == UINT8_MAX)
+               return;
+
+       /* Only supported if shaders don't have indirect push constants. */
+       if (ctx->shader_info->info.has_indirect_push_constants)
+               return;
+
+       /* Only supported for 32-bit push constants. */
+       if (!ctx->shader_info->info.has_only_32bit_push_constants)
+               return;
+
+       uint8_t num_push_consts =
+               (ctx->shader_info->info.max_push_constant_used -
+                ctx->shader_info->info.min_push_constant_used) / 4;
+
+       /* Check if the number of user SGPRs is large enough. */
+       if (num_push_consts < remaining_sgprs) {
+               ctx->shader_info->info.num_inline_push_consts = num_push_consts;
+       } else {
+               ctx->shader_info->info.num_inline_push_consts = remaining_sgprs;
+       }
+
+       /* Clamp to the maximum number of allowed inlined push constants. */
+       if (ctx->shader_info->info.num_inline_push_consts > AC_MAX_INLINE_PUSH_CONSTS)
+               ctx->shader_info->info.num_inline_push_consts = AC_MAX_INLINE_PUSH_CONSTS;
+
+       if (ctx->shader_info->info.num_inline_push_consts == num_push_consts &&
+           !ctx->shader_info->info.loads_dynamic_offsets) {
+               /* Disable the default push constants path if all constants are
+                * inlined and if shaders don't use dynamic descriptors.
+                */
+               ctx->shader_info->info.loads_push_constants = false;
+       }
+
+       ctx->shader_info->info.base_inline_push_consts =
+               ctx->shader_info->info.min_push_constant_used / 4;
+}
+
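Note: a worked example of the inline push-constant bookkeeping above, with assumed values, and assuming max_push_constant_used is the end offset of the used range (which is what the division suggests). If a shader only reads push-constant bytes [16, 32) as 32-bit values:

        uint8_t num_push_consts         = (32 - 16) / 4;  /* 4 dwords to inline  */
        uint8_t base_inline_push_consts = 16 / 4;         /* first inlined dword */
        /* num_inline_push_consts is then clamped to remaining_sgprs and
         * AC_MAX_INLINE_PUSH_CONSTS; if all 4 dwords end up inlined and no
         * dynamic offsets are loaded, the regular push-constant pointer is
         * dropped entirely (loads_push_constants = false). */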
 static void allocate_user_sgprs(struct radv_shader_context *ctx,
                                gl_shader_stage stage,
                                bool has_previous_stage,
@@ -687,51 +738,64 @@ static void allocate_user_sgprs(struct radv_shader_context *ctx,
                user_sgpr_count++;
 
        if (ctx->shader_info->info.loads_push_constants)
-               user_sgpr_count += HAVE_32BIT_POINTERS ? 1 : 2;
+               user_sgpr_count++;
+
+       if (ctx->streamout_buffers)
+               user_sgpr_count++;
 
-       uint32_t available_sgprs = ctx->options->chip_class >= GFX9 ? 32 : 16;
+       uint32_t available_sgprs = ctx->options->chip_class >= GFX9 && stage != MESA_SHADER_COMPUTE ? 32 : 16;
        uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
        uint32_t num_desc_set =
                util_bitcount(ctx->shader_info->info.desc_set_used_mask);
 
-       if (remaining_sgprs / (HAVE_32BIT_POINTERS ? 1 : 2) < num_desc_set) {
+       if (remaining_sgprs < num_desc_set) {
                user_sgpr_info->indirect_all_descriptor_sets = true;
+               user_sgpr_info->remaining_sgprs = remaining_sgprs - 1;
+       } else {
+               user_sgpr_info->remaining_sgprs = remaining_sgprs - num_desc_set;
        }
+
+       allocate_inline_push_consts(ctx, user_sgpr_info);
 }
 
 static void
 declare_global_input_sgprs(struct radv_shader_context *ctx,
-                          gl_shader_stage stage,
-                          bool has_previous_stage,
-                          gl_shader_stage previous_stage,
                           const struct user_sgpr_info *user_sgpr_info,
                           struct arg_info *args,
                           LLVMValueRef *desc_sets)
 {
        LLVMTypeRef type = ac_array_in_const32_addr_space(ctx->ac.i8);
-       unsigned num_sets = ctx->options->layout ?
-                           ctx->options->layout->num_sets : 0;
-       unsigned stage_mask = 1 << stage;
-
-       if (has_previous_stage)
-               stage_mask |= 1 << previous_stage;
 
        /* 1 for each descriptor set */
        if (!user_sgpr_info->indirect_all_descriptor_sets) {
-               for (unsigned i = 0; i < num_sets; ++i) {
-                       if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
-                           ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
-                               add_array_arg(args, type,
-                                             &ctx->descriptor_sets[i]);
-                       }
+               uint32_t mask = ctx->shader_info->info.desc_set_used_mask;
+
+               while (mask) {
+                       int i = u_bit_scan(&mask);
+
+                       add_arg(args, ARG_SGPR, type, &ctx->descriptor_sets[i]);
                }
        } else {
-               add_array_arg(args, ac_array_in_const32_addr_space(type), desc_sets);
+               add_arg(args, ARG_SGPR, ac_array_in_const32_addr_space(type),
+                       desc_sets);
        }
 
        if (ctx->shader_info->info.loads_push_constants) {
                /* 1 for push constants and dynamic descriptors */
-               add_array_arg(args, type, &ctx->abi.push_constants);
+               add_arg(args, ARG_SGPR, type, &ctx->abi.push_constants);
+       }
+
+       for (unsigned i = 0; i < ctx->shader_info->info.num_inline_push_consts; i++) {
+               add_arg(args, ARG_SGPR, ctx->ac.i32,
+                       &ctx->abi.inline_push_consts[i]);
+       }
+       ctx->abi.num_inline_push_consts = ctx->shader_info->info.num_inline_push_consts;
+       ctx->abi.base_inline_push_consts = ctx->shader_info->info.base_inline_push_consts;
+
+       if (ctx->shader_info->info.so.num_outputs) {
+               add_arg(args, ARG_SGPR,
+                       ac_array_in_const32_addr_space(ctx->ac.v4i32),
+                       &ctx->streamout_buffers);
        }
 }
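Note: the per-set arguments above are now declared by walking desc_set_used_mask directly instead of iterating over the pipeline layout. A small example with an assumed mask; u_bit_scan() returns the lowest set bit and clears it:

        uint32_t mask = 0xb;                 /* sets 0, 1 and 3 are used */
        while (mask) {
                int i = u_bit_scan(&mask);   /* yields 0, then 1, then 3 */
                /* one 32-bit descriptor-set pointer SGPR per used set   */
        }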
 
@@ -763,14 +827,54 @@ declare_vs_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
 {
        add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.vertex_id);
        if (!ctx->is_gs_copy_shader) {
-               if (ctx->options->key.vs.as_ls) {
+               if (ctx->options->key.vs.out.as_ls) {
                        add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->rel_auto_id);
                        add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
+                       add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
                } else {
-                       add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
-                       add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
+                       if (ctx->ac.chip_class >= GFX10) {
+                               add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
+                               add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* user vgpr */
+                               add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
+                       } else {
+                               add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->abi.instance_id);
+                               add_arg(args, ARG_VGPR, ctx->ac.i32, &ctx->vs_prim_id);
+                               add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
+                       }
+               }
+       }
+}
+
+static void
+declare_streamout_sgprs(struct radv_shader_context *ctx, gl_shader_stage stage,
+                       struct arg_info *args)
+{
+       int i;
+
+       if (ctx->ac.chip_class >= GFX10)
+               return;
+
+       /* Streamout SGPRs. */
+       if (ctx->shader_info->info.so.num_outputs) {
+               assert(stage == MESA_SHADER_VERTEX ||
+                      stage == MESA_SHADER_TESS_EVAL);
+
+               if (stage != MESA_SHADER_TESS_EVAL) {
+                       add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_config);
+               } else {
+                       args->assign[args->count - 1] = &ctx->streamout_config;
+                       args->types[args->count - 1] = ctx->ac.i32;
                }
-               add_arg(args, ARG_VGPR, ctx->ac.i32, NULL); /* unused */
+
+               add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_write_idx);
+       }
+
+       /* A streamout buffer offset is loaded if the stride is non-zero. */
+       for (i = 0; i < 4; i++) {
+               if (!ctx->shader_info->info.so.strides[i])
+                       continue;
+
+               add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_offset[i]);
        }
 }
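Note: declare_streamout_sgprs() only adds offset SGPRs for streamout buffers whose stride is non-zero, and on GFX10 it declares nothing at all. For an assumed key with so.strides = {4, 0, 0, 2}, the declared SGPRs would be:

        /* streamout_config     (for TES this reuses the SGPR slot declared
         *                       just before the call instead of adding one) */
        /* streamout_write_idx                                               */
        /* streamout_offset[0]  - strides[0] = 4, non-zero                   */
        /* streamout_offset[3]  - strides[3] = 2, non-zero                   */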
 
@@ -784,48 +888,47 @@ declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
 }
 
 static void
-set_global_input_locs(struct radv_shader_context *ctx, gl_shader_stage stage,
-                     bool has_previous_stage, gl_shader_stage previous_stage,
+set_global_input_locs(struct radv_shader_context *ctx,
                      const struct user_sgpr_info *user_sgpr_info,
                      LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
 {
-       unsigned num_sets = ctx->options->layout ?
-                           ctx->options->layout->num_sets : 0;
-       unsigned stage_mask = 1 << stage;
-
-       if (has_previous_stage)
-               stage_mask |= 1 << previous_stage;
+       uint32_t mask = ctx->shader_info->info.desc_set_used_mask;
 
        if (!user_sgpr_info->indirect_all_descriptor_sets) {
-               for (unsigned i = 0; i < num_sets; ++i) {
-                       if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
-                           ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
-                               set_loc_desc(ctx, i, user_sgpr_idx, 0);
-                       } else
-                               ctx->descriptor_sets[i] = NULL;
+               while (mask) {
+                       int i = u_bit_scan(&mask);
+
+                       set_loc_desc(ctx, i, user_sgpr_idx);
                }
        } else {
                set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
                                   user_sgpr_idx);
 
-               for (unsigned i = 0; i < num_sets; ++i) {
-                       if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
-                           ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
-                               set_loc_desc(ctx, i, user_sgpr_idx, i * 8);
-                               ctx->descriptor_sets[i] =
-                                       ac_build_load_to_sgpr(&ctx->ac,
-                                                             desc_sets,
-                                                             LLVMConstInt(ctx->ac.i32, i, false));
+               while (mask) {
+                       int i = u_bit_scan(&mask);
+
+                       ctx->descriptor_sets[i] =
+                               ac_build_load_to_sgpr(&ctx->ac, desc_sets,
+                                                     LLVMConstInt(ctx->ac.i32, i, false));
 
-                       } else
-                               ctx->descriptor_sets[i] = NULL;
                }
+
                ctx->shader_info->need_indirect_descriptor_sets = true;
        }
 
        if (ctx->shader_info->info.loads_push_constants) {
                set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx);
        }
+
+       if (ctx->shader_info->info.num_inline_push_consts) {
+               set_loc_shader(ctx, AC_UD_INLINE_PUSH_CONSTANTS, user_sgpr_idx,
+                              ctx->shader_info->info.num_inline_push_consts);
+       }
+
+       if (ctx->streamout_buffers) {
+               set_loc_shader_ptr(ctx, AC_UD_STREAMOUT_BUFFERS,
+                              user_sgpr_idx);
+       }
 }
 
 static void
@@ -880,6 +983,12 @@ static void set_llvm_calling_convention(LLVMValueRef func,
        LLVMSetFunctionCallConv(func, calling_conv);
 }
 
+/* Returns whether the stage is one that can directly precede the GS */
+static bool is_pre_gs_stage(gl_shader_stage stage)
+{
+       return stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL;
+}
+
 static void create_function(struct radv_shader_context *ctx,
                             gl_shader_stage stage,
                             bool has_previous_stage,
@@ -898,11 +1007,19 @@ static void create_function(struct radv_shader_context *ctx,
                        &ctx->ring_offsets);
        }
 
+       if (ctx->ac.chip_class >= GFX10) {
+               if (stage == MESA_SHADER_VERTEX && ctx->options->key.vs.out.as_ngg) {
+                       /* On GFX10, VS is merged into GS for NGG. */
+                       stage = MESA_SHADER_GEOMETRY;
+                       has_previous_stage = true;
+                       previous_stage = MESA_SHADER_VERTEX;
+               }
+       }
+
        switch (stage) {
        case MESA_SHADER_COMPUTE:
-               declare_global_input_sgprs(ctx, stage, has_previous_stage,
-                                          previous_stage, &user_sgpr_info,
-                                          &args, &desc_sets);
+               declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
+                                          &desc_sets);
 
                if (ctx->shader_info->info.cs.uses_grid_size) {
                        add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
@@ -923,18 +1040,23 @@ static void create_function(struct radv_shader_context *ctx,
                        &ctx->abi.local_invocation_ids);
                break;
        case MESA_SHADER_VERTEX:
-               declare_global_input_sgprs(ctx, stage, has_previous_stage,
-                                          previous_stage, &user_sgpr_info,
-                                          &args, &desc_sets);
+               declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
+                                          &desc_sets);
+
                declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
                                                previous_stage, &args);
 
                if (needs_view_index)
                        add_arg(&args, ARG_SGPR, ctx->ac.i32,
                                &ctx->abi.view_index);
-               if (ctx->options->key.vs.as_es)
+               if (ctx->options->key.vs.out.as_es) {
                        add_arg(&args, ARG_SGPR, ctx->ac.i32,
                                &ctx->es2gs_offset);
+               } else if (ctx->options->key.vs.out.as_ls) {
+                       /* no extra parameters */
+               } else {
+                       declare_streamout_sgprs(ctx, stage, &args);
+               }
 
                declare_vs_input_vgprs(ctx, &args);
                break;
@@ -951,11 +1073,9 @@ static void create_function(struct radv_shader_context *ctx,
                        add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
                        add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
 
-                       declare_global_input_sgprs(ctx, stage,
-                                                  has_previous_stage,
-                                                  previous_stage,
-                                                  &user_sgpr_info, &args,
+                       declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
                                                   &desc_sets);
+
                        declare_vs_specific_input_sgprs(ctx, stage,
                                                        has_previous_stage,
                                                        previous_stage, &args);
@@ -971,10 +1091,7 @@ static void create_function(struct radv_shader_context *ctx,
 
                        declare_vs_input_vgprs(ctx, &args);
                } else {
-                       declare_global_input_sgprs(ctx, stage,
-                                                  has_previous_stage,
-                                                  previous_stage,
-                                                  &user_sgpr_info, &args,
+                       declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
                                                   &desc_sets);
 
                        if (needs_view_index)
@@ -991,21 +1108,21 @@ static void create_function(struct radv_shader_context *ctx,
                }
                break;
        case MESA_SHADER_TESS_EVAL:
-               declare_global_input_sgprs(ctx, stage, has_previous_stage,
-                                          previous_stage, &user_sgpr_info,
-                                          &args, &desc_sets);
+               declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
+                                          &desc_sets);
 
                if (needs_view_index)
                        add_arg(&args, ARG_SGPR, ctx->ac.i32,
                                &ctx->abi.view_index);
 
-               if (ctx->options->key.tes.as_es) {
+               if (ctx->options->key.tes.out.as_es) {
                        add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
                        add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
                        add_arg(&args, ARG_SGPR, ctx->ac.i32,
                                &ctx->es2gs_offset);
                } else {
                        add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
+                       declare_streamout_sgprs(ctx, stage, &args);
                        add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
                }
                declare_tes_input_vgprs(ctx, &args);
@@ -1013,8 +1130,14 @@ static void create_function(struct radv_shader_context *ctx,
        case MESA_SHADER_GEOMETRY:
                if (has_previous_stage) {
                        // First 6 system regs
-                       add_arg(&args, ARG_SGPR, ctx->ac.i32,
-                               &ctx->gs2vs_offset);
+                       if (ctx->options->key.vs.out.as_ngg) {
+                               add_arg(&args, ARG_SGPR, ctx->ac.i32,
+                                       &ctx->gs_tg_info);
+                       } else {
+                               add_arg(&args, ARG_SGPR, ctx->ac.i32,
+                                       &ctx->gs2vs_offset);
+                       }
+
                        add_arg(&args, ARG_SGPR, ctx->ac.i32,
                                &ctx->merged_wave_info);
                        add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
@@ -1023,10 +1146,7 @@ static void create_function(struct radv_shader_context *ctx,
                        add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
                        add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
 
-                       declare_global_input_sgprs(ctx, stage,
-                                                  has_previous_stage,
-                                                  previous_stage,
-                                                  &user_sgpr_info, &args,
+                       declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
                                                   &desc_sets);
 
                        if (previous_stage != MESA_SHADER_TESS_EVAL) {
@@ -1057,10 +1177,7 @@ static void create_function(struct radv_shader_context *ctx,
                                declare_tes_input_vgprs(ctx, &args);
                        }
                } else {
-                       declare_global_input_sgprs(ctx, stage,
-                                                  has_previous_stage,
-                                                  previous_stage,
-                                                  &user_sgpr_info, &args,
+                       declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
                                                   &desc_sets);
 
                        if (needs_view_index)
@@ -1088,9 +1205,8 @@ static void create_function(struct radv_shader_context *ctx,
                }
                break;
        case MESA_SHADER_FRAGMENT:
-               declare_global_input_sgprs(ctx, stage, has_previous_stage,
-                                          previous_stage, &user_sgpr_info,
-                                          &args, &desc_sets);
+               declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
+                                          &desc_sets);
 
                add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
                add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_sample);
@@ -1137,7 +1253,7 @@ static void create_function(struct radv_shader_context *ctx,
                                   &user_sgpr_idx);
                if (ctx->options->supports_spill) {
                        ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
-                                                              LLVMPointerType(ctx->ac.i8, AC_CONST_ADDR_SPACE),
+                                                              LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_CONST),
                                                               NULL, 0, AC_FUNC_ATTR_READNONE);
                        ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
                                                             ac_array_in_const_addr_space(ctx->ac.v4i32), "");
@@ -1149,8 +1265,7 @@ static void create_function(struct radv_shader_context *ctx,
        if (has_previous_stage)
                user_sgpr_idx = 0;
 
-       set_global_input_locs(ctx, stage, has_previous_stage, previous_stage,
-                             &user_sgpr_info, desc_sets, &user_sgpr_idx);
+       set_global_input_locs(ctx, &user_sgpr_info, desc_sets, &user_sgpr_idx);
 
        switch (stage) {
        case MESA_SHADER_COMPUTE:
@@ -1193,7 +1308,7 @@ static void create_function(struct radv_shader_context *ctx,
        }
 
        if (stage == MESA_SHADER_TESS_CTRL ||
-           (stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_ls) ||
+           (stage == MESA_SHADER_VERTEX && ctx->options->key.vs.out.as_ls) ||
            /* GFX9 has the ESGS ring buffer in LDS. */
            (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
                ac_declare_lds_as_pointer(&ctx->ac);
@@ -1224,13 +1339,35 @@ radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
        } else
                stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);
 
-       offset = ac_build_imad(&ctx->ac, index, stride,
-                              LLVMConstInt(ctx->ac.i32, base_offset, false));
+       offset = LLVMConstInt(ctx->ac.i32, base_offset, false);
 
-       desc_ptr = ac_build_gep0(&ctx->ac, desc_ptr, offset);
+       if (layout->binding[binding].type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
+               offset = ac_build_imad(&ctx->ac, index, stride, offset);
+       }
+
+       desc_ptr = LLVMBuildGEP(ctx->ac.builder, desc_ptr, &offset, 1, "");
        desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
        LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
 
+       if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
+               uint32_t desc_type = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+                       S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+                       S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+                       S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
+                       S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
+                       S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+
+               LLVMValueRef desc_components[4] = {
+                       LLVMBuildPtrToInt(ctx->ac.builder, desc_ptr, ctx->ac.intptr, ""),
+                       LLVMConstInt(ctx->ac.i32, S_008F04_BASE_ADDRESS_HI(ctx->options->address32_hi), false),
+                       /* High limit to support variable sizes. */
+                       LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
+                       LLVMConstInt(ctx->ac.i32, desc_type, false),
+               };
+
+               return ac_build_gather_values(&ctx->ac, desc_components, 4);
+       }
+
        return desc_ptr;
 }
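Note: for VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT the function above no longer returns a pointer into the descriptor set; it builds a buffer descriptor on the spot whose base address is the inline data itself. Roughly, the four dwords assembled above are (word meanings as commonly laid out for a GCN buffer resource, stated as an aid rather than authoritatively):

        /* word 0: low 32 bits of the address of the inline data in the set */
        /* word 1: S_008F04_BASE_ADDRESS_HI(address32_hi), stride left at 0 */
        /* word 2: num_records = 0xffffffff, a high limit so variably sized
         *         blocks pass the bounds check                             */
        /* word 3: DST_SEL XYZW plus a 32-bit float buffer format           */

radv_load_ubo() later recognizes this case because the value it receives is already a vector rather than a pointer (see the hunk at -1825 below).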
 
@@ -1423,7 +1560,7 @@ store_tcs_output(struct ac_shader_abi *abi,
 {
        struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
        const unsigned location = var->data.location;
-       const unsigned component = var->data.location_frac;
+       unsigned component = var->data.location_frac;
        const bool is_patch = var->data.patch;
        const bool is_compact = var->data.compact;
        LLVMValueRef dw_addr;
@@ -1441,10 +1578,14 @@ store_tcs_output(struct ac_shader_abi *abi,
        }
 
        param = shader_io_get_unique_index(location);
-       if (location == VARYING_SLOT_CLIP_DIST0 &&
-           is_compact && const_index > 3) {
-               const_index -= 3;
-               param++;
+       if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
+               const_index += component;
+               component = 0;
+
+               if (const_index >= 4) {
+                       const_index -= 4;
+                       param++;
+               }
        }
 
        if (!is_patch) {
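Note: a worked example of the compact clip/cull remapping above, with assumed values. Storing component 2 of a compact VARYING_SLOT_CLIP_DIST0 output with const_index = 3:

        const_index += component;    /* 5: sixth float of the clip/cull array  */
        component = 0;
        if (const_index >= 4) {      /* falls into the second vec4             */
                const_index -= 4;    /* 1                                      */
                param++;             /* the slot holding clip/cull floats 4..7 */
        }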
@@ -1482,13 +1623,13 @@ store_tcs_output(struct ac_shader_abi *abi,
                if (!is_tess_factor && writemask != 0xF)
                        ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
                                                    buf_addr, ctx->oc_lds,
-                                                   4 * (base + chan), 1, 0, true, false);
+                                                   4 * (base + chan), ac_glc, false);
        }
 
        if (writemask == 0xF) {
                ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
                                            buf_addr, ctx->oc_lds,
-                                           (base * 4), 1, 0, true, false);
+                                           (base * 4), ac_glc, false);
        }
 }
 
@@ -1511,9 +1652,13 @@ load_tes_input(struct ac_shader_abi *abi,
        LLVMValueRef result;
        unsigned param = shader_io_get_unique_index(location);
 
-       if (location == VARYING_SLOT_CLIP_DIST0 && is_compact && const_index > 3) {
-               const_index -= 3;
-               param++;
+       if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
+               const_index += component;
+               component = 0;
+               if (const_index >= 4) {
+                       const_index -= 4;
+                       param++;
+               }
        }
 
        buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
@@ -1523,7 +1668,7 @@ load_tes_input(struct ac_shader_abi *abi,
        buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");
 
        result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
-                                     buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, 1, 0, true, false);
+                                     buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, ac_glc, true, false);
        result = ac_trim_vector(&ctx->ac, result, num_components);
        return result;
 }
@@ -1566,7 +1711,7 @@ load_gs_input(struct ac_shader_abi *abi,
                                                        ctx->esgs_ring, 1,
                                                        ctx->ac.i32_0,
                                                        vtx_offset, soffset,
-                                                       0, 1, 0, true, false);
+                                                       0, ac_glc, true, false);
                }
 
                if (ac_get_type_size(type) == 2) {
@@ -1632,9 +1777,6 @@ radv_get_sample_pos_offset(uint32_t num_samples)
        case 8:
                sample_pos_offset = 7;
                break;
-       case 16:
-               sample_pos_offset = 15;
-               break;
        default:
                break;
        }
@@ -1647,7 +1789,8 @@ static LLVMValueRef load_sample_position(struct ac_shader_abi *abi,
        struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
 
        LLVMValueRef result;
-       LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false));
+       LLVMValueRef index = LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false);
+       LLVMValueRef ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ring_offsets, &index, 1, "");
 
        ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
                               ac_array_in_const_addr_space(ctx->ac.v2f32), "");
@@ -1705,11 +1848,9 @@ visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addr
        unsigned offset = 0;
        struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
 
-       assert(stream == 0);
-
        /* Write vertex attribute values to GSVS ring */
        gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
-                                      ctx->gs_next_vertex,
+                                      ctx->gs_next_vertex[stream],
                                       "");
 
        /* If this thread has already emitted the declared maximum number of
@@ -1724,17 +1865,15 @@ visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addr
        for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
                unsigned output_usage_mask =
                        ctx->shader_info->info.gs.output_usage_mask[i];
+               uint8_t output_stream =
+                       ctx->shader_info->info.gs.output_streams[i];
                LLVMValueRef *out_ptr = &addrs[i * 4];
-               int length = 4;
+               int length = util_last_bit(output_usage_mask);
 
-               if (!(ctx->output_mask & (1ull << i)))
+               if (!(ctx->output_mask & (1ull << i)) ||
+                   output_stream != stream)
                        continue;
 
-               if (i == VARYING_SLOT_CLIP_DIST0) {
-                       /* pack clip and cull into a single set of slots */
-                       length = util_last_bit(output_usage_mask);
-               }
-
                for (unsigned j = 0; j < length; j++) {
                        if (!(output_usage_mask & (1 << j)))
                                continue;
@@ -1753,18 +1892,21 @@ visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addr
                        out_val = ac_to_integer(&ctx->ac, out_val);
                        out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");
 
-                       ac_build_buffer_store_dword(&ctx->ac, ctx->gsvs_ring,
+                       ac_build_buffer_store_dword(&ctx->ac,
+                                                   ctx->gsvs_ring[stream],
                                                    out_val, 1,
                                                    voffset, ctx->gs2vs_offset, 0,
-                                                   1, 1, true, true);
+                                                   ac_glc | ac_slc, true);
                }
        }
 
        gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
                                      ctx->ac.i32_1, "");
-       LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex);
+       LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);
 
-       ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (0 << 8), ctx->gs_wave_id);
+       ac_build_sendmsg(&ctx->ac,
+                        AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
+                        ctx->gs_wave_id);
 }
 
 static void
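Note: visit_emit_vertex() now handles all four GS streams: each stream has its own vertex counter and GSVS ring, an output is only stored when its recorded stream matches, and the stream index lands in bits 8 and up of the EMIT sendmsg immediate. A small example of the per-slot filtering, with an assumed usage mask:

        unsigned output_usage_mask = 0x7;  /* .xyz written                */
        uint8_t  output_stream     = 1;    /* slot belongs to GS stream 1 */
        int length = util_last_bit(output_usage_mask);     /* 3 channels  */
        /* Stored only when visit_emit_vertex() runs for stream == 1; the
         * store targets ctx->gsvs_ring[1] and bumps ctx->gs_next_vertex[1]. */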
@@ -1825,6 +1967,11 @@ static LLVMValueRef radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer
        struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
        LLVMValueRef result;
 
+       if (LLVMGetTypeKind(LLVMTypeOf(buffer_ptr)) != LLVMPointerTypeKind) {
+               /* Do not load the descriptor for inlined uniform blocks. */
+               return buffer_ptr;
+       }
+
        LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
 
        result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
@@ -1866,8 +2013,9 @@ static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
                break;
        case AC_DESC_SAMPLER:
                type = ctx->ac.v4i32;
-               if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
-                       offset += 64;
+               if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
+                       offset += radv_combined_image_descriptor_sampler_offset(binding);
+               }
 
                type_size = 16;
                break;
@@ -1875,6 +2023,13 @@ static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
                type = ctx->ac.v4i32;
                type_size = 16;
                break;
+       case AC_DESC_PLANE_0:
+       case AC_DESC_PLANE_1:
+       case AC_DESC_PLANE_2:
+               type = ctx->ac.v8i32;
+               type_size = 32;
+               offset += 32 * (desc_type - AC_DESC_PLANE_0);
+               break;
        default:
                unreachable("invalid desc_type\n");
        }
@@ -1899,16 +2054,35 @@ static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
 
        assert(stride % type_size == 0);
 
-       if (!index)
-               index = ctx->ac.i32_0;
+       LLVMValueRef adjusted_index = index;
+       if (!adjusted_index)
+               adjusted_index = ctx->ac.i32_0;
 
-       index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");
+       adjusted_index = LLVMBuildMul(builder, adjusted_index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");
 
-       list = ac_build_gep0(&ctx->ac, list, LLVMConstInt(ctx->ac.i32, offset, 0));
+       LLVMValueRef val_offset = LLVMConstInt(ctx->ac.i32, offset, 0);
+       list = LLVMBuildGEP(builder, list, &val_offset, 1, "");
        list = LLVMBuildPointerCast(builder, list,
                                    ac_array_in_const32_addr_space(type), "");
 
-       return ac_build_load_to_sgpr(&ctx->ac, list, index);
+       LLVMValueRef descriptor = ac_build_load_to_sgpr(&ctx->ac, list, adjusted_index);
+
+       /* 3-plane formats always have the same size and format for planes 1
+        * and 2, so reuse the tail of the plane 1 descriptor; only the first
+        * 16 bytes of the last plane then need to be stored. */
+       if (desc_type == AC_DESC_PLANE_2) {
+               LLVMValueRef descriptor2 = radv_get_sampler_desc(abi, descriptor_set, base_index, constant_index, index, AC_DESC_PLANE_1, image, write, bindless);
+
+               LLVMValueRef components[8];
+               for (unsigned i = 0; i < 4; ++i)
+                       components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor, i);
+
+               for (unsigned i = 4; i < 8; ++i)
+                       components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor2, i);
+               descriptor = ac_build_gather_values(&ctx->ac, components, 8);
+       }
+
+       return descriptor;
 }
 
 /* For 2_10_10_10 formats the alpha is handled as unsigned by pre-vega HW.
@@ -1923,6 +2097,8 @@ adjust_vertex_fetch_alpha(struct radv_shader_context *ctx,
 
        LLVMValueRef c30 = LLVMConstInt(ctx->ac.i32, 30, 0);
 
+       alpha = LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.f32, "");
+
        if (adjustment == RADV_ALPHA_ADJUST_SSCALED)
                alpha = LLVMBuildFPToUI(ctx->ac.builder, alpha, ctx->ac.i32, "");
        else
@@ -1950,7 +2126,71 @@ adjust_vertex_fetch_alpha(struct radv_shader_context *ctx,
                alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
        }
 
-       return alpha;
+       return LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.i32, "");
+}
+
+static unsigned
+get_num_channels_from_data_format(unsigned data_format)
+{
+       switch (data_format) {
+       case V_008F0C_BUF_DATA_FORMAT_8:
+       case V_008F0C_BUF_DATA_FORMAT_16:
+       case V_008F0C_BUF_DATA_FORMAT_32:
+               return 1;
+       case V_008F0C_BUF_DATA_FORMAT_8_8:
+       case V_008F0C_BUF_DATA_FORMAT_16_16:
+       case V_008F0C_BUF_DATA_FORMAT_32_32:
+               return 2;
+       case V_008F0C_BUF_DATA_FORMAT_10_11_11:
+       case V_008F0C_BUF_DATA_FORMAT_11_11_10:
+       case V_008F0C_BUF_DATA_FORMAT_32_32_32:
+               return 3;
+       case V_008F0C_BUF_DATA_FORMAT_8_8_8_8:
+       case V_008F0C_BUF_DATA_FORMAT_10_10_10_2:
+       case V_008F0C_BUF_DATA_FORMAT_2_10_10_10:
+       case V_008F0C_BUF_DATA_FORMAT_16_16_16_16:
+       case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
+               return 4;
+       default:
+               break;
+       }
+
+       return 4;
+}
+
+static LLVMValueRef
+radv_fixup_vertex_input_fetches(struct radv_shader_context *ctx,
+                               LLVMValueRef value,
+                               unsigned num_channels,
+                               bool is_float)
+{
+       LLVMValueRef zero = is_float ? ctx->ac.f32_0 : ctx->ac.i32_0;
+       LLVMValueRef one = is_float ? ctx->ac.f32_1 : ctx->ac.i32_1;
+       LLVMValueRef chan[4];
+
+       if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind) {
+               unsigned vec_size = LLVMGetVectorSize(LLVMTypeOf(value));
+
+               if (num_channels == 4 && num_channels == vec_size)
+                       return value;
+
+               num_channels = MIN2(num_channels, vec_size);
+
+               for (unsigned i = 0; i < num_channels; i++)
+                       chan[i] = ac_llvm_extract_elem(&ctx->ac, value, i);
+       } else {
+               if (num_channels) {
+                       assert(num_channels == 1);
+                       chan[0] = value;
+               }
+       }
+
+       for (unsigned i = num_channels; i < 4; i++) {
+               chan[i] = i == 3 ? one : zero;
+               chan[i] = ac_to_integer(&ctx->ac, chan[i]);
+       }
+
+       return ac_build_gather_values(&ctx->ac, chan, 4);
 }
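Note: radv_fixup_vertex_input_fetches() pads whatever the fetch returned out to a full vec4, defaulting missing channels to 0 and the w channel to 1 (in float or integer form), then converting the filler with ac_to_integer() so it can live in the i32 input slots. A host-side analogue, purely for illustration (names made up):

        /* Host-side sketch of the padding rule. */
        static void pad_to_vec4(const float *in, unsigned num_channels,
                                float out[4])
        {
                for (unsigned i = 0; i < 4; i++)
                        out[i] = i < num_channels ? in[i]
                                                  : (i == 3 ? 1.0f : 0.0f);
        }
        /* pad_to_vec4((float[]){x, y}, 2, out) -> { x, y, 0.0f, 1.0f } */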
 
 static void
@@ -1965,7 +2205,7 @@ handle_vs_input_decl(struct radv_shader_context *ctx,
        unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
        uint8_t input_usage_mask =
                ctx->shader_info->info.vs.input_usage_mask[variable->data.location];
-       unsigned num_channels = util_last_bit(input_usage_mask);
+       unsigned num_input_channels = util_last_bit(input_usage_mask);
 
        variable->data.driver_location = variable->data.location * 4;
 
@@ -1973,6 +2213,11 @@ handle_vs_input_decl(struct radv_shader_context *ctx,
        for (unsigned i = 0; i < attrib_count; ++i) {
                LLVMValueRef output[4];
                unsigned attrib_index = variable->data.location + i - VERT_ATTRIB_GENERIC0;
+               unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[attrib_index];
+               unsigned data_format = attrib_format & 0x0f;
+               unsigned num_format = (attrib_format >> 4) & 0x07;
+               bool is_float = num_format != V_008F0C_BUF_NUM_FORMAT_UINT &&
+                               num_format != V_008F0C_BUF_NUM_FORMAT_SINT;
 
                if (ctx->options->key.vs.instance_rate_inputs & (1u << attrib_index)) {
                        uint32_t divisor = ctx->options->key.vs.instance_rate_divisors[attrib_index];
@@ -1984,14 +2229,6 @@ handle_vs_input_decl(struct radv_shader_context *ctx,
                                        buffer_index = LLVMBuildUDiv(ctx->ac.builder, buffer_index,
                                                                     LLVMConstInt(ctx->ac.i32, divisor, 0), "");
                                }
-
-                               if (ctx->options->key.vs.as_ls) {
-                                       ctx->shader_info->vs.vgpr_comp_cnt =
-                                               MAX2(2, ctx->shader_info->vs.vgpr_comp_cnt);
-                               } else {
-                                       ctx->shader_info->vs.vgpr_comp_cnt =
-                                               MAX2(1, ctx->shader_info->vs.vgpr_comp_cnt);
-                               }
                        } else {
                                buffer_index = ctx->ac.i32_0;
                        }
@@ -2000,16 +2237,57 @@ handle_vs_input_decl(struct radv_shader_context *ctx,
                } else
                        buffer_index = LLVMBuildAdd(ctx->ac.builder, ctx->abi.vertex_id,
                                                    ctx->abi.base_vertex, "");
-               t_offset = LLVMConstInt(ctx->ac.i32, attrib_index, false);
 
-               t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);
+               /* Adjust the number of channels to load based on the vertex
+                * attribute format.
+                */
+               unsigned num_format_channels = get_num_channels_from_data_format(data_format);
+               unsigned num_channels = MIN2(num_input_channels, num_format_channels);
+               unsigned attrib_binding = ctx->options->key.vs.vertex_attribute_bindings[attrib_index];
+               unsigned attrib_offset = ctx->options->key.vs.vertex_attribute_offsets[attrib_index];
+               unsigned attrib_stride = ctx->options->key.vs.vertex_attribute_strides[attrib_index];
+
+               if (ctx->options->key.vs.post_shuffle & (1 << attrib_index)) {
+                       /* Always load at least 3 channels for formats that
+                        * need post-shuffling because X and Z are swapped.
+                        */
+                       num_channels = MAX2(num_channels, 3);
+               }
+
+               if (attrib_stride != 0 && attrib_offset > attrib_stride) {
+                       LLVMValueRef buffer_offset =
+                               LLVMConstInt(ctx->ac.i32,
+                                            attrib_offset / attrib_stride, false);
 
-               input = ac_build_buffer_load_format(&ctx->ac, t_list,
+                       buffer_index = LLVMBuildAdd(ctx->ac.builder,
                                                    buffer_index,
-                                                   ctx->ac.i32_0,
-                                                   num_channels, false, true);
+                                                   buffer_offset, "");
 
-               input = ac_build_expand_to_vec4(&ctx->ac, input, num_channels);
+                       attrib_offset = attrib_offset % attrib_stride;
+               }
+
+               t_offset = LLVMConstInt(ctx->ac.i32, attrib_binding, false);
+               t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);
+
+               input = ac_build_struct_tbuffer_load(&ctx->ac, t_list,
+                                                    buffer_index,
+                                                    LLVMConstInt(ctx->ac.i32, attrib_offset, false),
+                                                    ctx->ac.i32_0, ctx->ac.i32_0,
+                                                    num_channels,
+                                                    data_format, num_format, 0, true);
+
+               if (ctx->options->key.vs.post_shuffle & (1 << attrib_index)) {
+                       LLVMValueRef c[4];
+                       c[0] = ac_llvm_extract_elem(&ctx->ac, input, 2);
+                       c[1] = ac_llvm_extract_elem(&ctx->ac, input, 1);
+                       c[2] = ac_llvm_extract_elem(&ctx->ac, input, 0);
+                       c[3] = ac_llvm_extract_elem(&ctx->ac, input, 3);
+
+                       input = ac_build_gather_values(&ctx->ac, c, 4);
+               }
+
+               input = radv_fixup_vertex_input_fetches(ctx, input, num_channels,
+                                                       is_float);
 
                for (unsigned chan = 0; chan < 4; chan++) {
                        LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
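Note: two details of the reworked vertex fetch above, with assumed numbers. When the attribute offset exceeds the binding stride, the fetch is rebased so the constant offset passed to the typed buffer load stays below the stride; and post_shuffle formats get their X and Z channels exchanged after the load, which is also why at least three channels are fetched for them:

        /* Assumed: attrib_stride = 16 bytes, attrib_offset = 36 bytes. */
        unsigned attrib_stride = 16, attrib_offset = 36, buffer_index = 0;

        if (attrib_stride != 0 && attrib_offset > attrib_stride) {
                buffer_index += attrib_offset / attrib_stride;  /* += 2 elements */
                attrib_offset = attrib_offset % attrib_stride;  /*  = 4 bytes    */
        }
        /* post_shuffle: the fetched <x, y, z, w> is reassembled as
         * <z, y, x, w> by swapping elements 0 and 2. */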
@@ -2037,6 +2315,7 @@ static void interp_fs_input(struct radv_shader_context *ctx,
                            unsigned attr,
                            LLVMValueRef interp_param,
                            LLVMValueRef prim_mask,
+                           bool float16,
                            LLVMValueRef result[4])
 {
        LLVMValueRef attr_number;
@@ -2069,7 +2348,12 @@ static void interp_fs_input(struct radv_shader_context *ctx,
        for (chan = 0; chan < 4; chan++) {
                LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
 
-               if (interp) {
+               if (interp && float16) {
+                       result[chan] = ac_build_fs_interp_f16(&ctx->ac,
+                                                             llvm_chan,
+                                                             attr_number,
+                                                             prim_mask, i, j);
+               } else if (interp) {
                        result[chan] = ac_build_fs_interp(&ctx->ac,
                                                          llvm_chan,
                                                          attr_number,
@@ -2081,7 +2365,30 @@ static void interp_fs_input(struct radv_shader_context *ctx,
                                                              attr_number,
                                                              prim_mask);
                        result[chan] = LLVMBuildBitCast(ctx->ac.builder, result[chan], ctx->ac.i32, "");
-                       result[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, result[chan], LLVMTypeOf(interp_param), "");
+                       result[chan] = LLVMBuildTruncOrBitCast(ctx->ac.builder, result[chan], float16 ? ctx->ac.i16 : ctx->ac.i32, "");
+               }
+       }
+}
+
+static void mark_16bit_fs_input(struct radv_shader_context *ctx,
+                                const struct glsl_type *type,
+                                int location)
+{
+       if (glsl_type_is_scalar(type) || glsl_type_is_vector(type) || glsl_type_is_matrix(type)) {
+               unsigned attrib_count = glsl_count_attribute_slots(type, false);
+               if (glsl_type_is_16bit(type)) {
+                       ctx->float16_shaded_mask |= ((1ull << attrib_count) - 1) << location;
+               }
+       } else if (glsl_type_is_array(type)) {
+               unsigned stride = glsl_count_attribute_slots(glsl_get_array_element(type), false);
+               for (unsigned i = 0; i < glsl_get_length(type); ++i) {
+                       mark_16bit_fs_input(ctx, glsl_get_array_element(type), location + i * stride);
+               }
+       } else {
+               assert(glsl_type_is_struct_or_ifc(type));
+               for (unsigned i = 0; i < glsl_get_length(type); i++) {
+                       mark_16bit_fs_input(ctx, glsl_get_struct_field(type, i), location);
+                       location += glsl_count_attribute_slots(glsl_get_struct_field(type, i), false);
                }
        }
 }
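Note: mark_16bit_fs_input() walks the variable type recursively and records which input locations carry 16-bit data in float16_shaded_mask; handle_fs_inputs() later uses that mask to pick the f16 interpolation path and to fill shader_info->fs.float16_shaded_mask. A small assumed example:

        /* Assumed input: f16vec2 v[3] at location VARYING_SLOT_VAR4.
         * Each element occupies one slot and is 16-bit, so the recursion
         * effectively does:
         *
         *   ctx->float16_shaded_mask |= 0x7ull << VARYING_SLOT_VAR4;
         */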
@@ -2096,9 +2403,20 @@ handle_fs_input_decl(struct radv_shader_context *ctx,
        uint64_t mask;
 
        variable->data.driver_location = idx * 4;
+
+
+       if (variable->data.compact) {
+               unsigned component_count = variable->data.location_frac +
+                                          glsl_get_length(variable->type);
+               attrib_count = (component_count + 3) / 4;
+       } else
+               mark_16bit_fs_input(ctx, variable->type, idx);
+
        mask = ((1ull << attrib_count) - 1) << variable->data.location;
 
-       if (glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_FLOAT) {
+       if (glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_FLOAT ||
+           glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_FLOAT16 ||
+           glsl_get_base_type(glsl_without_array(variable->type)) == GLSL_TYPE_STRUCT) {
                unsigned interp_type;
                if (variable->data.sample)
                        interp_type = INTERP_SAMPLE;
@@ -2109,22 +2427,12 @@ handle_fs_input_decl(struct radv_shader_context *ctx,
 
                interp = lookup_interp_param(&ctx->abi, variable->data.interpolation, interp_type);
        }
-       bool is_16bit = glsl_type_is_16bit(variable->type);
-       LLVMTypeRef type = is_16bit ? ctx->ac.i16 : ctx->ac.i32;
        if (interp == NULL)
-               interp = LLVMGetUndef(type);
+               interp = LLVMGetUndef(ctx->ac.i32);
 
        for (unsigned i = 0; i < attrib_count; ++i)
                ctx->inputs[ac_llvm_reg_index_soa(idx + i, 0)] = interp;
 
-       if (idx == VARYING_SLOT_CLIP_DIST0) {
-               /* Do not account for the number of components inside the array
-                * of clip/cull distances because this might wrongly set other
-                * bits like primitive ID or layer.
-                */
-               mask = 1ull << VARYING_SLOT_CLIP_DIST0;
-       }
-
        ctx->input_mask |= mask;
 }
 
@@ -2170,8 +2478,8 @@ handle_fs_inputs(struct radv_shader_context *ctx,
 
        unsigned index = 0;
 
-       if (ctx->shader_info->info.ps.uses_input_attachments ||
-           ctx->shader_info->info.needs_multiview_view_index) {
+       if (ctx->shader_info->info.needs_multiview_view_index ||
+           ctx->shader_info->info.ps.layer_input) {
                ctx->input_mask |= 1ull << VARYING_SLOT_LAYER;
                ctx->inputs[ac_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)] = LLVMGetUndef(ctx->ac.i32);
        }
@@ -2186,11 +2494,16 @@ handle_fs_inputs(struct radv_shader_context *ctx,
                if (i >= VARYING_SLOT_VAR0 || i == VARYING_SLOT_PNTC ||
                    i == VARYING_SLOT_PRIMITIVE_ID || i == VARYING_SLOT_LAYER) {
                        interp_param = *inputs;
-                       interp_fs_input(ctx, index, interp_param, ctx->abi.prim_mask,
+                       bool float16 = (ctx->float16_shaded_mask >> i) & 1;
+                       interp_fs_input(ctx, index, interp_param, ctx->abi.prim_mask, float16,
                                        inputs);
 
                        if (LLVMIsUndef(interp_param))
                                ctx->shader_info->fs.flat_shaded_mask |= 1u << index;
+                       if (float16)
+                               ctx->shader_info->fs.float16_shaded_mask |= 1u << index;
+                       if (i >= VARYING_SLOT_VAR0)
+                               ctx->abi.fs_input_attr_indices[i - VARYING_SLOT_VAR0] = index;
                        ++index;
                } else if (i == VARYING_SLOT_CLIP_DIST0) {
                        int length = ctx->shader_info->info.ps.num_input_clips_culls;
@@ -2200,7 +2513,7 @@ handle_fs_inputs(struct radv_shader_context *ctx,
 
                                interp_param = *inputs;
                                interp_fs_input(ctx, index, interp_param,
-                                               ctx->abi.prim_mask, inputs);
+                                               ctx->abi.prim_mask, false, inputs);
                                ++index;
                        }
                } else if (i == VARYING_SLOT_POS) {
@@ -2234,6 +2547,12 @@ scan_shader_output_decl(struct radv_shader_context *ctx,
        if (stage == MESA_SHADER_TESS_CTRL)
                return;
 
+       if (variable->data.compact) {
+               unsigned component_count = variable->data.location_frac +
+                                          glsl_get_length(variable->type);
+               attrib_count = (component_count + 3) / 4;
+       }
+
        mask_attribs = ((1ull << attrib_count) - 1) << idx;
        if (stage == MESA_SHADER_VERTEX ||
            stage == MESA_SHADER_TESS_EVAL ||
@@ -2249,8 +2568,6 @@ scan_shader_output_decl(struct radv_shader_context *ctx,
                                ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
                                ctx->shader_info->tes.outinfo.cull_dist_mask <<= shader->info.clip_distance_array_size;
                        }
-
-                       mask_attribs = 1ull << idx;
                }
        }
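
The clip/cull mask packing visible in the context above is simple bit arithmetic: the cull mask is built next to the clip mask and then shifted past the clip distances. A tiny standalone illustration with made-up array sizes (the values are not from any real shader):

#include <stdio.h>

int main(void)
{
        /* Hypothetical shader: 3 clip distances and 2 cull distances. */
        unsigned clip_distance_array_size = 3;
        unsigned cull_distance_array_size = 2;

        unsigned clip_dist_mask = (1u << clip_distance_array_size) - 1;
        unsigned cull_dist_mask = (1u << cull_distance_array_size) - 1;
        cull_dist_mask <<= clip_distance_array_size;

        /* clip = 0x7 (bits 0-2), cull = 0x18 (bits 3-4) */
        printf("clip_dist_mask = 0x%x, cull_dist_mask = 0x%x\n",
               clip_dist_mask, cull_dist_mask);
        return 0;
}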
 
@@ -2288,7 +2605,7 @@ si_llvm_init_export_args(struct radv_shader_context *ctx,
                return;
 
        bool is_16bit = ac_get_type_size(LLVMTypeOf(values[0])) == 2;
-       if (ctx->stage == MESA_SHADER_FRAGMENT && target >= V_008DFC_SQ_EXP_MRT) {
+       if (ctx->stage == MESA_SHADER_FRAGMENT) {
                unsigned index = target - V_008DFC_SQ_EXP_MRT;
                unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
                bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
@@ -2317,9 +2634,15 @@ si_llvm_init_export_args(struct radv_shader_context *ctx,
                        break;
 
                case V_028714_SPI_SHADER_32_AR:
-                       args->enabled_channels = 0x9;
-                       args->out[0] = values[0];
-                       args->out[3] = values[3];
+                       if (ctx->ac.chip_class >= GFX10) {
+                               args->enabled_channels = 0x3;
+                               args->out[0] = values[0];
+                               args->out[1] = values[3];
+                       } else {
+                               args->enabled_channels = 0x9;
+                               args->out[0] = values[0];
+                               args->out[3] = values[3];
+                       }
                        break;
 
                case V_028714_SPI_SHADER_FP16_ABGR:
@@ -2349,7 +2672,7 @@ si_llvm_init_export_args(struct radv_shader_context *ctx,
                        if (is_16bit) {
                                for (unsigned chan = 0; chan < 4; chan++)
                                        values[chan] = LLVMBuildZExt(ctx->ac.builder,
-                                                                     values[chan],
+                                                                     ac_to_integer(&ctx->ac, values[chan]),
                                                                      ctx->ac.i32, "");
                        }
                        break;
@@ -2360,7 +2683,7 @@ si_llvm_init_export_args(struct radv_shader_context *ctx,
                        if (is_16bit) {
                                for (unsigned chan = 0; chan < 4; chan++)
                                        values[chan] = LLVMBuildSExt(ctx->ac.builder,
-                                                                     values[chan],
+                                                                     ac_to_integer(&ctx->ac, values[chan]),
                                                                      ctx->ac.i32, "");
                        }
                        break;
@@ -2413,12 +2736,8 @@ si_llvm_init_export_args(struct radv_shader_context *ctx,
        } else
                memcpy(&args->out[0], values, sizeof(values[0]) * 4);
 
-       for (unsigned i = 0; i < 4; ++i) {
-               if (!(args->enabled_channels & (1 << i)))
-                       continue;
-
+       for (unsigned i = 0; i < 4; ++i)
                args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
-       }
 }
 
 static void
@@ -2441,9 +2760,143 @@ radv_load_output(struct radv_shader_context *ctx, unsigned index, unsigned chan)
        return LLVMBuildLoad(ctx->ac.builder, output, "");
 }
 
+static void
+radv_emit_stream_output(struct radv_shader_context *ctx,
+                        LLVMValueRef const *so_buffers,
+                        LLVMValueRef const *so_write_offsets,
+                        const struct radv_stream_output *output)
+{
+       unsigned num_comps = util_bitcount(output->component_mask);
+       unsigned loc = output->location;
+       unsigned buf = output->buffer;
+       unsigned offset = output->offset;
+       unsigned start;
+       LLVMValueRef out[4];
+
+       assert(num_comps && num_comps <= 4);
+       if (!num_comps || num_comps > 4)
+               return;
+
+       /* Get the first component. */
+       start = ffs(output->component_mask) - 1;
+
+       /* Load the output as int. */
+       for (int i = 0; i < num_comps; i++) {
+               out[i] = ac_to_integer(&ctx->ac,
+                                      radv_load_output(ctx, loc, start + i));
+       }
+
+       /* Pack the output. */
+       LLVMValueRef vdata = NULL;
+
+       switch (num_comps) {
+       case 1: /* as i32 */
+               vdata = out[0];
+               break;
+       case 2: /* as v2i32 */
+       case 3: /* as v4i32 (aligned to 4) */
+               out[3] = LLVMGetUndef(ctx->ac.i32);
+               /* fall through */
+       case 4: /* as v4i32 */
+               vdata = ac_build_gather_values(&ctx->ac, out,
+                                              !ac_has_vec3_support(ctx->ac.chip_class, false) ?
+                                              util_next_power_of_two(num_comps) :
+                                              num_comps);
+               break;
+       }
+
+       ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf],
+                                   vdata, num_comps, so_write_offsets[buf],
+                                   ctx->ac.i32_0, offset,
+                                   ac_glc | ac_slc, false);
+}
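
One detail worth spelling out in radv_emit_stream_output() above: when the target lacks 3-component buffer operations, the three-dword case is padded with an undef lane and gathered as a 4-element vector, while the buffer store still writes only num_comps dwords. A standalone sketch of that rounding (util_next_power_of_two() is reimplemented here and the vec3-support flag is just a plain variable, not the real ac_has_vec3_support() query):

#include <stdio.h>

static unsigned next_power_of_two(unsigned x)
{
        unsigned p = 1;
        while (p < x)
                p <<= 1;
        return p;
}

int main(void)
{
        int has_vec3_support = 0; /* hypothetical: chips without vec3 loads/stores */

        for (unsigned num_comps = 2; num_comps <= 4; num_comps++) {
                unsigned vec_size = has_vec3_support ? num_comps
                                                     : next_power_of_two(num_comps);
                /* The gathered vector may be padded (3 -> 4), but the store
                 * width passed to the buffer intrinsic stays num_comps. */
                printf("%u components: gather v%u, store %u dwords\n",
                       num_comps, vec_size, num_comps);
        }
        return 0;
}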
+
+static void
+radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
+{
+       struct ac_build_if_state if_ctx;
+       int i;
+
+       /* Get bits [22:16], i.e. (streamout_config >> 16) & 127; */
+       assert(ctx->streamout_config);
+       LLVMValueRef so_vtx_count =
+               ac_build_bfe(&ctx->ac, ctx->streamout_config,
+                            LLVMConstInt(ctx->ac.i32, 16, false),
+                            LLVMConstInt(ctx->ac.i32, 7, false), false);
+
+       LLVMValueRef tid = ac_get_thread_id(&ctx->ac);
+
+       /* can_emit = tid < so_vtx_count; */
+       LLVMValueRef can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
+                                             tid, so_vtx_count, "");
+
+       /* Emit the streamout code conditionally. This actually avoids
+        * out-of-bounds buffer access. The hw tells us via the SGPR
+        * (so_vtx_count) which threads are allowed to emit streamout data.
+        */
+       ac_nir_build_if(&if_ctx, ctx, can_emit);
+       {
+               /* The buffer offset is computed as follows:
+                *   ByteOffset = streamout_offset[buffer_id]*4 +
+                *                (streamout_write_index + thread_id)*stride[buffer_id] +
+                *                attrib_offset
+                */
+               LLVMValueRef so_write_index = ctx->streamout_write_idx;
+
+               /* Compute (streamout_write_index + thread_id). */
+               so_write_index =
+                       LLVMBuildAdd(ctx->ac.builder, so_write_index, tid, "");
+
+               /* Load the descriptor and compute the write offset for each
+                * enabled buffer.
+                */
+               LLVMValueRef so_write_offset[4] = {};
+               LLVMValueRef so_buffers[4] = {};
+               LLVMValueRef buf_ptr = ctx->streamout_buffers;
+
+               for (i = 0; i < 4; i++) {
+                       uint16_t stride = ctx->shader_info->info.so.strides[i];
+
+                       if (!stride)
+                               continue;
+
+                       LLVMValueRef offset =
+                               LLVMConstInt(ctx->ac.i32, i, false);
+
+                       so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac,
+                                                             buf_ptr, offset);
+
+                       LLVMValueRef so_offset = ctx->streamout_offset[i];
+
+                       so_offset = LLVMBuildMul(ctx->ac.builder, so_offset,
+                                                LLVMConstInt(ctx->ac.i32, 4, false), "");
+
+                       so_write_offset[i] =
+                               ac_build_imad(&ctx->ac, so_write_index,
+                                             LLVMConstInt(ctx->ac.i32,
+                                                          stride * 4, false),
+                                             so_offset);
+               }
+
+               /* Write streamout data. */
+               for (i = 0; i < ctx->shader_info->info.so.num_outputs; i++) {
+                       struct radv_stream_output *output =
+                               &ctx->shader_info->info.so.outputs[i];
+
+                       if (stream != output->stream)
+                               continue;
+
+                       radv_emit_stream_output(ctx, so_buffers,
+                                               so_write_offset, output);
+               }
+       }
+       ac_nir_build_endif(&if_ctx);
+}
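
The ByteOffset formula quoted in the comment inside radv_emit_streamout() is plain integer arithmetic; here is a standalone worked example with invented register values (offsets and strides in dwords, converted to bytes the same way as above):

#include <stdio.h>

int main(void)
{
        /* Hypothetical values for one enabled streamout buffer. */
        unsigned streamout_offset = 8;          /* buffer offset SGPR, in dwords */
        unsigned streamout_write_index = 100;   /* write index SGPR */
        unsigned thread_id = 17;                /* lane within the wave */
        unsigned stride_dw = 4;                 /* vertex stride, in dwords */
        unsigned attrib_offset = 8;             /* output offset within the vertex, in bytes */

        unsigned byte_offset = streamout_offset * 4 +
                               (streamout_write_index + thread_id) * (stride_dw * 4) +
                               attrib_offset;

        printf("byte offset = %u\n", byte_offset); /* 32 + 117 * 16 + 8 = 1912 */
        return 0;
}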
+
 static void
 handle_vs_outputs_post(struct radv_shader_context *ctx,
                       bool export_prim_id, bool export_layer_id,
+                      bool export_clip_dists,
                       struct radv_vs_output_info *outinfo)
 {
        uint32_t param_count = 0;
@@ -2468,51 +2921,43 @@ handle_vs_outputs_post(struct radv_shader_context *ctx,
        memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
               sizeof(outinfo->vs_output_param_offset));
 
-       if (ctx->output_mask & (1ull << VARYING_SLOT_CLIP_DIST0)) {
-               unsigned output_usage_mask, length;
-               LLVMValueRef slots[8];
-               unsigned j;
-
-               if (ctx->stage == MESA_SHADER_VERTEX &&
-                   !ctx->is_gs_copy_shader) {
-                       output_usage_mask =
-                               ctx->shader_info->info.vs.output_usage_mask[VARYING_SLOT_CLIP_DIST0];
-               } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
-                       output_usage_mask =
-                               ctx->shader_info->info.tes.output_usage_mask[VARYING_SLOT_CLIP_DIST0];
-               } else {
-                       assert(ctx->is_gs_copy_shader);
-                       output_usage_mask =
-                               ctx->shader_info->info.gs.output_usage_mask[VARYING_SLOT_CLIP_DIST0];
-               }
+       for (unsigned location = VARYING_SLOT_CLIP_DIST0; location <= VARYING_SLOT_CLIP_DIST1; ++location) {
+               if (ctx->output_mask & (1ull << location)) {
+                       unsigned output_usage_mask, length;
+                       LLVMValueRef slots[4];
+                       unsigned j;
+
+                       if (ctx->stage == MESA_SHADER_VERTEX &&
+                           !ctx->is_gs_copy_shader) {
+                               output_usage_mask =
+                                       ctx->shader_info->info.vs.output_usage_mask[location];
+                       } else if (ctx->stage == MESA_SHADER_TESS_EVAL) {
+                               output_usage_mask =
+                                       ctx->shader_info->info.tes.output_usage_mask[location];
+                       } else {
+                               assert(ctx->is_gs_copy_shader);
+                               output_usage_mask =
+                                       ctx->shader_info->info.gs.output_usage_mask[location];
+                       }
 
-               length = util_last_bit(output_usage_mask);
+                       length = util_last_bit(output_usage_mask);
 
-               i = VARYING_SLOT_CLIP_DIST0;
-               for (j = 0; j < length; j++)
-                       slots[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, i, j));
+                       for (j = 0; j < length; j++)
+                               slots[j] = ac_to_float(&ctx->ac, radv_load_output(ctx, location, j));
 
-               for (i = length; i < 8; i++)
-                       slots[i] = LLVMGetUndef(ctx->ac.f32);
+                       for (i = length; i < 4; i++)
+                               slots[i] = LLVMGetUndef(ctx->ac.f32);
 
-               if (length > 4) {
-                       target = V_008DFC_SQ_EXP_POS + 3;
-                       si_llvm_init_export_args(ctx, &slots[4], 0xf, target, &args);
+                       target = V_008DFC_SQ_EXP_POS + 2 + (location - VARYING_SLOT_CLIP_DIST0);
+                       si_llvm_init_export_args(ctx, &slots[0], 0xf, target, &args);
                        memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
-                              &args, sizeof(args));
-               }
-
-               target = V_008DFC_SQ_EXP_POS + 2;
-               si_llvm_init_export_args(ctx, &slots[0], 0xf, target, &args);
-               memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
-                      &args, sizeof(args));
+                              &args, sizeof(args));
 
-               /* Export the clip/cull distances values to the next stage. */
-               radv_export_param(ctx, param_count, &slots[0], 0xf);
-               outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST0] = param_count++;
-               if (length > 4) {
-                       radv_export_param(ctx, param_count, &slots[4], 0xf);
-                       outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST1] = param_count++;
+                       if (export_clip_dists) {
+                               /* Export the clip/cull distances values to the next stage. */
+                               radv_export_param(ctx, param_count, &slots[0], 0xf);
+                               outinfo->vs_output_param_offset[location] = param_count++;
+                       }
                }
        }
 
@@ -2538,6 +2983,12 @@ handle_vs_outputs_post(struct radv_shader_context *ctx,
                viewport_index_value = radv_load_output(ctx, VARYING_SLOT_VIEWPORT, 0);
        }
 
+       if (ctx->shader_info->info.so.num_outputs &&
+           !ctx->is_gs_copy_shader) {
+               /* The GS copy shader emission already emits streamout. */
+               radv_emit_streamout(ctx, 0);
+       }
+
        if (outinfo->writes_pointsize ||
            outinfo->writes_layer ||
            outinfo->writes_viewport_index) {
@@ -2582,6 +3033,14 @@ handle_vs_outputs_post(struct radv_shader_context *ctx,
                        num_pos_exports++;
        }
 
+       /* Navi10-14 skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
+        * Setting valid_mask=1 prevents it and has no other effect.
+        */
+       if (ctx->ac.family == CHIP_NAVI10 ||
+           ctx->ac.family == CHIP_NAVI12 ||
+           ctx->ac.family == CHIP_NAVI14)
+               pos_args[0].valid_mask = 1;
+
        pos_idx = 0;
        for (i = 0; i < 4; i++) {
                if (!pos_args[i].out[0])
@@ -2631,8 +3090,6 @@ handle_vs_outputs_post(struct radv_shader_context *ctx,
                LLVMValueRef values[4];
 
                values[0] = ctx->vs_prim_id;
-               ctx->shader_info->vs.vgpr_comp_cnt = MAX2(2,
-                                                         ctx->shader_info->vs.vgpr_comp_cnt);
                for (unsigned j = 1; j < 4; j++)
                        values[j] = ctx->ac.f32_0;
 
@@ -2667,28 +3124,14 @@ handle_es_outputs_post(struct radv_shader_context *ctx,
        LLVMValueRef lds_base = NULL;
 
        for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
-               unsigned output_usage_mask;
                int param_index;
-               int length = 4;
 
                if (!(ctx->output_mask & (1ull << i)))
                        continue;
 
-               if (ctx->stage == MESA_SHADER_VERTEX) {
-                       output_usage_mask =
-                               ctx->shader_info->info.vs.output_usage_mask[i];
-               } else {
-                       assert(ctx->stage == MESA_SHADER_TESS_EVAL);
-                       output_usage_mask =
-                               ctx->shader_info->info.tes.output_usage_mask[i];
-               }
-
-               if (i == VARYING_SLOT_CLIP_DIST0)
-                       length = util_last_bit(output_usage_mask);
-
                param_index = shader_io_get_unique_index(i);
 
-               max_output_written = MAX2(param_index + (length > 4), max_output_written);
+               max_output_written = MAX2(param_index, max_output_written);
        }
 
        outinfo->esgs_itemsize = (max_output_written + 1) * 16;
@@ -2709,7 +3152,6 @@ handle_es_outputs_post(struct radv_shader_context *ctx,
                LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
                unsigned output_usage_mask;
                int param_index;
-               int length = 4;
 
                if (!(ctx->output_mask & (1ull << i)))
                        continue;
@@ -2723,9 +3165,6 @@ handle_es_outputs_post(struct radv_shader_context *ctx,
                                ctx->shader_info->info.tes.output_usage_mask[i];
                }
 
-               if (i == VARYING_SLOT_CLIP_DIST0)
-                       length = util_last_bit(output_usage_mask);
-
                param_index = shader_io_get_unique_index(i);
 
                if (lds_base) {
@@ -2734,7 +3173,7 @@ handle_es_outputs_post(struct radv_shader_context *ctx,
                                               "");
                }
 
-               for (j = 0; j < length; j++) {
+               for (j = 0; j < 4; j++) {
                        if (!(output_usage_mask & (1 << j)))
                                continue;
 
@@ -2755,7 +3194,7 @@ handle_es_outputs_post(struct radv_shader_context *ctx,
                                                            out_val, 1,
                                                            NULL, ctx->es2gs_offset,
                                                            (4 * param_index + j) * 4,
-                                                           1, 1, true, true);
+                                                           ac_glc | ac_slc, true);
                        }
                }
        }
@@ -2771,22 +3210,16 @@ handle_ls_outputs_post(struct radv_shader_context *ctx)
                                                 vertex_dw_stride, "");
 
        for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
-               unsigned output_usage_mask =
-                       ctx->shader_info->info.vs.output_usage_mask[i];
                LLVMValueRef *out_ptr = &ctx->abi.outputs[i * 4];
-               int length = 4;
 
                if (!(ctx->output_mask & (1ull << i)))
                        continue;
 
-               if (i == VARYING_SLOT_CLIP_DIST0)
-                       length = util_last_bit(output_usage_mask);
-
                int param = shader_io_get_unique_index(i);
                LLVMValueRef dw_addr = LLVMBuildAdd(ctx->ac.builder, base_dw_addr,
                                                    LLVMConstInt(ctx->ac.i32, param * 4, false),
                                                    "");
-               for (unsigned j = 0; j < length; j++) {
+               for (unsigned j = 0; j < 4; j++) {
                        LLVMValueRef value = LLVMBuildLoad(ctx->ac.builder, out_ptr[j], "");
                        value = ac_to_integer(&ctx->ac, value);
                        value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
@@ -2796,6 +3229,168 @@ handle_ls_outputs_post(struct radv_shader_context *ctx)
        }
 }
 
+static LLVMValueRef get_wave_id_in_tg(struct radv_shader_context *ctx)
+{
+       return ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 24, 4);
+}
+
+static LLVMValueRef ngg_get_vtx_cnt(struct radv_shader_context *ctx)
+{
+       return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
+                           LLVMConstInt(ctx->ac.i32, 12, false),
+                           LLVMConstInt(ctx->ac.i32, 9, false),
+                           false);
+}
+
+static LLVMValueRef ngg_get_prim_cnt(struct radv_shader_context *ctx)
+{
+       return ac_build_bfe(&ctx->ac, ctx->gs_tg_info,
+                           LLVMConstInt(ctx->ac.i32, 22, false),
+                           LLVMConstInt(ctx->ac.i32, 9, false),
+                           false);
+}
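
ngg_get_vtx_cnt() and ngg_get_prim_cnt() are just bitfield extracts from the gs_tg_info SGPR (9-bit fields at offsets 12 and 22, matching the ac_build_bfe() arguments above). A plain-C equivalent of those two extracts, with an invented SGPR value:

#include <stdint.h>
#include <stdio.h>

static unsigned bfe(uint32_t value, unsigned offset, unsigned width)
{
        return (value >> offset) & ((1u << width) - 1);
}

int main(void)
{
        /* Hypothetical gs_tg_info: 96 vertices and 94 primitives in the group. */
        uint32_t gs_tg_info = (96u << 12) | (94u << 22);

        printf("vertices   = %u\n", bfe(gs_tg_info, 12, 9)); /* 96 */
        printf("primitives = %u\n", bfe(gs_tg_info, 22, 9)); /* 94 */
        return 0;
}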
+
+/* Send GS Alloc Req message from the first wave of the group to SPI.
+ * Message payload is:
+ * - bits 0..10: vertices in group
+ * - bits 12..22: primitives in group
+ */
+static void build_sendmsg_gs_alloc_req(struct radv_shader_context *ctx,
+                                      LLVMValueRef vtx_cnt,
+                                      LLVMValueRef prim_cnt)
+{
+       LLVMBuilderRef builder = ctx->ac.builder;
+       LLVMValueRef tmp;
+
+       tmp = LLVMBuildICmp(builder, LLVMIntEQ, get_wave_id_in_tg(ctx), ctx->ac.i32_0, "");
+       ac_build_ifcc(&ctx->ac, tmp, 5020);
+
+       tmp = LLVMBuildShl(builder, prim_cnt, LLVMConstInt(ctx->ac.i32, 12, false),"");
+       tmp = LLVMBuildOr(builder, tmp, vtx_cnt, "");
+       ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_ALLOC_REQ, tmp);
+
+       ac_build_endif(&ctx->ac, 5020);
+}
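
The m0 payload assembled in build_sendmsg_gs_alloc_req() is the primitive count shifted into bits 12 and up, OR'd with the vertex count, matching the comment above the function. A standalone version with made-up counts:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t vtx_cnt = 96;  /* hypothetical vertices in group */
        uint32_t prim_cnt = 94; /* hypothetical primitives in group */

        uint32_t msg = (prim_cnt << 12) | vtx_cnt;

        printf("GS_ALLOC_REQ payload = 0x%08x\n", msg); /* 0x0005e060 */
        return 0;
}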
+
+struct ngg_prim {
+       unsigned num_vertices;
+       LLVMValueRef isnull;
+       LLVMValueRef index[3];
+       LLVMValueRef edgeflag[3];
+};
+
+static void build_export_prim(struct radv_shader_context *ctx,
+                             const struct ngg_prim *prim)
+{
+       LLVMBuilderRef builder = ctx->ac.builder;
+       struct ac_export_args args;
+       LLVMValueRef tmp;
+
+       tmp = LLVMBuildZExt(builder, prim->isnull, ctx->ac.i32, "");
+       args.out[0] = LLVMBuildShl(builder, tmp, LLVMConstInt(ctx->ac.i32, 31, false), "");
+
+       for (unsigned i = 0; i < prim->num_vertices; ++i) {
+               tmp = LLVMBuildShl(builder, prim->index[i],
+                                  LLVMConstInt(ctx->ac.i32, 10 * i, false), "");
+               args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
+               tmp = LLVMBuildZExt(builder, prim->edgeflag[i], ctx->ac.i32, "");
+               tmp = LLVMBuildShl(builder, tmp,
+                                  LLVMConstInt(ctx->ac.i32, 10 * i + 9, false), "");
+               args.out[0] = LLVMBuildOr(builder, args.out[0], tmp, "");
+       }
+
+       args.out[0] = LLVMBuildBitCast(builder, args.out[0], ctx->ac.f32, "");
+       args.out[1] = LLVMGetUndef(ctx->ac.f32);
+       args.out[2] = LLVMGetUndef(ctx->ac.f32);
+       args.out[3] = LLVMGetUndef(ctx->ac.f32);
+
+       args.target = V_008DFC_SQ_EXP_PRIM;
+       args.enabled_channels = 1;
+       args.done = true;
+       args.valid_mask = false;
+       args.compr = false;
+
+       ac_build_export(&ctx->ac, &args);
+}
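
build_export_prim() packs three vertex indices and their edge flags into a single export dword using the layout described further down in handle_ngg_outputs_post(): index i at bits 10*i, edge flag i at bit 10*i + 9, and the null-primitive bit at 31. A standalone sketch with invented indices:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t index[3]    = { 5, 6, 7 }; /* hypothetical vertex indices */
        uint32_t edgeflag[3] = { 0, 1, 0 };
        uint32_t isnull      = 0;

        uint32_t prim = isnull << 31;
        for (unsigned i = 0; i < 3; i++) {
                prim |= index[i] << (10 * i);
                prim |= edgeflag[i] << (10 * i + 9);
        }

        printf("primitive export dword = 0x%08x\n", prim); /* 0x00781805 */
        return 0;
}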
+
+static void
+handle_ngg_outputs_post(struct radv_shader_context *ctx)
+{
+       LLVMBuilderRef builder = ctx->ac.builder;
+       struct ac_build_if_state if_state;
+       unsigned num_vertices = 3;
+       LLVMValueRef tmp;
+
+       assert(ctx->stage == MESA_SHADER_VERTEX && !ctx->is_gs_copy_shader);
+
+       LLVMValueRef prims_in_wave = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 8, 8);
+       LLVMValueRef vtx_in_wave = ac_unpack_param(&ctx->ac, ctx->merged_wave_info, 0, 8);
+       LLVMValueRef is_gs_thread = LLVMBuildICmp(builder, LLVMIntULT,
+                                                 ac_get_thread_id(&ctx->ac), prims_in_wave, "");
+       LLVMValueRef is_es_thread = LLVMBuildICmp(builder, LLVMIntULT,
+                                                 ac_get_thread_id(&ctx->ac), vtx_in_wave, "");
+       LLVMValueRef vtxindex[] = {
+               ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[0], 0, 16),
+               ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[0], 16, 16),
+               ac_unpack_param(&ctx->ac, ctx->gs_vtx_offset[2], 0, 16),
+       };
+
+       /* TODO: streamout */
+
+       /* TODO: VS primitive ID */
+       if (ctx->options->key.vs.out.export_prim_id)
+               assert(0);
+
+       /* TODO: primitive culling */
+
+       build_sendmsg_gs_alloc_req(ctx, ngg_get_vtx_cnt(ctx), ngg_get_prim_cnt(ctx));
+
+       /* TODO: streamout queries */
+       /* Export primitive data to the index buffer. Format is:
+        *  - bits 0..8: index 0
+        *  - bit 9: edge flag 0
+        *  - bits 10..18: index 1
+        *  - bit 19: edge flag 1
+        *  - bits 20..28: index 2
+        *  - bit 29: edge flag 2
+        *  - bit 31: null primitive (skip)
+        *
+        * For the first version, we will always build up all three indices
+        * independent of the primitive type. The additional garbage data
+        * shouldn't hurt.
+        *
+        * TODO: culling depends on the primitive type, so can have some
+        * interaction here.
+        */
+       ac_nir_build_if(&if_state, ctx, is_gs_thread);
+       {
+               struct ngg_prim prim = {};
+
+               prim.num_vertices = num_vertices;
+               prim.isnull = ctx->ac.i1false;
+               memcpy(prim.index, vtxindex, sizeof(vtxindex[0]) * 3);
+
+               for (unsigned i = 0; i < num_vertices; ++i) {
+                       tmp = LLVMBuildLShr(builder, ctx->abi.gs_invocation_id,
+                                           LLVMConstInt(ctx->ac.i32, 8 + i, false), "");
+                       prim.edgeflag[i] = LLVMBuildTrunc(builder, tmp, ctx->ac.i1, "");
+               }
+
+               build_export_prim(ctx, &prim);
+       }
+       ac_nir_build_endif(&if_state);
+
+       /* Export per-vertex data (positions and parameters). */
+       ac_nir_build_if(&if_state, ctx, is_es_thread);
+       {
+               handle_vs_outputs_post(ctx, ctx->options->key.vs.out.export_prim_id,
+                                      ctx->options->key.vs.out.export_layer_id,
+                                      ctx->options->key.vs.out.export_clip_dists,
+                                      &ctx->shader_info->vs.outinfo);
+       }
+       ac_nir_build_endif(&if_state);
+}
+
 static void
 write_tess_factors(struct radv_shader_context *ctx)
 {
@@ -2885,7 +3480,7 @@ write_tess_factors(struct radv_shader_context *ctx)
                                  LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
        unsigned tf_offset = 0;
 
-       if (ctx->options->chip_class <= VI) {
+       if (ctx->options->chip_class <= GFX8) {
                ac_nir_build_if(&inner_if_ctx, ctx,
                                LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
                                              rel_patch_id, ctx->ac.i32_0, ""));
@@ -2894,7 +3489,7 @@ write_tess_factors(struct radv_shader_context *ctx)
                ac_build_buffer_store_dword(&ctx->ac, buffer,
                                            LLVMConstInt(ctx->ac.i32, 0x80000000, false),
                                            1, ctx->ac.i32_0, tf_base,
-                                           0, 1, 0, true, false);
+                                           0, ac_glc, false);
                tf_offset += 4;
 
                ac_nir_build_endif(&inner_if_ctx);
@@ -2903,11 +3498,11 @@ write_tess_factors(struct radv_shader_context *ctx)
        /* Store the tessellation factors. */
        ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
                                    MIN2(stride, 4), byteoffset, tf_base,
-                                   tf_offset, 1, 0, true, false);
+                                   tf_offset, ac_glc, false);
        if (vec1)
                ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
                                            stride - 4, byteoffset, tf_base,
-                                           16 + tf_offset, 1, 0, true, false);
+                                           16 + tf_offset, ac_glc, false);
 
        //store to offchip for TES to read - only if TES reads them
        if (ctx->options->key.tcs.tes_reads_tess_factors) {
@@ -2924,7 +3519,7 @@ write_tess_factors(struct radv_shader_context *ctx)
 
                ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
                                            outer_comps, tf_outer_offset,
-                                           ctx->oc_lds, 0, 1, 0, true, false);
+                                           ctx->oc_lds, 0, ac_glc, false);
                if (inner_comps) {
                        param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
                        tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
@@ -2934,7 +3529,7 @@ write_tess_factors(struct radv_shader_context *ctx)
                                ac_build_gather_values(&ctx->ac, inner, inner_comps);
                        ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
                                                    inner_comps, tf_inner_offset,
-                                                   ctx->oc_lds, 0, 1, 0, true, false);
+                                                   ctx->oc_lds, 0, ac_glc, false);
                }
        }
        ac_nir_build_endif(&if_ctx);
@@ -3050,13 +3645,16 @@ handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
 
        switch (ctx->stage) {
        case MESA_SHADER_VERTEX:
-               if (ctx->options->key.vs.as_ls)
+               if (ctx->options->key.vs.out.as_ls)
                        handle_ls_outputs_post(ctx);
-               else if (ctx->options->key.vs.as_es)
+               else if (ctx->options->key.vs.out.as_ngg)
+                       break; /* handled outside of the shader body */
+               else if (ctx->options->key.vs.out.as_es)
                        handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
                else
-                       handle_vs_outputs_post(ctx, ctx->options->key.vs.export_prim_id,
-                                              ctx->options->key.vs.export_layer_id,
+                       handle_vs_outputs_post(ctx, ctx->options->key.vs.out.export_prim_id,
+                                              ctx->options->key.vs.out.export_layer_id,
+                                              ctx->options->key.vs.out.export_clip_dists,
                                               &ctx->shader_info->vs.outinfo);
                break;
        case MESA_SHADER_FRAGMENT:
@@ -3069,11 +3667,12 @@ handle_shader_outputs_post(struct ac_shader_abi *abi, unsigned max_outputs,
                handle_tcs_outputs_post(ctx);
                break;
        case MESA_SHADER_TESS_EVAL:
-               if (ctx->options->key.tes.as_es)
+               if (ctx->options->key.tes.out.as_es)
                        handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
                else
-                       handle_vs_outputs_post(ctx, ctx->options->key.tes.export_prim_id,
-                                              ctx->options->key.tes.export_layer_id,
+                       handle_vs_outputs_post(ctx, ctx->options->key.tes.out.export_prim_id,
+                                              ctx->options->key.tes.out.export_layer_id,
+                                              ctx->options->key.tes.out.export_clip_dists,
                                               &ctx->shader_info->tes.outinfo);
                break;
        default:
@@ -3103,13 +3702,13 @@ ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
        case MESA_SHADER_GEOMETRY:
                return;
        case MESA_SHADER_VERTEX:
-               if (ctx->options->key.vs.as_ls ||
-                   ctx->options->key.vs.as_es)
+               if (ctx->options->key.vs.out.as_ls ||
+                   ctx->options->key.vs.out.as_es)
                        return;
                outinfo = &ctx->shader_info->vs.outinfo;
                break;
        case MESA_SHADER_TESS_EVAL:
-               if (ctx->options->key.vs.as_es)
+               if (ctx->options->key.vs.out.as_es)
                        return;
                outinfo = &ctx->shader_info->tes.outinfo;
                break;
@@ -3127,9 +3726,9 @@ ac_nir_eliminate_const_vs_outputs(struct radv_shader_context *ctx)
 static void
 ac_setup_rings(struct radv_shader_context *ctx)
 {
-       if (ctx->options->chip_class <= VI &&
+       if (ctx->options->chip_class <= GFX8 &&
            (ctx->stage == MESA_SHADER_GEOMETRY ||
-            ctx->options->key.vs.as_es || ctx->options->key.tes.as_es)) {
+            ctx->options->key.vs.out.as_es || ctx->options->key.tes.out.as_es)) {
                unsigned ring = ctx->stage == MESA_SHADER_GEOMETRY ? RING_ESGS_GS
                                                                   : RING_ESGS_VS;
                LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, ring, false);
@@ -3140,24 +3739,76 @@ ac_setup_rings(struct radv_shader_context *ctx)
        }
 
        if (ctx->is_gs_copy_shader) {
-               ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_VS, false));
+               ctx->gsvs_ring[0] =
+                       ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
+                                             LLVMConstInt(ctx->ac.i32,
+                                                          RING_GSVS_VS, false));
        }
+
        if (ctx->stage == MESA_SHADER_GEOMETRY) {
-               LLVMValueRef tmp;
-               uint32_t num_entries = 64;
-               LLVMValueRef gsvs_ring_stride = LLVMConstInt(ctx->ac.i32, ctx->max_gsvs_emit_size, false);
-               LLVMValueRef gsvs_ring_desc = LLVMConstInt(ctx->ac.i32, ctx->max_gsvs_emit_size << 16, false);
-               ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_GS, false));
+               /* The conceptual layout of the GSVS ring is
+                *   v0c0 .. vLc0 v0c1 .. vLc1 ..
+                * but the real memory layout is swizzled across
+                * threads:
+                *   t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
+                *   t16v0c0 ..
+                * Override the buffer descriptor accordingly.
+                */
+               LLVMTypeRef v2i64 = LLVMVectorType(ctx->ac.i64, 2);
+               uint64_t stream_offset = 0;
+               unsigned num_records = 64;
+               LLVMValueRef base_ring;
+
+               base_ring =
+                       ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
+                                             LLVMConstInt(ctx->ac.i32,
+                                                          RING_GSVS_GS, false));
 
-               ctx->gsvs_ring = LLVMBuildBitCast(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.v4i32, "");
+               for (unsigned stream = 0; stream < 4; stream++) {
+                       unsigned num_components, stride;
+                       LLVMValueRef ring, tmp;
 
-               tmp = LLVMConstInt(ctx->ac.i32, num_entries, false);
-               if (ctx->options->chip_class >= VI)
-                       tmp = LLVMBuildMul(ctx->ac.builder, gsvs_ring_stride, tmp, "");
-               ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, tmp, LLVMConstInt(ctx->ac.i32, 2, false), "");
-               tmp = LLVMBuildExtractElement(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.i32_1, "");
-               tmp = LLVMBuildOr(ctx->ac.builder, tmp, gsvs_ring_desc, "");
-               ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, tmp, ctx->ac.i32_1, "");
+                       num_components =
+                               ctx->shader_info->info.gs.num_stream_output_components[stream];
+
+                       if (!num_components)
+                               continue;
+
+                       stride = 4 * num_components * ctx->gs_max_out_vertices;
+
+                       /* Limit on the stride field for <= GFX7. */
+                       assert(stride < (1 << 14));
+
+                       ring = LLVMBuildBitCast(ctx->ac.builder,
+                                               base_ring, v2i64, "");
+                       tmp = LLVMBuildExtractElement(ctx->ac.builder,
+                                                     ring, ctx->ac.i32_0, "");
+                       tmp = LLVMBuildAdd(ctx->ac.builder, tmp,
+                                          LLVMConstInt(ctx->ac.i64,
+                                                       stream_offset, 0), "");
+                       ring = LLVMBuildInsertElement(ctx->ac.builder,
+                                                     ring, tmp, ctx->ac.i32_0, "");
+
+                       stream_offset += stride * 64;
+
+                       ring = LLVMBuildBitCast(ctx->ac.builder, ring,
+                                               ctx->ac.v4i32, "");
+
+                       tmp = LLVMBuildExtractElement(ctx->ac.builder, ring,
+                                                     ctx->ac.i32_1, "");
+                       tmp = LLVMBuildOr(ctx->ac.builder, tmp,
+                                         LLVMConstInt(ctx->ac.i32,
+                                                      S_008F04_STRIDE(stride), false), "");
+                       ring = LLVMBuildInsertElement(ctx->ac.builder, ring, tmp,
+                                                     ctx->ac.i32_1, "");
+
+                       ring = LLVMBuildInsertElement(ctx->ac.builder, ring,
+                                                     LLVMConstInt(ctx->ac.i32,
+                                                                  num_records, false),
+                                                     LLVMConstInt(ctx->ac.i32, 2, false), "");
+
+                       ctx->gsvs_ring[stream] = ring;
+               }
        }
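
The per-stream descriptor patching above boils down to two numbers per stream: a stride of 4 bytes * components * max emitted vertices, and a base offset that advances by stride * 64 threads for each enabled stream because of the thread swizzle. A rough standalone model of that bookkeeping with invented component counts (not the real descriptor writes):

#include <stdio.h>

int main(void)
{
        /* Hypothetical GS: 6 components on stream 0, 2 on stream 1. */
        unsigned num_stream_components[4] = { 6, 2, 0, 0 };
        unsigned gs_max_out_vertices = 4;
        unsigned long long stream_offset = 0;

        for (unsigned stream = 0; stream < 4; stream++) {
                unsigned num_components = num_stream_components[stream];
                if (!num_components)
                        continue;

                unsigned stride = 4 * num_components * gs_max_out_vertices;

                printf("stream %u: base offset %llu bytes, stride %u bytes\n",
                       stream, stream_offset, stride);

                /* The ring is swizzled across 64 threads, so the next
                 * enabled stream starts 64 strides further in. */
                stream_offset += (unsigned long long)stride * 64;
        }
        return 0;
}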
 
        if (ctx->stage == MESA_SHADER_TESS_CTRL ||
@@ -3167,13 +3818,13 @@ ac_setup_rings(struct radv_shader_context *ctx)
        }
 }
 
-static unsigned
-ac_nir_get_max_workgroup_size(enum chip_class chip_class,
-                             const struct nir_shader *nir)
+unsigned
+radv_nir_get_max_workgroup_size(enum chip_class chip_class,
+                               const struct nir_shader *nir)
 {
        switch (nir->info.stage) {
        case MESA_SHADER_TESS_CTRL:
-               return chip_class >= CIK ? 128 : 64;
+               return chip_class >= GFX7 ? 128 : 64;
        case MESA_SHADER_GEOMETRY:
                return chip_class >= GFX9 ? 128 : 64;
        case MESA_SHADER_COMPUTE:
@@ -3234,6 +3885,8 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
 
        memset(shader_info, 0, sizeof(*shader_info));
 
+       radv_nir_shader_info_init(&shader_info->info);
+
        for(int i = 0; i < shader_count; ++i)
                radv_nir_shader_info_pass(shaders[i], options, &shader_info->info);
 
@@ -3245,10 +3898,17 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
        ctx.max_workgroup_size = 0;
        for (int i = 0; i < shader_count; ++i) {
                ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
-                                             ac_nir_get_max_workgroup_size(ctx.options->chip_class,
+                                             radv_nir_get_max_workgroup_size(ctx.options->chip_class,
                                                                            shaders[i]));
        }
 
+       if (ctx.ac.chip_class >= GFX10) {
+               if (shaders[0]->info.stage == MESA_SHADER_VERTEX &&
+                   options->key.vs.out.as_ngg) {
+                       ctx.max_workgroup_size = 128;
+               }
+       }
+
        create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
                        shader_count >= 2 ? shaders[shader_count - 2]->info.stage  : MESA_SHADER_VERTEX);
 
@@ -3260,12 +3920,20 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
        ctx.abi.load_sampler_desc = radv_get_sampler_desc;
        ctx.abi.load_resource = radv_load_resource;
        ctx.abi.clamp_shadow_reference = false;
-       ctx.abi.gfx9_stride_size_workaround = ctx.ac.chip_class == GFX9;
+       ctx.abi.gfx9_stride_size_workaround = ctx.ac.chip_class == GFX9 && HAVE_LLVM < 0x800;
+
+       /* Because the new raw/struct atomic intrinsics are buggy with LLVM 8,
+        * we fall back to the old intrinsics for buffer/image atomic operations,
+        * and thus we need to apply the indexing workaround...
+        */
+       ctx.abi.gfx9_stride_size_workaround_for_atomic = ctx.ac.chip_class == GFX9 && HAVE_LLVM < 0x900;
 
-       if (shader_count >= 2)
+       bool is_ngg = is_pre_gs_stage(shaders[0]->info.stage) && ctx.options->key.vs.out.as_ngg;
+       if (shader_count >= 2 || is_ngg)
                ac_init_exec_full_mask(&ctx.ac);
 
-       if (ctx.ac.chip_class == GFX9 &&
+       if ((ctx.ac.family == CHIP_VEGA10 ||
+            ctx.ac.family == CHIP_RAVEN) &&
            shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
                ac_nir_fixup_ls_hs_input_vgprs(&ctx);
 
@@ -3274,7 +3942,10 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
                ctx.output_mask = 0;
 
                if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
-                       ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.ac.i32, "gs_next_vertex");
+                       for (int i = 0; i < 4; i++) {
+                               ctx.gs_next_vertex[i] =
+                                       ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
+                       }
                        ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
                        ctx.abi.load_inputs = load_gs_input;
                        ctx.abi.emit_primitive = visit_end_primitive;
@@ -3298,15 +3969,6 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
                        ctx.tcs_vertices_per_patch = shaders[i]->info.tess.tcs_vertices_out;
                        ctx.tcs_num_patches = ctx.options->key.tes.num_patches;
                } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
-                       if (shader_info->info.vs.needs_instance_id) {
-                               if (ctx.options->key.vs.as_ls) {
-                                       ctx.shader_info->vs.vgpr_comp_cnt =
-                                               MAX2(2, ctx.shader_info->vs.vgpr_comp_cnt);
-                               } else {
-                                       ctx.shader_info->vs.vgpr_comp_cnt =
-                                               MAX2(1, ctx.shader_info->vs.vgpr_comp_cnt);
-                               }
-                       }
                        ctx.abi.load_base_vertex = radv_load_base_vertex;
                } else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
                        shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
@@ -3333,7 +3995,7 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
                ac_setup_rings(&ctx);
 
                LLVMBasicBlockRef merge_block;
-               if (shader_count >= 2) {
+               if (shader_count >= 2 || is_ngg) {
                        LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
                        LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
                        merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
@@ -3356,11 +4018,19 @@ LLVMModuleRef ac_translate_nir_to_llvm(struct ac_llvm_compiler *ac_llvm,
 
                ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i]);
 
-               if (shader_count >= 2) {
+               if (shader_count >= 2 || is_ngg) {
                        LLVMBuildBr(ctx.ac.builder, merge_block);
                        LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
                }
 
+               /* This needs to be outside the if wrapping the shader body, as sometimes
+                * the HW generates waves with 0 es/vs threads. */
+               if (is_pre_gs_stage(shaders[i]->info.stage) &&
+                   ctx.options->key.vs.out.as_ngg &&
+                   i == shader_count - 1) {
+                       handle_ngg_outputs_post(&ctx);
+               }
+
                if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
                        shader_info->gs.gsvs_vertex_size = ctx.gsvs_vertex_size;
                        shader_info->gs.max_gsvs_emit_size = ctx.max_gsvs_emit_size;
@@ -3403,9 +4073,9 @@ static void ac_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
        LLVMDisposeMessage(description);
 }
 
-static unsigned ac_llvm_compile(LLVMModuleRef M,
-                                struct ac_shader_binary *binary,
-                                struct ac_llvm_compiler *ac_llvm)
+static unsigned radv_llvm_compile(LLVMModuleRef M,
+                                  char **pelf_buffer, size_t *pelf_size,
+                                  struct ac_llvm_compiler *ac_llvm)
 {
        unsigned retval = 0;
        LLVMContextRef llvm_ctx;
@@ -3417,98 +4087,55 @@ static unsigned ac_llvm_compile(LLVMModuleRef M,
                                        &retval);
 
        /* Compile IR*/
-       if (!radv_compile_to_binary(ac_llvm, M, binary))
+       if (!radv_compile_to_elf(ac_llvm, M, pelf_buffer, pelf_size))
                retval = 1;
        return retval;
 }
 
 static void ac_compile_llvm_module(struct ac_llvm_compiler *ac_llvm,
                                   LLVMModuleRef llvm_module,
-                                  struct ac_shader_binary *binary,
-                                  struct ac_shader_config *config,
+                                  struct radv_shader_binary **rbinary,
                                   struct radv_shader_variant_info *shader_info,
                                   gl_shader_stage stage,
                                   const struct radv_nir_compiler_options *options)
 {
+       char *elf_buffer = NULL;
+       size_t elf_size = 0;
+       char *llvm_ir_string = NULL;
        if (options->dump_shader)
                ac_dump_module(llvm_module);
 
-       memset(binary, 0, sizeof(*binary));
-
        if (options->record_llvm_ir) {
                char *llvm_ir = LLVMPrintModuleToString(llvm_module);
-               binary->llvm_ir_string = strdup(llvm_ir);
+               llvm_ir_string = strdup(llvm_ir);
                LLVMDisposeMessage(llvm_ir);
        }
 
-       int v = ac_llvm_compile(llvm_module, binary, ac_llvm);
+       int v = radv_llvm_compile(llvm_module, &elf_buffer, &elf_size, ac_llvm);
        if (v) {
                fprintf(stderr, "compile failed\n");
        }
 
-       if (options->dump_shader)
-               fprintf(stderr, "disasm:\n%s\n", binary->disasm_string);
-
-       ac_shader_binary_read_config(binary, config, 0, options->supports_spill);
-
        LLVMContextRef ctx = LLVMGetModuleContext(llvm_module);
        LLVMDisposeModule(llvm_module);
        LLVMContextDispose(ctx);
 
-       if (stage == MESA_SHADER_FRAGMENT) {
-               shader_info->num_input_vgprs = 0;
-               if (G_0286CC_PERSP_SAMPLE_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 2;
-               if (G_0286CC_PERSP_CENTER_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 2;
-               if (G_0286CC_PERSP_CENTROID_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 2;
-               if (G_0286CC_PERSP_PULL_MODEL_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 3;
-               if (G_0286CC_LINEAR_SAMPLE_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 2;
-               if (G_0286CC_LINEAR_CENTER_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 2;
-               if (G_0286CC_LINEAR_CENTROID_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 2;
-               if (G_0286CC_LINE_STIPPLE_TEX_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 1;
-               if (G_0286CC_POS_X_FLOAT_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 1;
-               if (G_0286CC_POS_Y_FLOAT_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 1;
-               if (G_0286CC_POS_Z_FLOAT_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 1;
-               if (G_0286CC_POS_W_FLOAT_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 1;
-               if (G_0286CC_FRONT_FACE_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 1;
-               if (G_0286CC_ANCILLARY_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 1;
-               if (G_0286CC_SAMPLE_COVERAGE_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 1;
-               if (G_0286CC_POS_FIXED_PT_ENA(config->spi_ps_input_addr))
-                       shader_info->num_input_vgprs += 1;
-       }
-       config->num_vgprs = MAX2(config->num_vgprs, shader_info->num_input_vgprs);
-
-       /* +3 for scratch wave offset and VCC */
-       config->num_sgprs = MAX2(config->num_sgprs,
-                                shader_info->num_input_sgprs + 3);
-
-       /* Enable 64-bit and 16-bit denormals, because there is no performance
-        * cost.
-        *
-        * If denormals are enabled, all floating-point output modifiers are
-        * ignored.
-        *
-        * Don't enable denormals for 32-bit floats, because:
-        * - Floating-point output modifiers would be ignored by the hw.
-        * - Some opcodes don't support denormals, such as v_mad_f32. We would
-        *   have to stop using those.
-        * - SI & CI would be very slow.
-        */
-       config->float_mode |= V_00B028_FP_64_DENORMS;
+       size_t llvm_ir_size = llvm_ir_string ? strlen(llvm_ir_string) : 0;
+       size_t alloc_size = sizeof(struct radv_shader_binary_rtld) + elf_size + llvm_ir_size + 1;
+       struct radv_shader_binary_rtld *rbin = calloc(1, alloc_size);
+       memcpy(rbin->data, elf_buffer, elf_size);
+       if (llvm_ir_string)
+               memcpy(rbin->data + elf_size, llvm_ir_string, llvm_ir_size + 1);
+
+       rbin->base.type = RADV_BINARY_TYPE_RTLD;
+       rbin->base.stage = stage;
+       rbin->base.total_size = alloc_size;
+       rbin->elf_size = elf_size;
+       rbin->llvm_ir_size = llvm_ir_size;
+       *rbinary = &rbin->base;
+
+       free(llvm_ir_string);
+       free(elf_buffer);
 }
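
The allocation above lays out a header, the raw ELF image and then the optional NUL-terminated LLVM IR string in one contiguous block. A simplified standalone model of that sizing (the struct below is a stand-in, not the real radv_shader_binary_rtld definition):

#include <stdio.h>
#include <string.h>

struct fake_rtld_header {
        size_t total_size;
        size_t elf_size;
        size_t llvm_ir_size;
};

int main(void)
{
        size_t elf_size = 4096;                         /* hypothetical ELF image size */
        const char *llvm_ir = "; ModuleID = 'shader'";  /* hypothetical IR dump */
        size_t llvm_ir_size = strlen(llvm_ir);

        /* Header, then ELF bytes, then the IR string plus its NUL byte. */
        size_t alloc_size = sizeof(struct fake_rtld_header) +
                            elf_size + llvm_ir_size + 1;

        printf("total allocation = %zu bytes\n", alloc_size);
        return 0;
}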
 
 static void
@@ -3533,17 +4160,17 @@ ac_fill_shader_info(struct radv_shader_variant_info *shader_info, struct nir_sha
                 shader_info->tes.spacing = nir->info.tess.spacing;
                 shader_info->tes.ccw = nir->info.tess.ccw;
                 shader_info->tes.point_mode = nir->info.tess.point_mode;
-                shader_info->tes.as_es = options->key.tes.as_es;
+                shader_info->tes.as_es = options->key.tes.out.as_es;
+                shader_info->tes.export_prim_id = options->key.tes.out.export_prim_id;
                 break;
         case MESA_SHADER_TESS_CTRL:
                 shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
                 break;
         case MESA_SHADER_VERTEX:
-                shader_info->vs.as_es = options->key.vs.as_es;
-                shader_info->vs.as_ls = options->key.vs.as_ls;
-                /* in LS mode we need at least 1, invocation id needs 2, handled elsewhere */
-                if (options->key.vs.as_ls)
-                        shader_info->vs.vgpr_comp_cnt = MAX2(1, shader_info->vs.vgpr_comp_cnt);
+                shader_info->vs.as_es = options->key.vs.out.as_es;
+                shader_info->vs.as_ls = options->key.vs.out.as_ls;
+                shader_info->vs.export_prim_id = options->key.vs.out.export_prim_id;
+                shader_info->is_ngg = options->key.vs.out.as_ngg;
                 break;
         default:
                 break;
@@ -3552,8 +4179,7 @@ ac_fill_shader_info(struct radv_shader_variant_info *shader_info, struct nir_sha
 
 void
 radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
-                       struct ac_shader_binary *binary,
-                       struct ac_shader_config *config,
+                       struct radv_shader_binary **rbinary,
                        struct radv_shader_variant_info *shader_info,
                        struct nir_shader *const *nir,
                        int nir_count,
@@ -3565,8 +4191,8 @@ radv_compile_nir_shader(struct ac_llvm_compiler *ac_llvm,
        llvm_module = ac_translate_nir_to_llvm(ac_llvm, nir, nir_count, shader_info,
                                               options);
 
-       ac_compile_llvm_module(ac_llvm, llvm_module, binary, config, shader_info,
-                              nir[0]->info.stage, options);
+       ac_compile_llvm_module(ac_llvm, llvm_module, rbinary, shader_info,
+                              nir[nir_count - 1]->info.stage, options);
 
        for (int i = 0; i < nir_count; ++i)
                ac_fill_shader_info(shader_info, nir[i], options);
@@ -3586,56 +4212,98 @@ ac_gs_copy_shader_emit(struct radv_shader_context *ctx)
        LLVMValueRef vtx_offset =
                LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
                             LLVMConstInt(ctx->ac.i32, 4, false), "");
-       unsigned offset = 0;
+       LLVMValueRef stream_id;
 
-       for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
-               unsigned output_usage_mask =
-                       ctx->shader_info->info.gs.output_usage_mask[i];
-               int length = 4;
+       /* Fetch the vertex stream ID. */
+       if (ctx->shader_info->info.so.num_outputs) {
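+               /* With streamout enabled, the stream to copy out is passed
+                * in bits 24:25 of the streamout config SGPR.
+                */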
+               stream_id =
+                       ac_unpack_param(&ctx->ac, ctx->streamout_config, 24, 2);
+       } else {
+               stream_id = ctx->ac.i32_0;
+       }
 
-               if (!(ctx->output_mask & (1ull << i)))
+       LLVMBasicBlockRef end_bb;
+       LLVMValueRef switch_inst;
+
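+       /* Branch on the stream ID: each case copies one stream's outputs
+        * out of the GSVS ring and then jumps to the common end block.
+        */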
+       end_bb = LLVMAppendBasicBlockInContext(ctx->ac.context,
+                                              ctx->main_function, "end");
+       switch_inst = LLVMBuildSwitch(ctx->ac.builder, stream_id, end_bb, 4);
+
+       for (unsigned stream = 0; stream < 4; stream++) {
+               unsigned num_components =
+                       ctx->shader_info->info.gs.num_stream_output_components[stream];
+               LLVMBasicBlockRef bb;
+               unsigned offset;
+
+               if (!num_components)
                        continue;
 
-               if (i == VARYING_SLOT_CLIP_DIST0) {
-                       /* unpack clip and cull from a single set of slots */
-                       length = util_last_bit(output_usage_mask);
-               }
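+               /* Streams other than 0 are only read back when transform
+                * feedback captures them.
+                */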
+               if (stream > 0 && !ctx->shader_info->info.so.num_outputs)
+                       continue;
 
-               for (unsigned j = 0; j < length; j++) {
-                       LLVMValueRef value, soffset;
+               bb = LLVMInsertBasicBlockInContext(ctx->ac.context, end_bb, "out");
+               LLVMAddCase(switch_inst, LLVMConstInt(ctx->ac.i32, stream, 0), bb);
+               LLVMPositionBuilderAtEnd(ctx->ac.builder, bb);
 
-                       if (!(output_usage_mask & (1 << j)))
+               offset = 0;
+               for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
+                       unsigned output_usage_mask =
+                               ctx->shader_info->info.gs.output_usage_mask[i];
+                       unsigned output_stream =
+                               ctx->shader_info->info.gs.output_streams[i];
+                       int length = util_last_bit(output_usage_mask);
+
+                       if (!(ctx->output_mask & (1ull << i)) ||
+                           output_stream != stream)
                                continue;
 
-                       soffset = LLVMConstInt(ctx->ac.i32,
-                                              offset *
-                                              ctx->gs_max_out_vertices * 16 * 4, false);
+                       for (unsigned j = 0; j < length; j++) {
+                               LLVMValueRef value, soffset;
 
-                       offset++;
+                               if (!(output_usage_mask & (1 << j)))
+                                       continue;
+
+                               soffset = LLVMConstInt(ctx->ac.i32,
+                                                      offset *
+                                                      ctx->gs_max_out_vertices * 16 * 4, false);
 
-                       value = ac_build_buffer_load(&ctx->ac, ctx->gsvs_ring,
-                                                    1, ctx->ac.i32_0,
-                                                    vtx_offset, soffset,
-                                                    0, 1, 1, true, false);
+                               offset++;
+
+                               value = ac_build_buffer_load(&ctx->ac,
+                                                            ctx->gsvs_ring[0],
+                                                            1, ctx->ac.i32_0,
+                                                            vtx_offset, soffset,
+                                                            0, ac_glc | ac_slc, true, false);
+
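+                               /* 16-bit outputs occupy a full dword in the
+                                * ring, so truncate them back down.
+                                */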
+                               LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
+                               if (ac_get_type_size(type) == 2) {
+                                       value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
+                                       value = LLVMBuildTrunc(ctx->ac.builder, value, ctx->ac.i16, "");
+                               }
 
-                       LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
-                       if (ac_get_type_size(type) == 2) {
-                               value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
-                               value = LLVMBuildTrunc(ctx->ac.builder, value, ctx->ac.i16, "");
+                               LLVMBuildStore(ctx->ac.builder,
+                                              ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
                        }
+               }
+
+               if (ctx->shader_info->info.so.num_outputs)
+                       radv_emit_streamout(ctx, stream);
 
-                       LLVMBuildStore(ctx->ac.builder,
-                                      ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
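+               /* Only stream 0 can be rasterized, so only its outputs are
+                * exported as position/parameter data.
+                */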
+               if (stream == 0) {
+                       handle_vs_outputs_post(ctx, false, false, true,
+                                              &ctx->shader_info->vs.outinfo);
                }
+
+               LLVMBuildBr(ctx->ac.builder, end_bb);
        }
-       handle_vs_outputs_post(ctx, false, false, &ctx->shader_info->vs.outinfo);
+
+       LLVMPositionBuilderAtEnd(ctx->ac.builder, end_bb);
 }
 
 void
 radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
                            struct nir_shader *geom_shader,
-                           struct ac_shader_binary *binary,
-                           struct ac_shader_config *config,
+                           struct radv_shader_binary **rbinary,
                            struct radv_shader_variant_info *shader_info,
                            const struct radv_nir_compiler_options *options)
 {
@@ -3675,6 +4343,8 @@ radv_compile_gs_copy_shader(struct ac_llvm_compiler *ac_llvm,
 
        ac_llvm_finalize_module(&ctx, ac_llvm->passmgr, options);
 
-       ac_compile_llvm_module(ac_llvm, ctx.ac.module, binary, config, shader_info,
+       ac_compile_llvm_module(ac_llvm, ctx.ac.module, rbinary, shader_info,
                               MESA_SHADER_VERTEX, options);
+       (*rbinary)->is_gs_copy_shader = true;
 }