radv/gfx10: Only set HW edge flags with gs & tess disabled.
[mesa.git] / src / amd / vulkan / radv_pipeline.c
index a92875a4aadcdfcd5bf825cf85855e2d494fa528..5751440f3010353949eb46d850098494965723c0 100644 (file)
@@ -96,6 +96,30 @@ struct radv_gs_state {
        uint32_t lds_size;
 };
 
+struct radv_ngg_state {
+       uint16_t ngg_emit_size; /* in dwords */
+       uint32_t hw_max_esverts;
+       uint32_t max_gsprims;
+       uint32_t max_out_verts;
+       uint32_t prim_amp_factor;
+       uint32_t vgt_esgs_ring_itemsize;
+       bool max_vert_out_per_gs_instance;
+};
+
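+/* An NGG pipeline is one where the last pre-rasterization stage (GS, TES or
+ * VS) was compiled as an NGG primitive shader.
+ */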
+bool radv_pipeline_has_ngg(const struct radv_pipeline *pipeline)
+{
+       struct radv_shader_variant *variant = NULL;
+       if (pipeline->shaders[MESA_SHADER_GEOMETRY])
+               variant = pipeline->shaders[MESA_SHADER_GEOMETRY];
+       else if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
+               variant = pipeline->shaders[MESA_SHADER_TESS_EVAL];
+       else if (pipeline->shaders[MESA_SHADER_VERTEX])
+               variant = pipeline->shaders[MESA_SHADER_VERTEX];
+       else
+               return false;
+       return variant->info.is_ngg;
+}
+
 static void
 radv_pipeline_destroy(struct radv_device *device,
                       struct radv_pipeline *pipeline,
@@ -1583,6 +1607,203 @@ calculate_gs_info(const VkGraphicsPipelineCreateInfo *pCreateInfo,
        return gs;
 }
 
+static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
+                                    unsigned min_verts_per_prim, bool use_adjacency)
+{
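+       /* Even with maximal vertex reuse, the first primitive needs
+        * min_verts_per_prim vertices and each additional one at least one
+        * more (two with adjacency, which halves the reuse budget).
+        * E.g. with max_esverts = 256, min_verts_per_prim = 3 and adjacency:
+        * max_reuse = (256 - 3) / 2 = 126, so at most 127 prims per subgroup.
+        */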
+       unsigned max_reuse = max_esverts - min_verts_per_prim;
+       if (use_adjacency)
+               max_reuse /= 2;
+       *max_gsprims = MIN2(*max_gsprims, 1 + max_reuse);
+}
+
+static struct radv_ngg_state
+calculate_ngg_info(const VkGraphicsPipelineCreateInfo *pCreateInfo,
+                  struct radv_pipeline *pipeline)
+{
+       struct radv_ngg_state ngg = {0};
+       struct radv_shader_variant_info *gs_info = &pipeline->shaders[MESA_SHADER_GEOMETRY]->info;
+       struct radv_es_output_info *es_info =
+               radv_pipeline_has_tess(pipeline) ? &gs_info->tes.es_info : &gs_info->vs.es_info;
+       unsigned gs_type = MESA_SHADER_VERTEX;
+       unsigned max_verts_per_prim = 3; // triangles
+       unsigned min_verts_per_prim =
+               gs_type == MESA_SHADER_GEOMETRY ? max_verts_per_prim : 1;
+       unsigned gs_num_invocations = 1; /* MAX2(gs_info->gs.invocations, 1) */
+       bool uses_adjacency;
+       switch(pCreateInfo->pInputAssemblyState->topology) {
+       case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
+       case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
+       case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
+       case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
+               uses_adjacency = true;
+               break;
+       default:
+               uses_adjacency = false;
+               break;
+       }
+
+       /* All these are in dwords: */
+       /* We can't allow using the whole LDS, because GS waves compete with
+        * other shader stages for LDS space.
+        *
+        * Streamout can increase the ESGS buffer size later on, so be more
+        * conservative with streamout and use 4K dwords. This may be suboptimal.
+        *
+        * Otherwise, use the limit of 7K dwords. The reason is that we need
+        * to leave some headroom for the max_esverts increase at the end.
+        *
+        * TODO: We should really take the shader's internal LDS use into
+        *       account. The linker will fail if the size is greater than
+        *       8K dwords.
+        */
+       const unsigned max_lds_size = (0 /*gs_info->info.so.num_outputs*/ ? 4 : 7) * 1024 - 128;
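+       /* With the current non-streamout limit this is 7 * 1024 - 128 = 7040
+        * dwords, i.e. 27.5 KiB of LDS; the streamout variant would be
+        * 4 * 1024 - 128 = 3968 dwords.
+        */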
+       const unsigned target_lds_size = max_lds_size;
+       unsigned esvert_lds_size = 0;
+       unsigned gsprim_lds_size = 0;
+
+       /* All these are per subgroup: */
+       bool max_vert_out_per_gs_instance = false;
+       unsigned max_esverts_base = 256;
+       unsigned max_gsprims_base = 128; /* default prim group size clamp */
+
+       /* Hardware has the following non-natural restrictions on the value
+        * of GE_CNTL.VERT_GRP_SIZE based on the primitive type of
+        * the draw:
+        *  - at most 252 for any line input primitive type
+        *  - at most 251 for any quad input primitive type
+        *  - at most 251 for triangle strips with adjacency (this happens to
+        *    be the natural limit for triangle *lists* with adjacency)
+        */
+       max_esverts_base = MIN2(max_esverts_base, 251 + max_verts_per_prim - 1);
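+       /* With triangles (max_verts_per_prim = 3) this allows up to 253 ES
+        * vertices here; the value programmed into GE_CNTL.VERT_GRP_SIZE below
+        * is max_esverts - max_verts_per_prim + 1, i.e. at most 251.
+        */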
+
+       if (gs_type == MESA_SHADER_GEOMETRY) {
+               unsigned max_out_verts_per_gsprim =
+                       gs_info->gs.vertices_out * gs_num_invocations;
+
+               if (max_out_verts_per_gsprim <= 256) {
+                       if (max_out_verts_per_gsprim) {
+                               max_gsprims_base = MIN2(max_gsprims_base,
+                                                       256 / max_out_verts_per_gsprim);
+                       }
+               } else {
+                       /* Use special multi-cycling mode in which each GS
+                        * instance gets its own subgroup. Does not work with
+                        * tessellation. */
+                       max_vert_out_per_gs_instance = true;
+                       max_gsprims_base = 1;
+                       max_out_verts_per_gsprim = gs_info->gs.vertices_out;
+               }
+
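+               /* Per ES vertex we need its ESGS ring item (in dwords); per GS
+                * prim, room for all emitted vertices, each presumably taking
+                * its GSVS item plus one extra dword of per-vertex flags. Note
+                * that this path is not reachable yet: gs_type is hardcoded to
+                * MESA_SHADER_VERTEX above.
+                */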
+               esvert_lds_size = es_info->esgs_itemsize / 4;
+               gsprim_lds_size = (gs_info->gs.gsvs_vertex_size / 4 + 1) * max_out_verts_per_gsprim;
+       } else {
+               /* TODO: This needs to be adjusted once LDS use for compaction
+                * after culling is implemented. */
+               /*
+               if (es_info->info.so.num_outputs)
+                       esvert_lds_size = 4 * es_info->info.so.num_outputs + 1;
+               */
+       }
+
+       unsigned max_gsprims = max_gsprims_base;
+       unsigned max_esverts = max_esverts_base;
+
+       if (esvert_lds_size)
+               max_esverts = MIN2(max_esverts, target_lds_size / esvert_lds_size);
+       if (gsprim_lds_size)
+               max_gsprims = MIN2(max_gsprims, target_lds_size / gsprim_lds_size);
+
+       max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
+       clamp_gsprims_to_esverts(&max_gsprims, max_esverts, min_verts_per_prim, uses_adjacency);
+       assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
+
+       if (esvert_lds_size || gsprim_lds_size) {
+               /* Now that we have a rough proportionality between esverts
+                * and gsprims based on the primitive type, scale both of them
+                * down simultaneously based on required LDS space.
+                *
+                * We could be smarter about this if we knew how much vertex
+                * reuse to expect.
+                */
+               unsigned lds_total = max_esverts * esvert_lds_size +
+                                    max_gsprims * gsprim_lds_size;
+               if (lds_total > target_lds_size) {
+                       max_esverts = max_esverts * target_lds_size / lds_total;
+                       max_gsprims = max_gsprims * target_lds_size / lds_total;
+
+                       max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
+                       clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
+                                                min_verts_per_prim, uses_adjacency);
+                       assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
+               }
+       }
+
+       /* Round up towards full wave sizes for better ALU utilization. */
+       if (!max_vert_out_per_gs_instance) {
+               const unsigned wavesize = 64;
+               unsigned orig_max_esverts;
+               unsigned orig_max_gsprims;
+               do {
+                       orig_max_esverts = max_esverts;
+                       orig_max_gsprims = max_gsprims;
+
+                       max_esverts = align(max_esverts, wavesize);
+                       max_esverts = MIN2(max_esverts, max_esverts_base);
+                       if (esvert_lds_size)
+                               max_esverts = MIN2(max_esverts,
+                                                  (max_lds_size - max_gsprims * gsprim_lds_size) /
+                                                  esvert_lds_size);
+                       max_esverts = MIN2(max_esverts, max_gsprims * max_verts_per_prim);
+
+                       max_gsprims = align(max_gsprims, wavesize);
+                       max_gsprims = MIN2(max_gsprims, max_gsprims_base);
+                       if (gsprim_lds_size)
+                               max_gsprims = MIN2(max_gsprims,
+                                                  (max_lds_size - max_esverts * esvert_lds_size) /
+                                                  gsprim_lds_size);
+                       clamp_gsprims_to_esverts(&max_gsprims, max_esverts,
+                                                min_verts_per_prim, uses_adjacency);
+                       assert(max_esverts >= max_verts_per_prim && max_gsprims >= 1);
+               } while (orig_max_esverts != max_esverts || orig_max_gsprims != max_gsprims);
+       }
+
+       /* Hardware restriction: minimum value of max_esverts */
+       max_esverts = MAX2(max_esverts, 23 + max_verts_per_prim);
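+       /* 23 + max_verts_per_prim keeps hw_max_esverts, computed below as
+        * max_esverts - max_verts_per_prim + 1, at the hardware minimum of 24
+        * that is asserted at the end of this function.
+        */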
+
+       unsigned max_out_vertices =
+               max_vert_out_per_gs_instance ? gs_info->gs.vertices_out :
+               gs_type == MESA_SHADER_GEOMETRY ?
+               max_gsprims * gs_num_invocations * gs_info->gs.vertices_out :
+               max_esverts;
+       assert(max_out_vertices <= 256);
+
+       unsigned prim_amp_factor = 1;
+       if (gs_type == MESA_SHADER_GEOMETRY) {
+               /* Number of output primitives per GS input primitive after
+                * GS instancing. */
+               prim_amp_factor = gs_info->gs.vertices_out;
+       }
+
+       /* The GE only checks against the maximum number of ES verts after
+        * allocating a full GS primitive. So we need to ensure that whenever
+        * this check passes, there is enough space for a full primitive without
+        * vertex reuse.
+        */
+       ngg.hw_max_esverts = max_esverts - max_verts_per_prim + 1;
+       ngg.max_gsprims = max_gsprims;
+       ngg.max_out_verts = max_out_vertices;
+       ngg.prim_amp_factor = prim_amp_factor;
+       ngg.max_vert_out_per_gs_instance = max_vert_out_per_gs_instance;
+       ngg.ngg_emit_size = max_gsprims * gsprim_lds_size;
+       ngg.vgt_esgs_ring_itemsize = 1;
+
+       pipeline->graphics.esgs_ring_size = 4 * max_esverts * esvert_lds_size;
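+       /* esvert_lds_size is in dwords; multiplying by 4 gives the ring size
+        * in bytes. */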
+
+       assert(ngg.hw_max_esverts >= 24); /* HW limitation */
+
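+       /* Worked example (a plain VS-only NGG pipeline with triangle topology
+        * and no streamout, so no LDS clamping applies): max_esverts = 253 and
+        * max_gsprims = 128, giving hw_max_esverts = 251; these end up in
+        * GE_CNTL.VERT_GRP_SIZE and PRIM_GRP_SIZE in
+        * radv_pipeline_generate_hw_ngg().
+        */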
+       return ngg;
+}
+
 static void
 calculate_gs_ring_sizes(struct radv_pipeline *pipeline, const struct radv_gs_state *gs)
 {
@@ -2000,7 +2221,8 @@ radv_generate_graphics_pipeline_key(struct radv_pipeline *pipeline,
 }
 
 static void
-radv_fill_shader_keys(struct radv_shader_variant_key *keys,
+radv_fill_shader_keys(struct radv_device *device,
+                     struct radv_shader_variant_key *keys,
                       const struct radv_pipeline_key *key,
                       nir_shader **nir)
 {
@@ -2016,7 +2238,7 @@ radv_fill_shader_keys(struct radv_shader_variant_key *keys,
        }
 
        if (nir[MESA_SHADER_TESS_CTRL]) {
-               keys[MESA_SHADER_VERTEX].vs.as_ls = true;
+               keys[MESA_SHADER_VERTEX].vs.out.as_ls = true;
                keys[MESA_SHADER_TESS_CTRL].tcs.num_inputs = 0;
                keys[MESA_SHADER_TESS_CTRL].tcs.input_vertices = key->tess_input_vertices;
                keys[MESA_SHADER_TESS_CTRL].tcs.primitive_mode = nir[MESA_SHADER_TESS_EVAL]->info.tess.primitive_mode;
@@ -2026,9 +2248,13 @@ radv_fill_shader_keys(struct radv_shader_variant_key *keys,
 
        if (nir[MESA_SHADER_GEOMETRY]) {
                if (nir[MESA_SHADER_TESS_CTRL])
-                       keys[MESA_SHADER_TESS_EVAL].tes.as_es = true;
+                       keys[MESA_SHADER_TESS_EVAL].tes.out.as_es = true;
                else
-                       keys[MESA_SHADER_VERTEX].vs.as_es = true;
+                       keys[MESA_SHADER_VERTEX].vs.out.as_es = true;
+       }
+
+       if (device->physical_device->rad_info.chip_class >= GFX10) {
+               keys[MESA_SHADER_VERTEX].vs.out.as_ngg = true;
        }
 
        for(int i = 0; i < MESA_SHADER_STAGES; ++i)
@@ -2132,7 +2358,7 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
        struct radv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
        nir_shader *nir[MESA_SHADER_STAGES] = {0};
        struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
-       struct radv_shader_variant_key keys[MESA_SHADER_STAGES] = {{{{0}}}};
+       struct radv_shader_variant_key keys[MESA_SHADER_STAGES] = {{{{{0}}}}};
        unsigned char hash[20], gs_copy_hash[20];
 
        radv_start_feedback(pipeline_feedback);
@@ -2221,7 +2447,7 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
                        nir_print_shader(nir[i], stderr);
        }
 
-       radv_fill_shader_keys(keys, key, nir);
+       radv_fill_shader_keys(device, keys, key, nir);
 
        if (nir[MESA_SHADER_FRAGMENT]) {
                if (!pipeline->shaders[MESA_SHADER_FRAGMENT]) {
@@ -2236,17 +2462,17 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
                }
 
                /* TODO: These are no longer used as keys; we should refactor this. */
-               keys[MESA_SHADER_VERTEX].vs.export_prim_id =
+               keys[MESA_SHADER_VERTEX].vs.out.export_prim_id =
                        pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.prim_id_input;
-               keys[MESA_SHADER_VERTEX].vs.export_layer_id =
+               keys[MESA_SHADER_VERTEX].vs.out.export_layer_id =
                        pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.layer_input;
-               keys[MESA_SHADER_VERTEX].vs.export_clip_dists =
+               keys[MESA_SHADER_VERTEX].vs.out.export_clip_dists =
                        !!pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.num_input_clips_culls;
-               keys[MESA_SHADER_TESS_EVAL].tes.export_prim_id =
+               keys[MESA_SHADER_TESS_EVAL].tes.out.export_prim_id =
                        pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.prim_id_input;
-               keys[MESA_SHADER_TESS_EVAL].tes.export_layer_id =
+               keys[MESA_SHADER_TESS_EVAL].tes.out.export_layer_id =
                        pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.layer_input;
-               keys[MESA_SHADER_TESS_EVAL].tes.export_clip_dists =
+               keys[MESA_SHADER_TESS_EVAL].tes.out.export_clip_dists =
                        !!pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.num_input_clips_culls;
        }
 
@@ -2356,6 +2582,8 @@ radv_pipeline_stage_to_user_data_0(struct radv_pipeline *pipeline,
 {
        bool has_gs = radv_pipeline_has_gs(pipeline);
        bool has_tess = radv_pipeline_has_tess(pipeline);
+       bool has_ngg = radv_pipeline_has_ngg(pipeline);
+
        switch (stage) {
        case MESA_SHADER_FRAGMENT:
                return R_00B030_SPI_SHADER_USER_DATA_PS_0;
@@ -2379,6 +2607,9 @@ radv_pipeline_stage_to_user_data_0(struct radv_pipeline *pipeline,
                        }
                }
 
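+               /* With NGG the vertex shader runs on the hw GS stage (note
+                * that radv_pipeline_generate_hw_ngg() programs
+                * SPI_SHADER_PGM_RSRC*_GS for it), so its user data registers
+                * are the GS ones.
+                */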
+               if (has_ngg)
+                       return R_00B230_SPI_SHADER_USER_DATA_GS_0;
+
                return R_00B130_SPI_SHADER_USER_DATA_VS_0;
        case MESA_SHADER_GEOMETRY:
                return chip_class == GFX9 ? R_00B330_SPI_SHADER_USER_DATA_ES_0 :
@@ -2968,8 +3199,7 @@ radv_pipeline_generate_vgt_gs_mode(struct radeon_cmdbuf *ctx_cs,
                                    struct radv_pipeline *pipeline)
 {
        const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
-
-       uint32_t vgt_primitiveid_en = false;
+       unsigned vgt_primitiveid_en = 0;
        uint32_t vgt_gs_mode = 0;
 
        if (radv_pipeline_has_gs(pipeline)) {
@@ -2978,9 +3208,17 @@ radv_pipeline_generate_vgt_gs_mode(struct radeon_cmdbuf *ctx_cs,
 
                vgt_gs_mode = ac_vgt_gs_mode(gs->info.gs.vertices_out,
                                             pipeline->device->physical_device->rad_info.chip_class);
+       } else if (radv_pipeline_has_ngg(pipeline)) {
+               const struct radv_shader_variant *vs =
+                       pipeline->shaders[MESA_SHADER_VERTEX];
+               bool enable_prim_id =
+                       outinfo->export_prim_id || vs->info.info.uses_prim_id;
+
+               vgt_primitiveid_en |= S_028A84_PRIMITIVEID_EN(enable_prim_id) |
+                                     S_028A84_NGG_DISABLE_PROVOK_REUSE(enable_prim_id);
        } else if (outinfo->export_prim_id) {
                vgt_gs_mode = S_028A40_MODE(V_028A40_GS_SCENARIO_A);
-               vgt_primitiveid_en = true;
+               vgt_primitiveid_en |= S_028A84_PRIMITIVEID_EN(1);
        }
 
        radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN, vgt_primitiveid_en);
@@ -3084,6 +3322,106 @@ radv_pipeline_generate_hw_ls(struct radeon_cmdbuf *cs,
        radeon_emit(cs, rsrc2);
 }
 
+static void
+radv_pipeline_generate_hw_ngg(struct radeon_cmdbuf *ctx_cs,
+                             struct radeon_cmdbuf *cs,
+                             struct radv_pipeline *pipeline,
+                             struct radv_shader_variant *shader,
+                             const struct radv_ngg_state *ngg_state)
+{
+       uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
+
+       radeon_set_sh_reg_seq(cs, R_00B320_SPI_SHADER_PGM_LO_ES, 2);
+       radeon_emit(cs, va >> 8);
+       radeon_emit(cs, va >> 40);
+       radeon_set_sh_reg_seq(cs, R_00B228_SPI_SHADER_PGM_RSRC1_GS, 2);
+       radeon_emit(cs, shader->config.rsrc1);
+       radeon_emit(cs, shader->config.rsrc2);
+
+       const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
+       unsigned clip_dist_mask, cull_dist_mask, total_mask;
+       clip_dist_mask = outinfo->clip_dist_mask;
+       cull_dist_mask = outinfo->cull_dist_mask;
+       total_mask = clip_dist_mask | cull_dist_mask;
+       bool misc_vec_ena = outinfo->writes_pointsize ||
+               outinfo->writes_layer ||
+               outinfo->writes_viewport_index;
+       bool break_wave_at_eoi = false;
+
+       radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG,
+                              S_0286C4_VS_EXPORT_COUNT(MAX2(1, outinfo->param_exports) - 1));
+       radeon_set_context_reg(ctx_cs, R_028708_SPI_SHADER_IDX_FORMAT,
+                              S_028708_IDX0_EXPORT_FORMAT(V_028708_SPI_SHADER_1COMP));
+       radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
+                              S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
+                              S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
+                                                          V_02870C_SPI_SHADER_4COMP :
+                                                          V_02870C_SPI_SHADER_NONE) |
+                              S_02870C_POS2_EXPORT_FORMAT(outinfo->pos_exports > 2 ?
+                                                          V_02870C_SPI_SHADER_4COMP :
+                                                          V_02870C_SPI_SHADER_NONE) |
+                              S_02870C_POS3_EXPORT_FORMAT(outinfo->pos_exports > 3 ?
+                                                          V_02870C_SPI_SHADER_4COMP :
+                                                          V_02870C_SPI_SHADER_NONE));
+
+       radeon_set_context_reg(ctx_cs, R_028818_PA_CL_VTE_CNTL,
+                              S_028818_VTX_W0_FMT(1) |
+                              S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
+                              S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
+                              S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
+       radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
+                              S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
+                              S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
+                              S_02881C_USE_VTX_VIEWPORT_INDX(outinfo->writes_viewport_index) |
+                              S_02881C_VS_OUT_MISC_VEC_ENA(misc_vec_ena) |
+                              S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
+                              S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
+                              S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
+                              cull_dist_mask << 8 |
+                              clip_dist_mask);
+
+       /* TODO: Correctly set REUSE_OFF */
+       radeon_set_context_reg(ctx_cs, R_028AB4_VGT_REUSE_OFF,
+                              S_028AB4_REUSE_OFF(0));
+       radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
+                              ngg_state->vgt_esgs_ring_itemsize);
+
+       /* NGG specific registers. */
+       struct radv_shader_variant *gs = pipeline->shaders[MESA_SHADER_GEOMETRY];
+       uint32_t gs_num_invocations = gs ? gs->info.gs.invocations : 1;
+
+       radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL,
+                              S_028A44_ES_VERTS_PER_SUBGRP(ngg_state->hw_max_esverts) |
+                              S_028A44_GS_PRIMS_PER_SUBGRP(ngg_state->max_gsprims) |
+                              S_028A44_GS_INST_PRIMS_IN_SUBGRP(ngg_state->max_gsprims * gs_num_invocations));
+       radeon_set_context_reg(ctx_cs, R_0287FC_GE_MAX_OUTPUT_PER_SUBGROUP,
+                              S_0287FC_MAX_VERTS_PER_SUBGROUP(ngg_state->max_out_verts));
+       radeon_set_context_reg(ctx_cs, R_028B4C_GE_NGG_SUBGRP_CNTL,
+                              S_028B4C_PRIM_AMP_FACTOR(ngg_state->prim_amp_factor) |
+                              S_028B4C_THDS_PER_SUBGRP(0)); /* for fast launch */
+       radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT,
+                              S_028B90_CNT(gs_num_invocations) |
+                              S_028B90_ENABLE(gs_num_invocations > 1) |
+                              S_028B90_EN_MAX_VERT_OUT_PER_GS_INSTANCE(ngg_state->max_vert_out_per_gs_instance));
+
+       /* User edge flags are set by the pos exports. If user edge flags are
+        * not used, we must use hw-generated edge flags and pass them via
+        * the prim export to prevent drawing lines on internal edges of
+        * decomposed primitives (such as quads) with polygon mode = lines.
+        *
+        * TODO: We should combine hw-generated edge flags with user edge
+        *       flags in the shader.
+        */
+       radeon_set_context_reg(ctx_cs, R_028838_PA_CL_NGG_CNTL,
+                              S_028838_INDEX_BUF_EDGE_FLAG_ENA(!radv_pipeline_has_tess(pipeline) &&
+                                                               !radv_pipeline_has_gs(pipeline)));
+
+       radeon_set_context_reg(ctx_cs, R_03096C_GE_CNTL,
+                              S_03096C_PRIM_GRP_SIZE(ngg_state->max_gsprims) |
+                              S_03096C_VERT_GRP_SIZE(ngg_state->hw_max_esverts) |
+                              S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi));
+}
+
 static void
 radv_pipeline_generate_hw_hs(struct radeon_cmdbuf *cs,
                             struct radv_pipeline *pipeline,
@@ -3127,7 +3465,8 @@ static void
 radv_pipeline_generate_vertex_shader(struct radeon_cmdbuf *ctx_cs,
                                     struct radeon_cmdbuf *cs,
                                     struct radv_pipeline *pipeline,
-                                    const struct radv_tessellation_state *tess)
+                                    const struct radv_tessellation_state *tess,
+                                    const struct radv_ngg_state *ngg)
 {
        struct radv_shader_variant *vs;
 
@@ -3140,6 +3479,8 @@ radv_pipeline_generate_vertex_shader(struct radeon_cmdbuf *ctx_cs,
                radv_pipeline_generate_hw_ls(cs, pipeline, vs, tess);
        else if (vs->info.vs.as_es)
                radv_pipeline_generate_hw_es(cs, pipeline, vs);
+       else if (vs->info.is_ngg)
+               radv_pipeline_generate_hw_ngg(ctx_cs, cs, pipeline, vs, ngg);
        else
                radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, vs);
 }
@@ -3300,7 +3641,6 @@ radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *ctx_cs,
        }
 
        if (ps->info.info.ps.layer_input ||
-           ps->info.info.ps.uses_input_attachments ||
            ps->info.info.needs_multiview_view_index) {
                unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_LAYER];
                if (vs_offset != AC_EXP_PARAM_UNDEFINED)
@@ -3468,13 +3808,20 @@ radv_compute_vgt_shader_stages_en(const struct radv_pipeline *pipeline)
                        stages |=  S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
                                S_028B54_GS_EN(1) |
                                S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
+               else if (radv_pipeline_has_ngg(pipeline))
+                       stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_DS) |
+                                 S_028B54_PRIMGEN_EN(1);
                else
                        stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_DS);
 
-       } else if (radv_pipeline_has_gs(pipeline))
+       } else if (radv_pipeline_has_gs(pipeline)) {
                stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
                        S_028B54_GS_EN(1) |
                        S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
+       } else if (radv_pipeline_has_ngg(pipeline)) {
+               stages |= S_028B54_ES_EN(V_028B54_ES_STAGE_REAL) |
+                         S_028B54_PRIMGEN_EN(1);
+       }
 
        if (pipeline->device->physical_device->rad_info.chip_class >= GFX9)
                stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);
@@ -3513,6 +3860,41 @@ radv_compute_cliprect_rule(const VkGraphicsPipelineCreateInfo *pCreateInfo)
        return mask;
 }
 
+static void
+gfx10_pipeline_generate_ge_cntl(struct radeon_cmdbuf *ctx_cs,
+                               struct radv_pipeline *pipeline,
+                               const struct radv_tessellation_state *tess,
+                               const struct radv_gs_state *gs_state)
+{
+       bool break_wave_at_eoi = false;
+       unsigned primgroup_size;
+       unsigned vertgroup_size;
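+       /* Legacy (non-NGG) gfx10 path: NGG pipelines program GE_CNTL from
+        * radv_pipeline_generate_hw_ngg() instead, which is why the caller
+        * skips this function when radv_pipeline_has_ngg() is true.
+        */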
+
+       if (radv_pipeline_has_tess(pipeline)) {
+               primgroup_size = tess->num_patches; /* must be a multiple of NUM_PATCHES */
+               vertgroup_size = 0;
+       } else if (radv_pipeline_has_gs(pipeline)) {
+               unsigned vgt_gs_onchip_cntl = gs_state->vgt_gs_onchip_cntl;
+               primgroup_size = G_028A44_GS_PRIMS_PER_SUBGRP(vgt_gs_onchip_cntl);
+               vertgroup_size = G_028A44_ES_VERTS_PER_SUBGRP(vgt_gs_onchip_cntl);
+       } else {
+               primgroup_size = 128; /* recommended without a GS and tess */
+               vertgroup_size = 0;
+       }
+
+       if (radv_pipeline_has_tess(pipeline)) {
+               if (pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.uses_prim_id ||
+                   radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.info.uses_prim_id)
+                       break_wave_at_eoi = true;
+       }
+
+       radeon_set_uconfig_reg(ctx_cs, R_03096C_GE_CNTL,
+                              S_03096C_PRIM_GRP_SIZE(primgroup_size) |
+                              S_03096C_VERT_GRP_SIZE(vertgroup_size) |
+                              S_03096C_PACKET_TO_ONE_PA(0) /* line stipple */ |
+                              S_03096C_BREAK_WAVE_AT_EOI(break_wave_at_eoi));
+}
+
 static void
 radv_pipeline_generate_pm4(struct radv_pipeline *pipeline,
                            const VkGraphicsPipelineCreateInfo *pCreateInfo,
@@ -3520,6 +3902,7 @@ radv_pipeline_generate_pm4(struct radv_pipeline *pipeline,
                            const struct radv_blend_state *blend,
                            const struct radv_tessellation_state *tess,
                            const struct radv_gs_state *gs,
+                           const struct radv_ngg_state *ngg,
                            unsigned prim, unsigned gs_out)
 {
        struct radeon_cmdbuf *ctx_cs = &pipeline->ctx_cs;
@@ -3535,7 +3918,7 @@ radv_pipeline_generate_pm4(struct radv_pipeline *pipeline,
        radv_pipeline_generate_raster_state(ctx_cs, pipeline, pCreateInfo);
        radv_pipeline_generate_multisample_state(ctx_cs, pipeline);
        radv_pipeline_generate_vgt_gs_mode(ctx_cs, pipeline);
-       radv_pipeline_generate_vertex_shader(ctx_cs, cs, pipeline, tess);
+       radv_pipeline_generate_vertex_shader(ctx_cs, cs, pipeline, tess, ngg);
        radv_pipeline_generate_tess_shaders(ctx_cs, cs, pipeline, tess);
        radv_pipeline_generate_geometry_shader(ctx_cs, cs, pipeline, gs);
        radv_pipeline_generate_fragment_shader(ctx_cs, cs, pipeline);
@@ -3543,6 +3926,9 @@ radv_pipeline_generate_pm4(struct radv_pipeline *pipeline,
        radv_pipeline_generate_vgt_vertex_reuse(ctx_cs, pipeline);
        radv_pipeline_generate_binning_state(ctx_cs, pipeline, pCreateInfo);
 
+       if (pipeline->device->physical_device->rad_info.chip_class >= GFX10 && !radv_pipeline_has_ngg(pipeline))
+               gfx10_pipeline_generate_ge_cntl(ctx_cs, pipeline, tess, gs);
+
        radeon_set_context_reg(ctx_cs, R_0286E8_SPI_TMPRING_SIZE,
                               S_0286E8_WAVES(pipeline->max_waves) |
                               S_0286E8_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));
@@ -3550,7 +3936,8 @@ radv_pipeline_generate_pm4(struct radv_pipeline *pipeline,
        radeon_set_context_reg(ctx_cs, R_028B54_VGT_SHADER_STAGES_EN, radv_compute_vgt_shader_stages_en(pipeline));
 
        if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
-               radeon_set_uconfig_reg_idx(cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
+               radeon_set_uconfig_reg_idx(pipeline->device->physical_device,
+                                          cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
        } else {
                radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
        }
@@ -3770,6 +4157,9 @@ radv_pipeline_init(struct radv_pipeline *pipeline,
        if (radv_pipeline_has_gs(pipeline)) {
                gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs.output_prim);
                pipeline->graphics.can_use_guardband = gs_out == V_028A6C_OUTPRIM_TYPE_TRISTRIP;
+       } else if (radv_pipeline_has_tess(pipeline)) {
+               gs_out = si_conv_gl_prim_to_gs_out(pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.tes.primitive_mode);
+               pipeline->graphics.can_use_guardband = gs_out == V_028A6C_OUTPRIM_TYPE_TRISTRIP;
        } else {
                gs_out = si_conv_prim_to_gs_out(pCreateInfo->pInputAssemblyState->topology);
        }
@@ -3777,6 +4167,8 @@ radv_pipeline_init(struct radv_pipeline *pipeline,
                prim = V_008958_DI_PT_RECTLIST;
                gs_out = V_028A6C_OUTPRIM_TYPE_TRISTRIP;
                pipeline->graphics.can_use_guardband = true;
+               if (radv_pipeline_has_ngg(pipeline))
+                       gs_out = V_028A6C_VGT_OUT_RECT_V0;
        }
        pipeline->graphics.prim_restart_enable = !!pCreateInfo->pInputAssemblyState->primitiveRestartEnable;
        /* prim vertex count will need TESS changes */
@@ -3809,8 +4201,12 @@ radv_pipeline_init(struct radv_pipeline *pipeline,
                }
        }
 
+       struct radv_ngg_state ngg = {0};
        struct radv_gs_state gs = {0};
-       if (radv_pipeline_has_gs(pipeline)) {
+
+       if (radv_pipeline_has_ngg(pipeline)) {
+               ngg = calculate_ngg_info(pCreateInfo, pipeline);
+       } else if (radv_pipeline_has_gs(pipeline)) {
                gs = calculate_gs_info(pCreateInfo, pipeline);
                calculate_gs_ring_sizes(pipeline, &gs);
        }
@@ -3846,7 +4242,7 @@ radv_pipeline_init(struct radv_pipeline *pipeline,
        pipeline->streamout_shader = radv_pipeline_get_streamout_shader(pipeline);
 
        result = radv_pipeline_scratch_init(device, pipeline);
-       radv_pipeline_generate_pm4(pipeline, pCreateInfo, extra, &blend, &tess, &gs, prim, gs_out);
+       radv_pipeline_generate_pm4(pipeline, pCreateInfo, extra, &blend, &tess, &gs, &ngg, prim, gs_out);
 
        return result;
 }