r600g: add support for geom shaders to r600/r700 chipsets (v2)
author Dave Airlie <airlied@redhat.com>
Thu, 30 Jan 2014 04:19:57 +0000 (04:19 +0000)
committer Dave Airlie <airlied@redhat.com>
Wed, 5 Feb 2014 00:49:43 +0000 (10:49 +1000)
This is my first attempt at enabling r600/r700 geometry shaders;
the basic tests pass on both my rv770 and my rv635.

It requires this kernel patch:
http://www.spinics.net/lists/dri-devel/msg52745.html

v2: address Alex's comments.
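
As a rough sketch (not part of the patch itself), the gating added in
r600_pipe.c boils down to only advertising geometry shaders on
pre-evergreen parts when the kernel DRM interface is new enough; the
names used here (struct r600_screen, rscreen->b.family, CHIP_CEDAR,
rscreen->b.info.drm_minor) are taken from the diff below:

    /* Sketch of the capability gate used in r600_get_shader_param();
     * evergreen and newer always support GS, older parts need the
     * kernel patch referenced above (DRM minor >= 37). */
    static bool r600_has_geom_shaders(struct r600_screen *rscreen)
    {
            if (rscreen->b.family >= CHIP_CEDAR)
                    return true;
            return rscreen->b.info.drm_minor >= 37;
    }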

Signed-off-by: Dave Airlie <airlied@redhat.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
src/gallium/drivers/r600/r600_asm.c
src/gallium/drivers/r600/r600_pipe.c
src/gallium/drivers/r600/r600_pipe.h
src/gallium/drivers/r600/r600_shader.c
src/gallium/drivers/r600/r600_state.c
src/gallium/drivers/r600/r600_state_common.c
src/gallium/drivers/r600/r600d.h

src/gallium/drivers/r600/r600_asm.c
index 899a8efd21d8338c16b5d1529ac90ec376205dfa..3afe7b2c019f5bddfbc87da18c2ed75b3b0c914f 100644
@@ -1535,7 +1535,7 @@ static int r600_bytecode_cf_build(struct r600_bytecode *bc, struct r600_bytecode
                        S_SQ_CF_ALLOC_EXPORT_WORD1_BARRIER(cf->barrier) |
                        S_SQ_CF_ALLOC_EXPORT_WORD1_CF_INST(opcode) |
                        S_SQ_CF_ALLOC_EXPORT_WORD1_END_OF_PROGRAM(cf->end_of_program);
-       } else if (cfop->flags & CF_STRM) {
+       } else if (cfop->flags & CF_MEM) {
                bc->bytecode[id++] = S_SQ_CF_ALLOC_EXPORT_WORD0_RW_GPR(cf->output.gpr) |
                        S_SQ_CF_ALLOC_EXPORT_WORD0_ELEM_SIZE(cf->output.elem_size) |
                        S_SQ_CF_ALLOC_EXPORT_WORD0_ARRAY_BASE(cf->output.array_base) |
src/gallium/drivers/r600/r600_pipe.c
index 47ada0702e545b68e68e7a1c963bcf36281863d3..6c72d07575720e36846761a776ffb5fd5e6b6da0 100644
@@ -372,7 +372,12 @@ static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
                return 1;
 
        case PIPE_CAP_GLSL_FEATURE_LEVEL:
-               return family >= CHIP_CEDAR ? 330 : 140;
+               if (family >= CHIP_CEDAR)
+                  return 330;
+               /* pre-evergreen geom shaders need newer kernel */
+               if (rscreen->b.info.drm_minor >= 37)
+                  return 330;
+               return 140;
 
        /* Supported except the original R600. */
        case PIPE_CAP_INDEP_BLEND_ENABLE:
@@ -456,9 +461,12 @@ static int r600_get_shader_param(struct pipe_screen* pscreen, unsigned shader, e
        case PIPE_SHADER_COMPUTE:
                break;
        case PIPE_SHADER_GEOMETRY:
-               if (rscreen->b.chip_class < EVERGREEN)
-                       return 0;
-               break;
+               if (rscreen->b.family >= CHIP_CEDAR)
+                       break;
+               /* pre-evergreen geom shaders need newer kernel */
+               if (rscreen->b.info.drm_minor >= 37)
+                       break;
+               return 0;
        default:
                /* XXX: support tessellation on Evergreen */
                return 0;
src/gallium/drivers/r600/r600_pipe.h
index 2d2c79b84e3ce8fea6591bb5ac77100207cfc8fb..9f27a17cd53ee781df731ac26febd28eda55e400 100644
@@ -160,6 +160,7 @@ struct r600_sample_mask {
 struct r600_config_state {
        struct r600_atom atom;
        unsigned sq_gpr_resource_mgmt_1;
+       unsigned sq_gpr_resource_mgmt_2;
 };
 
 struct r600_stencil_ref
@@ -565,6 +566,8 @@ r600_create_sampler_view_custom(struct pipe_context *ctx,
 void r600_init_state_functions(struct r600_context *rctx);
 void r600_init_atom_start_cs(struct r600_context *rctx);
 void r600_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
+void r600_update_es_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
+void r600_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
 void r600_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader);
 void *r600_create_db_flush_dsa(struct r600_context *rctx);
 void *r600_create_resolve_blend(struct r600_context *rctx);
src/gallium/drivers/r600/r600_shader.c
index 755fa91f8aef6fd1a0054ffcde3a816d378415cc..758abd76bf4b47d0d7eef5726ec2a9f957c846dd 100644
@@ -210,7 +210,8 @@ int r600_pipe_shader_create(struct pipe_context *ctx,
                        evergreen_update_gs_state(ctx, shader);
                        evergreen_update_vs_state(ctx, shader->gs_copy_shader);
                } else {
-                       assert(!"not suported yet");
+                       r600_update_gs_state(ctx, shader);
+                       r600_update_vs_state(ctx, shader->gs_copy_shader);
                }
                break;
        case TGSI_PROCESSOR_VERTEX:
@@ -220,7 +221,10 @@ int r600_pipe_shader_create(struct pipe_context *ctx,
                        else
                                evergreen_update_vs_state(ctx, shader);
                } else {
-                       r600_update_vs_state(ctx, shader);
+                       if (export_shader)
+                               r600_update_es_state(ctx, shader);
+                       else
+                               r600_update_vs_state(ctx, shader);
                }
                break;
        case TGSI_PROCESSOR_FRAGMENT:
@@ -906,7 +910,11 @@ static int fetch_gs_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_regi
        vtx.dst_sel_y = 1;              /* SEL_Y */
        vtx.dst_sel_z = 2;              /* SEL_Z */
        vtx.dst_sel_w = 3;              /* SEL_W */
-       vtx.use_const_fields = 1;
+       if (ctx->bc->chip_class >= EVERGREEN) {
+               vtx.use_const_fields = 1;
+       } else {
+               vtx.data_format = FMT_32_32_32_32_FLOAT;
+       }
 
        if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
                return r;
@@ -1229,7 +1237,11 @@ static int generate_gs_copy_shader(struct r600_context *rctx,
                vtx.dst_sel_y = 1;
                vtx.dst_sel_z = 2;
                vtx.dst_sel_w = 3;
-               vtx.use_const_fields = 1;
+               if (rctx->b.chip_class >= EVERGREEN) {
+                       vtx.use_const_fields = 1;
+               } else {
+                       vtx.data_format = FMT_32_32_32_32_FLOAT;
+               }
 
                r600_bytecode_add_vtx(ctx.bc, &vtx);
        }
@@ -1551,7 +1563,7 @@ static int r600_shader_from_tgsi(struct r600_context *rctx,
        if (ctx.type == TGSI_PROCESSOR_FRAGMENT && ctx.bc->chip_class >= EVERGREEN) {
                ctx.file_offset[TGSI_FILE_INPUT] = evergreen_gpr_count(&ctx);
        }
-       if (ctx.type == TGSI_PROCESSOR_GEOMETRY && ctx.bc->chip_class >= EVERGREEN) {
+       if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
                /* FIXME 1 would be enough in some cases (3 or less input vertices) */
                ctx.file_offset[TGSI_FILE_INPUT] = 2;
        }
@@ -6297,8 +6309,8 @@ static struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[] = {
        {TGSI_OPCODE_TXF,       0, FETCH_OP_LD, tgsi_tex},
        {TGSI_OPCODE_TXQ,       0, FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
        {TGSI_OPCODE_CONT,      0, CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
-       {TGSI_OPCODE_EMIT,      0, ALU_OP0_NOP, tgsi_unsupported},
-       {TGSI_OPCODE_ENDPRIM,   0, ALU_OP0_NOP, tgsi_unsupported},
+       {TGSI_OPCODE_EMIT,      0, CF_OP_EMIT_VERTEX, tgsi_gs_emit},
+       {TGSI_OPCODE_ENDPRIM,   0, CF_OP_CUT_VERTEX, tgsi_gs_emit},
        {TGSI_OPCODE_BGNLOOP,   0, ALU_OP0_NOP, tgsi_bgnloop},
        {TGSI_OPCODE_BGNSUB,    0, ALU_OP0_NOP, tgsi_unsupported},
        {TGSI_OPCODE_ENDLOOP,   0, ALU_OP0_NOP, tgsi_endloop},
src/gallium/drivers/r600/r600_state.c
index e0c801e60ed9e51ab95c2077908c1ab616cceb3a..1b73ce302130d2cff9c61dd087a1ad6c33ec28cf 100644
@@ -2064,6 +2064,7 @@ static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom *
        struct r600_config_state *a = (struct r600_config_state*)atom;
 
        r600_write_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1);
+       r600_write_config_reg(cs, R_008C08_SQ_GPR_RESOURCE_MGMT_2, a->sq_gpr_resource_mgmt_2);
 }
 
 static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom *atom)
@@ -2115,16 +2116,18 @@ static void r600_emit_constant_buffers(struct r600_context *rctx,
                struct r600_resource *rbuffer;
                unsigned offset;
                unsigned buffer_index = ffs(dirty_mask) - 1;
-
+               unsigned gs_ring_buffer = (buffer_index == R600_GS_RING_CONST_BUFFER);
                cb = &state->cb[buffer_index];
                rbuffer = (struct r600_resource*)cb->buffer;
                assert(rbuffer);
 
                offset = cb->buffer_offset;
 
-               r600_write_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4,
-                                      ALIGN_DIVUP(cb->buffer_size >> 4, 16));
-               r600_write_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8);
+               if (!gs_ring_buffer) {
+                       r600_write_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4,
+                                              ALIGN_DIVUP(cb->buffer_size >> 4, 16));
+                       r600_write_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8);
+               }
 
                radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
                radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READ));
@@ -2134,8 +2137,8 @@ static void r600_emit_constant_buffers(struct r600_context *rctx,
                radeon_emit(cs, offset); /* RESOURCEi_WORD0 */
                radeon_emit(cs, rbuffer->buf->size - offset - 1); /* RESOURCEi_WORD1 */
                radeon_emit(cs, /* RESOURCEi_WORD2 */
-                                S_038008_ENDIAN_SWAP(r600_endian_swap(32)) |
-                                S_038008_STRIDE(16));
+                           S_038008_ENDIAN_SWAP(gs_ring_buffer ? ENDIAN_NONE : r600_endian_swap(32)) |
+                           S_038008_STRIDE(gs_ring_buffer ? 4 : 16));
                radeon_emit(cs, 0); /* RESOURCEi_WORD3 */
                radeon_emit(cs, 0); /* RESOURCEi_WORD4 */
                radeon_emit(cs, 0); /* RESOURCEi_WORD5 */
@@ -2320,34 +2323,124 @@ static void r600_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600
        radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->buffer, RADEON_USAGE_READ));
 }
 
+static void r600_emit_shader_stages(struct r600_context *rctx, struct r600_atom *a)
+{
+       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct r600_shader_stages_state *state = (struct r600_shader_stages_state*)a;
+
+       uint32_t v2 = 0, primid = 0;
+
+       if (state->geom_enable) {
+               uint32_t cut_val;
+
+               if (rctx->gs_shader->current->shader.gs_max_out_vertices <= 128)
+                       cut_val = V_028A40_GS_CUT_128;
+               else if (rctx->gs_shader->current->shader.gs_max_out_vertices <= 256)
+                       cut_val = V_028A40_GS_CUT_256;
+               else if (rctx->gs_shader->current->shader.gs_max_out_vertices <= 512)
+                       cut_val = V_028A40_GS_CUT_512;
+               else
+                       cut_val = V_028A40_GS_CUT_1024;
+
+               v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_G) |
+                       S_028A40_CUT_MODE(cut_val);
+
+               if (rctx->gs_shader->current->shader.gs_prim_id_input)
+                       primid = 1;
+       }
+
+       r600_write_context_reg(cs, R_028A40_VGT_GS_MODE, v2);
+       r600_write_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid);
+}
+
+static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a)
+{
+       struct pipe_screen *screen = rctx->b.b.screen;
+       struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+       struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a;
+       struct r600_resource *rbuffer;
+
+       r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
+       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+       radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
+
+       if (state->enable) {
+               rbuffer =(struct r600_resource*)state->esgs_ring.buffer;
+               r600_write_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE,
+                               (r600_resource_va(screen, &rbuffer->b.b)) >> 8);
+               radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+               radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READWRITE));
+               r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE,
+                               state->esgs_ring.buffer_size >> 8);
+
+               rbuffer =(struct r600_resource*)state->gsvs_ring.buffer;
+               r600_write_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE,
+                               (r600_resource_va(screen, &rbuffer->b.b)) >> 8);
+               radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+               radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READWRITE));
+               r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE,
+                               state->gsvs_ring.buffer_size >> 8);
+       } else {
+               r600_write_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0);
+               r600_write_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0);
+       }
+
+       r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
+       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+       radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH));
+}
+
 /* Adjust GPR allocation on R6xx/R7xx */
 bool r600_adjust_gprs(struct r600_context *rctx)
 {
        unsigned num_ps_gprs = rctx->ps_shader->current->shader.bc.ngpr;
-       unsigned num_vs_gprs = rctx->vs_shader->current->shader.bc.ngpr;
+       unsigned num_vs_gprs, num_es_gprs, num_gs_gprs;
        unsigned new_num_ps_gprs = num_ps_gprs;
-       unsigned new_num_vs_gprs = num_vs_gprs;
+       unsigned new_num_vs_gprs, new_num_es_gprs, new_num_gs_gprs;
        unsigned cur_num_ps_gprs = G_008C04_NUM_PS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
        unsigned cur_num_vs_gprs = G_008C04_NUM_VS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1);
+       unsigned cur_num_gs_gprs = G_008C08_NUM_GS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
+       unsigned cur_num_es_gprs = G_008C08_NUM_ES_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2);
        unsigned def_num_ps_gprs = rctx->default_ps_gprs;
        unsigned def_num_vs_gprs = rctx->default_vs_gprs;
+       unsigned def_num_gs_gprs = 0;
+       unsigned def_num_es_gprs = 0;
        unsigned def_num_clause_temp_gprs = rctx->r6xx_num_clause_temp_gprs;
        /* hardware will reserve twice num_clause_temp_gprs */
-       unsigned max_gprs = def_num_ps_gprs + def_num_vs_gprs + def_num_clause_temp_gprs * 2;
-       unsigned tmp;
+       unsigned max_gprs = def_num_gs_gprs + def_num_es_gprs + def_num_ps_gprs + def_num_vs_gprs + def_num_clause_temp_gprs * 2;
+       unsigned tmp, tmp2;
+
+       if (rctx->gs_shader) {
+               num_es_gprs = rctx->vs_shader->current->shader.bc.ngpr;
+               num_gs_gprs = rctx->gs_shader->current->shader.bc.ngpr;
+               num_vs_gprs = rctx->gs_shader->current->gs_copy_shader->shader.bc.ngpr;
+       } else {
+               num_es_gprs = 0;
+               num_gs_gprs = 0;
+               num_vs_gprs = rctx->vs_shader->current->shader.bc.ngpr;
+       }
+       new_num_vs_gprs = num_vs_gprs;
+       new_num_es_gprs = num_es_gprs;
+       new_num_gs_gprs = num_gs_gprs;
 
        /* the sum of all SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS must <= to max_gprs */
-       if (new_num_ps_gprs > cur_num_ps_gprs || new_num_vs_gprs > cur_num_vs_gprs) {
+       if (new_num_ps_gprs > cur_num_ps_gprs || new_num_vs_gprs > cur_num_vs_gprs ||
+           new_num_es_gprs > cur_num_es_gprs || new_num_gs_gprs > cur_num_gs_gprs) {
                /* try to use switch back to default */
-               if (new_num_ps_gprs > def_num_ps_gprs || new_num_vs_gprs > def_num_vs_gprs) {
+               if (new_num_ps_gprs > def_num_ps_gprs || new_num_vs_gprs > def_num_vs_gprs ||
+                   new_num_gs_gprs > def_num_gs_gprs || new_num_es_gprs > def_num_es_gprs) {
                        /* always privilege vs stage so that at worst we have the
                         * pixel stage producing wrong output (not the vertex
                         * stage) */
-                       new_num_ps_gprs = max_gprs - (new_num_vs_gprs + def_num_clause_temp_gprs * 2);
+                       new_num_ps_gprs = max_gprs - ((new_num_vs_gprs - new_num_es_gprs - new_num_gs_gprs) + def_num_clause_temp_gprs * 2);
                        new_num_vs_gprs = num_vs_gprs;
+                       new_num_gs_gprs = num_gs_gprs;
+                       new_num_es_gprs = num_es_gprs;
                } else {
                        new_num_ps_gprs = def_num_ps_gprs;
                        new_num_vs_gprs = def_num_vs_gprs;
+                       new_num_es_gprs = def_num_es_gprs;
+                       new_num_gs_gprs = def_num_gs_gprs;
                }
        } else {
                return true;
@@ -2359,10 +2452,11 @@ bool r600_adjust_gprs(struct r600_context *rctx)
         * it will lockup. So in this case just discard the draw command
         * and don't change the current gprs repartitions.
         */
-       if (num_ps_gprs > new_num_ps_gprs || num_vs_gprs > new_num_vs_gprs) {
-               R600_ERR("ps & vs shader require too many register (%d + %d) "
+       if (num_ps_gprs > new_num_ps_gprs || num_vs_gprs > new_num_vs_gprs ||
+           num_gs_gprs > new_num_gs_gprs || num_es_gprs > new_num_es_gprs) {
+               R600_ERR("shaders require too many register (%d + %d + %d + %d) "
                         "for a combined maximum of %d\n",
-                        num_ps_gprs, num_vs_gprs, max_gprs);
+                        num_ps_gprs, num_vs_gprs, num_es_gprs, num_gs_gprs, max_gprs);
                return false;
        }
 
@@ -2370,8 +2464,12 @@ bool r600_adjust_gprs(struct r600_context *rctx)
        tmp = S_008C04_NUM_PS_GPRS(new_num_ps_gprs) |
                S_008C04_NUM_VS_GPRS(new_num_vs_gprs) |
                S_008C04_NUM_CLAUSE_TEMP_GPRS(def_num_clause_temp_gprs);
-       if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp) {
+
+       tmp2 = S_008C08_NUM_ES_GPRS(new_num_es_gprs) |
+               S_008C08_NUM_GS_GPRS(new_num_gs_gprs);
+       if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp || rctx->config_state.sq_gpr_resource_mgmt_2 != tmp2) {
                rctx->config_state.sq_gpr_resource_mgmt_1 = tmp;
+               rctx->config_state.sq_gpr_resource_mgmt_2 = tmp2;
                rctx->config_state.atom.dirty = true;
                rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
        }
@@ -2489,19 +2587,19 @@ void r600_init_atom_start_cs(struct r600_context *rctx)
                num_es_stack_entries = 16;
                break;
        case CHIP_RV770:
-               num_ps_gprs = 192;
+               num_ps_gprs = 130;
                num_vs_gprs = 56;
                num_temp_gprs = 4;
-               num_gs_gprs = 0;
-               num_es_gprs = 0;
-               num_ps_threads = 188;
+               num_gs_gprs = 31;
+               num_es_gprs = 31;
+               num_ps_threads = 180;
                num_vs_threads = 60;
-               num_gs_threads = 0;
-               num_es_threads = 0;
-               num_ps_stack_entries = 256;
-               num_vs_stack_entries = 256;
-               num_gs_stack_entries = 0;
-               num_es_stack_entries = 0;
+               num_gs_threads = 4;
+               num_es_threads = 4;
+               num_ps_stack_entries = 128;
+               num_vs_stack_entries = 128;
+               num_gs_stack_entries = 128;
+               num_es_stack_entries = 128;
                break;
        case CHIP_RV730:
        case CHIP_RV740:
@@ -2510,10 +2608,10 @@ void r600_init_atom_start_cs(struct r600_context *rctx)
                num_temp_gprs = 4;
                num_gs_gprs = 0;
                num_es_gprs = 0;
-               num_ps_threads = 188;
+               num_ps_threads = 180;
                num_vs_threads = 60;
-               num_gs_threads = 0;
-               num_es_threads = 0;
+               num_gs_threads = 4;
+               num_es_threads = 4;
                num_ps_stack_entries = 128;
                num_vs_stack_entries = 128;
                num_gs_stack_entries = 0;
@@ -2525,10 +2623,10 @@ void r600_init_atom_start_cs(struct r600_context *rctx)
                num_temp_gprs = 4;
                num_gs_gprs = 0;
                num_es_gprs = 0;
-               num_ps_threads = 144;
+               num_ps_threads = 136;
                num_vs_threads = 48;
-               num_gs_threads = 0;
-               num_es_threads = 0;
+               num_gs_threads = 4;
+               num_es_threads = 4;
                num_ps_stack_entries = 128;
                num_vs_stack_entries = 128;
                num_gs_stack_entries = 0;
@@ -2704,9 +2802,12 @@ void r600_init_atom_start_cs(struct r600_context *rctx)
        r600_store_value(cb, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */
        r600_store_value(cb, S_028244_BR_X(8192) | S_028244_BR_Y(8192)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */
 
-       r600_store_context_reg_seq(cb, R_0288CC_SQ_PGM_CF_OFFSET_PS, 2);
+       r600_store_context_reg_seq(cb, R_0288CC_SQ_PGM_CF_OFFSET_PS, 5);
        r600_store_value(cb, 0); /* R_0288CC_SQ_PGM_CF_OFFSET_PS */
        r600_store_value(cb, 0); /* R_0288D0_SQ_PGM_CF_OFFSET_VS */
+       r600_store_value(cb, 0); /* R_0288D4_SQ_PGM_CF_OFFSET_GS */
+       r600_store_value(cb, 0); /* R_0288D8_SQ_PGM_CF_OFFSET_ES */
+       r600_store_value(cb, 0); /* R_0288DC_SQ_PGM_CF_OFFSET_FS */
 
         r600_store_context_reg(cb, R_0288E0_SQ_VTX_SEMANTIC_CLEAR, ~0);
 
@@ -2715,7 +2816,6 @@ void r600_init_atom_start_cs(struct r600_context *rctx)
        r600_store_value(cb, 0); /* R_028404_VGT_MIN_VTX_INDX */
 
        r600_store_context_reg(cb, R_0288A4_SQ_PGM_RESOURCES_FS, 0);
-       r600_store_context_reg(cb, R_0288DC_SQ_PGM_CF_OFFSET_FS, 0);
 
        if (rctx->b.chip_class == R700 && rctx->screen->b.has_streamout)
                r600_store_context_reg(cb, R_028354_SX_SURFACE_SYNC, S_028354_SURFACE_SYNC_MASK(0xf));
@@ -2726,6 +2826,7 @@ void r600_init_atom_start_cs(struct r600_context *rctx)
 
        r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0, 0x1000FFF);
        r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (32 * 4), 0x1000FFF);
+       r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (64 * 4), 0x1000FFF);
 }
 
 void r600_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
@@ -2898,6 +2999,94 @@ void r600_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *sha
                S_02881C_USE_VTX_POINT_SIZE(rshader->vs_out_point_size);
 }
 
+static unsigned r600_conv_prim_to_gs_out(unsigned mode)
+{
+       static const int prim_conv[] = {
+               V_028A6C_OUTPRIM_TYPE_POINTLIST,
+               V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+               V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+               V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+               V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+               V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+               V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+               V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+               V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+               V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+               V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+               V_028A6C_OUTPRIM_TYPE_LINESTRIP,
+               V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+               V_028A6C_OUTPRIM_TYPE_TRISTRIP,
+               V_028A6C_OUTPRIM_TYPE_TRISTRIP
+       };
+       assert(mode < Elements(prim_conv));
+
+       return prim_conv[mode];
+}
+
+void r600_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
+{
+       struct r600_context *rctx = (struct r600_context *)ctx;
+       struct r600_command_buffer *cb = &shader->command_buffer;
+       struct r600_shader *rshader = &shader->shader;
+       struct r600_shader *cp_shader = &shader->gs_copy_shader->shader;
+       unsigned gsvs_itemsize =
+                       (cp_shader->ring_item_size * rshader->gs_max_out_vertices) >> 2;
+
+       r600_init_command_buffer(cb, 64);
+
+       /* VGT_GS_MODE is written by r600_emit_shader_stages */
+       r600_store_context_reg(cb, R_028AB8_VGT_VTX_CNT_EN, 1);
+
+       if (rctx->b.chip_class >= R700) {
+               r600_store_context_reg(cb, R_028B38_VGT_GS_MAX_VERT_OUT,
+                                      S_028B38_MAX_VERT_OUT(rshader->gs_max_out_vertices));
+       }
+       r600_store_context_reg(cb, R_028A6C_VGT_GS_OUT_PRIM_TYPE,
+                              r600_conv_prim_to_gs_out(rshader->gs_output_prim));
+
+       r600_store_context_reg_seq(cb, R_0288C8_SQ_GS_VERT_ITEMSIZE, 4);
+       r600_store_value(cb, cp_shader->ring_item_size >> 2);
+       r600_store_value(cb, 0);
+       r600_store_value(cb, 0);
+       r600_store_value(cb, 0);
+
+       r600_store_context_reg(cb, R_0288A8_SQ_ESGS_RING_ITEMSIZE,
+                              (rshader->ring_item_size) >> 2);
+
+       r600_store_context_reg(cb, R_0288AC_SQ_GSVS_RING_ITEMSIZE,
+                              gsvs_itemsize);
+
+       /* FIXME calculate these values somehow ??? */
+       r600_store_config_reg_seq(cb, R_0088C8_VGT_GS_PER_ES, 2);
+       r600_store_value(cb, 0x80); /* GS_PER_ES */
+       r600_store_value(cb, 0x100); /* ES_PER_GS */
+       r600_store_config_reg_seq(cb, R_0088E8_VGT_GS_PER_VS, 1);
+       r600_store_value(cb, 0x2); /* GS_PER_VS */
+
+       r600_store_context_reg(cb, R_02887C_SQ_PGM_RESOURCES_GS,
+                              S_02887C_NUM_GPRS(rshader->bc.ngpr) |
+                              S_02887C_STACK_SIZE(rshader->bc.nstack));
+       r600_store_context_reg(cb, R_02886C_SQ_PGM_START_GS,
+                              r600_resource_va(ctx->screen, (void *)shader->bo) >> 8);
+       /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
+}
+
+void r600_update_es_state(struct pipe_context *ctx, struct r600_pipe_shader *shader)
+{
+       struct r600_command_buffer *cb = &shader->command_buffer;
+       struct r600_shader *rshader = &shader->shader;
+
+       r600_init_command_buffer(cb, 32);
+
+       r600_store_context_reg(cb, R_028890_SQ_PGM_RESOURCES_ES,
+                              S_028890_NUM_GPRS(rshader->bc.ngpr) |
+                              S_028890_STACK_SIZE(rshader->bc.nstack));
+       r600_store_context_reg(cb, R_028880_SQ_PGM_START_ES,
+                              r600_resource_va(ctx->screen, (void *)shader->bo) >> 8);
+       /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */
+}
+
+
 void *r600_create_resolve_blend(struct r600_context *rctx)
 {
        struct pipe_blend_state blend;
@@ -3259,6 +3448,10 @@ void r600_init_state_functions(struct r600_context *rctx)
        rctx->atoms[id++] = &rctx->b.streamout.begin_atom;
        r600_init_atom(rctx, &rctx->vertex_shader.atom, id++, r600_emit_shader, 23);
        r600_init_atom(rctx, &rctx->pixel_shader.atom, id++, r600_emit_shader, 0);
+       r600_init_atom(rctx, &rctx->geometry_shader.atom, id++, r600_emit_shader, 0);
+       r600_init_atom(rctx, &rctx->export_shader.atom, id++, r600_emit_shader, 0);
+       r600_init_atom(rctx, &rctx->shader_stages.atom, id++, r600_emit_shader_stages, 0);
+       r600_init_atom(rctx, &rctx->gs_rings.atom, id++, r600_emit_gs_rings, 0);
 
        rctx->b.b.create_blend_state = r600_create_blend_state;
        rctx->b.b.create_depth_stencil_alpha_state = r600_create_dsa_state;
src/gallium/drivers/r600/r600_state_common.c
index ffa1eb0c4907bd501fce69373584125f17a396d3..d1410260cc50c0ae6839087ee78d85e33291f656 100644
@@ -1162,7 +1162,7 @@ static bool r600_update_derived_state(struct r600_context *rctx)
                if (unlikely(!rctx->gs_shader->current))
                        return false;
 
-               if (rctx->b.chip_class >= EVERGREEN && !rctx->shader_stages.geom_enable) {
+               if (!rctx->shader_stages.geom_enable) {
                        rctx->shader_stages.geom_enable = true;
                        rctx->shader_stages.atom.dirty = true;
                }
src/gallium/drivers/r600/r600d.h
index 9ec45c6cdfb5109a11198d32b49bd1d6c545730b..05d1f0ab34ad6345d13f58ac2364f0cba065e98f 100644
 /* Registers */
 #define R_008490_CP_STRMOUT_CNTL                    0x008490
 #define   S_008490_OFFSET_UPDATE_DONE(x)               (((x) & 0x1) << 0)
+#define R_008C40_SQ_ESGS_RING_BASE                   0x008C40
+#define R_008C44_SQ_ESGS_RING_SIZE                   0x008C44
+#define R_008C48_SQ_GSVS_RING_BASE                   0x008C48
+#define R_008C4C_SQ_GSVS_RING_SIZE                   0x008C4C
+#define R_008C50_SQ_ESTMP_RING_BASE                  0x008C50
+#define R_008C54_SQ_ESTMP_RING_SIZE                  0x008C54
+#define R_008C50_SQ_GSTMP_RING_BASE                  0x008C58
+#define R_008C54_SQ_GSTMP_RING_SIZE                  0x008C5C
+
+#define R_0088C8_VGT_GS_PER_ES                       0x0088C8
+#define R_0088CC_VGT_ES_PER_GS                       0x0088CC
+#define R_0088E8_VGT_GS_PER_VS                       0x0088E8
+
 #define R_008960_VGT_STRMOUT_BUFFER_FILLED_SIZE_0    0x008960 /* read-only */
 #define R_008964_VGT_STRMOUT_BUFFER_FILLED_SIZE_1    0x008964 /* read-only */
 #define R_008968_VGT_STRMOUT_BUFFER_FILLED_SIZE_2    0x008968 /* read-only */
 #define   S_028A40_MODE(x)                             (((x) & 0x3) << 0)
 #define   G_028A40_MODE(x)                             (((x) >> 0) & 0x3)
 #define   C_028A40_MODE                                0xFFFFFFFC
+#define     V_028A40_GS_OFF                            0
+#define     V_028A40_GS_SCENARIO_A                     1
+#define     V_028A40_GS_SCENARIO_B                     2
+#define     V_028A40_GS_SCENARIO_G                     3
 #define   S_028A40_ES_PASSTHRU(x)                      (((x) & 0x1) << 2)
 #define   G_028A40_ES_PASSTHRU(x)                      (((x) >> 2) & 0x1)
 #define   C_028A40_ES_PASSTHRU                         0xFFFFFFFB
 #define   S_028A40_CUT_MODE(x)                         (((x) & 0x3) << 3)
 #define   G_028A40_CUT_MODE(x)                         (((x) >> 3) & 0x3)
 #define   C_028A40_CUT_MODE                            0xFFFFFFE7
+#define     V_028A40_GS_CUT_1024                       0
+#define     V_028A40_GS_CUT_512                        1
+#define     V_028A40_GS_CUT_256                        2
+#define     V_028A40_GS_CUT_128                        3
 #define R_008DFC_SQ_CF_WORD0                         0x008DFC
 #define   S_008DFC_ADDR(x)                             (((x) & 0xFFFFFFFF) << 0)
 #define   G_008DFC_ADDR(x)                             (((x) >> 0) & 0xFFFFFFFF)
 #define   S_028D44_ALPHA_TO_MASK_OFFSET3(x)            (((x) & 0x3) << 14)
 #define   S_028D44_OFFSET_ROUND(x)                     (((x) & 0x1) << 16)
 #define R_028868_SQ_PGM_RESOURCES_VS                 0x028868
+#define R_028890_SQ_PGM_RESOURCES_ES                 0x028890
+#define   S_028890_NUM_GPRS(x)                         (((x) & 0xFF) << 0)
+#define   G_028890_NUM_GPRS(x)                         (((x) >> 0) & 0xFF)
+#define   C_028890_NUM_GPRS                            0xFFFFFF00
+#define   S_028890_STACK_SIZE(x)                       (((x) & 0xFF) << 8)
+#define   G_028890_STACK_SIZE(x)                       (((x) >> 8) & 0xFF)
+#define   C_028890_STACK_SIZE                          0xFFFF00FF
+#define   S_028890_DX10_CLAMP(x)                       (((x) & 0x1) << 21)
+#define   G_028890_DX10_CLAMP(x)                       (((x) >> 21) & 0x1)
+#define   C_028890_DX10_CLAMP                          0xFFDFFFFF
+#define R_02887C_SQ_PGM_RESOURCES_GS                 0x02887C
+#define   S_02887C_NUM_GPRS(x)                         (((x) & 0xFF) << 0)
+#define   G_02887C_NUM_GPRS(x)                         (((x) >> 0) & 0xFF)
+#define   C_02887C_NUM_GPRS                            0xFFFFFF00
+#define   S_02887C_STACK_SIZE(x)                       (((x) & 0xFF) << 8)
+#define   G_02887C_STACK_SIZE(x)                       (((x) >> 8) & 0xFF)
+#define   C_02887C_STACK_SIZE                          0xFFFF00FF
+#define   S_02887C_DX10_CLAMP(x)                       (((x) & 0x1) << 21)
+#define   G_02887C_DX10_CLAMP(x)                       (((x) >> 21) & 0x1)
+#define   C_02887C_DX10_CLAMP                          0xFFDFFFFF
 #define R_0286CC_SPI_PS_IN_CONTROL_0                 0x0286CC
 #define R_0286D0_SPI_PS_IN_CONTROL_1                 0x0286D0
 #define R_028644_SPI_PS_INPUT_CNTL_0                 0x028644
 #define   G_028C04_MAX_SAMPLE_DIST(x)                  (((x) >> 13) & 0xF)
 #define   C_028C04_MAX_SAMPLE_DIST                     0xFFFE1FFF
 #define R_0288CC_SQ_PGM_CF_OFFSET_PS                 0x0288CC
-#define R_0288DC_SQ_PGM_CF_OFFSET_FS                 0x0288DC
 #define R_0288D0_SQ_PGM_CF_OFFSET_VS                 0x0288D0
+#define R_0288D4_SQ_PGM_CF_OFFSET_GS                 0x0288D4
+#define R_0288D8_SQ_PGM_CF_OFFSET_ES                 0x0288D8
+#define R_0288DC_SQ_PGM_CF_OFFSET_FS                 0x0288DC
 #define R_028840_SQ_PGM_START_PS                     0x028840
 #define R_028894_SQ_PGM_START_FS                     0x028894
 #define R_028858_SQ_PGM_START_VS                     0x028858
+#define R_02886C_SQ_PGM_START_GS                     0x02886C
+#define R_028880_SQ_PGM_START_ES                     0x028880
 #define R_028080_CB_COLOR0_VIEW                      0x028080
 #define   S_028080_SLICE_START(x)                      (((x) & 0x7FF) << 0)
 #define   G_028080_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
 #define R_0283F4_SQ_VTX_SEMANTIC_29                  0x0283F4
 #define R_0283F8_SQ_VTX_SEMANTIC_30                  0x0283F8
 #define R_0283FC_SQ_VTX_SEMANTIC_31                  0x0283FC
+#define R_0288C8_SQ_GS_VERT_ITEMSIZE                 0x0288C8
 #define R_0288E0_SQ_VTX_SEMANTIC_CLEAR               0x0288E0
 #define R_028400_VGT_MAX_VTX_INDX                    0x028400
 #define   S_028400_MAX_INDX(x)                         (((x) & 0xFFFFFFFF) << 0)
 #define R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET             0x028B28
 #define R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE 0x028B2C
 #define R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE 0x028B30
+#define R_028B38_VGT_GS_MAX_VERT_OUT                 0x028B38 /* r7xx */
+#define   S_028B38_MAX_VERT_OUT(x)                      (((x) & 0x7FF) << 0)
 #define R_028B44_VGT_STRMOUT_BASE_OFFSET_HI_0       0x028B44
 #define R_028B48_VGT_STRMOUT_BASE_OFFSET_HI_1       0x028B48
 #define R_028B4C_VGT_STRMOUT_BASE_OFFSET_HI_2       0x028B4C