From: Glenn Kennard
Date: Wed, 15 Oct 2014 15:12:16 +0000 (+0200)
Subject: r600g: Implement sm5 UBO/sampler indexing
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=7b1c0cbc90d456384b0950ad21faa3c61a6b43ff;p=mesa.git

r600g: Implement sm5 UBO/sampler indexing

Caveat: Shaders using UBO/sampler indexing will not be optimized by SB,
due to SB not currently supporting the necessary CF_INDEX_[01] index
registers.

Signed-off-by: Glenn Kennard
---

diff --git a/docs/GL3.txt b/docs/GL3.txt
index cda5a64fa30..28544312945 100644
--- a/docs/GL3.txt
+++ b/docs/GL3.txt
@@ -98,8 +98,8 @@ GL 4.0, GLSL 4.00:
   GL_ARB_draw_indirect                          DONE (i965, nvc0, radeonsi, llvmpipe, softpipe)
   GL_ARB_gpu_shader5                            DONE (i965, nvc0)
   - 'precise' qualifier                         DONE
-  - Dynamically uniform sampler array indices   DONE ()
-  - Dynamically uniform UBO array indices       DONE ()
+  - Dynamically uniform sampler array indices   DONE (r600)
+  - Dynamically uniform UBO array indices       DONE (r600)
   - Implicit signed -> unsigned conversions     DONE
   - Fused multiply-add                          DONE ()
   - Packing/bitfield/conversion functions       DONE (r600)
diff --git a/src/gallium/drivers/r600/eg_asm.c b/src/gallium/drivers/r600/eg_asm.c
index acb30409428..295cb4d80b7 100644
--- a/src/gallium/drivers/r600/eg_asm.c
+++ b/src/gallium/drivers/r600/eg_asm.c
@@ -43,10 +43,10 @@ int eg_bytecode_cf_build(struct r600_bytecode *bc, struct r600_bytecode_cf *cf)
 	/* prepend ALU_EXTENDED if we need more than 2 kcache sets */
 	if (cf->eg_alu_extended) {
 		bc->bytecode[id++] =
-			S_SQ_CF_ALU_WORD0_EXT_KCACHE_BANK_INDEX_MODE0(V_SQ_CF_INDEX_NONE) |
-			S_SQ_CF_ALU_WORD0_EXT_KCACHE_BANK_INDEX_MODE1(V_SQ_CF_INDEX_NONE) |
-			S_SQ_CF_ALU_WORD0_EXT_KCACHE_BANK_INDEX_MODE2(V_SQ_CF_INDEX_NONE) |
-			S_SQ_CF_ALU_WORD0_EXT_KCACHE_BANK_INDEX_MODE3(V_SQ_CF_INDEX_NONE) |
+			S_SQ_CF_ALU_WORD0_EXT_KCACHE_BANK_INDEX_MODE0(cf->kcache[0].index_mode) |
+			S_SQ_CF_ALU_WORD0_EXT_KCACHE_BANK_INDEX_MODE1(cf->kcache[1].index_mode) |
+			S_SQ_CF_ALU_WORD0_EXT_KCACHE_BANK_INDEX_MODE2(cf->kcache[2].index_mode) |
+			S_SQ_CF_ALU_WORD0_EXT_KCACHE_BANK_INDEX_MODE3(cf->kcache[3].index_mode) |
 			S_SQ_CF_ALU_WORD0_EXT_KCACHE_BANK2(cf->kcache[2].bank) |
 			S_SQ_CF_ALU_WORD0_EXT_KCACHE_BANK3(cf->kcache[3].bank) |
 			S_SQ_CF_ALU_WORD0_EXT_KCACHE_MODE2(cf->kcache[2].mode);
@@ -143,3 +143,47 @@ void eg_bytecode_export_read(struct r600_bytecode *bc,
 	output->comp_mask = G_SQ_CF_ALLOC_EXPORT_WORD1_BUF_COMP_MASK(word1);
 }
 #endif
+
+int egcm_load_index_reg(struct r600_bytecode *bc, unsigned id, bool inside_alu_clause)
+{
+	struct r600_bytecode_alu alu;
+	int r;
+	unsigned type;
+
+	assert(id < 2);
+	assert(bc->chip_class >= EVERGREEN);
+
+	if (bc->index_loaded[id])
+		return 0;
+
+	memset(&alu, 0, sizeof(alu));
+	alu.op = ALU_OP1_MOVA_INT;
+	alu.src[0].sel = bc->index_reg[id];
+	alu.src[0].chan = 0;
+	alu.last = 1;
+	r = r600_bytecode_add_alu(bc, &alu);
+	if (r)
+		return r;
+
+	bc->ar_loaded = 0; /* clobbered */
+
+	memset(&alu, 0, sizeof(alu));
+	alu.op = id == 0 ? ALU_OP0_SET_CF_IDX0 : ALU_OP0_SET_CF_IDX1;
+	alu.last = 1;
+	r = r600_bytecode_add_alu(bc, &alu);
+	if (r)
+		return r;
+
+	/* Must split ALU group as index only applies to following group */
+	if (inside_alu_clause) {
+		type = bc->cf_last->op;
+		if ((r = r600_bytecode_add_cf(bc))) {
+			return r;
+		}
+		bc->cf_last->op = type;
+	}
+
+	bc->index_loaded[id] = 1;
+
+	return 0;
+}
diff --git a/src/gallium/drivers/r600/r600_asm.c b/src/gallium/drivers/r600/r600_asm.c
index 8aa69b506a7..ce3c2d125a0 100644
--- a/src/gallium/drivers/r600/r600_asm.c
+++ b/src/gallium/drivers/r600/r600_asm.c
@@ -819,6 +819,10 @@ static int merge_inst_groups(struct r600_bytecode *bc, struct r600_bytecode_alu
 			have_rel = 1;
 	}
 
+	if (alu->op == ALU_OP0_SET_CF_IDX0 ||
+	    alu->op == ALU_OP0_SET_CF_IDX1)
+		return 0; /* data hazard with MOVA */
+
 	/* Let's check source gprs */
 	num_src = r600_bytecode_get_num_operands(bc, alu);
 	for (src = 0; src < num_src; ++src) {
@@ -884,7 +888,7 @@ static int merge_inst_groups(struct r600_bytecode *bc, struct r600_bytecode_alu
 /* we'll keep kcache sets sorted by bank & addr */
 static int r600_bytecode_alloc_kcache_line(struct r600_bytecode *bc,
 		struct r600_bytecode_kcache *kcache,
-		unsigned bank, unsigned line)
+		unsigned bank, unsigned line, unsigned index_mode)
 {
 	int i, kcache_banks = bc->chip_class >= EVERGREEN ? 4 : 2;
 
@@ -907,6 +911,7 @@ static int r600_bytecode_alloc_kcache_line(struct r600_bytecode *bc,
 			kcache[i].mode = V_SQ_CF_KCACHE_LOCK_1;
 			kcache[i].bank = bank;
 			kcache[i].addr = line;
+			kcache[i].index_mode = index_mode;
 			return 0;
 		}
 
@@ -936,6 +941,7 @@ static int r600_bytecode_alloc_kcache_line(struct r600_bytecode *bc,
 			kcache[i].mode = V_SQ_CF_KCACHE_LOCK_1;
 			kcache[i].bank = bank;
 			kcache[i].addr = line;
+			kcache[i].index_mode = index_mode;
 			return 0;
 		}
 	}
@@ -949,15 +955,16 @@ static int r600_bytecode_alloc_inst_kcache_lines(struct r600_bytecode *bc,
 	int i, r;
 
 	for (i = 0; i < 3; i++) {
-		unsigned bank, line, sel = alu->src[i].sel;
+		unsigned bank, line, sel = alu->src[i].sel, index_mode;
 
 		if (sel < 512)
 			continue;
 
 		bank = alu->src[i].kc_bank;
 		line = (sel-512)>>4;
+		index_mode = alu->src[i].kc_rel ? 1 : 0; // V_SQ_CF_INDEX_0 / V_SQ_CF_INDEX_NONE
 
-		if ((r = r600_bytecode_alloc_kcache_line(bc, kcache, bank, line)))
+		if ((r = r600_bytecode_alloc_kcache_line(bc, kcache, bank, line, index_mode)))
 			return r;
 	}
 	return 0;
@@ -1028,8 +1035,9 @@ static int r600_bytecode_alloc_kcache_lines(struct r600_bytecode *bc,
 		memcpy(bc->cf_last->kcache, kcache, 4 * sizeof(struct r600_bytecode_kcache));
 	}
 
-	/* if we actually used more than 2 kcache sets - use ALU_EXTENDED on eg+ */
-	if (kcache[2].mode != V_SQ_CF_KCACHE_NOP) {
+	/* if we actually used more than 2 kcache sets, or have relative indexing - use ALU_EXTENDED on eg+ */
+	if (kcache[2].mode != V_SQ_CF_KCACHE_NOP ||
+		kcache[0].index_mode || kcache[1].index_mode || kcache[2].index_mode || kcache[3].index_mode) {
 		if (bc->chip_class < EVERGREEN)
 			return -ENOMEM;
 		bc->cf_last->eg_alu_extended = 1;
@@ -1149,6 +1157,13 @@ int r600_bytecode_add_alu_type(struct r600_bytecode *bc,
 	}
 	bc->cf_last->op = type;
 
+	/* Load index register if required */
+	if (bc->chip_class >= EVERGREEN) {
+		for (i = 0; i < 3; i++)
+			if (nalu->src[i].kc_bank && nalu->src[i].kc_rel)
+				egcm_load_index_reg(bc, 0, true);
+	}
+
 	/* Check AR usage and load it if required */
 	for (i = 0; i < 3; i++)
 		if (nalu->src[i].rel && !bc->ar_loaded)
@@ -1274,6 +1289,12 @@ int r600_bytecode_add_vtx(struct r600_bytecode *bc, const struct r600_bytecode_v
 		return -ENOMEM;
 	memcpy(nvtx, vtx, sizeof(struct r600_bytecode_vtx));
 
+	/* Load index register if required */
+	if (bc->chip_class >= EVERGREEN) {
+		if (vtx->buffer_index_mode)
+			egcm_load_index_reg(bc, 0, false);
+	}
+
 	/* cf can contains only alu or only vtx or only tex */
 	if (bc->cf_last == NULL ||
 	    last_inst_was_not_vtx_fetch(bc) ||
@@ -1320,6 +1341,12 @@ int r600_bytecode_add_tex(struct r600_bytecode *bc, const struct r600_bytecode_t
 		return -ENOMEM;
 	memcpy(ntex, tex, sizeof(struct r600_bytecode_tex));
 
+	/* Load index register if required */
+	if (bc->chip_class >= EVERGREEN) {
+		if (tex->sampler_index_mode || tex->resource_index_mode)
+			egcm_load_index_reg(bc, 1, false);
+	}
+
 	/* we can't fetch data und use it as texture lookup address in the same TEX clause */
 	if (bc->cf_last != NULL &&
 	    bc->cf_last->op == CF_OP_TEX) {
@@ -1400,6 +1427,8 @@ static int r600_bytecode_vtx_build(struct r600_bytecode *bc, struct r600_bytecod
 				S_SQ_VTX_WORD1_GPR_DST_GPR(vtx->dst_gpr);
 	bc->bytecode[id] = S_SQ_VTX_WORD2_OFFSET(vtx->offset)|
 			S_SQ_VTX_WORD2_ENDIAN_SWAP(vtx->endian);
+	if (bc->chip_class >= EVERGREEN)
+		bc->bytecode[id] |= ((vtx->buffer_index_mode & 0x3) << 21); // S_SQ_VTX_WORD2_BIM(vtx->buffer_index_mode);
 	if (bc->chip_class < CAYMAN)
 		bc->bytecode[id] |= S_SQ_VTX_WORD2_MEGA_FETCH(1);
 	id++;
@@ -1410,12 +1439,16 @@ static int r600_bytecode_vtx_build(struct r600_bytecode *bc, struct r600_bytecod
 /* common to all 3 families */
 static int r600_bytecode_tex_build(struct r600_bytecode *bc, struct r600_bytecode_tex *tex, unsigned id)
 {
-	bc->bytecode[id++] = S_SQ_TEX_WORD0_TEX_INST(
+	bc->bytecode[id] = S_SQ_TEX_WORD0_TEX_INST(
 					r600_isa_fetch_opcode(bc->isa->hw_class, tex->op)) |
 				EG_S_SQ_TEX_WORD0_INST_MOD(tex->inst_mod) |
 				S_SQ_TEX_WORD0_RESOURCE_ID(tex->resource_id) |
 				S_SQ_TEX_WORD0_SRC_GPR(tex->src_gpr) |
 				S_SQ_TEX_WORD0_SRC_REL(tex->src_rel);
+	if (bc->chip_class >= EVERGREEN)
+		bc->bytecode[id] |= ((tex->sampler_index_mode & 0x3) << 27) | // S_SQ_TEX_WORD0_SIM(tex->sampler_index_mode);
+				((tex->resource_index_mode & 0x3) << 25); // S_SQ_TEX_WORD0_RIM(tex->resource_index_mode)
+	id++;
 	bc->bytecode[id++] = S_SQ_TEX_WORD1_DST_GPR(tex->dst_gpr) |
 				S_SQ_TEX_WORD1_DST_REL(tex->dst_rel) |
 				S_SQ_TEX_WORD1_DST_SEL_X(tex->dst_sel_x) |
@@ -1847,6 +1880,7 @@ static int print_indent(int p, int c)
 
 void r600_bytecode_disasm(struct r600_bytecode *bc)
 {
+	const char *index_mode[] = {"CF_INDEX_NONE", "CF_INDEX_0", "CF_INDEX_1"};
 	static int index = 0;
 	struct r600_bytecode_cf *cf = NULL;
 	struct r600_bytecode_alu *alu = NULL;
@@ -1897,8 +1931,10 @@ void r600_bytecode_disasm(struct r600_bytecode *bc)
 				if (cf->kcache[i].mode) {
 					int c_start = (cf->kcache[i].addr << 4);
 					int c_end = c_start + (cf->kcache[i].mode << 4);
-					fprintf(stderr, "KC%d[CB%d:%d-%d] ",
-						i, cf->kcache[i].bank, c_start, c_end);
+					fprintf(stderr, "KC%d[CB%d:%d-%d%s%s] ",
+						i, cf->kcache[i].bank, c_start, c_end,
+						cf->kcache[i].index_mode ? " " : "",
+						cf->kcache[i].index_mode ? index_mode[cf->kcache[i].index_mode] : "");
 				}
 			}
 			fprintf(stderr, "\n");
@@ -2064,6 +2100,9 @@ void r600_bytecode_disasm(struct r600_bytecode *bc)
 			o += fprintf(stderr, ", RID:%d", tex->resource_id);
 			o += fprintf(stderr, ", SID:%d ", tex->sampler_id);
 
+			if (tex->sampler_index_mode)
+				fprintf(stderr, "SQ_%s ", index_mode[tex->sampler_index_mode]);
+
 			if (tex->lod_bias)
 				fprintf(stderr, "LB:%d ", tex->lod_bias);
 
@@ -2115,6 +2154,9 @@ void r600_bytecode_disasm(struct r600_bytecode *bc)
 			if (bc->chip_class < CAYMAN && vtx->mega_fetch_count)
 				fprintf(stderr, "MFC:%d ", vtx->mega_fetch_count);
 
+			if (bc->chip_class >= EVERGREEN && vtx->buffer_index_mode)
+				fprintf(stderr, "SQ_%s ", index_mode[vtx->buffer_index_mode]);
+
 			fprintf(stderr, "UCF:%d ", vtx->use_const_fields);
 			fprintf(stderr, "FMT(DTA:%d ", vtx->data_format);
 			fprintf(stderr, "NUM:%d ", vtx->num_format_all);
diff --git a/src/gallium/drivers/r600/r600_asm.h b/src/gallium/drivers/r600/r600_asm.h
index 48ea3c4bc43..e37d92672bd 100644
--- a/src/gallium/drivers/r600/r600_asm.h
+++ b/src/gallium/drivers/r600/r600_asm.h
@@ -33,6 +33,7 @@ struct r600_bytecode_alu_src {
 	unsigned			abs;
 	unsigned			rel;
 	unsigned			kc_bank;
+	unsigned			kc_rel;
 	uint32_t			value;
 };
 
@@ -86,6 +87,9 @@ struct r600_bytecode_tex {
 	unsigned			src_sel_y;
 	unsigned			src_sel_z;
 	unsigned			src_sel_w;
+	/* indexed samplers/resources only on evergreen/cayman */
+	unsigned			sampler_index_mode;
+	unsigned			resource_index_mode;
 };
 
 struct r600_bytecode_vtx {
@@ -108,6 +112,7 @@ struct r600_bytecode_vtx {
 	unsigned			srf_mode_all;
 	unsigned			offset;
 	unsigned			endian;
+	unsigned			buffer_index_mode;
 };
 
 struct r600_bytecode_output {
@@ -132,6 +137,7 @@ struct r600_bytecode_kcache {
 	unsigned			bank;
 	unsigned			mode;
 	unsigned			addr;
+	unsigned			index_mode;
 };
 
 struct r600_bytecode_cf {
@@ -217,12 +223,15 @@ struct r600_bytecode {
 	unsigned			ar_chan;
 	unsigned			ar_handling;
 	unsigned			r6xx_nop_after_rel_dst;
+	bool				index_loaded[2];
+	unsigned			index_reg[2]; /* indexing register CF_INDEX_[01] */
 	unsigned			debug_id;
 	struct r600_isa*		isa;
 };
 
 /* eg_asm.c */
 int eg_bytecode_cf_build(struct r600_bytecode *bc, struct r600_bytecode_cf *cf);
+int egcm_load_index_reg(struct r600_bytecode *bc, unsigned id, bool inside_alu_clause);
 
 /* r600_asm.c */
 void r600_bytecode_init(struct r600_bytecode *bc,
diff --git a/src/gallium/drivers/r600/r600_shader.c b/src/gallium/drivers/r600/r600_shader.c
index 08125b79edb..aab4215d7ae 100644
--- a/src/gallium/drivers/r600/r600_shader.c
+++ b/src/gallium/drivers/r600/r600_shader.c
@@ -161,6 +161,8 @@ int r600_pipe_shader_create(struct pipe_context *ctx,
 
 	/* disable SB for geom shaders - it can't handle the CF_EMIT instructions */
 	use_sb &= (shader->shader.processor_type != TGSI_PROCESSOR_GEOMETRY);
+	/* disable SB for shaders using CF_INDEX_0/1 (sampler/ubo array indexing) as it doesn't handle those currently */
+	use_sb &= !shader->shader.uses_index_registers;
 
 	/* Check if the bytecode has already been built. When using the llvm
 	 * backend, r600_shader_from_tgsi() will take care of building the
@@ -265,6 +267,7 @@ struct r600_shader_src {
 	unsigned			abs;
 	unsigned			rel;
 	unsigned			kc_bank;
+	boolean				kc_rel; /* true if cache bank is indexed */
 	uint32_t			value[4];
 };
 
@@ -325,7 +328,7 @@ static int tgsi_bgnloop(struct r600_shader_ctx *ctx);
 static int tgsi_endloop(struct r600_shader_ctx *ctx);
 static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx);
 static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
-		unsigned int cb_idx, unsigned int offset, unsigned ar_chan,
+		unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
 		unsigned int dst_reg);
 static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
 		const struct r600_shader_src *shader_src,
@@ -1031,12 +1034,15 @@ static void tgsi_src(struct r600_shader_ctx *ctx,
 	if (tgsi_src->Register.File == TGSI_FILE_CONSTANT) {
 		if (tgsi_src->Register.Dimension) {
 			r600_src->kc_bank = tgsi_src->Dimension.Index;
+			if (tgsi_src->Dimension.Indirect) {
+				r600_src->kc_rel = 1;
+			}
 		}
 	}
 }
 
 static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
-		unsigned int cb_idx, unsigned int offset, unsigned ar_chan,
+		unsigned int cb_idx, unsigned cb_rel, unsigned int offset, unsigned ar_chan,
 		unsigned int dst_reg)
 {
 	struct r600_bytecode_vtx vtx;
@@ -1083,6 +1089,7 @@ static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx,
 	vtx.num_format_all = 2; /* NUM_FORMAT_SCALED */
 	vtx.format_comp_all = 1; /* FORMAT_COMP_SIGNED */
 	vtx.endian = r600_endian_swap(32);
+	vtx.buffer_index_mode = cb_rel; // cb_rel ? V_SQ_CF_INDEX_0 : V_SQ_CF_INDEX_NONE;
 
 	if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
 		return r;
@@ -1211,13 +1218,17 @@ static int tgsi_split_constant(struct r600_shader_ctx *ctx)
 			continue;
 		}
 
+		if (ctx->src[i].kc_rel)
+			ctx->shader->uses_index_registers = true;
+
 		if (ctx->src[i].rel) {
 			int chan = inst->Src[i].Indirect.Swizzle;
 			int treg = r600_get_temp(ctx);
 
-			if ((r = tgsi_fetch_rel_const(ctx, ctx->src[i].kc_bank, ctx->src[i].sel - 512, chan, treg)))
+			if ((r = tgsi_fetch_rel_const(ctx, ctx->src[i].kc_bank, ctx->src[i].kc_rel, ctx->src[i].sel - 512, chan, treg)))
 				return r;
 			ctx->src[i].kc_bank = 0;
+			ctx->src[i].kc_rel = 0;
 			ctx->src[i].sel = treg;
 			ctx->src[i].rel = 0;
 			j--;
@@ -1230,6 +1241,7 @@ static int tgsi_split_constant(struct r600_shader_ctx *ctx)
 			alu.src[0].chan = k;
 			alu.src[0].rel = ctx->src[i].rel;
 			alu.src[0].kc_bank = ctx->src[i].kc_bank;
+			alu.src[0].kc_rel = ctx->src[i].kc_rel;
 			alu.dst.sel = treg;
 			alu.dst.chan = k;
 			alu.dst.write = 1;
@@ -1813,6 +1825,7 @@ static int r600_shader_from_tgsi(struct r600_context *rctx,
 
 	ctx.gs_out_ring_offset = 0;
 	ctx.gs_next_vertex = 0;
+	shader->uses_index_registers = false;
 	ctx.face_gpr = -1;
 	ctx.fixed_pt_position_gpr = -1;
 	ctx.fragcoord_input = -1;
@@ -1896,8 +1909,13 @@ static int r600_shader_from_tgsi(struct r600_context *rctx,
 	if (ctx.type == TGSI_PROCESSOR_GEOMETRY) {
 		ctx.gs_export_gpr_treg = ctx.bc->ar_reg + 1;
 		ctx.temp_reg = ctx.bc->ar_reg + 2;
-	} else
+		ctx.bc->index_reg[0] = ctx.bc->ar_reg + 3;
+		ctx.bc->index_reg[1] = ctx.bc->ar_reg + 4;
+	} else {
 		ctx.temp_reg = ctx.bc->ar_reg + 1;
+		ctx.bc->index_reg[0] = ctx.bc->ar_reg + 2;
+		ctx.bc->index_reg[1] = ctx.bc->ar_reg + 3;
+	}
 
 	if (indirect_gprs) {
 		shader->max_arrays = 0;
@@ -2515,6 +2533,7 @@ static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src,
 	bc_src->rel = shader_src->rel;
 	bc_src->value = shader_src->value[bc_src->chan];
 	bc_src->kc_bank = shader_src->kc_bank;
+	bc_src->kc_rel = shader_src->kc_rel;
 }
 
 static void r600_bytecode_src_set_abs(struct r600_bytecode_alu_src *bc_src)
@@ -5039,6 +5058,7 @@ static int tgsi_tex(struct r600_shader_ctx *ctx)
 	unsigned sampler_src_reg = inst->Instruction.Opcode == TGSI_OPCODE_TXQ_LZ ? 0 : 1;
 	int8_t offset_x = 0, offset_y = 0, offset_z = 0;
 	boolean has_txq_cube_array_z = false;
+	unsigned sampler_index_mode;
 
 	if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ &&
 	    ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
@@ -5072,13 +5092,17 @@ static int tgsi_tex(struct r600_shader_ctx *ctx)
 
 		/* TGSI moves the sampler to src reg 3 for TXD */
 		sampler_src_reg = 3;
+		sampler_index_mode = inst->Src[sampler_src_reg].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
+
 		for (i = 1; i < 3; i++) {
 			/* set gradients h/v */
 			memset(&tex, 0, sizeof(struct r600_bytecode_tex));
 			tex.op = (i == 1) ? FETCH_OP_SET_GRADIENTS_H :
 				FETCH_OP_SET_GRADIENTS_V;
 			tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
+			tex.sampler_index_mode = sampler_index_mode;
 			tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
+			tex.resource_index_mode = sampler_index_mode;
 
 			if (tgsi_tex_src_requires_loading(ctx, i)) {
 				tex.src_gpr = r600_get_temp(ctx);
@@ -5185,6 +5209,10 @@ static int tgsi_tex(struct r600_shader_ctx *ctx)
 		src_gpr = ctx->temp_reg;
 	}
 
+	sampler_index_mode = inst->Src[sampler_src_reg].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
+	if (sampler_index_mode)
+		ctx->shader->uses_index_registers = true;
+
 	if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
 	     inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
 	     inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
@@ -5513,7 +5541,9 @@ static int tgsi_tex(struct r600_shader_ctx *ctx)
 		tex.op = FETCH_OP_LD;
 		tex.inst_mod = 1; /* to indicate this is ldfptr */
 		tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
+		tex.sampler_index_mode = sampler_index_mode;
 		tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
+		tex.resource_index_mode = sampler_index_mode;
 		tex.src_gpr = src_gpr;
 		tex.dst_gpr = temp;
 		tex.dst_sel_x = 7; /* mask out these components */
@@ -5644,7 +5674,9 @@ static int tgsi_tex(struct r600_shader_ctx *ctx)
 		memset(&tex, 0, sizeof(struct r600_bytecode_tex));
 		tex.op = FETCH_OP_SET_TEXTURE_OFFSETS;
 		tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
+		tex.sampler_index_mode = sampler_index_mode;
 		tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
+		tex.resource_index_mode = sampler_index_mode;
 		tex.src_gpr = ctx->file_offset[inst->TexOffsets[0].File] +
 			inst->TexOffsets[0].Index;
 		tex.src_sel_x = inst->TexOffsets[0].SwizzleX;
@@ -5696,7 +5728,9 @@ static int tgsi_tex(struct r600_shader_ctx *ctx)
 
 	tex.op = opcode;
 	tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
+	tex.sampler_index_mode = sampler_index_mode;
 	tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
+	tex.resource_index_mode = sampler_index_mode;
 	tex.src_gpr = src_gpr;
 	tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] +
 		inst->Dst[0].Register.Index;
@@ -6459,7 +6493,9 @@ static int tgsi_eg_arl(struct r600_shader_ctx *ctx)
 	struct r600_bytecode_alu alu;
 	int r;
 	int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+	unsigned reg = inst->Dst[0].Register.Index > 0 ? ctx->bc->index_reg[inst->Dst[0].Register.Index - 1] : ctx->bc->ar_reg;
 
+	assert(inst->Dst[0].Register.Index < 3);
 
 	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
 	switch (inst->Instruction.Opcode) {
@@ -6482,7 +6518,7 @@ static int tgsi_eg_arl(struct r600_shader_ctx *ctx)
 			continue;
 		r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
 		alu.last = i == lasti;
-		alu.dst.sel = ctx->bc->ar_reg;
+		alu.dst.sel = reg;
 		alu.dst.chan = i;
 		alu.dst.write = 1;
 		r = r600_bytecode_add_alu(ctx->bc, &alu);
@@ -6490,7 +6526,11 @@ static int tgsi_eg_arl(struct r600_shader_ctx *ctx)
 			return r;
 	}
 
-	ctx->bc->ar_loaded = 0;
+	if (inst->Dst[0].Register.Index > 0)
+		ctx->bc->index_loaded[inst->Dst[0].Register.Index - 1] = 0;
+	else
+		ctx->bc->ar_loaded = 0;
+
 	return 0;
 }
 static int tgsi_r600_arl(struct r600_shader_ctx *ctx)
diff --git a/src/gallium/drivers/r600/r600_shader.h b/src/gallium/drivers/r600/r600_shader.h
index 20829fd9fdb..ab67013d8c3 100644
--- a/src/gallium/drivers/r600/r600_shader.h
+++ b/src/gallium/drivers/r600/r600_shader.h
@@ -69,6 +69,8 @@ struct r600_shader {
 	boolean			has_txq_cube_array_z_comp;
 	boolean			uses_tex_buffers;
 	boolean			gs_prim_id_input;
+	/* Temporarily workaround SB not handling CF_INDEX_[01] index registers */
+	boolean			uses_index_registers;
 
 	/* geometry shader properties */
 	unsigned		gs_input_prim;
diff --git a/src/gallium/drivers/r600/sb/sb_bc_dump.cpp b/src/gallium/drivers/r600/sb/sb_bc_dump.cpp
index 1551e6d74f7..6f6a57e2647 100644
--- a/src/gallium/drivers/r600/sb/sb_bc_dump.cpp
+++ b/src/gallium/drivers/r600/sb/sb_bc_dump.cpp
@@ -165,13 +165,14 @@ void bc_dump::dump(cf_node& n) {
 	s << " @" << (n.bc.addr << 1);
 
 	if (n.bc.op_ptr->flags & CF_ALU) {
+		static const char *index_mode[] = {"", " CF_INDEX_0", " CF_INDEX_1"};
 
 		for (int k = 0; k < 4; ++k) {
 			bc_kcache &kc = n.bc.kc[k];
 			if (kc.mode) {
 				s << " KC" << k << "[CB" << kc.bank <<
 						":" << (kc.addr << 4) << "-" <<
-						(((kc.addr + kc.mode) << 4) - 1) << "]";
+						(((kc.addr + kc.mode) << 4) - 1) << index_mode[kc.index_mode] << "]";
 			}
 		}
 	}
@@ -445,6 +446,11 @@ void bc_dump::dump(fetch_node& n) {
 		s << " MFC:" << n.bc.mega_fetch_count;
 	if (n.bc.fetch_whole_quad)
 		s << " FWQ";
+	if (ctx.is_egcm() && n.bc.resource_index_mode)
+		s << " RIM:SQ_CF_INDEX_" << n.bc.resource_index_mode;
+	if (ctx.is_egcm() && n.bc.resource_index_mode)
+		s << " SID:SQ_CF_INDEX_" << n.bc.sampler_index_mode;
+
 	s << " UCF:" << n.bc.use_const_fields
 		<< " FMT(DTA:" << n.bc.data_format
 		<< " NUM:" << n.bc.num_format_all
diff --git a/src/gallium/drivers/r600/sb/sb_sched.h b/src/gallium/drivers/r600/sb/sb_sched.h
index a74484f50b3..87c45867e16 100644
--- a/src/gallium/drivers/r600/sb/sb_sched.h
+++ b/src/gallium/drivers/r600/sb/sb_sched.h
@@ -32,6 +32,8 @@ namespace r600_sb {
 typedef sb_map uc_map;
 
 // resource trackers for scheduler
+// rp = read port
+// uc = use count
 typedef sb_set kc_lines;
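
Illustrative usage sketch (not part of the commit): the fragment below shows how the pieces added above are meant to fit together when a driver path emits a texture fetch whose sampler/resource is selected through CF_INDEX_1, as tgsi_tex() now does for dynamically uniform sampler array indices. The wrapper function emit_indexed_sample() is hypothetical; the fields and helpers it uses (sampler_index_mode, resource_index_mode, egcm_load_index_reg(), r600_bytecode_add_tex()) are the ones introduced or touched by this patch, and it assumes the index value has already been written to bc->index_reg[1] by the ARL handling in tgsi_eg_arl().

/* Hypothetical example, assuming the r600 driver build environment. */
#include "r600_asm.h"

/* Emit a SAMPLE whose sampler/resource id is offset by the CF_INDEX_1
 * register (index mode 2 == V_SQ_CF_INDEX_1, 0 == V_SQ_CF_INDEX_NONE). */
static int emit_indexed_sample(struct r600_bytecode *bc,
			       struct r600_bytecode_tex *tex)
{
	int r;

	tex->sampler_index_mode = 2;  /* CF_INDEX_1 */
	tex->resource_index_mode = 2; /* CF_INDEX_1 */

	/* Make sure CF_IDX1 holds the index (MOVA_INT + SET_CF_IDX1).
	 * r600_bytecode_add_tex() also does this itself when it sees a
	 * non-zero index mode, so the explicit call is only for clarity. */
	r = egcm_load_index_reg(bc, 1, false);
	if (r)
		return r;

	return r600_bytecode_add_tex(bc, tex);
}

Note the split the patch establishes: CF_INDEX_0 is used for UBO indexing (kcache banks and the constant-fetch VTX path), while CF_INDEX_1 is reserved for sampler/resource indexing, which is why the TEX paths pass id 1 to egcm_load_index_reg().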