case EG_V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
case EG_V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
case EG_V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
+ case EG_V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
+ case EG_V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->cf_addr >> 1);
bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
S_SQ_CF_WORD1_BARRIER(1) |
r600_pipe_state_add_reg(rstate, R_030014_RESOURCE0_WORD5, 0x00000000, 0xFFFFFFFF, NULL);
r600_pipe_state_add_reg(rstate, R_030018_RESOURCE0_WORD6, 0x00000000, 0xFFFFFFFF, NULL);
r600_pipe_state_add_reg(rstate, R_03001C_RESOURCE0_WORD7, 0xC0000000, 0xFFFFFFFF, NULL);
- evergreen_vs_resource_set(&rctx->ctx, rstate, i);
+ evergreen_fs_resource_set(&rctx->ctx, rstate, i);
}
mask = 0;
(r600_bo_offset(shader->bo)) >> 8, 0xFFFFFFFF, shader->bo);
r600_pipe_state_add_reg(rstate,
R_0288A4_SQ_PGM_START_FS,
- (r600_bo_offset(shader->bo)) >> 8, 0xFFFFFFFF, shader->bo);
+ (r600_bo_offset(shader->bo_fetch)) >> 8, 0xFFFFFFFF, shader->bo_fetch);
r600_pipe_state_add_reg(rstate,
R_03A200_SQ_LOOP_CONST_0 + (32 * 4), 0x01000FFF,
void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state);
void r600_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid);
void r600_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid);
+void r600_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid);
void r600_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void r600_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void r600_context_flush(struct r600_context *ctx);
void evergreen_context_draw(struct r600_context *ctx, const struct r600_draw *draw);
void evergreen_ps_resource_set(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid);
void evergreen_vs_resource_set(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid);
+void evergreen_fs_resource_set(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid);
void evergreen_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid);
void evergreen_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid);
+void evergreen_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid);
void evergreen_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
void evergreen_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id);
#include <stdio.h>
#include <errno.h>
#include "util/u_memory.h"
+#include "pipe/p_shader_tokens.h"
#include "r600_pipe.h"
#include "r600_sq.h"
#include "r600_opcodes.h"
/* common to all 3 families */
static int r600_bc_vtx_build(struct r600_bc *bc, struct r600_bc_vtx *vtx, unsigned id)
{
- bc->bytecode[id++] = S_SQ_VTX_WORD0_BUFFER_ID(vtx->buffer_id) |
- S_SQ_VTX_WORD0_SRC_GPR(vtx->src_gpr) |
- S_SQ_VTX_WORD0_SRC_SEL_X(vtx->src_sel_x) |
- S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx->mega_fetch_count);
+ unsigned fetch_resource_start = 0;
+
+ /* check if this is a fetch shader */
+ /* a fetch shader can also access vertex resources;
+ * on r600/r700 the first fetch shader resource is at 160
+ */
+ if (bc->type == -1) {
+ switch (bc->chiprev) {
+ /* r600 */
+ case 0:
+ /* r700 */
+ case 1:
+ fetch_resource_start = 160;
+ break;
+ /* evergreen */
+ case 2:
+ fetch_resource_start = 0;
+ break;
+ default:
+ fprintf(stderr, "%s:%s:%d unknown chiprev %d\n",
+ __FILE__, __func__, __LINE__, bc->chiprev);
+ break;
+ }
+ }
+ bc->bytecode[id++] = S_SQ_VTX_WORD0_BUFFER_ID(vtx->buffer_id + fetch_resource_start) |
+ S_SQ_VTX_WORD0_SRC_GPR(vtx->src_gpr) |
+ S_SQ_VTX_WORD0_SRC_SEL_X(vtx->src_sel_x) |
+ S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx->mega_fetch_count);
bc->bytecode[id++] = S_SQ_VTX_WORD1_DST_SEL_X(vtx->dst_sel_x) |
S_SQ_VTX_WORD1_DST_SEL_Y(vtx->dst_sel_y) |
S_SQ_VTX_WORD1_DST_SEL_Z(vtx->dst_sel_z) |
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
+ case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
+ case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
bc->bytecode[id++] = S_SQ_CF_WORD0_ADDR(cf->cf_addr >> 1);
bc->bytecode[id++] = S_SQ_CF_WORD1_CF_INST(cf->inst) |
S_SQ_CF_WORD1_BARRIER(1) |
if (bc->callstack[0].max > 0)
bc->nstack = ((bc->callstack[0].max + 3) >> 2) + 2;
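+ /* vertex shaders call the fetch shader, so reserve at least one stack entry */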
+ if (bc->type == TGSI_PROCESSOR_VERTEX && !bc->nstack) {
+ bc->nstack = 1;
+ }
/* first path compute addr of each CF block */
/* addr start after all the CF instructions */
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_END:
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_CONTINUE:
case V_SQ_CF_WORD1_SQ_CF_INST_LOOP_BREAK:
+ case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
+ case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
break;
default:
R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
case V_SQ_CF_WORD1_SQ_CF_INST_JUMP:
case V_SQ_CF_WORD1_SQ_CF_INST_ELSE:
case V_SQ_CF_WORD1_SQ_CF_INST_POP:
+ case V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS:
+ case V_SQ_CF_WORD1_SQ_CF_INST_RETURN:
break;
default:
R600_ERR("unsupported CF instruction (0x%X)\n", cf->inst);
struct r600_bc {
enum radeon_family family;
int chiprev; /* 0 - r600, 1 - r700, 2 - evergreen */
+ int type; /* TGSI processor type; -1 marks the fetch shader */
struct list_head cf;
struct r600_bc_cf *cf_last;
unsigned ndw;
struct r600_shader shader;
struct r600_pipe_state rstate;
struct r600_bo *bo;
+ struct r600_bo *bo_fetch; /* fetch shader bytecode buffer */
struct r600_vertex_element vertex_elements;
};
r600_bo_offset(shader->bo) >> 8, 0xFFFFFFFF, shader->bo);
r600_pipe_state_add_reg(rstate,
R_028894_SQ_PGM_START_FS,
- r600_bo_offset(shader->bo) >> 8, 0xFFFFFFFF, shader->bo);
+ r600_bo_offset(shader->bo_fetch) >> 8, 0xFFFFFFFF, shader->bo_fetch);
r600_pipe_state_add_reg(rstate,
R_03E200_SQ_LOOP_CONST_0 + (32 * 4), 0x01000FFF,
void *ptr;
/* copy new shader */
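+ /* vertex shaders get a second buffer holding the fetch shader bytecode */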
+ if (rshader->processor_type == TGSI_PROCESSOR_VERTEX && shader->bo_fetch == NULL) {
+ shader->bo_fetch = r600_bo(rctx->radeon, rshader->bc_fetch.ndw * 4, 4096, 0, 0);
+ if (shader->bo_fetch == NULL) {
+ return -ENOMEM;
+ }
+ ptr = r600_bo_map(rctx->radeon, shader->bo_fetch, 0, NULL);
+ memcpy(ptr, rshader->bc_fetch.bytecode, rshader->bc_fetch.ndw * 4);
+ r600_bo_unmap(rctx->radeon, shader->bo_fetch);
+ }
if (shader->bo == NULL) {
shader->bo = r600_bo(rctx->radeon, rshader->bc.ndw * 4, 4096, 0, 0);
if (shader->bo == NULL) {
const struct util_format_description *desc;
enum pipe_format resource_format[160];
unsigned i, nresources = 0;
- struct r600_bc *bc = &shader->bc;
+ struct r600_bc *bc = &shader->bc_fetch;
struct r600_bc_cf *cf;
struct r600_bc_vtx *vtx;
for (i = 0; i < rctx->vertex_elements->count; i++) {
resource_format[nresources++] = rctx->vertex_elements->hw_format[i];
}
- r600_bo_reference(rctx->radeon, &rshader->bo, NULL);
+ r600_bo_reference(rctx->radeon, &rshader->bo_fetch, NULL);
LIST_FOR_EACH_ENTRY(cf, &bc->cf, list) {
switch (cf->inst) {
case V_SQ_CF_WORD1_SQ_CF_INST_VTX:
break;
}
}
- return r600_bc_build(&shader->bc);
+ return r600_bc_build(&shader->bc_fetch);
}
int r600_pipe_shader_update(struct pipe_context *ctx, struct r600_pipe_shader *shader)
R600_ERR("building bytecode failed !\n");
return r;
}
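+ /* vertex shaders also need their fetch shader bytecode assembled */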
+ if (shader->shader.processor_type == TGSI_PROCESSOR_VERTEX) {
+ r = r600_bc_build(&shader->shader.bc_fetch);
+ if (r) {
+ R600_ERR("building bytecode failed !\n");
+ return r;
+ }
+ }
//fprintf(stderr, "______________________________________________________________\n");
return 0;
}
unsigned temp_reg;
struct r600_shader_tgsi_instruction *inst_info;
struct r600_bc *bc;
+ struct r600_bc *bc_fetch; /* vertex fetch shader bytecode */
struct r600_shader *shader;
u32 value[4];
u32 *literals;
vtx.dst_sel_z = 2;
vtx.dst_sel_w = 3;
vtx.use_const_fields = 1;
- r = r600_bc_add_vtx(ctx->bc, &vtx);
+ r = r600_bc_add_vtx(ctx->bc_fetch, &vtx);
if (r)
return r;
}
int i, r = 0, pos0;
ctx.bc = &shader->bc;
+ ctx.bc_fetch = &shader->bc_fetch;
ctx.shader = shader;
r = r600_bc_init(ctx.bc, shader->family);
if (r)
tgsi_parse_init(&ctx.parse, tokens);
ctx.type = ctx.parse.FullHeader.Processor.Processor;
shader->processor_type = ctx.type;
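+ /* vertex shaders build a companion fetch shader; type == -1 marks it as such */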
+ if (shader->processor_type == TGSI_PROCESSOR_VERTEX) {
+ r = r600_bc_init(ctx.bc_fetch, shader->family);
+ if (r)
+ return r;
+ ctx.bc_fetch->type = -1;
+ }
+ ctx.bc->type = shader->processor_type;
/* register allocations */
/* Values [0,127] correspond to GPR[0..127].
}
if (ctx.type == TGSI_PROCESSOR_VERTEX) {
ctx.file_offset[TGSI_FILE_INPUT] = 1;
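+ /* the vertex shader begins by calling the fetch shader */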
+ if (ctx.bc->chiprev == 2) {
+ r600_bc_add_cfinst(ctx.bc, EG_V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS);
+ } else {
+ r600_bc_add_cfinst(ctx.bc, V_SQ_CF_WORD1_SQ_CF_INST_CALL_FS);
+ }
}
if (ctx.type == TGSI_PROCESSOR_FRAGMENT && ctx.bc->chiprev == 2) {
ctx.file_offset[TGSI_FILE_INPUT] = evergreen_gpr_count(&ctx);
output[i].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT_DONE);
}
}
+ /* add return to fetch shader */
+ if (ctx.type == TGSI_PROCESSOR_VERTEX) {
+ if (ctx.bc->chiprev == 2) {
+ r600_bc_add_cfinst(ctx.bc_fetch, EG_V_SQ_CF_WORD1_SQ_CF_INST_RETURN);
+ } else {
+ r600_bc_add_cfinst(ctx.bc_fetch, V_SQ_CF_WORD1_SQ_CF_INST_RETURN);
+ }
+ }
/* add output to bytecode */
for (i = 0; i < noutput; i++) {
r = r600_bc_add_output(ctx.bc, &output[i]);
struct r600_shader_io output[32];
enum radeon_family family;
boolean uses_kill;
+ struct r600_bc bc_fetch; /* bytecode of the vertex fetch shader */
};
int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader);
r600_pipe_state_add_reg(rstate, R_038010_RESOURCE0_WORD4, 0x00000000, 0xFFFFFFFF, NULL);
r600_pipe_state_add_reg(rstate, R_038014_RESOURCE0_WORD5, 0x00000000, 0xFFFFFFFF, NULL);
r600_pipe_state_add_reg(rstate, R_038018_RESOURCE0_WORD6, 0xC0000000, 0xFFFFFFFF, NULL);
- r600_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, i);
+ r600_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i);
}
mask = 0;
if (r)
goto out_err;
}
+ /* FS RESOURCE */
+ for (int j = 0, offset = 0x7C00; j < 16; j++, offset += 0x20) {
+ r = evergreen_state_resource_init(ctx, offset);
+ if (r)
+ goto out_err;
+ }
/* PS loop const */
evergreen_loop_const_init(ctx, 0);
evergreen_context_pipe_state_set_resource(ctx, state, offset);
}
+void evergreen_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid)
+{
+ unsigned offset = R_030000_SQ_TEX_RESOURCE_WORD0_0 + 0x7C00 + 0x20 * rid;
+
+ evergreen_context_pipe_state_set_resource(ctx, state, offset);
+}
+
static inline void evergreen_context_pipe_state_set_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned offset)
{
struct r600_range *range;
evergreen_resource_set(ctx, state, offset);
}
+
+void evergreen_fs_resource_set(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid)
+{
+ unsigned offset = R_030000_RESOURCE0_WORD0 + 0x7C00 + 0x20 * rid;
+
+ evergreen_resource_set(ctx, state, offset);
+}
if (r)
goto out_err;
}
+ /* FS RESOURCE */
+ for (int j = 0, offset = 0x2300; j < 16; j++, offset += 0x1C) {
+ r = r600_state_resource_init(ctx, offset);
+ if (r)
+ goto out_err;
+ }
/* PS loop const */
r600_loop_const_init(ctx, 0);
r600_context_pipe_state_set_resource(ctx, state, offset);
}
+void r600_context_pipe_state_set_fs_resource(struct r600_context *ctx, struct r600_pipe_state *state, unsigned rid)
+{
+ unsigned offset = R_038000_SQ_TEX_RESOURCE_WORD0_0 + 0x2300 + 0x1C * rid;
+
+ r600_context_pipe_state_set_resource(ctx, state, offset);
+}
+
static inline void r600_context_pipe_state_set_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned offset)
{
struct r600_range *range;