#include "r600_llvm.h"
#include "r600_formats.h"
#include "r600_opcodes.h"
+#include "r600_shader.h"
#include "r600d.h"
#include "pipe/p_shader_tokens.h"
return 0;
}
-static int r600_shader_from_tgsi(struct r600_context * rctx, struct r600_pipe_shader *pipeshader);
+static int r600_shader_from_tgsi(struct r600_screen *rscreen,
+ struct r600_pipe_shader *pipeshader,
+ struct r600_shader_key key);
-int r600_pipe_shader_create(struct pipe_context *ctx, struct r600_pipe_shader *shader)
+int r600_pipe_shader_create(struct pipe_context *ctx,
+ struct r600_pipe_shader *shader,
+ struct r600_shader_key key)
{
static int dump_shaders = -1;
struct r600_context *rctx = (struct r600_context *)ctx;
+ struct r600_pipe_shader_selector *sel = shader->selector;
int r;
/* Would like some magic "get_bool_option_once" routine.
if (dump_shaders) {
fprintf(stderr, "--------------------------------------------------------------\n");
- tgsi_dump(shader->tokens, 0);
+ tgsi_dump(sel->tokens, 0);
- if (shader->so.num_outputs) {
+ if (sel->so.num_outputs) {
unsigned i;
fprintf(stderr, "STREAMOUT\n");
- for (i = 0; i < shader->so.num_outputs; i++) {
- unsigned mask = ((1 << shader->so.output[i].num_components) - 1) <<
- shader->so.output[i].start_component;
+ for (i = 0; i < sel->so.num_outputs; i++) {
+ unsigned mask = ((1 << sel->so.output[i].num_components) - 1) <<
+ sel->so.output[i].start_component;
fprintf(stderr, " %i: MEM_STREAM0_BUF%i OUT[%i].%s%s%s%s\n", i,
- shader->so.output[i].output_buffer, shader->so.output[i].register_index,
+ sel->so.output[i].output_buffer, sel->so.output[i].register_index,
mask & 1 ? "x" : "_",
(mask >> 1) & 1 ? "y" : "_",
(mask >> 2) & 1 ? "z" : "_",
(mask >> 3) & 1 ? "w" : "_");
}
}
}
- r = r600_shader_from_tgsi(rctx, shader);
+ r = r600_shader_from_tgsi(rctx->screen, shader, key);
if (r) {
R600_ERR("translation from TGSI failed !\n");
return r;
{
pipe_resource_reference((struct pipe_resource**)&shader->bo, NULL);
r600_bytecode_clear(&shader->shader.bc);
-
- memset(&shader->shader,0,sizeof(struct r600_shader));
}
/*
uint32_t *literals;
uint32_t nliterals;
uint32_t max_driver_temp_used;
+ boolean use_llvm;
/* needed for evergreen interpolation */
boolean input_centroid;
boolean input_linear;
if (dump) {
r600_bytecode_dump(shader_ctx.bc);
}
+ free(bytes);
return 1;
}
#endif /* HAVE_OPENCL */
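+/* Read four bytes from the LLVM byte stream as a little-endian 32-bit word,
+ * advancing *bytes_read past them. */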
+static uint32_t i32_from_byte_stream(unsigned char * bytes,
+ unsigned * bytes_read)
+{
+ unsigned i;
+ uint32_t out = 0;
+ for (i = 0; i < 4; i++) {
+ out |= bytes[(*bytes_read)++] << (8 * i);
+ }
+ return out;
+}
+
static unsigned r600_src_from_byte_stream(unsigned char * bytes,
unsigned bytes_read, struct r600_bytecode_alu * alu, unsigned src_idx)
{
unsigned char * bytes, unsigned bytes_read)
{
unsigned src_idx;
- unsigned inst0, inst1;
struct r600_bytecode_alu alu;
+ unsigned src_const_reg[3];
+ uint32_t word0, word1;
+
memset(&alu, 0, sizeof(alu));
for(src_idx = 0; src_idx < 3; src_idx++) {
- bytes_read = r600_src_from_byte_stream(bytes, bytes_read,
- &alu, src_idx);
- }
-
- alu.dst.sel = bytes[bytes_read++];
- alu.dst.chan = bytes[bytes_read++];
- alu.dst.clamp = bytes[bytes_read++];
- alu.dst.write = bytes[bytes_read++];
- alu.dst.rel = bytes[bytes_read++];
- inst0 = bytes[bytes_read++];
- inst1 = bytes[bytes_read++];
- alu.inst = inst0 | (inst1 << 8);
- alu.last = bytes[bytes_read++];
- alu.is_op3 = bytes[bytes_read++];
- alu.predicate = bytes[bytes_read++];
- alu.bank_swizzle = bytes[bytes_read++];
- alu.bank_swizzle_force = bytes[bytes_read++];
- alu.omod = bytes[bytes_read++];
- alu.index_mode = bytes[bytes_read++];
- r600_bytecode_add_alu(ctx->bc, &alu);
+ unsigned i;
+ src_const_reg[src_idx] = bytes[bytes_read++];
+ for (i = 0; i < 4; i++) {
+ alu.src[src_idx].value |= bytes[bytes_read++] << (i * 8);
+ }
+ }
+
+ word0 = i32_from_byte_stream(bytes, &bytes_read);
+ word1 = i32_from_byte_stream(bytes, &bytes_read);
+
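+ /* R700 and later chips changed the ALU word encoding, so pick the
+ * decoder that matches this chip class. */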
+ switch(ctx->bc->chip_class) {
+ case R600:
+ r600_bytecode_alu_read(&alu, word0, word1);
+ break;
+ case R700:
+ case EVERGREEN:
+ case CAYMAN:
+ r700_bytecode_alu_read(&alu, word0, word1);
+ break;
+ }
+
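+ /* Constant-file sources are flagged separately in the byte stream;
+ * the r600 assembler addresses the constant space at sel 512 and up. */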
+ for(src_idx = 0; src_idx < 3; src_idx++) {
+ if (src_const_reg[src_idx])
+ alu.src[src_idx].sel += 512;
+ }
+
+#if HAVE_LLVM < 0x0302
+ if (alu.inst == CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE) ||
+ alu.inst == CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE) ||
+ alu.inst == CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT) ||
+ alu.inst == CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT)) {
+ alu.update_pred = 1;
+ alu.dst.write = 0;
+ alu.src[1].sel = V_SQ_ALU_SRC_0;
+ alu.src[1].chan = 0;
+ alu.last = 1;
+ }
+#endif
+
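+ /* ALU ops that modify the execute mask must go in an ALU_PUSH_BEFORE
+ * clause so the previous mask is pushed onto the stack first. */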
+ if (alu.execute_mask) {
+ alu.pred_sel = 0;
+ r600_bytecode_add_alu_type(ctx->bc, &alu, CTX_INST(V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE));
+ } else {
+ r600_bytecode_add_alu(ctx->bc, &alu);
+ }
/* XXX: Handle other KILL instructions */
if (alu.inst == CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT)) {
static void llvm_if(struct r600_shader_ctx *ctx, struct r600_bytecode_alu * alu,
unsigned pred_inst)
{
- alu->inst = pred_inst;
- alu->predicate = 1;
- alu->dst.write = 0;
- alu->src[1].sel = V_SQ_ALU_SRC_0;
- alu->src[1].chan = 0;
- alu->last = 1;
- r600_bytecode_add_alu_type(ctx->bc, alu,
- CTX_INST(V_SQ_CF_ALU_WORD1_SQ_CF_INST_ALU_PUSH_BEFORE));
-
r600_bytecode_add_cfinst(ctx->bc, CTX_INST(V_SQ_CF_WORD1_SQ_CF_INST_JUMP));
fc_pushlevel(ctx, FC_IF);
callstack_check_depth(ctx, FC_PUSH_VPM, 0);
bytes_read = r600_src_from_byte_stream(bytes, bytes_read, &alu, 0);
inst = bytes[bytes_read++];
switch (inst) {
- case 0:
+ case 0: /* FC_IF */
llvm_if(ctx, &alu,
CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE));
break;
- case 1:
+ case 1: /* FC_IF_INT */
+ llvm_if(ctx, &alu,
+ CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT));
+ break;
+ case 2: /* FC_ELSE */
tgsi_else(ctx);
break;
- case 2:
+ case 3: /* FC_ENDIF */
tgsi_endif(ctx);
break;
- case 3:
+ case 4: /* FC_BGNLOOP */
tgsi_bgnloop(ctx);
break;
- case 4:
+ case 5: /* FC_ENDLOOP */
tgsi_endloop(ctx);
break;
- case 5:
+ case 6: /* FC_BREAK */
r600_break_from_byte_stream(ctx, &alu,
- CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE));
+ CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT));
break;
- case 6:
+ case 7: /* FC_BREAK_NZ_INT */
r600_break_from_byte_stream(ctx, &alu,
CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE_INT));
break;
- case 7:
+ case 8: /* FC_CONTINUE */
{
unsigned opcode = TGSI_OPCODE_CONT;
if (ctx->bc->chip_class == CAYMAN) {
tgsi_loop_brk_cont(ctx);
}
break;
- case 8:
+ case 9: /* FC_BREAK_Z_INT */
r600_break_from_byte_stream(ctx, &alu,
CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETE_INT));
break;
+ case 10: /* FC_BREAK_NZ */
+ r600_break_from_byte_stream(ctx, &alu,
+ CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_PRED_SETNE));
+ break;
}
return bytes_read;
unsigned char * bytes, unsigned bytes_read)
{
struct r600_bytecode_vtx vtx;
+
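+ /* The VTX fetch arrives as three raw hardware dwords; unpack them
+ * with the G_SQ_VTX_* field accessors below. */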
+ uint32_t word0 = i32_from_byte_stream(bytes, &bytes_read);
+ uint32_t word1 = i32_from_byte_stream(bytes, &bytes_read);
+ uint32_t word2 = i32_from_byte_stream(bytes, &bytes_read);
+
memset(&vtx, 0, sizeof(vtx));
- vtx.inst = bytes[bytes_read++];
- vtx.fetch_type = bytes[bytes_read++];
- vtx.buffer_id = bytes[bytes_read++];
- vtx.src_gpr = bytes[bytes_read++];
- vtx.src_sel_x = bytes[bytes_read++];
- vtx.mega_fetch_count = bytes[bytes_read++];
- vtx.dst_gpr = bytes[bytes_read++];
- vtx.dst_sel_x = bytes[bytes_read++];
- vtx.dst_sel_y = bytes[bytes_read++];
- vtx.dst_sel_z = bytes[bytes_read++];
- vtx.dst_sel_w = bytes[bytes_read++];
- vtx.use_const_fields = bytes[bytes_read++];
- vtx.data_format = bytes[bytes_read++];
- vtx.num_format_all = bytes[bytes_read++];
- vtx.format_comp_all = bytes[bytes_read++];
- vtx.srf_mode_all = bytes[bytes_read++];
- /* offset is 2 bytes wide */
- vtx.offset = bytes[bytes_read++];
- vtx.offset |= (bytes[bytes_read++] << 8);
- vtx.endian = bytes[bytes_read++];
+
+ /* WORD0 */
+ vtx.inst = G_SQ_VTX_WORD0_VTX_INST(word0);
+ vtx.fetch_type = G_SQ_VTX_WORD0_FETCH_TYPE(word0);
+ vtx.buffer_id = G_SQ_VTX_WORD0_BUFFER_ID(word0);
+ vtx.src_gpr = G_SQ_VTX_WORD0_SRC_GPR(word0);
+ vtx.src_sel_x = G_SQ_VTX_WORD0_SRC_SEL_X(word0);
+ vtx.mega_fetch_count = G_SQ_VTX_WORD0_MEGA_FETCH_COUNT(word0);
+
+ /* WORD1 */
+ vtx.dst_gpr = G_SQ_VTX_WORD1_GPR_DST_GPR(word1);
+ vtx.dst_sel_x = G_SQ_VTX_WORD1_DST_SEL_X(word1);
+ vtx.dst_sel_y = G_SQ_VTX_WORD1_DST_SEL_Y(word1);
+ vtx.dst_sel_z = G_SQ_VTX_WORD1_DST_SEL_Z(word1);
+ vtx.dst_sel_w = G_SQ_VTX_WORD1_DST_SEL_W(word1);
+ vtx.use_const_fields = G_SQ_VTX_WORD1_USE_CONST_FIELDS(word1);
+ vtx.data_format = G_SQ_VTX_WORD1_DATA_FORMAT(word1);
+ vtx.num_format_all = G_SQ_VTX_WORD1_NUM_FORMAT_ALL(word1);
+ vtx.format_comp_all = G_SQ_VTX_WORD1_FORMAT_COMP_ALL(word1);
+ vtx.srf_mode_all = G_SQ_VTX_WORD1_SRF_MODE_ALL(word1);
+
+ /* WORD2 */
+ vtx.offset = G_SQ_VTX_WORD2_OFFSET(word2);
+ vtx.endian = G_SQ_VTX_WORD2_ENDIAN_SWAP(word2);
if (r600_bytecode_add_vtx(ctx->bc, &vtx)) {
fprintf(stderr, "Error adding vtx\n");
return bytes_read;
}
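+/* Decode an export instruction, emitted by the LLVM backend as the two raw
+ * CF_ALLOC_EXPORT words, and add it to the bytecode. */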
+static int r600_export_from_byte_stream(struct r600_shader_ctx *ctx,
+ unsigned char * bytes, unsigned bytes_read)
+{
+ struct r600_bytecode_output output;
+ uint32_t word0 = i32_from_byte_stream(bytes, &bytes_read);
+ uint32_t word1 = i32_from_byte_stream(bytes, &bytes_read);
+
+ memset(&output, 0, sizeof(struct r600_bytecode_output));
+ if (ctx->bc->chip_class >= EVERGREEN)
+ eg_bytecode_export_read(&output, word0, word1);
+ else
+ r600_bytecode_export_read(&output, word0, word1);
+ r600_bytecode_add_output(ctx->bc, &output);
+ return bytes_read;
+}
+
static void r600_bytecode_from_byte_stream(struct r600_shader_ctx *ctx,
unsigned char * bytes, unsigned num_bytes)
{
bytes_read = r600_vtx_from_byte_stream(ctx, bytes,
bytes_read);
break;
+ case 5:
+ bytes_read = r600_export_from_byte_stream(ctx, bytes,
+ bytes_read);
+ break;
default:
/* XXX: Error here */
break;
if (ctx->shader->input[index].spi_sid) {
ctx->shader->input[index].lds_pos = ctx->shader->nlds++;
- if (ctx->shader->input[index].interpolate > 0) {
- r = evergreen_interp_alu(ctx, index);
- } else {
- r = evergreen_interp_flat(ctx, index);
+ if (!ctx->use_llvm) {
+ if (ctx->shader->input[index].interpolate > 0) {
+ r = evergreen_interp_alu(ctx, index);
+ } else {
+ r = evergreen_interp_flat(ctx, index);
+ }
}
}
return r;
ctx->cv_output = i;
break;
}
+ } else if (ctx->type == TGSI_PROCESSOR_FRAGMENT) {
+ switch (d->Semantic.Name) {
+ case TGSI_SEMANTIC_COLOR:
+ ctx->shader->nr_ps_max_color_exports++;
+ break;
+ }
}
break;
case TGSI_FILE_CONSTANT:
{
int i, r, count = ctx->shader->ninput;
- /* additional inputs will be allocated right after the existing inputs,
- * we won't need them after the color selection, so we don't need to
- * reserve these gprs for the rest of the shader code and to adjust
- * output offsets etc. */
- int gpr = ctx->file_offset[TGSI_FILE_INPUT] +
- ctx->info.file_max[TGSI_FILE_INPUT] + 1;
-
- if (ctx->face_gpr == -1) {
- i = ctx->shader->ninput++;
- ctx->shader->input[i].name = TGSI_SEMANTIC_FACE;
- ctx->shader->input[i].spi_sid = 0;
- ctx->shader->input[i].gpr = gpr++;
- ctx->face_gpr = ctx->shader->input[i].gpr;
- }
-
for (i = 0; i < count; i++) {
if (ctx->shader->input[i].name == TGSI_SEMANTIC_COLOR) {
- int ni = ctx->shader->ninput++;
- memcpy(&ctx->shader->input[ni],&ctx->shader->input[i], sizeof(struct r600_shader_io));
- ctx->shader->input[ni].name = TGSI_SEMANTIC_BCOLOR;
- ctx->shader->input[ni].spi_sid = r600_spi_sid(&ctx->shader->input[ni]);
- ctx->shader->input[ni].gpr = gpr++;
-
+ unsigned back_facing_reg = ctx->shader->input[i].potential_back_facing_reg;
if (ctx->bc->chip_class >= EVERGREEN) {
- r = evergreen_interp_input(ctx, ni);
+ if ((r = evergreen_interp_input(ctx, back_facing_reg)))
+ return r;
+ }
+
+ if (!ctx->use_llvm) {
+ r = select_twoside_color(ctx, i, back_facing_reg);
if (r)
return r;
}
-
- r = select_twoside_color(ctx, i, ni);
- if (r)
- return r;
}
}
return 0;
}
-static int r600_shader_from_tgsi(struct r600_context * rctx, struct r600_pipe_shader *pipeshader)
+static int r600_shader_from_tgsi(struct r600_screen *rscreen,
+ struct r600_pipe_shader *pipeshader,
+ struct r600_shader_key key)
{
struct r600_shader *shader = &pipeshader->shader;
- struct tgsi_token *tokens = pipeshader->tokens;
- struct pipe_stream_output_info so = pipeshader->so;
+ struct tgsi_token *tokens = pipeshader->selector->tokens;
+ struct pipe_stream_output_info so = pipeshader->selector->so;
struct tgsi_full_immediate *immediate;
struct tgsi_full_property *property;
struct r600_shader_ctx ctx;
#endif
ctx.bc = &shader->bc;
ctx.shader = shader;
- ctx.native_integers = (rctx->screen->glsl_feature_level >= 130);
+ ctx.native_integers = true;
- r600_bytecode_init(ctx.bc, rctx->chip_class, rctx->family);
+ r600_bytecode_init(ctx.bc, rscreen->chip_class, rscreen->family,
+ rscreen->msaa_texture_support);
ctx.tokens = tokens;
tgsi_scan_shader(tokens, &ctx.info);
tgsi_parse_init(&ctx.parse, tokens);
ctx.colors_used = 0;
ctx.clip_vertex_write = 0;
- shader->two_side = (ctx.type == TGSI_PROCESSOR_FRAGMENT) && rctx->two_side;
- shader->nr_cbufs = rctx->nr_cbufs;
+ shader->nr_ps_color_exports = 0;
+ shader->nr_ps_max_color_exports = 0;
+
+ shader->two_side = key.color_two_side;
/* register allocations */
/* Values [0,127] correspond to GPR[0..127].
ctx.file_offset[TGSI_FILE_INPUT] = evergreen_gpr_count(&ctx);
}
- /* LLVM backend setup */
#ifdef R600_USE_LLVM
if (use_llvm && ctx.info.indirect_files) {
fprintf(stderr, "Warning: R600 LLVM backend does not support "
"backend.\n");
use_llvm = 0;
}
- if (use_llvm) {
- struct radeon_llvm_context radeon_llvm_ctx;
- LLVMModuleRef mod;
- unsigned dump = 0;
- memset(&radeon_llvm_ctx, 0, sizeof(radeon_llvm_ctx));
- radeon_llvm_ctx.reserved_reg_count = ctx.file_offset[TGSI_FILE_INPUT];
- mod = r600_tgsi_llvm(&radeon_llvm_ctx, tokens);
- if (debug_get_bool_option("R600_DUMP_SHADERS", FALSE)) {
- dump = 1;
- }
- if (r600_llvm_compile(mod, &inst_bytes, &inst_byte_count,
- rctx->family, dump)) {
- FREE(inst_bytes);
- radeon_llvm_dispose(&radeon_llvm_ctx);
- use_llvm = 0;
- fprintf(stderr, "R600 LLVM backend failed to compile "
- "shader. Falling back to TGSI\n");
- } else {
- ctx.file_offset[TGSI_FILE_OUTPUT] =
- ctx.file_offset[TGSI_FILE_INPUT];
- }
- radeon_llvm_dispose(&radeon_llvm_ctx);
- }
#endif
- /* End of LLVM backend setup */
+ ctx.use_llvm = use_llvm;
- if (!use_llvm) {
+ if (use_llvm) {
ctx.file_offset[TGSI_FILE_OUTPUT] =
+ ctx.file_offset[TGSI_FILE_INPUT];
+ } else {
+ ctx.file_offset[TGSI_FILE_OUTPUT] =
ctx.file_offset[TGSI_FILE_INPUT] +
ctx.info.file_max[TGSI_FILE_INPUT] + 1;
}
shader->fs_write_all = TRUE;
break;
case TGSI_PROPERTY_VS_PROHIBIT_UCPS:
- if (property->u[0].Data == 1)
- shader->vs_prohibit_ucps = TRUE;
+ /* we don't need this one */
break;
}
break;
goto out_err;
}
}
+
+ /* Process two side if needed */
+ if (shader->two_side && ctx.colors_used) {
+ int i, count = ctx.shader->ninput;
+ unsigned next_lds_loc = ctx.shader->nlds;
+
+ /* additional inputs will be allocated right after the existing inputs,
+ * we won't need them after the color selection, so we don't need to
+ * reserve these gprs for the rest of the shader code and to adjust
+ * output offsets etc. */
+ int gpr = ctx.file_offset[TGSI_FILE_INPUT] +
+ ctx.info.file_max[TGSI_FILE_INPUT] + 1;
+
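+ /* Two-sided color selection needs the face register; add a FACE
+ * input if the shader does not already provide one. */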
+ if (ctx.face_gpr == -1) {
+ i = ctx.shader->ninput++;
+ ctx.shader->input[i].name = TGSI_SEMANTIC_FACE;
+ ctx.shader->input[i].spi_sid = 0;
+ ctx.shader->input[i].gpr = gpr++;
+ ctx.face_gpr = ctx.shader->input[i].gpr;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (ctx.shader->input[i].name == TGSI_SEMANTIC_COLOR) {
+ int ni = ctx.shader->ninput++;
+ memcpy(&ctx.shader->input[ni],&ctx.shader->input[i], sizeof(struct r600_shader_io));
+ ctx.shader->input[ni].name = TGSI_SEMANTIC_BCOLOR;
+ ctx.shader->input[ni].spi_sid = r600_spi_sid(&ctx.shader->input[ni]);
+ ctx.shader->input[ni].gpr = gpr++;
+ /* TGSI to LLVM needs to know the LDS position of inputs.
+ * The non-LLVM path computes it later (in process_twoside_color). */
+ ctx.shader->input[ni].lds_pos = next_lds_loc++;
+ ctx.shader->input[i].potential_back_facing_reg = ni;
+ }
+ }
+ }
- if (ctx.fragcoord_input >= 0) {
+/* LLVM backend setup */
+#ifdef R600_USE_LLVM
+ if (use_llvm) {
+ struct radeon_llvm_context radeon_llvm_ctx;
+ LLVMModuleRef mod;
+ unsigned dump = 0;
+ memset(&radeon_llvm_ctx, 0, sizeof(radeon_llvm_ctx));
+ radeon_llvm_ctx.reserved_reg_count = ctx.file_offset[TGSI_FILE_INPUT];
+ radeon_llvm_ctx.type = ctx.type;
+ radeon_llvm_ctx.two_side = shader->two_side;
+ radeon_llvm_ctx.face_input = ctx.face_gpr;
+ radeon_llvm_ctx.r600_inputs = ctx.shader->input;
+ radeon_llvm_ctx.r600_outputs = ctx.shader->output;
+ radeon_llvm_ctx.color_buffer_count = MAX2(key.nr_cbufs, 1);
+ radeon_llvm_ctx.chip_class = ctx.bc->chip_class;
+ radeon_llvm_ctx.fs_color_all = shader->fs_write_all && (rscreen->chip_class >= EVERGREEN);
+ mod = r600_tgsi_llvm(&radeon_llvm_ctx, tokens);
+ if (debug_get_bool_option("R600_DUMP_SHADERS", FALSE)) {
+ dump = 1;
+ }
+ if (r600_llvm_compile(mod, &inst_bytes, &inst_byte_count,
+ rscreen->family, dump)) {
+ FREE(inst_bytes);
+ radeon_llvm_dispose(&radeon_llvm_ctx);
+ use_llvm = 0;
+ fprintf(stderr, "R600 LLVM backend failed to compile "
+ "shader. Falling back to TGSI\n");
+ } else {
+ ctx.file_offset[TGSI_FILE_OUTPUT] =
+ ctx.file_offset[TGSI_FILE_INPUT];
+ }
+ radeon_llvm_dispose(&radeon_llvm_ctx);
+ }
+#endif
+/* End of LLVM backend setup */
+
+ if (shader->fs_write_all && rscreen->chip_class >= EVERGREEN)
+ shader->nr_ps_max_color_exports = 8;
+
+ if (ctx.fragcoord_input >= 0 && !use_llvm) {
if (ctx.bc->chip_class == CAYMAN) {
for (j = 0 ; j < 4; j++) {
struct r600_bytecode_alu alu;
break;
case TGSI_PROCESSOR_FRAGMENT:
if (shader->output[i].name == TGSI_SEMANTIC_COLOR) {
+ /* never export more colors than the number of CBs */
+ if (next_pixel_base && next_pixel_base >= key.nr_cbufs) {
+ /* skip export */
+ j--;
+ continue;
+ }
+ output[j].swizzle_w = key.alpha_to_one ? 5 : 3;
output[j].array_base = next_pixel_base++;
output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
- if (shader->fs_write_all && (rctx->chip_class >= EVERGREEN)) {
- for (k = 1; k < shader->nr_cbufs; k++) {
+ shader->nr_ps_color_exports++;
+ if (shader->fs_write_all && (rscreen->chip_class >= EVERGREEN)) {
+ for (k = 1; k < key.nr_cbufs; k++) {
j++;
memset(&output[j], 0, sizeof(struct r600_bytecode_output));
output[j].gpr = shader->output[i].gpr;
output[j].swizzle_x = 0;
output[j].swizzle_y = 1;
output[j].swizzle_z = 2;
- output[j].swizzle_w = 3;
+ output[j].swizzle_w = key.alpha_to_one ? 5 : 3;
output[j].burst_count = 1;
output[j].barrier = 1;
output[j].array_base = next_pixel_base++;
output[j].inst = BC_INST(ctx.bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT);
output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
+ shader->nr_ps_color_exports++;
}
}
} else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
}
/* add fake pixel export */
- if (ctx.type == TGSI_PROCESSOR_FRAGMENT && j == 0) {
+ if (ctx.type == TGSI_PROCESSOR_FRAGMENT && next_pixel_base == 0) {
memset(&output[j], 0, sizeof(struct r600_bytecode_output));
output[j].gpr = 0;
output[j].elem_size = 3;
}
}
/* add output to bytecode */
- for (i = 0; i < noutput; i++) {
- r = r600_bytecode_add_output(ctx.bc, &output[i]);
- if (r)
- goto out_err;
+ if (!use_llvm || ctx.type != TGSI_PROCESSOR_FRAGMENT) {
+ for (i = 0; i < noutput; i++) {
+ r = r600_bytecode_add_output(ctx.bc, &output[i]);
+ if (r)
+ goto out_err;
+ }
}
/* add program end */
if (ctx.bc->chip_class == CAYMAN)
alu.inst = ctx->inst_info->r600_opcode;
for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
r600_bytecode_src(&alu.src[j], &ctx->src[j], 0);
+
+ /* RSQ should take the absolute value of src */
+ if (ctx->inst_info->tgsi_opcode == TGSI_OPCODE_RSQ) {
+ r600_bytecode_src_set_abs(&alu.src[j]);
+ }
}
tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1;
alu.src[0].sel = ctx->temp_reg;
alu.src[0].chan = i;
- if (i == last_inst)
+ if (i == last_inst || alu.inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_UINT)
alu.last = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
unsigned src_gpr;
int r, i, j;
int opcode;
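+ /* TXF from a compressed MSAA surface needs the FMASK (ldfptr) lookup
+ * implemented further down, so its source must be loaded into a GPR. */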
+ bool read_compressed_msaa = ctx->bc->msaa_texture_mode == MSAA_TEXTURE_COMPRESSED &&
+ inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
+ (inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
+ inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA);
/* Texture fetch instructions can only use gprs as source.
* Also they cannot negate the source or take the absolute value */
- const boolean src_requires_loading = tgsi_tex_src_requires_loading(ctx, 0);
+ const boolean src_requires_loading = (inst->Instruction.Opcode != TGSI_OPCODE_TXQ_LZ &&
+ tgsi_tex_src_requires_loading(ctx, 0)) ||
+ read_compressed_msaa;
boolean src_loaded = FALSE;
- unsigned sampler_src_reg = 1;
+ unsigned sampler_src_reg = inst->Instruction.Opcode == TGSI_OPCODE_TXQ_LZ ? 0 : 1;
uint8_t offset_x = 0, offset_y = 0, offset_z = 0;
src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE ||
inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE) &&
- inst->Instruction.Opcode != TGSI_OPCODE_TXQ) {
+ inst->Instruction.Opcode != TGSI_OPCODE_TXQ &&
+ inst->Instruction.Opcode != TGSI_OPCODE_TXQ_LZ) {
static const unsigned src0_swizzle[] = {2, 2, 0, 1};
static const unsigned src1_swizzle[] = {1, 0, 2, 2};
src_gpr = ctx->temp_reg;
}
+ /* Obtain the sample index for reading a compressed MSAA color texture.
+ * To read the FMASK, we use the ldfptr instruction, which tells us
+ * where the samples are stored.
+ * For uncompressed 8x MSAA surfaces, ldfptr should return 0x76543210,
+ * which is the identity mapping. Each nibble says which physical sample
+ * should be fetched to get that sample.
+ *
+ * Assume src.z contains the sample index. It should be modified like this:
+ * src.z = (ldfptr() >> (src.z * 4)) & 0xF;
+ * Then fetch the texel with src.
+ */
+ if (read_compressed_msaa) {
+ unsigned sample_chan = inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ? 3 : 4;
+ unsigned temp = r600_get_temp(ctx);
+ assert(src_loaded);
+
+ /* temp.w = ldfptr() */
+ memset(&tex, 0, sizeof(struct r600_bytecode_tex));
+ tex.inst = SQ_TEX_INST_LD;
+ tex.inst_mod = 1; /* to indicate this is ldfptr */
+ tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
+ tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
+ tex.src_gpr = src_gpr;
+ tex.dst_gpr = temp;
+ tex.dst_sel_x = 7; /* mask out these components */
+ tex.dst_sel_y = 7;
+ tex.dst_sel_z = 7;
+ tex.dst_sel_w = 0; /* store X */
+ tex.src_sel_x = 0;
+ tex.src_sel_y = 1;
+ tex.src_sel_z = 2;
+ tex.src_sel_w = 3;
+ tex.offset_x = offset_x;
+ tex.offset_y = offset_y;
+ tex.offset_z = offset_z;
+ r = r600_bytecode_add_tex(ctx->bc, &tex);
+ if (r)
+ return r;
+
+ /* temp.x = sample_index*4 */
+ if (ctx->bc->chip_class == CAYMAN) {
+ for (i = 0 ; i < 4; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT);
+ alu.src[0].sel = src_gpr;
+ alu.src[0].chan = sample_chan;
+ alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[1].value = 4;
+ alu.dst.sel = temp;
+ alu.dst.chan = i;
+ alu.dst.write = i == 0;
+ if (i == 3)
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ } else {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MULLO_INT);
+ alu.src[0].sel = src_gpr;
+ alu.src[0].chan = sample_chan;
+ alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[1].value = 4;
+ alu.dst.sel = temp;
+ alu.dst.chan = 0;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+
+ /* sample_index = temp.w >> temp.x */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT);
+ alu.src[0].sel = temp;
+ alu.src[0].chan = 3;
+ alu.src[1].sel = temp;
+ alu.src[1].chan = 0;
+ alu.dst.sel = src_gpr;
+ alu.dst.chan = sample_chan;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ /* sample_index & 0xF */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_AND_INT);
+ alu.src[0].sel = src_gpr;
+ alu.src[0].chan = sample_chan;
+ alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[1].value = 0xF;
+ alu.dst.sel = src_gpr;
+ alu.dst.chan = sample_chan;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+#if 0
+ /* visualize the FMASK */
+ for (i = 0; i < 4; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT);
+ alu.src[0].sel = src_gpr;
+ alu.src[0].chan = sample_chan;
+ alu.dst.sel = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ return 0;
+#endif
+ }
+
opcode = ctx->inst_info->r600_opcode;
if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
- if (src_loaded) {
+
+ if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ_LZ) {
+ tex.src_sel_x = 4;
+ tex.src_sel_y = 4;
+ tex.src_sel_z = 4;
+ tex.src_sel_w = 4;
+ } else if (src_loaded) {
tex.src_sel_x = 0;
tex.src_sel_y = 1;
tex.src_sel_z = 2;
alu.dst.sel = ctx->temp_reg;
alu.dst.chan = i;
- if (i == 0)
- alu.dst.write = 1;
- if (i == 2)
- alu.last = 1;
+ alu.dst.write = i == 0;
+ alu.last = i == 2;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
alu.inst = opcode;
- alu.predicate = 1;
+ alu.execute_mask = 1;
+ alu.update_pred = 1;
alu.dst.sel = ctx->temp_reg;
alu.dst.write = 1;
{
struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp];
- sp->mid = (struct r600_bytecode_cf **)realloc((void *)sp->mid,
+ sp->mid = realloc((void *)sp->mid,
sizeof(struct r600_bytecode_cf *) * (sp->num_mid + 1));
sp->mid[sp->num_mid] = ctx->bc->cf_last;
sp->num_mid++;
static void fc_poplevel(struct r600_shader_ctx *ctx)
{
struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp];
- if (sp->mid) {
- free(sp->mid);
- sp->mid = NULL;
- }
+ free(sp->mid);
+ sp->mid = NULL;
sp->num_mid = 0;
sp->start = NULL;
sp->type = 0;
static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
{
- r600_bytecode_add_cfinst(ctx->bc, CTX_INST(V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_NO_AL));
+ /* LOOP_START_DX10 ignores the LOOP_CONFIG* registers, so it is not
+ * limited to 4096 iterations, like the other LOOP_* instructions. */
+ r600_bytecode_add_cfinst(ctx->bc, CTX_INST(V_SQ_CF_WORD1_SQ_CF_INST_LOOP_START_DX10));
fc_pushlevel(ctx, FC_LOOP);
{TGSI_OPCODE_BGNSUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
{TGSI_OPCODE_ENDLOOP, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_endloop},
{TGSI_OPCODE_ENDSUB, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
+ {TGSI_OPCODE_TXQ_LZ, 0, SQ_TEX_INST_GET_TEXTURE_RESINFO, tgsi_tex},
/* gap */
- {103, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
{104, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
{105, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
{106, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
{TGSI_OPCODE_ISGE, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGE_INT, tgsi_op2},
{TGSI_OPCODE_ISHR, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT, tgsi_op2_trans},
{TGSI_OPCODE_ISLT, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SETGT_INT, tgsi_op2_swap},
- {TGSI_OPCODE_F2U, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_UINT, tgsi_op2},
+ {TGSI_OPCODE_F2U, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_UINT, tgsi_op2_trans},
{TGSI_OPCODE_U2F, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_UINT_TO_FLT, tgsi_op2_trans},
{TGSI_OPCODE_UADD, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT, tgsi_op2},
{TGSI_OPCODE_UDIV, 0, V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_udiv},
{TGSI_OPCODE_BGNSUB, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
{TGSI_OPCODE_ENDLOOP, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_endloop},
{TGSI_OPCODE_ENDSUB, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
+ {TGSI_OPCODE_TXQ_LZ, 0, SQ_TEX_INST_GET_TEXTURE_RESINFO, tgsi_tex},
/* gap */
- {103, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
{104, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
{105, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
{106, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
{TGSI_OPCODE_BGNSUB, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
{TGSI_OPCODE_ENDLOOP, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_endloop},
{TGSI_OPCODE_ENDSUB, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
+ {TGSI_OPCODE_TXQ_LZ, 0, SQ_TEX_INST_GET_TEXTURE_RESINFO, tgsi_tex},
/* gap */
- {103, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
{104, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
{105, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},
{106, 0, EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP, tgsi_unsupported},