if (shader->bo == NULL) {
return -ENOMEM;
}
- ptr = r600_buffer_map_sync_with_rings(&rctx->b, shader->bo, PIPE_TRANSFER_WRITE);
+ ptr = r600_buffer_map_sync_with_rings(
+ &rctx->b, shader->bo,
+ PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
if (R600_BIG_ENDIAN) {
for (i = 0; i < shader->shader.bc.ndw; ++i) {
ptr[i] = util_cpu_to_le32(shader->shader.bc.bytecode[i]);
bool dump = r600_can_dump_shader(&rctx->screen->b,
tgsi_get_processor_type(sel->tokens));
unsigned use_sb = !(rctx->screen->b.debug_flags & DBG_NO_SB);
- unsigned sb_disasm = use_sb || (rctx->screen->b.debug_flags & DBG_SB_DISASM);
+ unsigned sb_disasm;
unsigned export_shader;
shader->shader.bc.isa = rctx->isa;
}
use_sb &= (shader->shader.processor_type != PIPE_SHADER_TESS_CTRL);
use_sb &= (shader->shader.processor_type != PIPE_SHADER_TESS_EVAL);
+ use_sb &= (shader->shader.processor_type != PIPE_SHADER_COMPUTE);
/* disable SB for shaders using doubles */
use_sb &= !shader->shader.uses_doubles;
+ use_sb &= !shader->shader.uses_atomics;
+ use_sb &= !shader->shader.uses_images;
+ use_sb &= !shader->shader.uses_helper_invocation;
+
/* Check if the bytecode has already been built. */
if (!shader->shader.bc.bytecode) {
r = r600_bytecode_build(&shader->shader.bc);
}
}
+ sb_disasm = use_sb || (rctx->screen->b.debug_flags & DBG_SB_DISASM);
if (dump && !sb_disasm) {
fprintf(stderr, "--------------------------------------------------------------\n");
r600_bytecode_disasm(&shader->shader.bc);
r600_update_ps_state(ctx, shader);
}
break;
+ case PIPE_SHADER_COMPUTE:
+ evergreen_update_ls_state(ctx, shader);
+ break;
default:
r = -EINVAL;
goto error;
return r;
}
-void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader *shader)
+void r600_pipe_shader_destroy(struct pipe_context *ctx UNUSED, struct r600_pipe_shader *shader)
{
r600_resource_reference(&shader->bo, NULL);
r600_bytecode_clear(&shader->shader.bc);
struct r600_shader_ctx {
struct tgsi_shader_info info;
+ struct tgsi_array_info *array_infos;
+ /* flag for each tgsi temp array if its been spilled or not */
+ bool *spilled_arrays;
struct tgsi_parse_context parse;
const struct tgsi_token *tokens;
unsigned type;
boolean clip_vertex_write;
unsigned cv_output;
unsigned edgeflag_output;
+ int helper_invoc_reg;
+ int cs_block_size_reg;
+ int cs_grid_size_reg;
+ bool cs_block_size_loaded, cs_grid_size_loaded;
int fragcoord_input;
- int native_integers;
int next_ring_offset;
int gs_out_ring_offset;
int gs_next_vertex;
struct r600_shader *gs_for_vs;
int gs_export_gpr_tregs[4];
+ int gs_rotated_input[2];
const struct pipe_stream_output_info *gs_stream_output_info;
unsigned enabled_stream_buffers_mask;
unsigned tess_input_info; /* temp with tess input offsets */
unsigned tess_output_info; /* temp with tess input offsets */
+ unsigned thread_id_gpr; /* temp with thread id calculated for images */
};
struct r600_shader_tgsi_instruction {
static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so, int stream, bool ind);
static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[], eg_shader_tgsi_instruction[], cm_shader_tgsi_instruction[];
static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx);
-static inline void callstack_push(struct r600_shader_ctx *ctx, unsigned reason);
+static inline int callstack_push(struct r600_shader_ctx *ctx, unsigned reason);
static void fc_pushlevel(struct r600_shader_ctx *ctx, int type);
static int tgsi_else(struct r600_shader_ctx *ctx);
static int tgsi_endif(struct r600_shader_ctx *ctx);
const struct r600_shader_src *shader_src,
unsigned chan);
static int do_lds_fetch_values(struct r600_shader_ctx *ctx, unsigned temp_reg,
- unsigned dst_reg);
+ unsigned dst_reg, unsigned mask);
+
+static bool ctx_needs_stack_workaround_8xx(struct r600_shader_ctx *ctx)
+{
+ if (ctx->bc->family == CHIP_HEMLOCK ||
+ ctx->bc->family == CHIP_CYPRESS ||
+ ctx->bc->family == CHIP_JUNIPER)
+ return false;
+ return true;
+}
static int tgsi_last_instruction(unsigned writemask)
{
if (i->Src[j].Register.Dimension) {
switch (i->Src[j].Register.File) {
case TGSI_FILE_CONSTANT:
+ case TGSI_FILE_HW_ATOMIC:
break;
case TGSI_FILE_INPUT:
if (ctx->type == PIPE_SHADER_GEOMETRY ||
int r;
/* validate this for other ops */
- assert(op == ALU_OP3_MULADD_UINT24);
+ assert(op == ALU_OP3_MULADD_UINT24 || op == ALU_OP3_CNDE_INT || op == ALU_OP3_BFE_UINT);
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
alu.op = op;
alu.src[0].sel = src0_sel;
return 0;
}
+static void choose_spill_arrays(struct r600_shader_ctx *ctx, int *regno, unsigned *scratch_space_needed)
+{
+ // pick largest array and spill it, repeat until the number of temps is under limit or we run out of arrays
+ unsigned n = ctx->info.array_max[TGSI_FILE_TEMPORARY];
+ unsigned narrays_left = n;
+ bool *spilled = ctx->spilled_arrays; // assumed calloc:ed
+
+ *scratch_space_needed = 0;
+ while (*regno > 124 && narrays_left) {
+ unsigned i;
+ unsigned largest = 0;
+ unsigned largest_index = 0;
+
+ for (i = 0; i < n; i++) {
+ unsigned size = ctx->array_infos[i].range.Last - ctx->array_infos[i].range.First + 1;
+ if (!spilled[i] && size > largest) {
+ largest = size;
+ largest_index = i;
+ }
+ }
+
+ spilled[largest_index] = true;
+ *regno -= largest;
+ *scratch_space_needed += largest;
+
+ narrays_left --;
+ }
+
+ if (narrays_left == 0) {
+ ctx->info.indirect_files &= ~(1 << TGSI_FILE_TEMPORARY);
+ }
+}
+
+/* Take spilled temp arrays into account when translating tgsi register
+ * indexes into r600 gprs if spilled is false, or scratch array offset if
+ * spilled is true */
+static int map_tgsi_reg_index_to_r600_gpr(struct r600_shader_ctx *ctx, unsigned tgsi_reg_index, bool *spilled)
+{
+ unsigned i;
+ unsigned spilled_size = 0;
+
+ for (i = 0; i < ctx->info.array_max[TGSI_FILE_TEMPORARY]; i++) {
+ if (tgsi_reg_index >= ctx->array_infos[i].range.First && tgsi_reg_index <= ctx->array_infos[i].range.Last) {
+ if (ctx->spilled_arrays[i]) {
+ /* vec4 index into spilled scratch memory */
+ *spilled = true;
+ return tgsi_reg_index - ctx->array_infos[i].range.First + spilled_size;
+ }
+ else {
+ /* regular GPR array */
+ *spilled = false;
+ return tgsi_reg_index - spilled_size + ctx->file_offset[TGSI_FILE_TEMPORARY];
+ }
+ }
+
+ if (tgsi_reg_index < ctx->array_infos[i].range.First)
+ break;
+ if (ctx->spilled_arrays[i]) {
+ spilled_size += ctx->array_infos[i].range.Last - ctx->array_infos[i].range.First + 1;
+ }
+ }
+
+ /* regular GPR index, minus the holes from spilled arrays */
+ *spilled = false;
+
+ return tgsi_reg_index - spilled_size + ctx->file_offset[TGSI_FILE_TEMPORARY];
+}
+
+/* look up spill area base offset and array size for a spilled temp array */
+static void get_spilled_array_base_and_size(struct r600_shader_ctx *ctx, unsigned tgsi_reg_index,
+ unsigned *array_base, unsigned *array_size)
+{
+ unsigned i;
+ unsigned offset = 0;
+
+ for (i = 0; i < ctx->info.array_max[TGSI_FILE_TEMPORARY]; i++) {
+ if (ctx->spilled_arrays[i]) {
+ unsigned size = ctx->array_infos[i].range.Last - ctx->array_infos[i].range.First + 1;
+
+ if (tgsi_reg_index >= ctx->array_infos[i].range.First && tgsi_reg_index <= ctx->array_infos[i].range.Last) {
+ *array_base = offset;
+ *array_size = size - 1; /* hw counts from 1 */
+
+ return;
+ }
+
+ offset += size;
+ }
+ }
+}
+
static int tgsi_declaration(struct r600_shader_ctx *ctx)
{
struct tgsi_full_declaration *d = &ctx->parse.FullToken.FullDeclaration;
ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]);
switch (d->Semantic.Name) {
case TGSI_SEMANTIC_CLIPDIST:
- ctx->shader->clip_dist_write |= d->Declaration.UsageMask <<
- ((d->Semantic.Index + j) << 2);
break;
case TGSI_SEMANTIC_PSIZE:
ctx->shader->vs_out_misc_write = 1;
case TGSI_FILE_TEMPORARY:
if (ctx->info.indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
if (d->Array.ArrayID) {
- r600_add_gpr_array(ctx->shader,
- ctx->file_offset[TGSI_FILE_TEMPORARY] +
- d->Range.First,
- d->Range.Last - d->Range.First + 1, 0x0F);
+ bool spilled;
+ unsigned idx = map_tgsi_reg_index_to_r600_gpr(ctx,
+ d->Range.First,
+ &spilled);
+
+ if (!spilled) {
+ r600_add_gpr_array(ctx->shader, idx,
+ d->Range.Last - d->Range.First + 1, 0x0F);
+ }
}
}
break;
case TGSI_FILE_SAMPLER:
case TGSI_FILE_SAMPLER_VIEW:
case TGSI_FILE_ADDRESS:
+ case TGSI_FILE_BUFFER:
+ case TGSI_FILE_IMAGE:
+ case TGSI_FILE_MEMORY:
+ break;
+
+ case TGSI_FILE_HW_ATOMIC:
+ i = ctx->shader->nhwatomic_ranges;
+ ctx->shader->atomics[i].start = d->Range.First;
+ ctx->shader->atomics[i].end = d->Range.Last;
+ ctx->shader->atomics[i].hw_idx = ctx->shader->atomic_base + ctx->shader->nhwatomic;
+ ctx->shader->atomics[i].array_id = d->Array.ArrayID;
+ ctx->shader->atomics[i].buffer_id = d->Dim.Index2D;
+ ctx->shader->nhwatomic_ranges++;
+ ctx->shader->nhwatomic += count;
break;
case TGSI_FILE_SYSTEM_VALUE:
d->Semantic.Name == TGSI_SEMANTIC_SAMPLEPOS) {
break; /* Already handled from allocate_system_value_inputs */
} else if (d->Semantic.Name == TGSI_SEMANTIC_INSTANCEID) {
- if (!ctx->native_integers) {
- struct r600_bytecode_alu alu;
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-
- alu.op = ALU_OP1_INT_TO_FLT;
- alu.src[0].sel = 0;
- alu.src[0].chan = 3;
-
- alu.dst.sel = 0;
- alu.dst.chan = 3;
- alu.dst.write = 1;
- alu.last = 1;
-
- if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
- return r;
- }
break;
} else if (d->Semantic.Name == TGSI_SEMANTIC_VERTEXID)
break;
if (r)
return r;
- do_lds_fetch_values(ctx, temp_reg, dreg);
+ do_lds_fetch_values(ctx, temp_reg, dreg, 0xf);
}
else if (d->Semantic.Name == TGSI_SEMANTIC_TESSCOORD) {
/* MOV r1.x, r0.x;
{ false, &ctx->fixed_pt_position_gpr, TGSI_SEMANTIC_SAMPLEID, TGSI_SEMANTIC_SAMPLEPOS } /* SAMPLEID is in Fixed Point Position GPR.w */
};
- int i, k, num_regs = 0;
+ int num_regs = 0;
+ unsigned k, i;
if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
return 0;
if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
location = TGSI_INTERPOLATE_LOC_CENTER;
- inputs[1].enabled = true; /* needs SAMPLEID */
} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
location = TGSI_INTERPOLATE_LOC_CENTER;
/* Needs sample positions, currently those are always available */
tgsi_parse_free(&parse);
+ if (ctx->info.reads_samplemask &&
+ (ctx->info.uses_linear_sample || ctx->info.uses_persp_sample)) {
+ inputs[1].enabled = true;
+ }
+
+ if (ctx->bc->chip_class >= EVERGREEN) {
+ int num_baryc = 0;
+ /* assign gpr to each interpolator according to priority */
+ for (i = 0; i < ARRAY_SIZE(ctx->eg_interpolators); i++) {
+ if (ctx->eg_interpolators[i].enabled) {
+ ctx->eg_interpolators[i].ij_index = num_baryc;
+ num_baryc++;
+ }
+ }
+ num_baryc = (num_baryc + 1) >> 1;
+ gpr_offset += num_baryc;
+ }
+
for (i = 0; i < ARRAY_SIZE(inputs); i++) {
boolean enabled = inputs[i].enabled;
int *reg = inputs[i].reg;
* for evergreen we need to scan the shader to find the number of GPRs we need to
* reserve for interpolation and system values
*
- * we need to know if we are going to emit
- * any sample or centroid inputs
+ * we need to know if we are going to emit any sample or centroid inputs
* if perspective and linear are required
*/
static int evergreen_gpr_count(struct r600_shader_ctx *ctx)
{
unsigned i;
- int num_baryc;
- struct tgsi_parse_context parse;
memset(&ctx->eg_interpolators, 0, sizeof(ctx->eg_interpolators));
+ /*
+ * Could get this information from the shader info. But right now
+ * we interpolate all declared inputs, whereas the shader info will
+ * only contain the bits if the inputs are actually used, so it might
+ * not be safe...
+ */
for (i = 0; i < ctx->info.num_inputs; i++) {
int k;
/* skip position/face/mask/sampleid */
ctx->eg_interpolators[k].enabled = TRUE;
}
- if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
- return 0;
- }
-
- /* need to scan shader for system values and interpolateAtSample/Offset/Centroid */
- while (!tgsi_parse_end_of_tokens(&parse)) {
- tgsi_parse_token(&parse);
-
- if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) {
- const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
- if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE ||
- inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
- inst->Instruction.Opcode == TGSI_OPCODE_INTERP_CENTROID)
- {
- int interpolate, location, k;
-
- if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
- location = TGSI_INTERPOLATE_LOC_CENTER;
- } else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
- location = TGSI_INTERPOLATE_LOC_CENTER;
- } else {
- location = TGSI_INTERPOLATE_LOC_CENTROID;
- }
-
- interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index];
- k = eg_get_interpolator_index(interpolate, location);
- if (k >= 0)
- ctx->eg_interpolators[k].enabled = true;
- }
- }
- }
-
- tgsi_parse_free(&parse);
-
- /* assign gpr to each interpolator according to priority */
- num_baryc = 0;
- for (i = 0; i < ARRAY_SIZE(ctx->eg_interpolators); i++) {
- if (ctx->eg_interpolators[i].enabled) {
- ctx->eg_interpolators[i].ij_index = num_baryc;
- num_baryc ++;
- }
- }
-
/* XXX PULL MODEL and LINE STIPPLE */
- num_baryc = (num_baryc + 1) >> 1;
- return allocate_system_value_inputs(ctx, num_baryc);
+ return allocate_system_value_inputs(ctx, 0);
}
/* sample_id_sel == NULL means fetch for current sample */
struct r600_bytecode_vtx vtx;
int r, t1;
- assert(ctx->fixed_pt_position_gpr != -1);
-
t1 = r600_get_temp(ctx);
memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
if (sample_id == NULL) {
+ assert(ctx->fixed_pt_position_gpr != -1);
+
vtx.src_gpr = ctx->fixed_pt_position_gpr; // SAMPLEID is in .w;
vtx.src_sel_x = 3;
}
vtx.num_format_all = 2;
vtx.format_comp_all = 1;
vtx.use_const_fields = 0;
- vtx.offset = 1; // first element is size of buffer
+ vtx.offset = 0;
+ vtx.endian = r600_endian_swap(32);
+ vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */
+
+ r = r600_bytecode_add_vtx(ctx->bc, &vtx);
+ if (r)
+ return r;
+
+ return t1;
+}
+
+static int eg_load_helper_invocation(struct r600_shader_ctx *ctx)
+{
+ int r;
+ struct r600_bytecode_alu alu;
+
+ /* do a vtx fetch with wqm set on the vtx fetch */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ alu.dst.sel = ctx->helper_invoc_reg;
+ alu.dst.chan = 0;
+ alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[0].value = 0xffffffff;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ /* do a vtx fetch in VPM mode */
+ struct r600_bytecode_vtx vtx;
+ memset(&vtx, 0, sizeof(vtx));
+ vtx.op = FETCH_OP_GET_BUFFER_RESINFO;
+ vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
+ vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
+ vtx.src_gpr = 0;
+ vtx.mega_fetch_count = 16; /* no idea here really... */
+ vtx.dst_gpr = ctx->helper_invoc_reg;
+ vtx.dst_sel_x = 4;
+ vtx.dst_sel_y = 7; /* SEL_Y */
+ vtx.dst_sel_z = 7; /* SEL_Z */
+ vtx.dst_sel_w = 7; /* SEL_W */
+ vtx.data_format = FMT_32;
+ if ((r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx)))
+ return r;
+ ctx->bc->cf_last->vpm = 1;
+ return 0;
+}
+
+static int cm_load_helper_invocation(struct r600_shader_ctx *ctx)
+{
+ int r;
+ struct r600_bytecode_alu alu;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ alu.dst.sel = ctx->helper_invoc_reg;
+ alu.dst.chan = 0;
+ alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[0].value = 0xffffffff;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ alu.dst.sel = ctx->helper_invoc_reg;
+ alu.dst.chan = 0;
+ alu.src[0].sel = V_SQ_ALU_SRC_0;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu_type(ctx->bc, &alu, CF_OP_ALU_VALID_PIXEL_MODE);
+ if (r)
+ return r;
+
+ return ctx->helper_invoc_reg;
+}
+
+static int load_block_grid_size(struct r600_shader_ctx *ctx, bool load_block)
+{
+ struct r600_bytecode_vtx vtx;
+ int r, t1;
+
+ if (ctx->cs_block_size_loaded)
+ return ctx->cs_block_size_reg;
+ if (ctx->cs_grid_size_loaded)
+ return ctx->cs_grid_size_reg;
+
+ t1 = load_block ? ctx->cs_block_size_reg : ctx->cs_grid_size_reg;
+ struct r600_bytecode_alu alu;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ alu.src[0].sel = V_SQ_ALU_SRC_0;
+ alu.dst.sel = t1;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
+ vtx.op = FETCH_OP_VFETCH;
+ vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
+ vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
+ vtx.src_gpr = t1;
+ vtx.src_sel_x = 0;
+
+ vtx.mega_fetch_count = 16;
+ vtx.dst_gpr = t1;
+ vtx.dst_sel_x = 0;
+ vtx.dst_sel_y = 1;
+ vtx.dst_sel_z = 2;
+ vtx.dst_sel_w = 7;
+ vtx.data_format = FMT_32_32_32_32;
+ vtx.num_format_all = 1;
+ vtx.format_comp_all = 0;
+ vtx.use_const_fields = 0;
+ vtx.offset = load_block ? 0 : 16; // first element is size of buffer
vtx.endian = r600_endian_swap(32);
vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */
if (r)
return r;
+ if (load_block)
+ ctx->cs_block_size_loaded = true;
+ else
+ ctx->cs_grid_size_loaded = true;
return t1;
}
r600_src->neg = tgsi_src->Register.Negate;
r600_src->abs = tgsi_src->Register.Absolute;
+ if (tgsi_src->Register.File == TGSI_FILE_TEMPORARY) {
+ bool spilled;
+ unsigned idx;
+
+ idx = map_tgsi_reg_index_to_r600_gpr(ctx, tgsi_src->Register.Index, &spilled);
+
+ if (spilled) {
+ int reg = r600_get_temp(ctx);
+ int r;
+
+ r600_src->sel = reg;
+
+ if (ctx->bc->chip_class < R700) {
+ struct r600_bytecode_output cf;
+
+ memset(&cf, 0, sizeof(struct r600_bytecode_output));
+ cf.op = CF_OP_MEM_SCRATCH;
+ cf.elem_size = 3;
+ cf.gpr = reg;
+ cf.comp_mask = 0xF;
+ cf.swizzle_x = 0;
+ cf.swizzle_y = 1;
+ cf.swizzle_z = 2;
+ cf.swizzle_w = 3;
+ cf.burst_count = 1;
+
+ get_spilled_array_base_and_size(ctx, tgsi_src->Register.Index,
+ &cf.array_base, &cf.array_size);
+
+ if (tgsi_src->Register.Indirect) {
+ cf.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ_IND;
+ cf.index_gpr = ctx->bc->ar_reg;
+ }
+ else {
+ cf.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ;
+ cf.array_base += idx;
+ cf.array_size = 0;
+ }
+
+ r = r600_bytecode_add_output(ctx->bc, &cf);
+ }
+ else {
+ struct r600_bytecode_vtx vtx;
+
+ if (r600_bytecode_get_need_wait_ack(ctx->bc)) {
+ r600_bytecode_need_wait_ack(ctx->bc, false);
+ r = r600_bytecode_add_cfinst(ctx->bc, CF_OP_WAIT_ACK);
+ }
+
+ memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
+ vtx.op = FETCH_OP_READ_SCRATCH;
+ vtx.dst_gpr = reg;
+ vtx.uncached = 1; // Must bypass cache since prior spill written in same invocation
+ vtx.elem_size = 3;
+ vtx.data_format = FMT_32_32_32_32;
+ vtx.num_format_all = V_038010_SQ_NUM_FORMAT_INT;
+ vtx.dst_sel_x = tgsi_src->Register.SwizzleX;
+ vtx.dst_sel_y = tgsi_src->Register.SwizzleY;
+ vtx.dst_sel_z = tgsi_src->Register.SwizzleZ;
+ vtx.dst_sel_w = tgsi_src->Register.SwizzleW;
+
+ get_spilled_array_base_and_size(ctx, tgsi_src->Register.Index,
+ &vtx.array_base, &vtx.array_size);
+
+ if (tgsi_src->Register.Indirect) {
+ vtx.indexed = 1;
+ vtx.src_gpr = ctx->bc->ar_reg;
+ }
+ else {
+ vtx.array_base += idx;
+ vtx.array_size = 0;
+ }
+
+ r = r600_bytecode_add_vtx(ctx->bc, &vtx);
+ }
+
+ if (r)
+ return;
+ }
+ else {
+ if (tgsi_src->Register.Indirect)
+ r600_src->rel = V_SQ_REL_RELATIVE;
+
+ r600_src->sel = idx;
+ }
+
+ return;
+ }
+
if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) {
int index;
if ((tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleY) &&
r600_src->swizzle[2] = 0;
r600_src->swizzle[3] = 0;
r600_src->sel = 0;
+ } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_THREAD_ID) {
+ r600_src->sel = 0;
+ } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_BLOCK_ID) {
+ r600_src->sel = 1;
} else if (ctx->type != PIPE_SHADER_TESS_CTRL && ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INVOCATIONID) {
r600_src->swizzle[0] = 3;
r600_src->swizzle[1] = 3;
} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_TESSOUTER) {
r600_src->sel = 2;
} else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_VERTICESIN) {
- if (ctx->type == PIPE_SHADER_TESS_CTRL) {
- r600_src->sel = ctx->tess_input_info;
- r600_src->swizzle[0] = 2;
- r600_src->swizzle[1] = 2;
- r600_src->swizzle[2] = 2;
- r600_src->swizzle[3] = 2;
- } else {
- r600_src->sel = ctx->tess_input_info;
- r600_src->swizzle[0] = 3;
- r600_src->swizzle[1] = 3;
- r600_src->swizzle[2] = 3;
- r600_src->swizzle[3] = 3;
- }
+ r600_src->sel = ctx->tess_input_info;
+ r600_src->swizzle[0] = 2;
+ r600_src->swizzle[1] = 2;
+ r600_src->swizzle[2] = 2;
+ r600_src->swizzle[3] = 2;
} else if (ctx->type == PIPE_SHADER_TESS_CTRL && ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_PRIMID) {
r600_src->sel = 0;
r600_src->swizzle[0] = 0;
r600_src->swizzle[1] = 3;
r600_src->swizzle[2] = 3;
r600_src->swizzle[3] = 3;
+ } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_GRID_SIZE) {
+ r600_src->sel = load_block_grid_size(ctx, false);
+ } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_BLOCK_SIZE) {
+ r600_src->sel = load_block_grid_size(ctx, true);
+ } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_HELPER_INVOCATION) {
+ r600_src->sel = ctx->helper_invoc_reg;
+ r600_src->swizzle[0] = 0;
+ r600_src->swizzle[1] = 0;
+ r600_src->swizzle[2] = 0;
+ r600_src->swizzle[3] = 0;
}
} else {
if (tgsi_src->Register.Indirect)
int r;
unsigned index = src->Register.Index;
unsigned vtx_id = src->Dimension.Index;
- int offset_reg = vtx_id / 3;
+ int offset_reg = ctx->gs_rotated_input[vtx_id / 3];
int offset_chan = vtx_id % 3;
int t2 = 0;
/* offsets of per-vertex data in ESGS ring are passed to GS in R0.x, R0.y,
* R0.w, R1.x, R1.y, R1.z (it seems R0.z is used for PrimitiveID) */
- if (offset_reg == 0 && offset_chan == 2)
+ if (offset_reg == ctx->gs_rotated_input[0] && offset_chan == 2)
offset_chan = 3;
if (src->Dimension.Indirect || src->Register.Indirect)
for (i = 0; i < 3; i++) {
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
alu.op = ALU_OP1_MOV;
- alu.src[0].sel = 0;
+ alu.src[0].sel = ctx->gs_rotated_input[0];
alu.src[0].chan = i == 2 ? 3 : i;
alu.dst.sel = treg[i];
alu.dst.chan = 0;
}
static int do_lds_fetch_values(struct r600_shader_ctx *ctx, unsigned temp_reg,
- unsigned dst_reg)
+ unsigned dst_reg, unsigned mask)
{
struct r600_bytecode_alu alu;
- int r, i;
+ int r, i, lasti;
if ((ctx->bc->cf_last->ndw>>1) >= 0x60)
ctx->bc->force_add_cf = 1;
- for (i = 1; i < 4; i++) {
+
+ lasti = tgsi_last_instruction(mask);
+ for (i = 1; i <= lasti; i++) {
+ if (!(mask & (1 << i)))
+ continue;
+
r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
temp_reg, i,
temp_reg, 0,
if (r)
return r;
}
- for (i = 0; i < 4; i++) {
+ for (i = 0; i <= lasti; i++) {
+ if (!(mask & (1 << i)))
+ continue;
+
/* emit an LDS_READ_RET */
memset(&alu, 0, sizeof(alu));
alu.op = LDS_OP1_LDS_READ_RET;
if (r)
return r;
}
- for (i = 0; i < 4; i++) {
+ for (i = 0; i <= lasti; i++) {
+ if (!(mask & (1 << i)))
+ continue;
+
/* then read from LDS_OQ_A_POP */
memset(&alu, 0, sizeof(alu));
return 0;
}
+static int fetch_mask(struct tgsi_src_register *reg)
+{
+ int mask = 0;
+ mask |= 1 << reg->SwizzleX;
+ mask |= 1 << reg->SwizzleY;
+ mask |= 1 << reg->SwizzleZ;
+ mask |= 1 << reg->SwizzleW;
+ return mask;
+}
+
static int fetch_tes_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg)
{
int r;
if (r)
return r;
- r = do_lds_fetch_values(ctx, temp_reg, dst_reg);
+ r = do_lds_fetch_values(ctx, temp_reg, dst_reg, fetch_mask(&src->Register));
if (r)
return r;
return 0;
if (r)
return r;
- r = do_lds_fetch_values(ctx, temp_reg, dst_reg);
+ r = do_lds_fetch_values(ctx, temp_reg, dst_reg, fetch_mask(&src->Register));
if (r)
return r;
return 0;
if (r)
return r;
- r = do_lds_fetch_values(ctx, temp_reg, dst_reg);
+ r = do_lds_fetch_values(ctx, temp_reg, dst_reg, fetch_mask(&src->Register));
if (r)
return r;
return 0;
}
static int emit_streamout(struct r600_shader_ctx *ctx, struct pipe_stream_output_info *so,
- int stream, unsigned *stream_item_size)
+ int stream, unsigned *stream_item_size UNUSED)
{
unsigned so_gpr[PIPE_MAX_SHADER_OUTPUTS];
unsigned start_comp[PIPE_MAX_SHADER_OUTPUTS];
- int i, j, r;
+ int j, r;
+ unsigned i;
/* Sanity checking. */
if (so->num_outputs > PIPE_MAX_SO_OUTPUTS) {
for (i = 0; i < so->num_outputs; i++) {
struct r600_bytecode_output output;
- if (stream != -1 && stream != so->output[i].output_buffer)
+ if (stream != -1 && stream != so->output[i].stream)
continue;
memset(&output, 0, sizeof(struct r600_bytecode_output));
struct r600_shader_ctx ctx = {};
struct r600_shader *gs_shader = &gs->shader;
struct r600_pipe_shader *cshader;
- int ocnt = gs_shader->noutput;
+ unsigned ocnt = gs_shader->noutput;
struct r600_bytecode_alu alu;
struct r600_bytecode_vtx vtx;
struct r600_bytecode_output output;
struct r600_bytecode_cf *cf_jump, *cf_pop,
*last_exp_pos = NULL, *last_exp_param = NULL;
- int i, j, next_clip_pos = 61, next_param = 0;
+ int next_clip_pos = 61, next_param = 0;
+ unsigned i, j;
int ring;
bool only_ring_0 = true;
cshader = calloc(1, sizeof(struct r600_pipe_shader));
/* spi_sid is 0 for clipdistance outputs that were generated
* for clipvertex - we don't need to pass them to PS */
ctx.shader->clip_dist_write = gs->shader.clip_dist_write;
+ ctx.shader->cull_dist_write = gs->shader.cull_dist_write;
+ ctx.shader->cc_dist_mask = gs->shader.cc_dist_mask;
if (out->spi_sid) {
/* duplicate it as PARAM to pass to the pixel shader */
output.array_base = next_param++;
return 0;
}
-static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so, int stream, bool ind)
+static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so UNUSED, int stream, bool ind)
{
struct r600_bytecode_output output;
- int i, k, ring_offset;
+ int ring_offset;
+ unsigned i, k;
int effective_stream = stream == -1 ? 0 : stream;
int idx = 0;
static int emit_lds_vs_writes(struct r600_shader_ctx *ctx)
{
- int i, j, r;
+ int j, r;
int temp_reg;
+ unsigned i;
/* fetch tcs input values into input_vals */
ctx->tess_input_info = r600_get_temp(ctx);
}
static int r600_tess_factor_read(struct r600_shader_ctx *ctx,
- int output_idx)
+ int output_idx, int nc)
{
int param;
unsigned temp_reg = r600_get_temp(ctx);
if (r)
return r;
- r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
- temp_reg, 0,
- temp_reg, 0,
- V_SQ_ALU_SRC_LITERAL, param * 16);
- if (r)
- return r;
-
- do_lds_fetch_values(ctx, temp_reg, dreg);
- return 0;
-}
+ if (param) {
+ r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
+ temp_reg, 0,
+ temp_reg, 0,
+ V_SQ_ALU_SRC_LITERAL, param * 16);
+ if (r)
+ return r;
+ }
+
+ do_lds_fetch_values(ctx, temp_reg, dreg, ((1u << nc) - 1));
+ return 0;
+}
static int r600_emit_tess_factor(struct r600_shader_ctx *ctx)
{
- unsigned i;
int stride, outer_comps, inner_comps;
int tessinner_idx = -1, tessouter_idx = -1;
- int r;
+ int i, r;
+ unsigned j;
int temp_reg = r600_get_temp(ctx);
int treg[3] = {-1, -1, -1};
struct r600_bytecode_alu alu;
/* R0 is InvocationID, RelPatchID, PatchID, tf_base */
/* TF_WRITE takes index in R.x, value in R.y */
- for (i = 0; i < ctx->shader->noutput; i++) {
- if (ctx->shader->output[i].name == TGSI_SEMANTIC_TESSINNER)
- tessinner_idx = i;
- if (ctx->shader->output[i].name == TGSI_SEMANTIC_TESSOUTER)
- tessouter_idx = i;
+ for (j = 0; j < ctx->shader->noutput; j++) {
+ if (ctx->shader->output[j].name == TGSI_SEMANTIC_TESSINNER)
+ tessinner_idx = j;
+ if (ctx->shader->output[j].name == TGSI_SEMANTIC_TESSOUTER)
+ tessouter_idx = j;
}
if (tessouter_idx == -1)
return -1;
if (tessouter_idx != -1) {
- r = r600_tess_factor_read(ctx, tessouter_idx);
+ r = r600_tess_factor_read(ctx, tessouter_idx, outer_comps);
if (r)
return r;
}
if (tessinner_idx != -1) {
- r = r600_tess_factor_read(ctx, tessinner_idx);
+ r = r600_tess_factor_read(ctx, tessinner_idx, inner_comps);
if (r)
return r;
}
int out_idx = i >= outer_comps ? tessinner_idx : tessouter_idx;
int out_comp = i >= outer_comps ? i - outer_comps : i;
+ if (ctx->shader->tcs_prim_mode == PIPE_PRIM_LINES) {
+ if (out_comp == 1)
+ out_comp = 0;
+ else if (out_comp == 0)
+ out_comp = 1;
+ }
+
r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
treg[i / 2], (2 * (i % 2)),
temp_reg, 0,
return 0;
}
+/*
+ * We have to work out the thread ID for load and atomic
+ * operations, which store the returned value to an index
+ * in an intermediate buffer.
+ * The index is calculated by taking the thread id,
+ * calculated from the MBCNT instructions.
+ * Then the shader engine ID is multiplied by 256,
+ * and the wave id is added.
+ * Then the result is multiplied by 64 and thread id is
+ * added.
+ */
+static int load_thread_id_gpr(struct r600_shader_ctx *ctx)
+{
+ struct r600_bytecode_alu alu;
+ int r;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MBCNT_32LO_ACCUM_PREV_INT;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 0;
+ alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[0].value = 0xffffffff;
+ alu.dst.write = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MBCNT_32HI_INT;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 1;
+ alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[0].value = 0xffffffff;
+ alu.dst.write = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP3_MULADD_UINT24;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 2;
+ alu.src[0].sel = EG_V_SQ_ALU_SRC_SE_ID;
+ alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[1].value = 256;
+ alu.src[2].sel = EG_V_SQ_ALU_SRC_HW_WAVE_ID;
+ alu.dst.write = 1;
+ alu.is_op3 = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24,
+ ctx->thread_id_gpr, 1,
+ ctx->temp_reg, 2,
+ V_SQ_ALU_SRC_LITERAL, 0x40,
+ ctx->temp_reg, 0);
+ if (r)
+ return r;
+ return 0;
+}
+
static int r600_shader_from_tgsi(struct r600_context *rctx,
struct r600_pipe_shader *pipeshader,
union r600_shader_key key)
struct r600_bytecode_output output[ARRAY_SIZE(shader->output)];
unsigned output_done, noutput;
unsigned opcode;
- int i, j, k, r = 0;
+ int j, k, r = 0;
+ unsigned i;
int next_param_base = 0, next_clip_base;
int max_color_exports = MAX2(key.ps.nr_cbufs, 1);
bool indirect_gprs;
ctx.bc = &shader->bc;
ctx.shader = shader;
- ctx.native_integers = true;
r600_bytecode_init(ctx.bc, rscreen->b.chip_class, rscreen->b.family,
rscreen->has_compressed_msaa_texturing);
tgsi_scan_shader(tokens, &ctx.info);
shader->indirect_files = ctx.info.indirect_files;
+ int narrays = ctx.info.array_max[TGSI_FILE_TEMPORARY];
+ ctx.array_infos = calloc(narrays, sizeof(*ctx.array_infos));
+ ctx.spilled_arrays = calloc(narrays, sizeof(bool));
+ tgsi_scan_arrays(tokens, TGSI_FILE_TEMPORARY, narrays, ctx.array_infos);
+
+ shader->uses_helper_invocation = false;
shader->uses_doubles = ctx.info.uses_doubles;
+ shader->uses_atomics = ctx.info.file_mask[TGSI_FILE_HW_ATOMIC];
shader->nsys_inputs = 0;
+ shader->uses_images = ctx.info.file_count[TGSI_FILE_IMAGE] > 0 ||
+ ctx.info.file_count[TGSI_FILE_BUFFER] > 0;
indirect_gprs = ctx.info.indirect_files & ~((1 << TGSI_FILE_CONSTANT) | (1 << TGSI_FILE_SAMPLER));
tgsi_parse_init(&ctx.parse, tokens);
ctx.type = ctx.info.processor;
shader->vs_as_gs_a = key.vs.as_gs_a;
shader->vs_as_es = key.vs.as_es;
shader->vs_as_ls = key.vs.as_ls;
+ shader->atomic_base = key.vs.first_atomic_counter;
if (shader->vs_as_es)
ring_outputs = true;
if (shader->vs_as_ls)
break;
case PIPE_SHADER_GEOMETRY:
ring_outputs = true;
+ shader->atomic_base = key.gs.first_atomic_counter;
+ shader->gs_tri_strip_adj_fix = key.gs.tri_strip_adj_fix;
break;
case PIPE_SHADER_TESS_CTRL:
shader->tcs_prim_mode = key.tcs.prim_mode;
+ shader->atomic_base = key.tcs.first_atomic_counter;
lds_outputs = true;
lds_inputs = true;
break;
case PIPE_SHADER_TESS_EVAL:
shader->tes_as_es = key.tes.as_es;
+ shader->atomic_base = key.tes.first_atomic_counter;
lds_inputs = true;
if (shader->tes_as_es)
ring_outputs = true;
break;
case PIPE_SHADER_FRAGMENT:
shader->two_side = key.ps.color_two_side;
+ shader->atomic_base = key.ps.first_atomic_counter;
+ shader->rat_base = key.ps.nr_cbufs;
+ shader->image_size_const_offset = key.ps.image_size_const_offset;
+ break;
+ case PIPE_SHADER_COMPUTE:
+ shader->rat_base = 0;
+ shader->image_size_const_offset = ctx.info.file_count[TGSI_FILE_SAMPLER];
break;
default:
break;
ctx.gs_next_vertex = 0;
ctx.gs_stream_output_info = &so;
+ ctx.thread_id_gpr = -1;
ctx.face_gpr = -1;
ctx.fixed_pt_position_gpr = -1;
ctx.fragcoord_input = -1;
ctx.colors_used = 0;
ctx.clip_vertex_write = 0;
+ ctx.helper_invoc_reg = -1;
+ ctx.cs_block_size_reg = -1;
+ ctx.cs_grid_size_reg = -1;
+ ctx.cs_block_size_loaded = false;
+ ctx.cs_grid_size_loaded = false;
+
shader->nr_ps_color_exports = 0;
shader->nr_ps_max_color_exports = 0;
ctx.file_offset[i] = 0;
}
- if (ctx.type == PIPE_SHADER_VERTEX) {
+ if (ctx.type == PIPE_SHADER_VERTEX) {
+
ctx.file_offset[TGSI_FILE_INPUT] = 1;
- r600_bytecode_add_cfinst(ctx.bc, CF_OP_CALL_FS);
+ if (ctx.info.num_inputs)
+ r600_bytecode_add_cfinst(ctx.bc, CF_OP_CALL_FS);
}
if (ctx.type == PIPE_SHADER_FRAGMENT) {
if (ctx.bc->chip_class >= EVERGREEN)
ctx.file_offset[TGSI_FILE_INPUT] = evergreen_gpr_count(&ctx);
else
ctx.file_offset[TGSI_FILE_INPUT] = allocate_system_value_inputs(&ctx, ctx.file_offset[TGSI_FILE_INPUT]);
+
+ for (i = 0; i < PIPE_MAX_SHADER_INPUTS; i++) {
+ if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_HELPER_INVOCATION) {
+ ctx.helper_invoc_reg = ctx.file_offset[TGSI_FILE_INPUT]++;
+ shader->uses_helper_invocation = true;
+ }
+ }
}
if (ctx.type == PIPE_SHADER_GEOMETRY) {
/* FIXME 1 would be enough in some cases (3 or less input vertices) */
if (add_tess_inout)
ctx.file_offset[TGSI_FILE_INPUT]+=2;
}
+ if (ctx.type == PIPE_SHADER_COMPUTE) {
+ ctx.file_offset[TGSI_FILE_INPUT] = 2;
+ for (i = 0; i < PIPE_MAX_SHADER_INPUTS; i++) {
+ if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_GRID_SIZE)
+ ctx.cs_grid_size_reg = ctx.file_offset[TGSI_FILE_INPUT]++;
+ if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_BLOCK_SIZE)
+ ctx.cs_block_size_reg = ctx.file_offset[TGSI_FILE_INPUT]++;
+ }
+ }
ctx.file_offset[TGSI_FILE_OUTPUT] =
ctx.file_offset[TGSI_FILE_INPUT] +
/* Outside the GPR range. This will be translated to one of the
* kcache banks later. */
ctx.file_offset[TGSI_FILE_CONSTANT] = 512;
-
ctx.file_offset[TGSI_FILE_IMMEDIATE] = V_SQ_ALU_SRC_LITERAL;
- ctx.bc->ar_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
- ctx.info.file_max[TGSI_FILE_TEMPORARY] + 1;
- ctx.bc->index_reg[0] = ctx.bc->ar_reg + 1;
- ctx.bc->index_reg[1] = ctx.bc->ar_reg + 2;
+
+ pipeshader->scratch_space_needed = 0;
+ int regno = ctx.file_offset[TGSI_FILE_TEMPORARY] +
+ ctx.info.file_max[TGSI_FILE_TEMPORARY];
+ if (regno > 124) {
+ choose_spill_arrays(&ctx, ®no, &pipeshader->scratch_space_needed);
+ shader->indirect_files = ctx.info.indirect_files;
+ }
+ shader->needs_scratch_space = pipeshader->scratch_space_needed != 0;
+
+ ctx.bc->ar_reg = ++regno;
+ ctx.bc->index_reg[0] = ++regno;
+ ctx.bc->index_reg[1] = ++regno;
if (ctx.type == PIPE_SHADER_TESS_CTRL) {
- ctx.tess_input_info = ctx.bc->ar_reg + 3;
- ctx.tess_output_info = ctx.bc->ar_reg + 4;
- ctx.temp_reg = ctx.bc->ar_reg + 5;
+ ctx.tess_input_info = ++regno;
+ ctx.tess_output_info = ++regno;
} else if (ctx.type == PIPE_SHADER_TESS_EVAL) {
- ctx.tess_input_info = 0;
- ctx.tess_output_info = ctx.bc->ar_reg + 3;
- ctx.temp_reg = ctx.bc->ar_reg + 4;
+ ctx.tess_input_info = ++regno;
+ ctx.tess_output_info = ++regno;
} else if (ctx.type == PIPE_SHADER_GEOMETRY) {
- ctx.gs_export_gpr_tregs[0] = ctx.bc->ar_reg + 3;
- ctx.gs_export_gpr_tregs[1] = ctx.bc->ar_reg + 4;
- ctx.gs_export_gpr_tregs[2] = ctx.bc->ar_reg + 5;
- ctx.gs_export_gpr_tregs[3] = ctx.bc->ar_reg + 6;
- ctx.temp_reg = ctx.bc->ar_reg + 7;
- } else {
- ctx.temp_reg = ctx.bc->ar_reg + 3;
+ ctx.gs_export_gpr_tregs[0] = ++regno;
+ ctx.gs_export_gpr_tregs[1] = ++regno;
+ ctx.gs_export_gpr_tregs[2] = ++regno;
+ ctx.gs_export_gpr_tregs[3] = ++regno;
+ if (ctx.shader->gs_tri_strip_adj_fix) {
+ ctx.gs_rotated_input[0] = ++regno;
+ ctx.gs_rotated_input[1] = ++regno;
+ } else {
+ ctx.gs_rotated_input[0] = 0;
+ ctx.gs_rotated_input[1] = 1;
+ }
}
+ if (shader->uses_images) {
+ ctx.thread_id_gpr = ++regno;
+ }
+ ctx.temp_reg = ++regno;
+
shader->max_arrays = 0;
shader->num_arrays = 0;
if (indirect_gprs) {
ctx.nliterals = 0;
ctx.literals = NULL;
+ ctx.max_driver_temp_used = 0;
shader->fs_write_all = ctx.info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] &&
ctx.info.colors_written == 1;
shader->vs_position_window_space = ctx.info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION];
shader->ps_conservative_z = (uint8_t)ctx.info.properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT];
+ if (ctx.type == PIPE_SHADER_VERTEX ||
+ ctx.type == PIPE_SHADER_GEOMETRY ||
+ ctx.type == PIPE_SHADER_TESS_EVAL) {
+ shader->cc_dist_mask = (1 << (ctx.info.properties[TGSI_PROPERTY_NUM_CULLDIST_ENABLED] +
+ ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED])) - 1;
+ shader->clip_dist_write = (1 << ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED]) - 1;
+ shader->cull_dist_write = ((1 << ctx.info.properties[TGSI_PROPERTY_NUM_CULLDIST_ENABLED]) - 1) << ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED];
+ }
+
if (shader->vs_as_gs_a)
vs_add_primid_output(&ctx, key.vs.prim_id_out);
+ if (ctx.thread_id_gpr != -1) {
+ r = load_thread_id_gpr(&ctx);
+ if (r)
+ return r;
+ }
+
if (ctx.type == PIPE_SHADER_TESS_EVAL)
r600_fetch_tess_io_info(&ctx);
if (shader->fs_write_all && rscreen->b.chip_class >= EVERGREEN)
shader->nr_ps_max_color_exports = 8;
+ if (ctx.shader->uses_helper_invocation) {
+ if (ctx.bc->chip_class == CAYMAN)
+ r = cm_load_helper_invocation(&ctx);
+ else
+ r = eg_load_helper_invocation(&ctx);
+ if (r)
+ return r;
+ }
+
+ /*
+ * XXX this relies on fixed_pt_position_gpr only being present when
+ * this shader should be executed per sample. Should be the case for now...
+ */
+ if (ctx.fixed_pt_position_gpr != -1 && ctx.info.reads_samplemask) {
+ /*
+ * Fix up sample mask. The hw always gives us coverage mask for
+ * the pixel. However, for per-sample shading, we need the
+ * coverage for the shader invocation only.
+ * Also, with disabled msaa, only the first bit should be set
+ * (luckily the same fixup works for both problems).
+ * For now, we can only do it if we know this shader is always
+ * executed per sample (due to usage of bits in the shader
+ * forcing per-sample execution).
+ * If the fb is not multisampled, we'd do unnecessary work but
+ * it should still be correct.
+ * It will however do nothing for sample shading according
+ * to MinSampleShading.
+ */
+ struct r600_bytecode_alu alu;
+ int tmp = r600_get_temp(&ctx);
+ assert(ctx.face_gpr != -1);
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
+ alu.op = ALU_OP2_LSHL_INT;
+ alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[0].value = 0x1;
+ alu.src[1].sel = ctx.fixed_pt_position_gpr;
+ alu.src[1].chan = 3;
+ alu.dst.sel = tmp;
+ alu.dst.chan = 0;
+ alu.dst.write = 1;
+ alu.last = 1;
+ if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
+ return r;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_AND_INT;
+ alu.src[0].sel = tmp;
+ alu.src[1].sel = ctx.face_gpr;
+ alu.src[1].chan = 2;
+ alu.dst.sel = ctx.face_gpr;
+ alu.dst.chan = 2;
+ alu.dst.write = 1;
+ alu.last = 1;
+ if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
+ return r;
+ }
+
if (ctx.fragcoord_input >= 0) {
if (ctx.bc->chip_class == CAYMAN) {
for (j = 0 ; j < 4; j++) {
alu.dst.sel = shader->input[ctx.fragcoord_input].gpr;
alu.dst.chan = j;
alu.dst.write = (j == 3);
- alu.last = 1;
+ alu.last = (j == 3);
if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
return r;
}
if (r)
return r;
}
+
+ if (ctx.shader->gs_tri_strip_adj_fix) {
+ r = single_alu_op2(&ctx, ALU_OP2_AND_INT,
+ ctx.gs_rotated_input[0], 2,
+ 0, 2,
+ V_SQ_ALU_SRC_LITERAL, 1);
+ if (r)
+ return r;
+
+ for (i = 0; i < 6; i++) {
+ int rotated = (i + 4) % 6;
+ int offset_reg = i / 3;
+ int offset_chan = i % 3;
+ int rotated_offset_reg = rotated / 3;
+ int rotated_offset_chan = rotated % 3;
+
+ if (offset_reg == 0 && offset_chan == 2)
+ offset_chan = 3;
+ if (rotated_offset_reg == 0 && rotated_offset_chan == 2)
+ rotated_offset_chan = 3;
+
+ r = single_alu_op3(&ctx, ALU_OP3_CNDE_INT,
+ ctx.gs_rotated_input[offset_reg], offset_chan,
+ ctx.gs_rotated_input[0], 2,
+ offset_reg, offset_chan,
+ rotated_offset_reg, rotated_offset_chan);
+ if (r)
+ return r;
+ }
+ }
}
if (ctx.type == PIPE_SHADER_TESS_CTRL)
ctx.inst_info = &eg_shader_tgsi_instruction[opcode];
else
ctx.inst_info = &r600_shader_tgsi_instruction[opcode];
+
+ ctx.bc->precise |= ctx.parse.FullToken.FullInstruction.Instruction.Precise;
+
r = ctx.inst_info->process(&ctx);
if (r)
goto out_err;
shader->output[ctx.cv_output].spi_sid = 0;
shader->clip_dist_write = 0xFF;
+ shader->cc_dist_mask = 0xFF;
for (i = 0; i < 8; i++) {
int oreg = i >> 2;
output[j].swizzle_z = 2;
output[j].swizzle_w = 3;
output[j].burst_count = 1;
- output[j].type = -1;
+ output[j].type = 0xffffffff;
output[j].op = CF_OP_EXPORT;
switch (ctx.type) {
case PIPE_SHADER_VERTEX:
output[j].array_base = shader->output[i].sid;
output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
shader->nr_ps_color_exports++;
+ shader->ps_color_export_mask |= (0xf << (shader->output[i].sid * 4));
+
+ /* If the i-th target format is set, all previous target formats must
+ * be non-zero to avoid hangs. - from radeonsi, seems to apply to eg as well.
+ */
+ if (shader->output[i].sid > 0)
+ for (unsigned x = 0; x < shader->output[i].sid; x++)
+ shader->ps_color_export_mask |= (1 << (x*4));
+
+ if (shader->output[i].sid > shader->ps_export_highest)
+ shader->ps_export_highest = shader->output[i].sid;
if (shader->fs_write_all && (rscreen->b.chip_class >= EVERGREEN)) {
for (k = 1; k < max_color_exports; k++) {
j++;
output[j].op = CF_OP_EXPORT;
output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL;
shader->nr_ps_color_exports++;
+ if (k > shader->ps_export_highest)
+ shader->ps_export_highest = k;
+ shader->ps_color_export_mask |= (0xf << (j * 4));
}
}
} else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) {
goto out_err;
}
- if (output[j].type==-1) {
+ if (output[j].type == 0xffffffff) {
output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM;
output[j].array_base = next_param_base++;
}
output[j].op = CF_OP_EXPORT;
j++;
shader->nr_ps_color_exports++;
+ shader->ps_color_export_mask = 0xf;
}
noutput = j;
/* set export done on last export of each type */
- for (i = noutput - 1, output_done = 0; i >= 0; i--) {
- if (!(output_done & (1 << output[i].type))) {
- output_done |= (1 << output[i].type);
- output[i].op = CF_OP_EXPORT_DONE;
+ for (k = noutput - 1, output_done = 0; k >= 0; k--) {
+ if (!(output_done & (1 << output[k].type))) {
+ output_done |= (1 << output[k].type);
+ output[k].op = CF_OP_EXPORT_DONE;
}
}
/* add output to bytecode */
last = r600_isa_cf(ctx.bc->cf_last->op);
/* alu clause instructions don't have EOP bit, so add NOP */
- if (!last || last->flags & CF_ALU || ctx.bc->cf_last->op == CF_OP_LOOP_END || ctx.bc->cf_last->op == CF_OP_CALL_FS || ctx.bc->cf_last->op == CF_OP_POP || ctx.bc->cf_last->op == CF_OP_GDS)
+ if (!last || last->flags & CF_ALU || ctx.bc->cf_last->op == CF_OP_LOOP_END || ctx.bc->cf_last->op == CF_OP_POP)
r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP);
ctx.bc->cf_last->end_of_program = 1;
return r;
}
+ free(ctx.spilled_arrays);
+ free(ctx.array_infos);
free(ctx.literals);
tgsi_parse_free(&ctx.parse);
return 0;
out_err:
+ free(ctx.spilled_arrays);
+ free(ctx.array_infos);
free(ctx.literals);
tgsi_parse_free(&ctx.parse);
return r;
return -EINVAL;
}
-static int tgsi_end(struct r600_shader_ctx *ctx)
+static int tgsi_end(struct r600_shader_ctx *ctx UNUSED)
{
return 0;
}
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
- r600_dst->sel = tgsi_dst->Register.Index;
- r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
+ if (tgsi_dst->Register.File == TGSI_FILE_TEMPORARY) {
+ bool spilled;
+ unsigned idx;
+
+ idx = map_tgsi_reg_index_to_r600_gpr(ctx, tgsi_dst->Register.Index, &spilled);
+
+ if (spilled) {
+ struct r600_bytecode_output cf;
+ int reg = 0;
+ int r;
+ bool add_pending_output = true;
+
+ memset(&cf, 0, sizeof(struct r600_bytecode_output));
+ get_spilled_array_base_and_size(ctx, tgsi_dst->Register.Index,
+ &cf.array_base, &cf.array_size);
+
+ /* If no component has spilled, reserve a register and add the spill code
+ * ctx->bc->n_pending_outputs is cleared after each instruction group */
+ if (ctx->bc->n_pending_outputs == 0) {
+ reg = r600_get_temp(ctx);
+ } else {
+ /* If we are already spilling and the output address is the same as
+ * before then just reuse the same slot */
+ struct r600_bytecode_output *tmpl = &ctx->bc->pending_outputs[ctx->bc->n_pending_outputs-1];
+ if ((cf.array_base + idx == tmpl->array_base) ||
+ (cf.array_base == tmpl->array_base &&
+ tmpl->index_gpr == ctx->bc->ar_reg &&
+ tgsi_dst->Register.Indirect)) {
+ reg = ctx->bc->pending_outputs[0].gpr;
+ add_pending_output = false;
+ } else {
+ reg = r600_get_temp(ctx);
+ }
+ }
+
+ r600_dst->sel = reg;
+ r600_dst->chan = swizzle;
+ r600_dst->write = 1;
+ if (inst->Instruction.Saturate) {
+ r600_dst->clamp = 1;
+ }
+
+ /* Add new outputs as pending */
+ if (add_pending_output) {
+ cf.op = CF_OP_MEM_SCRATCH;
+ cf.elem_size = 3;
+ cf.gpr = reg;
+ cf.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE;
+ cf.mark = 1;
+ cf.comp_mask = inst->Dst[0].Register.WriteMask;
+ cf.swizzle_x = 0;
+ cf.swizzle_y = 1;
+ cf.swizzle_z = 2;
+ cf.swizzle_w = 3;
+ cf.burst_count = 1;
+
+ if (tgsi_dst->Register.Indirect) {
+ if (ctx->bc->chip_class < R700)
+ cf.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
+ else
+ cf.type = 3; // V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND_ACK;
+ cf.index_gpr = ctx->bc->ar_reg;
+ }
+ else {
+ cf.array_base += idx;
+ cf.array_size = 0;
+ }
+
+ r = r600_bytecode_add_pending_output(ctx->bc, &cf);
+ if (r)
+ return;
+
+ if (ctx->bc->chip_class >= R700)
+ r600_bytecode_need_wait_ack(ctx->bc, true);
+ }
+ return;
+ }
+ else {
+ r600_dst->sel = idx;
+ }
+ }
+ else {
+ r600_dst->sel = tgsi_dst->Register.Index;
+ r600_dst->sel += ctx->file_offset[tgsi_dst->Register.File];
+ }
r600_dst->chan = swizzle;
r600_dst->write = 1;
if (inst->Instruction.Saturate) {
}
-static int tgsi_op2_64_params(struct r600_shader_ctx *ctx, bool singledest, bool swap)
+static int tgsi_op2_64_params(struct r600_shader_ctx *ctx, bool singledest, bool swap, int dest_temp, int op_override)
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
unsigned write_mask = inst->Dst[0].Register.WriteMask;
struct r600_bytecode_alu alu;
int i, j, r, lasti = tgsi_last_instruction(write_mask);
int use_tmp = 0;
+ int swizzle_x = inst->Src[0].Register.SwizzleX;
if (singledest) {
switch (write_mask) {
case 0x1:
- write_mask = 0x3;
+ if (swizzle_x == 2) {
+ write_mask = 0xc;
+ use_tmp = 3;
+ } else
+ write_mask = 0x3;
break;
case 0x2:
- use_tmp = 1;
- write_mask = 0x3;
+ if (swizzle_x == 2) {
+ write_mask = 0xc;
+ use_tmp = 3;
+ } else {
+ write_mask = 0x3;
+ use_tmp = 1;
+ }
break;
case 0x4:
- write_mask = 0xc;
+ if (swizzle_x == 0) {
+ write_mask = 0x3;
+ use_tmp = 1;
+ } else
+ write_mask = 0xc;
break;
case 0x8:
- write_mask = 0xc;
- use_tmp = 3;
+ if (swizzle_x == 0) {
+ write_mask = 0x3;
+ use_tmp = 1;
+ } else {
+ write_mask = 0xc;
+ use_tmp = 3;
+ }
break;
}
}
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
if (singledest) {
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
- if (use_tmp) {
- alu.dst.sel = ctx->temp_reg;
+ if (use_tmp || dest_temp) {
+ alu.dst.sel = use_tmp ? ctx->temp_reg : dest_temp;
alu.dst.chan = i;
alu.dst.write = 1;
+ } else {
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
}
if (i == 1 || i == 3)
alu.dst.write = 0;
} else
tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
- alu.op = ctx->inst_info->op;
+ alu.op = op_override ? op_override : ctx->inst_info->op;
if (ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DABS) {
r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
} else if (!swap) {
if (use_tmp) {
write_mask = inst->Dst[0].Register.WriteMask;
+ lasti = tgsi_last_instruction(write_mask);
/* move result from temp to dst */
for (i = 0; i <= lasti; i++) {
if (!(write_mask & (1 << i)))
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
alu.op = ALU_OP1_MOV;
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+
+ if (dest_temp) {
+ alu.dst.sel = dest_temp;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ } else
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
alu.src[0].sel = ctx->temp_reg;
alu.src[0].chan = use_tmp - 1;
alu.last = (i == lasti);
fprintf(stderr, "illegal writemask for 64-bit: 0x%x\n", write_mask);
return -1;
}
- return tgsi_op2_64_params(ctx, false, false);
+ return tgsi_op2_64_params(ctx, false, false, 0, 0);
}
static int tgsi_op2_64_single_dest(struct r600_shader_ctx *ctx)
{
- return tgsi_op2_64_params(ctx, true, false);
+ return tgsi_op2_64_params(ctx, true, false, 0, 0);
}
static int tgsi_op2_64_single_dest_s(struct r600_shader_ctx *ctx)
{
- return tgsi_op2_64_params(ctx, true, true);
+ return tgsi_op2_64_params(ctx, true, true, 0, 0);
}
static int tgsi_op3_64(struct r600_shader_ctx *ctx)
struct r600_bytecode_alu alu;
unsigned write_mask = inst->Dst[0].Register.WriteMask;
int i, j, r;
- int firsti = write_mask == 0xc ? 2 : 0;
for (i = 0; i <= 3; i++) {
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
return r;
}
- /* MOV first two channels to writemask dst0 */
- for (i = 0; i <= 1; i++) {
+ /* Replicate significand result across channels. */
+ for (i = 0; i <= 3; i++) {
+ if (!(write_mask & (1 << i)))
+ continue;
+
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
alu.op = ALU_OP1_MOV;
- alu.src[0].chan = i + 2;
+ alu.src[0].chan = (i & 1) + 2;
alu.src[0].sel = ctx->temp_reg;
- tgsi_dst(ctx, &inst->Dst[0], firsti + i, &alu.dst);
- alu.dst.write = (inst->Dst[0].Register.WriteMask >> (firsti + i)) & 1;
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ alu.dst.write = 1;
alu.last = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
struct r600_bytecode_alu alu;
- int i, r;
- int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+ int i, c, r;
+ int write_mask = inst->Dst[0].Register.WriteMask;
+ int temp_reg = r600_get_temp(ctx);
assert(inst->Instruction.Opcode == TGSI_OPCODE_I2D ||
inst->Instruction.Opcode == TGSI_OPCODE_U2D);
- for (i = 0; i <= (lasti+1)/2; i++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ctx->inst_info->op;
-
- r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
- alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = i;
- alu.dst.write = 1;
- alu.last = 1;
+ for (c = 0; c < 2; c++) {
+ int dchan = c * 2;
+ if (write_mask & (0x3 << dchan)) {
+ /* split into 24-bit int and 8-bit int */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_AND_INT;
+ alu.dst.sel = temp_reg;
+ alu.dst.chan = dchan;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], c);
+ alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[1].value = 0xffffff00;
+ alu.dst.write = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_AND_INT;
+ alu.dst.sel = temp_reg;
+ alu.dst.chan = dchan + 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], c);
+ alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[1].value = 0xff;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
}
- for (i = 0; i <= lasti; i++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_FLT32_TO_FLT64;
+ for (c = 0; c < 2; c++) {
+ int dchan = c * 2;
+ if (write_mask & (0x3 << dchan)) {
+ for (i = dchan; i <= dchan + 1; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = i == dchan ? ctx->inst_info->op : ALU_OP1_UINT_TO_FLT;
- alu.src[0].chan = i/2;
- if (i%2 == 0)
- alu.src[0].sel = ctx->temp_reg;
- else {
- alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
- alu.src[0].value = 0x0;
+ alu.src[0].sel = temp_reg;
+ alu.src[0].chan = i;
+ alu.dst.sel = temp_reg;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ if (ctx->bc->chip_class == CAYMAN)
+ alu.last = i == dchan + 1;
+ else
+ alu.last = 1; /* trans only ops on evergreen */
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
}
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
- alu.last = i == lasti;
+ }
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
+ for (c = 0; c < 2; c++) {
+ int dchan = c * 2;
+ if (write_mask & (0x3 << dchan)) {
+ for (i = 0; i < 4; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_FLT32_TO_FLT64;
+
+ alu.src[0].chan = dchan + (i / 2);
+ if (i == 0 || i == 2)
+ alu.src[0].sel = temp_reg;
+ else {
+ alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[0].value = 0x0;
+ }
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ alu.last = i == 3;
+ alu.dst.write = 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i <= 1; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_ADD_64;
+
+ alu.src[0].chan = fp64_switch(i);
+ alu.src[0].sel = ctx->temp_reg;
+
+ alu.src[1].chan = fp64_switch(i + 2);
+ alu.src[1].sel = ctx->temp_reg;
+ tgsi_dst(ctx, &inst->Dst[0], dchan + i, &alu.dst);
+ alu.last = i == 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ }
}
return 0;
struct r600_bytecode_alu alu;
int i, r;
int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
-
+ int treg = r600_get_temp(ctx);
assert(inst->Instruction.Opcode == TGSI_OPCODE_D2I ||
inst->Instruction.Opcode == TGSI_OPCODE_D2U);
+ /* do a 64->32 into a temp register */
+ r = tgsi_op2_64_params(ctx, true, false, treg, ALU_OP1_FLT64_TO_FLT32);
+ if (r)
+ return r;
+
for (i = 0; i <= lasti; i++) {
+ if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
+ continue;
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_FLT64_TO_FLT32;
+ alu.op = ctx->inst_info->op;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], fp64_switch(i));
- alu.dst.chan = i;
- alu.dst.sel = ctx->temp_reg;
- alu.dst.write = i%2 == 0;
- alu.last = i == lasti;
-
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
-
- for (i = 0; i <= (lasti+1)/2; i++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ctx->inst_info->op;
-
- alu.src[0].chan = i*2;
- alu.src[0].sel = ctx->temp_reg;
- tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
- alu.last = 1;
+ alu.src[0].chan = i;
+ alu.src[0].sel = treg;
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ alu.last = (i == lasti);
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return 0;
}
-static int tgsi_scs(struct r600_shader_ctx *ctx)
-{
- struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
- struct r600_bytecode_alu alu;
- int i, r;
-
- /* We'll only need the trig stuff if we are going to write to the
- * X or Y components of the destination vector.
- */
- if (likely(inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XY)) {
- r = tgsi_setup_trig(ctx);
- if (r)
- return r;
- }
-
- /* dst.x = COS */
- if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) {
- if (ctx->bc->chip_class == CAYMAN) {
- for (i = 0 ; i < 3; i++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_COS;
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-
- if (i == 0)
- alu.dst.write = 1;
- else
- alu.dst.write = 0;
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = 0;
- if (i == 2)
- alu.last = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
- } else {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_COS;
- tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
-
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = 0;
- alu.last = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
- }
-
- /* dst.y = SIN */
- if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) {
- if (ctx->bc->chip_class == CAYMAN) {
- for (i = 0 ; i < 3; i++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_SIN;
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
- if (i == 1)
- alu.dst.write = 1;
- else
- alu.dst.write = 0;
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = 0;
- if (i == 2)
- alu.last = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
- } else {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_SIN;
- tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
-
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = 0;
- alu.last = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
- }
-
- /* dst.z = 0.0; */
- if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-
- alu.op = ALU_OP1_MOV;
-
- tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
-
- alu.src[0].sel = V_SQ_ALU_SRC_0;
- alu.src[0].chan = 0;
-
- alu.last = 1;
-
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
-
- /* dst.w = 1.0; */
- if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-
- alu.op = ALU_OP1_MOV;
-
- tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst);
-
- alu.src[0].sel = V_SQ_ALU_SRC_1;
- alu.src[0].chan = 0;
-
- alu.last = 1;
-
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
-
- return 0;
-}
-
static int tgsi_kill(struct r600_shader_ctx *ctx)
{
const struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- /* XXX:
- * For state trackers other than OpenGL, we'll want to use
- * _RECIPSQRT_IEEE instead.
- */
- alu.op = ALU_OP1_RECIPSQRT_CLAMPED;
+ alu.op = ALU_OP1_RECIPSQRT_IEEE;
for (i = 0; i < inst->Instruction.NumSrcRegs; i++) {
r600_bytecode_src(&alu.src[i], &ctx->src[i], 0);
return tgsi_helper_tempx_replicate(ctx);
}
+/* Emit an integer multiply ALU instruction from the template in alu_src.
+ * On Cayman the instruction is replicated into all four slots of the
+ * instruction group, writing only the channel requested in the template;
+ * on other chips it is emitted once with the group terminated.
+ * NOTE(review): the replication appears to mirror the "trans only ops"
+ * handling used elsewhere in this file — confirm against the Cayman ISA. */
+static int emit_mul_int_op(struct r600_bytecode *bc,
+			   struct r600_bytecode_alu *alu_src)
+{
+	struct r600_bytecode_alu alu;
+	int i, r;
+	alu = *alu_src;
+	if (bc->chip_class == CAYMAN) {
+		for (i = 0; i < 4; i++) {
+			alu.dst.chan = i;
+			/* Only the slot matching the requested channel
+			 * actually writes its result. */
+			alu.dst.write = (i == alu_src->dst.chan);
+			alu.last = (i == 3);
+
+			r = r600_bytecode_add_alu(bc, &alu);
+			if (r)
+				return r;
+		}
+	} else {
+		alu.last = 1;
+		r = r600_bytecode_add_alu(bc, &alu);
+		if (r)
+			return r;
+	}
+	return 0;
+}
+
static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op)
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
struct r600_bytecode_alu alu;
int i, r, j;
unsigned write_mask = inst->Dst[0].Register.WriteMask;
+ int lasti = tgsi_last_instruction(write_mask);
int tmp0 = ctx->temp_reg;
int tmp1 = r600_get_temp(ctx);
int tmp2 = r600_get_temp(ctx);
int tmp3 = r600_get_temp(ctx);
+ int tmp4 = 0;
+
+ /* Use additional temp if dst register and src register are the same */
+ if (inst->Src[0].Register.Index == inst->Dst[0].Register.Index ||
+ inst->Src[1].Register.Index == inst->Dst[0].Register.Index) {
+ tmp4 = r600_get_temp(ctx);
+ }
+
/* Unsigned path:
*
* we need to represent src1 as src2*q + r, where q - quotient, r - remainder
}
/* 2. tmp0.z = lo (tmp0.x * src2) */
- if (ctx->bc->chip_class == CAYMAN) {
- for (j = 0 ; j < 4; j++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_MULLO_UINT;
-
- alu.dst.sel = tmp0;
- alu.dst.chan = j;
- alu.dst.write = (j == 2);
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_MULLO_UINT;
- alu.src[0].sel = tmp0;
- alu.src[0].chan = 0;
- if (signed_op) {
- alu.src[1].sel = tmp2;
- alu.src[1].chan = 1;
- } else {
- r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
- }
+ alu.dst.sel = tmp0;
+ alu.dst.chan = 2;
+ alu.dst.write = 1;
- alu.last = (j == 3);
- if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
- return r;
- }
+ alu.src[0].sel = tmp0;
+ alu.src[0].chan = 0;
+ if (signed_op) {
+ alu.src[1].sel = tmp2;
+ alu.src[1].chan = 1;
} else {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_MULLO_UINT;
-
- alu.dst.sel = tmp0;
- alu.dst.chan = 2;
- alu.dst.write = 1;
-
- alu.src[0].sel = tmp0;
- alu.src[0].chan = 0;
- if (signed_op) {
- alu.src[1].sel = tmp2;
- alu.src[1].chan = 1;
- } else {
- r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
- }
-
- alu.last = 1;
- if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
- return r;
+ r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
}
+ if ((r = emit_mul_int_op(ctx->bc, &alu)))
+ return r;
+
/* 3. tmp0.w = -tmp0.z */
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
alu.op = ALU_OP2_SUB_INT;
return r;
/* 4. tmp0.y = hi (tmp0.x * src2) */
- if (ctx->bc->chip_class == CAYMAN) {
- for (j = 0 ; j < 4; j++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_MULHI_UINT;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_MULHI_UINT;
- alu.dst.sel = tmp0;
- alu.dst.chan = j;
- alu.dst.write = (j == 1);
+ alu.dst.sel = tmp0;
+ alu.dst.chan = 1;
+ alu.dst.write = 1;
- alu.src[0].sel = tmp0;
- alu.src[0].chan = 0;
+ alu.src[0].sel = tmp0;
+ alu.src[0].chan = 0;
- if (signed_op) {
- alu.src[1].sel = tmp2;
- alu.src[1].chan = 1;
- } else {
- r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
- }
- alu.last = (j == 3);
- if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
- return r;
- }
+ if (signed_op) {
+ alu.src[1].sel = tmp2;
+ alu.src[1].chan = 1;
} else {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_MULHI_UINT;
-
- alu.dst.sel = tmp0;
- alu.dst.chan = 1;
- alu.dst.write = 1;
-
- alu.src[0].sel = tmp0;
- alu.src[0].chan = 0;
-
- if (signed_op) {
- alu.src[1].sel = tmp2;
- alu.src[1].chan = 1;
- } else {
- r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
- }
-
- alu.last = 1;
- if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
- return r;
+ r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
}
+ if ((r = emit_mul_int_op(ctx->bc, &alu)))
+ return r;
+
/* 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src)) */
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
alu.op = ALU_OP3_CNDE_INT;
return r;
/* 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error */
- if (ctx->bc->chip_class == CAYMAN) {
- for (j = 0 ; j < 4; j++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_MULHI_UINT;
-
- alu.dst.sel = tmp0;
- alu.dst.chan = j;
- alu.dst.write = (j == 3);
-
- alu.src[0].sel = tmp0;
- alu.src[0].chan = 2;
-
- alu.src[1].sel = tmp0;
- alu.src[1].chan = 0;
-
- alu.last = (j == 3);
- if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
- return r;
- }
- } else {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_MULHI_UINT;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_MULHI_UINT;
- alu.dst.sel = tmp0;
- alu.dst.chan = 3;
- alu.dst.write = 1;
+ alu.dst.sel = tmp0;
+ alu.dst.chan = 3;
+ alu.dst.write = 1;
- alu.src[0].sel = tmp0;
- alu.src[0].chan = 2;
+ alu.src[0].sel = tmp0;
+ alu.src[0].chan = 2;
- alu.src[1].sel = tmp0;
- alu.src[1].chan = 0;
+ alu.src[1].sel = tmp0;
+ alu.src[1].chan = 0;
- alu.last = 1;
- if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
+ if ((r = emit_mul_int_op(ctx->bc, &alu)))
return r;
- }
/* 7. tmp1.x = tmp0.x - tmp0.w */
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
return r;
/* 10. tmp0.z = hi(tmp0.x * src1) = q */
- if (ctx->bc->chip_class == CAYMAN) {
- for (j = 0 ; j < 4; j++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_MULHI_UINT;
-
- alu.dst.sel = tmp0;
- alu.dst.chan = j;
- alu.dst.write = (j == 2);
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_MULHI_UINT;
- alu.src[0].sel = tmp0;
- alu.src[0].chan = 0;
+ alu.dst.sel = tmp0;
+ alu.dst.chan = 2;
+ alu.dst.write = 1;
- if (signed_op) {
- alu.src[1].sel = tmp2;
- alu.src[1].chan = 0;
- } else {
- r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
- }
+ alu.src[0].sel = tmp0;
+ alu.src[0].chan = 0;
- alu.last = (j == 3);
- if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
- return r;
- }
+ if (signed_op) {
+ alu.src[1].sel = tmp2;
+ alu.src[1].chan = 0;
} else {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_MULHI_UINT;
-
- alu.dst.sel = tmp0;
- alu.dst.chan = 2;
- alu.dst.write = 1;
-
- alu.src[0].sel = tmp0;
- alu.src[0].chan = 0;
-
- if (signed_op) {
- alu.src[1].sel = tmp2;
- alu.src[1].chan = 0;
- } else {
- r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
- }
-
- alu.last = 1;
- if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
- return r;
+ r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
}
- /* 11. tmp0.y = lo (src2 * tmp0.z) = src2*q = src1 - r */
- if (ctx->bc->chip_class == CAYMAN) {
- for (j = 0 ; j < 4; j++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_MULLO_UINT;
-
- alu.dst.sel = tmp0;
- alu.dst.chan = j;
- alu.dst.write = (j == 1);
+ if ((r = emit_mul_int_op(ctx->bc, &alu)))
+ return r;
- if (signed_op) {
- alu.src[0].sel = tmp2;
- alu.src[0].chan = 1;
- } else {
- r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
- }
+ /* 11. tmp0.y = lo (src2 * tmp0.z) = src2*q = src1 - r */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_MULLO_UINT;
- alu.src[1].sel = tmp0;
- alu.src[1].chan = 2;
+ alu.dst.sel = tmp0;
+ alu.dst.chan = 1;
+ alu.dst.write = 1;
- alu.last = (j == 3);
- if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
- return r;
- }
+ if (signed_op) {
+ alu.src[0].sel = tmp2;
+ alu.src[0].chan = 1;
} else {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_MULLO_UINT;
-
- alu.dst.sel = tmp0;
- alu.dst.chan = 1;
- alu.dst.write = 1;
-
- if (signed_op) {
- alu.src[0].sel = tmp2;
- alu.src[0].chan = 1;
- } else {
- r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
- }
+ r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
+ }
- alu.src[1].sel = tmp0;
- alu.src[1].chan = 2;
+ alu.src[1].sel = tmp0;
+ alu.src[1].chan = 2;
- alu.last = 1;
- if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
- return r;
- }
+ if ((r = emit_mul_int_op(ctx->bc, &alu)))
+ return r;
/* 12. tmp0.w = src1 - tmp0.y = r */
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
alu.dst.chan = 2;
alu.dst.write = 1;
} else {
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ if (tmp4 > 0) {
+ alu.dst.sel = tmp4;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ } else {
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ }
}
alu.src[0].sel = tmp1;
alu.op = ALU_OP3_CNDGE_INT;
alu.is_op3 = 1;
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ if (tmp4 > 0) {
+ alu.dst.sel = tmp4;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ } else {
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ }
r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
alu.src[1].sel = tmp0;
alu.op = ALU_OP3_CNDGE_INT;
alu.is_op3 = 1;
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ if (tmp4 > 0) {
+ alu.dst.sel = tmp4;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ } else {
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ }
alu.src[0].sel = tmp2;
alu.src[0].chan = 2;
}
}
}
+
+ if (tmp4 > 0) {
+ for (i = 0; i <= lasti; ++i) {
+ if (!(write_mask & (1<<i)))
+ continue;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ alu.src[0].sel = tmp4;
+ alu.src[0].chan = i;
+
+ if (i == lasti)
+ alu.last = 1;
+ if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
+ return r;
+ }
+ }
+
return 0;
}
static int tgsi_ssg(struct r600_shader_ctx *ctx)
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ unsigned write_mask = inst->Dst[0].Register.WriteMask;
+ int last_inst = tgsi_last_instruction(write_mask);
struct r600_bytecode_alu alu;
int i, r;
/* tmp = (src > 0 ? 1 : src) */
- for (i = 0; i < 4; i++) {
+ for (i = 0; i <= last_inst; i++) {
+ if (!(write_mask & (1 << i)))
+ continue;
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
alu.op = ALU_OP3_CNDGT;
alu.is_op3 = 1;
alu.src[1].sel = V_SQ_ALU_SRC_1;
r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
- if (i == 3)
+ if (i == last_inst)
alu.last = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
}
/* dst = (-tmp > 0 ? -1 : tmp) */
- for (i = 0; i < 4; i++) {
+ for (i = 0; i <= last_inst; i++) {
+ if (!(write_mask & (1 << i)))
+ continue;
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
alu.op = ALU_OP3_CNDGT;
alu.is_op3 = 1;
alu.src[2].sel = ctx->temp_reg;
alu.src[2].chan = i;
- if (i == 3)
+ if (i == last_inst)
alu.last = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
unsigned write_mask = inst->Dst[0].Register.WriteMask;
int last_inst = tgsi_last_instruction(write_mask);
- t1 = ctx->temp_reg;
+ t1 = r600_get_temp(ctx);
+
+ for (i = 0; i < 4; i++) {
+ if (!(write_mask & (1<<i)))
+ continue;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_SETGE_INT;
+ r600_bytecode_src(&alu.src[0], &ctx->src[3], i);
+ alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[1].value = 32;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ alu.last = i == last_inst;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
for (i = 0; i < 4; i++) {
if (!(write_mask & (1<<i)))
return r;
}
+ for (i = 0; i < 4; i++) {
+ if (!(write_mask & (1<<i)))
+ continue;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP3_CNDE_INT;
+ alu.is_op3 = 1;
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = i;
+ r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
+
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+
+ alu.src[1].sel = alu.dst.sel;
+ alu.src[1].chan = i;
+
+ alu.last = i == last_inst;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
return 0;
}
}
else {
location = TGSI_INTERPOLATE_LOC_CENTROID;
+ ctx->shader->input[input].uses_interpolate_at_centroid = 1;
}
k = eg_get_interpolator_index(ctx->shader->input[input].interpolate, location);
}
static int tgsi_make_src_for_op3(struct r600_shader_ctx *ctx,
- unsigned temp, int chan,
+ unsigned writemask,
struct r600_bytecode_alu_src *bc_src,
const struct r600_shader_src *shader_src)
{
struct r600_bytecode_alu alu;
- int r;
+ int i, r;
+ int lasti = tgsi_last_instruction(writemask);
+ int temp_reg = 0;
- r600_bytecode_src(bc_src, shader_src, chan);
+ r600_bytecode_src(&bc_src[0], shader_src, 0);
+ r600_bytecode_src(&bc_src[1], shader_src, 1);
+ r600_bytecode_src(&bc_src[2], shader_src, 2);
+ r600_bytecode_src(&bc_src[3], shader_src, 3);
- /* op3 operands don't support abs modifier */
if (bc_src->abs) {
- assert(temp!=0); /* we actually need the extra register, make sure it is allocated. */
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_MOV;
- alu.dst.sel = temp;
- alu.dst.chan = chan;
- alu.dst.write = 1;
-
- alu.src[0] = *bc_src;
- alu.last = true; // sufficient?
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
+ temp_reg = r600_get_temp(ctx);
- memset(bc_src, 0, sizeof(*bc_src));
- bc_src->sel = temp;
- bc_src->chan = chan;
+ for (i = 0; i < lasti + 1; i++) {
+ if (!(writemask & (1 << i)))
+ continue;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ alu.dst.sel = temp_reg;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ alu.src[0] = bc_src[i];
+ if (i == lasti) {
+ alu.last = 1;
+ }
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ memset(&bc_src[i], 0, sizeof(*bc_src));
+ bc_src[i].sel = temp_reg;
+ bc_src[i].chan = i;
+ }
}
return 0;
}
-static int tgsi_op3(struct r600_shader_ctx *ctx)
+static int tgsi_op3_dst(struct r600_shader_ctx *ctx, int dst)
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
struct r600_bytecode_alu alu;
+ struct r600_bytecode_alu_src srcs[4][4];
int i, j, r;
int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
- int temp_regs[4];
unsigned op = ctx->inst_info->op;
if (op == ALU_OP3_MULADD_IEEE &&
op = ALU_OP3_MULADD;
for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
- temp_regs[j] = 0;
- if (ctx->src[j].abs)
- temp_regs[j] = r600_get_temp(ctx);
+ r = tgsi_make_src_for_op3(ctx, inst->Dst[0].Register.WriteMask,
+ srcs[j], &ctx->src[j]);
+ if (r)
+ return r;
}
+
for (i = 0; i < lasti + 1; i++) {
if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
continue;
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
alu.op = op;
for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
- r = tgsi_make_src_for_op3(ctx, temp_regs[j], i, &alu.src[j], &ctx->src[j]);
- if (r)
- return r;
+ alu.src[j] = srcs[j][i];
}
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ if (dst == -1) {
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ } else {
+ alu.dst.sel = dst;
+ }
alu.dst.chan = i;
alu.dst.write = 1;
alu.is_op3 = 1;
return 0;
}
+/* Emit a three-operand ALU instruction writing to the TGSI destination
+ * register; dst == -1 selects inst->Dst[0] in tgsi_op3_dst(). */
+static int tgsi_op3(struct r600_shader_ctx *ctx)
+{
+	return tgsi_op3_dst(ctx, -1);
+}
+
static int tgsi_dp(struct r600_shader_ctx *ctx)
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
alu.src[0].chan = alu.src[1].chan = 0;
}
break;
- case TGSI_OPCODE_DPH:
- if (i == 3) {
- alu.src[0].sel = V_SQ_ALU_SRC_1;
- alu.src[0].chan = 0;
- alu.src[0].neg = 0;
- }
- break;
default:
break;
}
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
int src_gpr, r, i;
int id = tgsi_tex_get_src_gpr(ctx, 1);
+ int sampler_index_mode = inst->Src[1].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
src_gpr = tgsi_tex_get_src_gpr(ctx, 0);
if (src_requires_loading) {
vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; /* SEL_Z */
vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; /* SEL_W */
vtx.use_const_fields = 1;
+ vtx.buffer_index_mode = sampler_index_mode;
if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx)))
return r;
return 0;
}
-static int r600_do_buffer_txq(struct r600_shader_ctx *ctx)
+static int r600_do_buffer_txq(struct r600_shader_ctx *ctx, int reg_idx, int offset, int eg_buffer_base)
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
- struct r600_bytecode_alu alu;
int r;
- int id = tgsi_tex_get_src_gpr(ctx, 1);
+ int id = tgsi_tex_get_src_gpr(ctx, reg_idx) + offset;
+ int sampler_index_mode = inst->Src[reg_idx].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_MOV;
- alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
- if (ctx->bc->chip_class >= EVERGREEN) {
- /* channel 0 or 2 of each word */
- alu.src[0].sel += (id / 2);
- alu.src[0].chan = (id % 2) * 2;
- } else {
+ if (ctx->bc->chip_class < EVERGREEN) {
+ struct r600_bytecode_alu alu;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
/* r600 we have them at channel 2 of the second dword */
alu.src[0].sel += (id * 2) + 1;
alu.src[0].chan = 1;
+ alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
+ tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ return 0;
+ } else {
+ struct r600_bytecode_vtx vtx;
+ memset(&vtx, 0, sizeof(vtx));
+ vtx.op = FETCH_OP_GET_BUFFER_RESINFO;
+ vtx.buffer_id = id + eg_buffer_base;
+ vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
+ vtx.src_gpr = 0;
+ vtx.mega_fetch_count = 16; /* no idea here really... */
+ vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
+ vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */
+ vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 4 : 7; /* SEL_Y */
+ vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 4 : 7; /* SEL_Z */
+ vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 4 : 7; /* SEL_W */
+ vtx.data_format = FMT_32_32_32_32;
+ vtx.buffer_index_mode = sampler_index_mode;
+
+ if ((r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx)))
+ return r;
+ return 0;
}
- alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
- tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
- alu.last = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- return 0;
}
+
static int tgsi_tex(struct r600_shader_ctx *ctx)
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
struct r600_bytecode_tex tex;
+ struct r600_bytecode_tex grad_offs[3];
struct r600_bytecode_alu alu;
unsigned src_gpr;
- int r, i, j;
+ int r, i, j, n_grad_offs = 0;
int opcode;
bool read_compressed_msaa = ctx->bc->has_compressed_msaa_texturing &&
inst->Instruction.Opcode == TGSI_OPCODE_TXF &&
/* Texture fetch instructions can only use gprs as source.
* Also they cannot negate the source or take the absolute value */
- const boolean src_requires_loading = (inst->Instruction.Opcode != TGSI_OPCODE_TXQ_LZ &&
- inst->Instruction.Opcode != TGSI_OPCODE_TXQS &&
+ const boolean src_requires_loading = (inst->Instruction.Opcode != TGSI_OPCODE_TXQS &&
tgsi_tex_src_requires_loading(ctx, 0)) ||
read_compressed_msaa || txf_add_offsets;
boolean src_loaded = FALSE;
- unsigned sampler_src_reg = inst->Instruction.Opcode == TGSI_OPCODE_TXQ_LZ ? 0 : 1;
+ unsigned sampler_src_reg = 1;
int8_t offset_x = 0, offset_y = 0, offset_z = 0;
boolean has_txq_cube_array_z = false;
unsigned sampler_index_mode;
+ int array_index_offset_channel = -1;
if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ &&
((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
if (inst->Texture.Texture == TGSI_TEXTURE_BUFFER) {
if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ) {
- ctx->shader->uses_tex_buffers = true;
- return r600_do_buffer_txq(ctx);
+ if (ctx->bc->chip_class < EVERGREEN)
+ ctx->shader->uses_tex_buffers = true;
+ return r600_do_buffer_txq(ctx, 1, 0, R600_MAX_CONST_BUFFERS);
}
else if (inst->Instruction.Opcode == TGSI_OPCODE_TXF) {
if (ctx->bc->chip_class < EVERGREEN)
inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
- inst->Instruction.Opcode != TGSI_OPCODE_TXQ &&
- inst->Instruction.Opcode != TGSI_OPCODE_TXQ_LZ) {
+ inst->Instruction.Opcode != TGSI_OPCODE_TXQ) {
static const unsigned src0_swizzle[] = {2, 2, 0, 1};
static const unsigned src1_swizzle[] = {1, 0, 2, 2};
if (r)
return r;
- /* have to multiply original layer by 8 and add to face id (temp.w) in Z */
+ /* Evaluate the array index according to floor(idx + 0.5). This
+ * needs to be done before merging the face select value, because
+ * otherwise the fractional part of the array index will interfere
+ * with the face select value */
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP3_MULADD;
- alu.is_op3 = 1;
r600_bytecode_src(&alu.src[0], &ctx->src[0], 3);
- alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
- alu.src[1].chan = 0;
- alu.src[1].value = u_bitcast_f2u(8.0f);
- alu.src[2].sel = mytmp;
- alu.src[2].chan = 0;
+ alu.op = ALU_OP1_RNDNE;
alu.dst.sel = ctx->temp_reg;
alu.dst.chan = 3;
alu.dst.write = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
- } else if (ctx->bc->chip_class < EVERGREEN) {
- memset(&tex, 0, sizeof(struct r600_bytecode_tex));
- tex.op = FETCH_OP_SET_CUBEMAP_INDEX;
- tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
- tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
- tex.src_gpr = r600_get_temp(ctx);
- tex.src_sel_x = 0;
- tex.src_sel_y = 0;
- tex.src_sel_z = 0;
- tex.src_sel_w = 0;
+
+ /* Because the array slice index and the cube face index are merged
+ * into one value we have to make sure the array slice index is >= 0,
+ * otherwise the face selection will fail */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_MAX;
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = 3;
+ alu.src[1].sel = V_SQ_ALU_SRC_0;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 3;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ /* have to multiply original layer by 8 and add to face id (temp.w) in Z */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP3_MULADD;
+ alu.is_op3 = 1;
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = 3;
+ alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[1].chan = 0;
+ alu.src[1].value = u_bitcast_f2u(8.0f);
+ alu.src[2].sel = mytmp;
+ alu.src[2].chan = 0;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 3;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ } else if (ctx->bc->chip_class < EVERGREEN) {
+ memset(&tex, 0, sizeof(struct r600_bytecode_tex));
+ tex.op = FETCH_OP_SET_CUBEMAP_INDEX;
+ tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
+ tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
+ tex.src_gpr = r600_get_temp(ctx);
+ tex.src_sel_x = 0;
+ tex.src_sel_y = 0;
+ tex.src_sel_z = 0;
+ tex.src_sel_w = 0;
tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
tex.coord_type_x = 1;
tex.coord_type_y = 1;
}
for (i = 1; i < 3; i++) {
/* set gradients h/v */
- memset(&tex, 0, sizeof(struct r600_bytecode_tex));
- tex.op = (i == 1) ? FETCH_OP_SET_GRADIENTS_H :
+ struct r600_bytecode_tex *t = &grad_offs[n_grad_offs++];
+ memset(t, 0, sizeof(struct r600_bytecode_tex));
+ t->op = (i == 1) ? FETCH_OP_SET_GRADIENTS_H :
FETCH_OP_SET_GRADIENTS_V;
- tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
- tex.sampler_index_mode = sampler_index_mode;
- tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
- tex.resource_index_mode = sampler_index_mode;
-
- tex.src_gpr = (i == 1) ? temp_h : temp_v;
- tex.src_sel_x = 0;
- tex.src_sel_y = 1;
- tex.src_sel_z = 2;
- tex.src_sel_w = 3;
-
- tex.dst_gpr = r600_get_temp(ctx); /* just to avoid confusing the asm scheduler */
- tex.dst_sel_x = tex.dst_sel_y = tex.dst_sel_z = tex.dst_sel_w = 7;
+ t->sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
+ t->sampler_index_mode = sampler_index_mode;
+ t->resource_id = t->sampler_id + R600_MAX_CONST_BUFFERS;
+ t->resource_index_mode = sampler_index_mode;
+
+ t->src_gpr = (i == 1) ? temp_h : temp_v;
+ t->src_sel_x = 0;
+ t->src_sel_y = 1;
+ t->src_sel_z = 2;
+ t->src_sel_w = 3;
+
+ t->dst_gpr = r600_get_temp(ctx); /* just to avoid confusing the asm scheduler */
+ t->dst_sel_x = t->dst_sel_y = t->dst_sel_z = t->dst_sel_w = 7;
if (inst->Texture.Texture != TGSI_TEXTURE_RECT) {
- tex.coord_type_x = 1;
- tex.coord_type_y = 1;
- tex.coord_type_z = 1;
- tex.coord_type_w = 1;
+ t->coord_type_x = 1;
+ t->coord_type_y = 1;
+ t->coord_type_z = 1;
+ t->coord_type_w = 1;
}
- r = r600_bytecode_add_tex(ctx->bc, &tex);
- if (r)
- return r;
+ }
+ }
+
+ if (inst->Instruction.Opcode == TGSI_OPCODE_TG4) {
+ /* Gather4 should follow the same rules as bilinear filtering, but the hardware
+ * incorrectly forces nearest filtering if the texture format is integer.
+ * The only effect it has on Gather4, which always returns 4 texels for
+ * bilinear filtering, is that the final coordinates are off by 0.5 of
+ * the texel size.
+ *
+ * The workaround is to subtract 0.5 from the unnormalized coordinates,
+ * or (0.5 / size) from the normalized coordinates.
+ */
+ if (inst->Texture.ReturnType == TGSI_RETURN_TYPE_SINT ||
+ inst->Texture.ReturnType == TGSI_RETURN_TYPE_UINT) {
+ int treg = r600_get_temp(ctx);
+
+			/* mov array and comparison coordinate to temp_reg if needed */
+ if ((inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
+ inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
+ inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY) && !src_loaded) {
+ int end = inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ? 3 : 2;
+ for (i = 2; i <= end; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ alu.last = (i == end);
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ }
+
+ if (inst->Texture.Texture == TGSI_TEXTURE_RECT ||
+ inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT) {
+ for (i = 0; i < 2; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_ADD;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ alu.last = i == 1;
+ if (src_loaded) {
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = i;
+ } else
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
+ alu.src[1].sel = V_SQ_ALU_SRC_0_5;
+ alu.src[1].neg = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ } else {
+ /* execute a TXQ */
+ memset(&tex, 0, sizeof(struct r600_bytecode_tex));
+ tex.op = FETCH_OP_GET_TEXTURE_RESINFO;
+ tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
+ tex.sampler_index_mode = sampler_index_mode;
+ tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
+ tex.resource_index_mode = sampler_index_mode;
+ tex.dst_gpr = treg;
+ tex.src_sel_x = 4;
+ tex.src_sel_y = 4;
+ tex.src_sel_z = 4;
+ tex.src_sel_w = 4;
+ tex.dst_sel_x = 0;
+ tex.dst_sel_y = 1;
+ tex.dst_sel_z = 7;
+ tex.dst_sel_w = 7;
+ r = r600_bytecode_add_tex(ctx->bc, &tex);
+ if (r)
+ return r;
+
+ /* coord.xy = -0.5 * (1.0/int_to_flt(size)) + coord.xy */
+ if (ctx->bc->chip_class == CAYMAN) {
+				/* convert the texture size in treg.xy to float */
+ for (i = 0; i < 2; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_INT_TO_FLT;
+ alu.dst.sel = treg;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ alu.src[0].sel = treg;
+ alu.src[0].chan = i;
+ alu.last = (i == 1) ? 1 : 0;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ for (j = 0; j < 2; j++) {
+ for (i = 0; i < 3; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_RECIP_IEEE;
+ alu.src[0].sel = treg;
+ alu.src[0].chan = j;
+ alu.dst.sel = treg;
+ alu.dst.chan = i;
+ if (i == 2)
+ alu.last = 1;
+ if (i == j)
+ alu.dst.write = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ }
+ } else {
+ for (i = 0; i < 2; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_INT_TO_FLT;
+ alu.dst.sel = treg;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ alu.src[0].sel = treg;
+ alu.src[0].chan = i;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ for (i = 0; i < 2; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_RECIP_IEEE;
+ alu.src[0].sel = treg;
+ alu.src[0].chan = i;
+ alu.dst.sel = treg;
+ alu.dst.chan = i;
+ alu.last = 1;
+ alu.dst.write = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ }
+ for (i = 0; i < 2; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP3_MULADD;
+ alu.is_op3 = 1;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ alu.last = i == 1;
+ alu.src[0].sel = treg;
+ alu.src[0].chan = i;
+ alu.src[1].sel = V_SQ_ALU_SRC_0_5;
+ alu.src[1].neg = 1;
+ if (src_loaded) {
+ alu.src[2].sel = ctx->temp_reg;
+ alu.src[2].chan = i;
+ } else
+ r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ }
+ src_loaded = TRUE;
+ src_gpr = ctx->temp_reg;
}
}
return r;
/* temp.x = sample_index*4 */
- if (ctx->bc->chip_class == CAYMAN) {
- for (i = 0 ; i < 4; i++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_MULLO_INT;
- alu.src[0].sel = src_gpr;
- alu.src[0].chan = sample_chan;
- alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
- alu.src[1].value = 4;
- alu.dst.sel = temp;
- alu.dst.chan = i;
- alu.dst.write = i == 0;
- if (i == 3)
- alu.last = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
- } else {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_MULLO_INT;
- alu.src[0].sel = src_gpr;
- alu.src[0].chan = sample_chan;
- alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
- alu.src[1].value = 4;
- alu.dst.sel = temp;
- alu.dst.chan = 0;
- alu.dst.write = 1;
- alu.last = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_MULLO_INT;
+ alu.src[0].sel = src_gpr;
+ alu.src[0].chan = sample_chan;
+ alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[1].value = 4;
+ alu.dst.sel = temp;
+ alu.dst.chan = 0;
+ alu.dst.write = 1;
+ r = emit_mul_int_op(ctx->bc, &alu);
+ if (r)
+ return r;
/* sample_index = temp.w >> temp.x */
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
if (ctx->bc->chip_class >= EVERGREEN) {
- /* channel 1 or 3 of each word */
- alu.src[0].sel += (id / 2);
- alu.src[0].chan = ((id % 2) * 2) + 1;
+ /* with eg each dword is number of cubes */
+ alu.src[0].sel += id / 4;
+ alu.src[0].chan = id % 4;
} else {
/* r600 we have them at channel 2 of the second dword */
alu.src[0].sel += (id * 2) + 1;
if (opcode == FETCH_OP_GATHER4 &&
inst->TexOffsets[0].File != TGSI_FILE_NULL &&
inst->TexOffsets[0].File != TGSI_FILE_IMMEDIATE) {
+ struct r600_bytecode_tex *t;
opcode = FETCH_OP_GATHER4_O;
/* GATHER4_O/GATHER4_C_O use offset values loaded by
SET_TEXTURE_OFFSETS instruction. The immediate offset values
encoded in the instruction are ignored. */
- memset(&tex, 0, sizeof(struct r600_bytecode_tex));
- tex.op = FETCH_OP_SET_TEXTURE_OFFSETS;
- tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
- tex.sampler_index_mode = sampler_index_mode;
- tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
- tex.resource_index_mode = sampler_index_mode;
-
- tex.src_gpr = ctx->file_offset[inst->TexOffsets[0].File] + inst->TexOffsets[0].Index;
- tex.src_sel_x = inst->TexOffsets[0].SwizzleX;
- tex.src_sel_y = inst->TexOffsets[0].SwizzleY;
- tex.src_sel_z = inst->TexOffsets[0].SwizzleZ;
- tex.src_sel_w = 4;
+ t = &grad_offs[n_grad_offs++];
+ memset(t, 0, sizeof(struct r600_bytecode_tex));
+ t->op = FETCH_OP_SET_TEXTURE_OFFSETS;
+ t->sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
+ t->sampler_index_mode = sampler_index_mode;
+ t->resource_id = t->sampler_id + R600_MAX_CONST_BUFFERS;
+ t->resource_index_mode = sampler_index_mode;
+
+ t->src_gpr = ctx->file_offset[inst->TexOffsets[0].File] + inst->TexOffsets[0].Index;
+ t->src_sel_x = inst->TexOffsets[0].SwizzleX;
+ t->src_sel_y = inst->TexOffsets[0].SwizzleY;
+ if (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
+ inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY)
+ /* make sure array index selector is 0, this is just a safety
+			 * precaution because TGSI seems to emit something strange here */
+ t->src_sel_z = 4;
+ else
+ t->src_sel_z = inst->TexOffsets[0].SwizzleZ;
- tex.dst_sel_x = 7;
- tex.dst_sel_y = 7;
- tex.dst_sel_z = 7;
- tex.dst_sel_w = 7;
+ t->src_sel_w = 4;
- r = r600_bytecode_add_tex(ctx->bc, &tex);
- if (r)
- return r;
+ t->dst_sel_x = 7;
+ t->dst_sel_y = 7;
+ t->dst_sel_z = 7;
+ t->dst_sel_w = 7;
}
if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
tex.inst_mod = texture_component_select;
if (ctx->bc->chip_class == CAYMAN) {
- /* GATHER4 result order is different from TGSI TG4 */
- tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 0 : 7;
- tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 1 : 7;
- tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 2 : 7;
+ tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
+ tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
+ tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
} else {
- tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
- tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
- tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
+ /* GATHER4 result order is different from TGSI TG4 */
+ tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 1 : 7;
+ tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 2 : 7;
+ tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 0 : 7;
tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
}
}
}
- if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ_LZ ||
- inst->Instruction.Opcode == TGSI_OPCODE_TXQS) {
+ if (inst->Instruction.Opcode == TGSI_OPCODE_TXQS) {
tex.src_sel_x = 4;
tex.src_sel_y = 4;
tex.src_sel_z = 4;
opcode == FETCH_OP_SAMPLE_C_LB) {
/* the array index is read from Y */
tex.coord_type_y = 0;
+ array_index_offset_channel = tex.src_sel_y;
} else {
/* the array index is read from Z */
tex.coord_type_z = 0;
tex.src_sel_z = tex.src_sel_y;
+ array_index_offset_channel = tex.src_sel_z;
}
} else if (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
- inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
- ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
+ inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY) {
+ tex.coord_type_z = 0;
+ array_index_offset_channel = tex.src_sel_z;
+ } else if ((inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY ||
inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) &&
- (ctx->bc->chip_class >= EVERGREEN)))
- /* the array index is read from Z */
+ (ctx->bc->chip_class >= EVERGREEN))
+ /* the array index is read from Z, coordinate will be corrected elsewhere */
tex.coord_type_z = 0;
+ /* We have array access to 1D or 2D ARRAY, the coordinates are not int ->
+ * evaluate the array index */
+ if (array_index_offset_channel >= 0 &&
+ opcode != FETCH_OP_LD &&
+ opcode != FETCH_OP_GET_TEXTURE_RESINFO) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.src[0].sel = tex.src_gpr;
+ alu.src[0].chan = array_index_offset_channel;
+ alu.src[0].rel = tex.src_rel;
+ alu.op = ALU_OP1_RNDNE;
+ alu.dst.sel = tex.src_gpr;
+ alu.dst.chan = array_index_offset_channel;
+ alu.dst.rel = tex.src_rel;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+
/* mask unused source components */
if (opcode == FETCH_OP_SAMPLE || opcode == FETCH_OP_GATHER4) {
switch (inst->Texture.Texture) {
}
}
+ /* Emit set gradient and offset instructions. */
+ for (i = 0; i < n_grad_offs; ++i) {
+ r = r600_bytecode_add_tex(ctx->bc, &grad_offs[i]);
+ if (r)
+ return r;
+ }
+
r = r600_bytecode_add_tex(ctx->bc, &tex);
if (r)
return r;
return 0;
}
-static int tgsi_lrp(struct r600_shader_ctx *ctx)
+/* Map a TGSI atomic-counter source register to its hardware counter index
+ * using the ranges recorded in ctx->shader->atomics.
+ *
+ * Indirect accesses are matched by ArrayID; direct accesses are matched by
+ * buffer id (src->Dimension.Index) and the [start, end] index interval, with
+ * the offset into the range added to the range's base hw index.
+ * Asserts (and returns -1) if no recorded range matches. */
+static int find_hw_atomic_counter(struct r600_shader_ctx *ctx,
+				  struct tgsi_full_src_register *src)
+{
+	unsigned i;
+
+	if (src->Register.Indirect) {
+		for (i = 0; i < ctx->shader->nhwatomic_ranges; i++) {
+			if (src->Indirect.ArrayID == ctx->shader->atomics[i].array_id)
+				return ctx->shader->atomics[i].hw_idx;
+		}
+	} else {
+		uint32_t index = src->Register.Index;
+		for (i = 0; i < ctx->shader->nhwatomic_ranges; i++) {
+			/* Skip ranges for other buffers or outside the index. */
+			if (ctx->shader->atomics[i].buffer_id != (unsigned)src->Dimension.Index)
+				continue;
+			if (index > ctx->shader->atomics[i].end)
+				continue;
+			if (index < ctx->shader->atomics[i].start)
+				continue;
+			uint32_t offset = (index - ctx->shader->atomics[i].start);
+			return ctx->shader->atomics[i].hw_idx + offset;
+		}
+	}
+	/* No matching atomic range — indicates inconsistent shader info. */
+	assert(0);
+	return -1;
+}
+
+static int tgsi_set_gds_temp(struct r600_shader_ctx *ctx,
+ int *uav_id_p, int *uav_index_mode_p) /* Stage GDS addressing: resolve the counter slot; on Cayman put its byte offset in temp_reg.x. */
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
- struct r600_bytecode_alu alu;
- int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
- unsigned i, temp_regs[2];
+ int uav_id, uav_index_mode = 0;
int r;
+ bool is_cm = (ctx->bc->chip_class == CAYMAN);
- /* optimize if it's just an equal balance */
- if (ctx->src[0].sel == V_SQ_ALU_SRC_0_5) {
- for (i = 0; i < lasti + 1; i++) {
- if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
- continue;
+ uav_id = find_hw_atomic_counter(ctx, &inst->Src[0]); /* flat hw slot for the counter in Src[0] */
+ if (inst->Src[0].Register.Indirect) {
+ if (is_cm) {
+ struct r600_bytecode_alu alu;
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_ADD;
- r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
- r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
- alu.omod = 3;
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
- alu.dst.chan = i;
- if (i == lasti) {
- alu.last = 1;
- }
+ alu.op = ALU_OP2_LSHL_INT; /* indirect index << 2: dword index -> byte offset */
+ alu.src[0].sel = get_address_file_reg(ctx, inst->Src[0].Indirect.Index);
+ alu.src[0].chan = 0;
+ alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[1].value = 2;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 0;
+ alu.dst.write = 1;
+ alu.last = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
- }
- return 0;
+
+ r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
+ ctx->temp_reg, 0,
+ ctx->temp_reg, 0,
+ V_SQ_ALU_SRC_LITERAL, uav_id * 4); /* add the range's base byte offset */
+ if (r)
+ return r;
+ } else
+ uav_index_mode = 2; /* EG path: address indirectly via CF index reg instead */
+ } else if (is_cm) {
+ r = single_alu_op2(ctx, ALU_OP1_MOV,
+ ctx->temp_reg, 0,
+ V_SQ_ALU_SRC_LITERAL, uav_id * 4, /* direct access: constant byte offset */
+ 0, 0);
+ if (r)
+ return r;
}
+ *uav_id_p = uav_id;
+ *uav_index_mode_p = uav_index_mode;
+ return 0;
+}
- /* 1 - src0 */
- for (i = 0; i < lasti + 1; i++) {
- if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
- continue;
+static int tgsi_load_gds(struct r600_shader_ctx *ctx)
+{ /* TGSI LOAD from the HW_ATOMIC file: read an atomic counter through GDS. */
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ int r;
+ struct r600_bytecode_gds gds;
+ int uav_id = 0;
+ int uav_index_mode = 0;
+ bool is_cm = (ctx->bc->chip_class == CAYMAN);
+
+ r = tgsi_set_gds_temp(ctx, &uav_id, &uav_index_mode); /* stage the counter address (see tgsi_set_gds_temp) */
+ if (r)
+ return r;
+
+ memset(&gds, 0, sizeof(struct r600_bytecode_gds));
+ gds.op = FETCH_OP_GDS_READ_RET;
+ gds.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
+ gds.uav_id = is_cm ? 0 : uav_id; /* Cayman addresses via src_gpr, not uav_id */
+ gds.uav_index_mode = is_cm ? 0 : uav_index_mode;
+ gds.src_gpr = ctx->temp_reg;
+ gds.src_sel_x = (is_cm) ? 0 : 4; /* 4 selects the constant-zero source on EG */
+ gds.src_sel_y = 4;
+ gds.src_sel_z = 4;
+ gds.dst_sel_x = 0; /* returned value lands in dst.x ... */
+ gds.dst_sel_y = 7; /* ... 7 masks the other channels */
+ gds.dst_sel_z = 7;
+ gds.dst_sel_w = 7;
+ gds.src_gpr2 = 0;
+ gds.alloc_consume = !is_cm;
+ r = r600_bytecode_add_gds(ctx->bc, &gds);
+ if (r)
+ return r;
+
+ ctx->bc->cf_last->vpm = 1;
+ return 0;
+}
+
+/* Stage a RAT index coordinate in a fresh temp: copy the used channels, zero the unused ones, and move a 1D-array layer from Y into Z where the hardware expects it. */
+static int load_index_src(struct r600_shader_ctx *ctx, int src_index, int *idx_gpr)
+{
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ int r, i;
+ struct r600_bytecode_alu alu;
+ int temp_reg = r600_get_temp(ctx);
+ for (i = 0; i < 4; i++) {
+ bool def_val = true, write_zero = false;
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_ADD;
- alu.src[0].sel = V_SQ_ALU_SRC_1;
- alu.src[0].chan = 0;
- r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
- r600_bytecode_src_toggle_neg(&alu.src[1]);
- alu.dst.sel = ctx->temp_reg;
+ alu.op = ALU_OP1_MOV;
+ alu.dst.sel = temp_reg;
alu.dst.chan = i;
- if (i == lasti) {
- alu.last = 1;
+
+ switch (inst->Memory.Texture) {
+ case TGSI_TEXTURE_BUFFER:
+ case TGSI_TEXTURE_1D:
+ if (i == 1 || i == 2 || i == 3) { /* only X is meaningful */
+ write_zero = true;
+ }
+ break;
+ case TGSI_TEXTURE_1D_ARRAY:
+ if (i == 1 || i == 3)
+ write_zero = true;
+ else if (i == 2) {
+ r600_bytecode_src(&alu.src[0], &ctx->src[src_index], 1); /* array layer comes from source Y but must land in Z */
+ def_val = false;
+ }
+ break;
+ case TGSI_TEXTURE_2D:
+ if (i == 2 || i == 3)
+ write_zero = true;
+ break;
+ default:
+ if (i == 3)
+ write_zero = true;
+ break;
+ }
+
+ if (write_zero) {
+ alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[0].value = 0;
+ } else if (def_val) {
+ r600_bytecode_src(&alu.src[0], &ctx->src[src_index], i); /* straight channel copy */
}
+
+ if (i == 3)
+ alu.last = 1;
alu.dst.write = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
}
+ *idx_gpr = temp_reg;
+ return 0;
+}
- /* (1 - src0) * src2 */
- for (i = 0; i < lasti + 1; i++) {
- if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
- continue;
-
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_MUL;
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = i;
- r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
- alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = i;
- if (i == lasti) {
- alu.last = 1;
- }
- alu.dst.write = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
+static int load_buffer_coord(struct r600_shader_ctx *ctx, int src_idx,
+ int temp_reg)
+{ /* Convert a byte-offset source into a dword element index (offset >> 2) in temp_reg.x. */
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ int r;
+ if (inst->Src[src_idx].Register.File == TGSI_FILE_IMMEDIATE) {
+ int value = (ctx->literals[4 * inst->Src[src_idx].Register.Index + inst->Src[src_idx].Register.SwizzleX]);
+ r = single_alu_op2(ctx, ALU_OP1_MOV,
+ temp_reg, 0,
+ V_SQ_ALU_SRC_LITERAL, value >> 2, /* immediate offset: fold the shift at compile time */
+ 0, 0);
if (r)
return r;
- }
-
- /* src0 * src1 + (1 - src0) * src2 */
- if (ctx->src[0].abs)
- temp_regs[0] = r600_get_temp(ctx);
- else
- temp_regs[0] = 0;
- if (ctx->src[1].abs)
- temp_regs[1] = r600_get_temp(ctx);
- else
- temp_regs[1] = 0;
-
- for (i = 0; i < lasti + 1; i++) {
- if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
- continue;
-
+ } else {
+ struct r600_bytecode_alu alu;
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP3_MULADD;
- alu.is_op3 = 1;
- r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]);
- if (r)
- return r;
- r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[1], &ctx->src[1]);
- if (r)
- return r;
- alu.src[2].sel = ctx->temp_reg;
- alu.src[2].chan = i;
-
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
- alu.dst.chan = i;
- if (i == lasti) {
- alu.last = 1;
- }
+ alu.op = ALU_OP2_LSHR_INT; /* runtime offset: shift right by 2 on the GPU */
+ r600_bytecode_src(&alu.src[0], &ctx->src[src_idx], 0);
+ alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[1].value = 2;
+ alu.dst.sel = temp_reg;
+ alu.dst.write = 1;
+ alu.last = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
return 0;
}
-static int tgsi_cmp(struct r600_shader_ctx *ctx)
+static int tgsi_load_buffer(struct r600_shader_ctx *ctx) /* TGSI LOAD from an SSBO: VFETCH of up to 4 dwords at the computed element index. */
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
- struct r600_bytecode_alu alu;
- int i, r, j;
- int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
- int temp_regs[3];
- unsigned op;
+ /* have to work out the offset into the RAT immediate return buffer */
+ struct r600_bytecode_vtx vtx;
+ struct r600_bytecode_cf *cf;
+ int r;
+ int temp_reg = r600_get_temp(ctx);
+ unsigned rat_index_mode;
+ unsigned base;
- if (ctx->src[0].abs && ctx->src[0].neg) {
- op = ALU_OP3_CNDE;
- ctx->src[0].abs = 0;
- ctx->src[0].neg = 0;
+ rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
+ base = R600_IMAGE_REAL_RESOURCE_OFFSET + ctx->info.file_count[TGSI_FILE_IMAGE]; /* buffers live after the images */
+
+ r = load_buffer_coord(ctx, 1, temp_reg); /* dword index from byte offset in Src[1] */
+ if (r)
+ return r;
+ ctx->bc->cf_last->barrier = 1;
+ memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
+ vtx.op = FETCH_OP_VFETCH;
+ vtx.buffer_id = inst->Src[0].Register.Index + base;
+ vtx.buffer_index_mode = rat_index_mode;
+ vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
+ vtx.src_gpr = temp_reg;
+ vtx.src_sel_x = 0;
+ vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
+ vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */
+ vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; /* SEL_Y */
+ vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; /* SEL_Z */
+ vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; /* SEL_W */
+ vtx.num_format_all = 1;
+ vtx.format_comp_all = 1;
+ vtx.srf_mode_all = 0;
+
+ if (inst->Dst[0].Register.WriteMask & 8) { /* width of fetch follows highest written channel */
+ vtx.data_format = FMT_32_32_32_32;
+ vtx.use_const_fields = 0;
+ } else if (inst->Dst[0].Register.WriteMask & 4) {
+ vtx.data_format = FMT_32_32_32;
+ vtx.use_const_fields = 0;
+ } else if (inst->Dst[0].Register.WriteMask & 2) {
+ vtx.data_format = FMT_32_32;
+ vtx.use_const_fields = 0;
} else {
- op = ALU_OP3_CNDGE;
+ vtx.data_format = FMT_32;
+ vtx.use_const_fields = 0;
}
- for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
- temp_regs[j] = 0;
- if (ctx->src[j].abs)
- temp_regs[j] = r600_get_temp(ctx);
- }
+ r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx);
+ if (r)
+ return r;
+ cf = ctx->bc->cf_last;
+ cf->barrier = 1;
+ return 0;
+}
- for (i = 0; i < lasti + 1; i++) {
- if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
- continue;
+static int tgsi_load_rat(struct r600_shader_ctx *ctx)
+{ /* TGSI LOAD from an image: MEM_RAT NOP_RTN read, then VFETCH the returned value from the immediate buffer. */
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ /* have to work out the offset into the RAT immediate return buffer */
+ struct r600_bytecode_vtx vtx;
+ struct r600_bytecode_cf *cf;
+ int r;
+ int idx_gpr;
+ unsigned format, num_format, format_comp, endian;
+ const struct util_format_description *desc;
+ unsigned rat_index_mode;
+ unsigned immed_base;
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = op;
- r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]);
- if (r)
- return r;
- r = tgsi_make_src_for_op3(ctx, temp_regs[2], i, &alu.src[1], &ctx->src[2]);
- if (r)
- return r;
- r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[2], &ctx->src[1]);
- if (r)
- return r;
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
- alu.dst.chan = i;
- alu.dst.write = 1;
- alu.is_op3 = 1;
- if (i == lasti)
- alu.last = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
+ rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
+
+ immed_base = R600_IMAGE_IMMED_RESOURCE_OFFSET;
+ r = load_index_src(ctx, 1, &idx_gpr); /* coordinate from Src[1], fixed up per target dim */
+ if (r)
+ return r;
+
+ if (rat_index_mode)
+ egcm_load_index_reg(ctx->bc, 1, false);
+
+ r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
+ cf = ctx->bc->cf_last;
+
+ cf->rat.id = ctx->shader->rat_base + inst->Src[0].Register.Index;
+ cf->rat.inst = V_RAT_INST_NOP_RTN; /* read-with-return: no modification, value comes back */
+ cf->rat.index_mode = rat_index_mode;
+ cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ_IND;
+ cf->output.gpr = ctx->thread_id_gpr;
+ cf->output.index_gpr = idx_gpr;
+ cf->output.comp_mask = 0xf;
+ cf->output.burst_count = 1;
+ cf->vpm = 1;
+ cf->barrier = 1;
+ cf->mark = 1;
+ cf->output.elem_size = 0;
+
+ r600_bytecode_add_cfinst(ctx->bc, CF_OP_WAIT_ACK); /* wait for the RAT return before fetching it */
+ cf = ctx->bc->cf_last;
+ cf->barrier = 1;
+
+ desc = util_format_description(inst->Memory.Format);
+ r600_vertex_data_type(inst->Memory.Format,
+ &format, &num_format, &format_comp, &endian);
+ memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
+ vtx.op = FETCH_OP_VFETCH;
+ vtx.buffer_id = immed_base + inst->Src[0].Register.Index;
+ vtx.buffer_index_mode = rat_index_mode;
+ vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
+ vtx.src_gpr = ctx->thread_id_gpr; /* per-thread slot in the immediate return buffer */
+ vtx.src_sel_x = 1;
+ vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
+ vtx.dst_sel_x = desc->swizzle[0];
+ vtx.dst_sel_y = desc->swizzle[1];
+ vtx.dst_sel_z = desc->swizzle[2];
+ vtx.dst_sel_w = desc->swizzle[3];
+ vtx.srf_mode_all = 1;
+ vtx.data_format = format;
+ vtx.num_format_all = num_format;
+ vtx.format_comp_all = format_comp;
+ vtx.endian = endian;
+ vtx.offset = 0;
+ vtx.mega_fetch_count = 3;
+ r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx);
+ if (r)
+ return r;
+ cf = ctx->bc->cf_last;
+ cf->barrier = 1;
return 0;
}
-static int tgsi_ucmp(struct r600_shader_ctx *ctx)
+static int tgsi_load_lds(struct r600_shader_ctx *ctx) /* TGSI LOAD from shared memory: copy the byte address to a temp, then LDS-fetch the written channels. */
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
struct r600_bytecode_alu alu;
- int i, r;
- int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
-
- for (i = 0; i < lasti + 1; i++) {
- if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
- continue;
+ int r;
+ int temp_reg = r600_get_temp(ctx);
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ r600_bytecode_src(&alu.src[0], &ctx->src[1], 0); /* LDS address from Src[1].x */
+ alu.dst.sel = temp_reg;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ r = do_lds_fetch_values(ctx, temp_reg,
+ ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index, inst->Dst[0].Register.WriteMask);
+ if (r)
+ return r;
+ return 0;
+}
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP3_CNDE_INT;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
- r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
- r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
- alu.dst.chan = i;
- alu.dst.write = 1;
- alu.is_op3 = 1;
- if (i == lasti)
- alu.last = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
+static int tgsi_load(struct r600_shader_ctx *ctx)
+{ /* Dispatch TGSI LOAD by the source register file: image / atomic counter / SSBO / shared memory. */
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ if (inst->Src[0].Register.File == TGSI_FILE_IMAGE)
+ return tgsi_load_rat(ctx);
+ if (inst->Src[0].Register.File == TGSI_FILE_HW_ATOMIC)
+ return tgsi_load_gds(ctx);
+ if (inst->Src[0].Register.File == TGSI_FILE_BUFFER)
+ return tgsi_load_buffer(ctx);
+ if (inst->Src[0].Register.File == TGSI_FILE_MEMORY)
+ return tgsi_load_lds(ctx);
return 0;
}
-static int tgsi_xpd(struct r600_shader_ctx *ctx)
+static int tgsi_store_buffer_rat(struct r600_shader_ctx *ctx) /* TGSI STORE to an SSBO: one STORE_TYPED per written component at consecutive dword indices. */
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
- static const unsigned int src0_swizzle[] = {2, 0, 1};
- static const unsigned int src1_swizzle[] = {1, 2, 0};
- struct r600_bytecode_alu alu;
- uint32_t use_temp = 0;
- int i, r;
+ struct r600_bytecode_cf *cf;
+ int r, i;
+ unsigned rat_index_mode;
+ int lasti;
+ int temp_reg = r600_get_temp(ctx), treg2 = r600_get_temp(ctx);
+
+ r = load_buffer_coord(ctx, 0, treg2); /* base dword index from the byte offset in Dst addressing source */
+ if (r)
+ return r;
- if (inst->Dst[0].Register.WriteMask != 0xf)
- use_temp = 1;
+ rat_index_mode = inst->Dst[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
+ if (rat_index_mode)
+ egcm_load_index_reg(ctx->bc, 1, false);
- for (i = 0; i < 4; i++) {
+ for (i = 0; i <= 3; i++) { /* zero-init the index temp's four channels */
+ struct r600_bytecode_alu alu;
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_MUL;
- if (i < 3) {
- r600_bytecode_src(&alu.src[0], &ctx->src[0], src0_swizzle[i]);
- r600_bytecode_src(&alu.src[1], &ctx->src[1], src1_swizzle[i]);
- } else {
- alu.src[0].sel = V_SQ_ALU_SRC_0;
- alu.src[0].chan = i;
- alu.src[1].sel = V_SQ_ALU_SRC_0;
- alu.src[1].chan = i;
- }
-
- alu.dst.sel = ctx->temp_reg;
+ alu.op = ALU_OP1_MOV;
+ alu.dst.sel = temp_reg;
alu.dst.chan = i;
+ alu.src[0].sel = V_SQ_ALU_SRC_0;
+ alu.last = (i == 3);
alu.dst.write = 1;
-
- if (i == 3)
- alu.last = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
}
- for (i = 0; i < 4; i++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP3_MULADD;
+ lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+ for (i = 0; i <= lasti; i++) { /* per written component: compute index, stage value, emit store */
+ struct r600_bytecode_alu alu;
+ if (!((1 << i) & inst->Dst[0].Register.WriteMask))
+ continue;
- if (i < 3) {
- r600_bytecode_src(&alu.src[0], &ctx->src[0], src1_swizzle[i]);
- r600_bytecode_src(&alu.src[1], &ctx->src[1], src0_swizzle[i]);
- } else {
- alu.src[0].sel = V_SQ_ALU_SRC_0;
- alu.src[0].chan = i;
- alu.src[1].sel = V_SQ_ALU_SRC_0;
- alu.src[1].chan = i;
- }
+ r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
+ temp_reg, 0,
+ treg2, 0,
+ V_SQ_ALU_SRC_LITERAL, i); /* element index = base + component */
+ if (r)
+ return r;
- alu.src[2].sel = ctx->temp_reg;
- alu.src[2].neg = 1;
- alu.src[2].chan = i;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 0;
- if (use_temp)
- alu.dst.sel = ctx->temp_reg;
- else
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
- alu.dst.chan = i;
+ r600_bytecode_src(&alu.src[0], &ctx->src[1], i); /* value to store comes from Src[1] channel i */
+ alu.last = 1;
alu.dst.write = 1;
- alu.is_op3 = 1;
- if (i == 3)
- alu.last = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
+
+ r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
+ cf = ctx->bc->cf_last;
+
+ cf->rat.id = ctx->shader->rat_base + inst->Dst[0].Register.Index + ctx->info.file_count[TGSI_FILE_IMAGE]; /* buffers sit after images in RAT space */
+ cf->rat.inst = V_RAT_INST_STORE_TYPED;
+ cf->rat.index_mode = rat_index_mode;
+ cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
+ cf->output.gpr = ctx->temp_reg;
+ cf->output.index_gpr = temp_reg;
+ cf->output.comp_mask = 1; /* one dword per store */
+ cf->output.burst_count = 1;
+ cf->vpm = 1;
+ cf->barrier = 1;
+ cf->output.elem_size = 0;
}
- if (use_temp)
- return tgsi_helper_copy(ctx, inst);
return 0;
}
-static int tgsi_exp(struct r600_shader_ctx *ctx)
+static int tgsi_store_rat(struct r600_shader_ctx *ctx) /* TGSI STORE to an image: stage the value in a GPR if needed, then emit a single MEM_RAT STORE_TYPED. */
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
- struct r600_bytecode_alu alu;
- int r;
- unsigned i;
-
- /* result.x = 2^floor(src); */
- if (inst->Dst[0].Register.WriteMask & 1) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-
- alu.op = ALU_OP1_FLOOR;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ struct r600_bytecode_cf *cf;
+ bool src_requires_loading = false;
+ int val_gpr, idx_gpr;
+ int r, i;
+ unsigned rat_index_mode;
- alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = 0;
- alu.dst.write = 1;
- alu.last = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
+ rat_index_mode = inst->Dst[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
- if (ctx->bc->chip_class == CAYMAN) {
- for (i = 0; i < 3; i++) {
- alu.op = ALU_OP1_EXP_IEEE;
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = 0;
+ r = load_index_src(ctx, 0, &idx_gpr); /* store coordinate from Dst addressing Src[0] */
+ if (r)
+ return r;
- alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = i;
- alu.dst.write = i == 0;
- alu.last = i == 2;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
- } else {
- alu.op = ALU_OP1_EXP_IEEE;
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = 0;
+ if (inst->Src[1].Register.File != TGSI_FILE_TEMPORARY) /* CF export needs the value in a GPR */
+ src_requires_loading = true;
+ if (src_requires_loading) {
+ struct r600_bytecode_alu alu;
+ for (i = 0; i < 4; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = 0;
- alu.dst.write = 1;
- alu.last = 1;
+ alu.dst.chan = i;
+
+ r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
+ if (i == 3)
+ alu.last = 1;
+ alu.dst.write = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
}
- }
-
- /* result.y = tmp - floor(tmp); */
- if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-
- alu.op = ALU_OP1_FRACT;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ val_gpr = ctx->temp_reg;
+ } else
+ val_gpr = tgsi_tex_get_src_gpr(ctx, 1); /* already a temporary: export straight from it */
+ if (rat_index_mode)
+ egcm_load_index_reg(ctx->bc, 1, false);
+
+ r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
+ cf = ctx->bc->cf_last;
+
+ cf->rat.id = ctx->shader->rat_base + inst->Dst[0].Register.Index;
+ cf->rat.inst = V_RAT_INST_STORE_TYPED;
+ cf->rat.index_mode = rat_index_mode;
+ cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND;
+ cf->output.gpr = val_gpr;
+ cf->output.index_gpr = idx_gpr;
+ cf->output.comp_mask = 0xf;
+ cf->output.burst_count = 1;
+ cf->vpm = 1;
+ cf->barrier = 1;
+ cf->output.elem_size = 0;
+ return 0;
+}
- alu.dst.sel = ctx->temp_reg;
-#if 0
- r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
- if (r)
- return r;
-#endif
- alu.dst.write = 1;
- alu.dst.chan = 1;
+static int tgsi_store_lds(struct r600_shader_ctx *ctx)
+{ /* TGSI STORE to shared memory: stage per-channel byte addresses, then emit LDS writes (paired where two adjacent channels are both written). */
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bytecode_alu alu;
+ int r, i, lasti;
+ int write_mask = inst->Dst[0].Register.WriteMask;
+ int temp_reg = r600_get_temp(ctx);
- alu.last = 1;
+ /* LDS write: base byte address comes from Src[0].x */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ alu.dst.sel = temp_reg;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
+ lasti = tgsi_last_instruction(write_mask);
+ for (i = 1; i <= lasti; i++) { /* per-channel addresses: base + 4*i */
+ if (!(write_mask & (1 << i)))
+ continue;
+ r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
+ temp_reg, i,
+ temp_reg, 0,
+ V_SQ_ALU_SRC_LITERAL, 4 * i);
if (r)
return r;
}
+ for (i = 0; i <= lasti; i++) {
+ if (!(write_mask & (1 << i)))
+ continue;
- /* result.z = RoughApprox2ToX(tmp);*/
- if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) {
- if (ctx->bc->chip_class == CAYMAN) {
- for (i = 0; i < 3; i++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_EXP_IEEE;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
-
- alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = i;
- if (i == 2) {
- alu.dst.write = 1;
- alu.last = 1;
- }
-
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
- } else {
+ if ((i == 0 && ((write_mask & 3) == 3)) ||
+ (i == 2 && ((write_mask & 0xc) == 0xc))) { /* two adjacent channels written: one WRITE_REL stores both */
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_EXP_IEEE;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
-
- alu.dst.sel = ctx->temp_reg;
- alu.dst.write = 1;
- alu.dst.chan = 2;
+ alu.op = LDS_OP3_LDS_WRITE_REL;
+ alu.src[0].sel = temp_reg;
+ alu.src[0].chan = i;
+ r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
+ r600_bytecode_src(&alu.src[2], &ctx->src[1], i + 1);
alu.last = 1;
-
+ alu.is_lds_idx_op = true;
+ alu.lds_idx = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
+ i += 1; /* consumed two channels */
+ continue;
}
- }
-
- /* result.w = 1.0;*/
- if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) {
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = LDS_OP2_LDS_WRITE; /* lone channel: single-dword write */
- alu.op = ALU_OP1_MOV;
- alu.src[0].sel = V_SQ_ALU_SRC_1;
- alu.src[0].chan = 0;
+ alu.src[0].sel = temp_reg;
+ alu.src[0].chan = i;
+ r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
- alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = 3;
- alu.dst.write = 1;
alu.last = 1;
+ alu.is_lds_idx_op = true;
+
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
}
- return tgsi_helper_copy(ctx, inst);
+ return 0;
}
-static int tgsi_log(struct r600_shader_ctx *ctx)
+static int tgsi_store(struct r600_shader_ctx *ctx)
+{ /* Dispatch TGSI STORE by the destination register file: SSBO / shared memory / image. */
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER)
+ return tgsi_store_buffer_rat(ctx);
+ else if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY)
+ return tgsi_store_lds(ctx);
+ else
+ return tgsi_store_rat(ctx);
+}
+
+static int tgsi_atomic_op_rat(struct r600_shader_ctx *ctx) /* RAT atomic (image/SSBO): stage operands, emit the MEM_RAT atomic, then VFETCH the returned old value. */
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ /* have to work out the offset into the RAT immediate return buffer */
struct r600_bytecode_alu alu;
+ struct r600_bytecode_vtx vtx;
+ struct r600_bytecode_cf *cf;
int r;
- unsigned i;
-
- /* result.x = floor(log2(|src|)); */
- if (inst->Dst[0].Register.WriteMask & 1) {
- if (ctx->bc->chip_class == CAYMAN) {
- for (i = 0; i < 3; i++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-
- alu.op = ALU_OP1_LOG_IEEE;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
- r600_bytecode_src_set_abs(&alu.src[0]);
-
- alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = i;
- if (i == 0)
- alu.dst.write = 1;
- if (i == 2)
- alu.last = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
+ int idx_gpr;
+ unsigned format, num_format, format_comp, endian;
+ const struct util_format_description *desc;
+ unsigned rat_index_mode;
+ unsigned immed_base;
+ unsigned rat_base;
- } else {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ immed_base = R600_IMAGE_IMMED_RESOURCE_OFFSET;
+ rat_base = ctx->shader->rat_base;
- alu.op = ALU_OP1_LOG_IEEE;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
- r600_bytecode_src_set_abs(&alu.src[0]);
+ if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) { /* SSBOs live after the images in both spaces */
+ immed_base += ctx->info.file_count[TGSI_FILE_IMAGE];
+ rat_base += ctx->info.file_count[TGSI_FILE_IMAGE];
- alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = 0;
- alu.dst.write = 1;
- alu.last = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
+ r = load_buffer_coord(ctx, 1, ctx->temp_reg);
+ if (r)
+ return r;
+ idx_gpr = ctx->temp_reg;
+ } else {
+ r = load_index_src(ctx, 1, &idx_gpr);
+ if (r)
+ return r;
+ }
- alu.op = ALU_OP1_FLOOR;
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = 0;
+ rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
- alu.dst.sel = ctx->temp_reg;
+ if (ctx->inst_info->op == V_RAT_INST_CMPXCHG_INT_RTN) { /* cmpxchg needs both compare and swap values staged */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ alu.dst.sel = ctx->thread_id_gpr;
alu.dst.chan = 0;
alu.dst.write = 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[3], 0); /* swap value -> x */
alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ alu.dst.sel = ctx->thread_id_gpr;
+ if (ctx->bc->chip_class == CAYMAN) /* compare value channel differs per chip */
+ alu.dst.chan = 2;
+ else
+ alu.dst.chan = 3;
+ alu.dst.write = 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[2], 0);
+ alu.last = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
}
- /* result.y = |src.x| / (2 ^ floor(log2(|src.x|))); */
- if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
+ if (rat_index_mode)
+ egcm_load_index_reg(ctx->bc, 1, false);
+ r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT);
+ cf = ctx->bc->cf_last;
+
+ cf->rat.id = rat_base + inst->Src[0].Register.Index;
+ cf->rat.inst = ctx->inst_info->op; /* the specific RAT atomic opcode */
+ cf->rat.index_mode = rat_index_mode;
+ cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ_IND;
+ cf->output.gpr = ctx->thread_id_gpr;
+ cf->output.index_gpr = idx_gpr;
+ cf->output.comp_mask = 0xf;
+ cf->output.burst_count = 1;
+ cf->vpm = 1;
+ cf->barrier = 1;
+ cf->mark = 1;
+ cf->output.elem_size = 0;
+ r600_bytecode_add_cfinst(ctx->bc, CF_OP_WAIT_ACK); /* wait for the atomic's return value */
+ cf = ctx->bc->cf_last;
+ cf->barrier = 1;
+ cf->cf_addr = 1;
- if (ctx->bc->chip_class == CAYMAN) {
- for (i = 0; i < 3; i++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
+ if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) {
+ desc = util_format_description(inst->Memory.Format);
+ r600_vertex_data_type(inst->Memory.Format,
+ &format, &num_format, &format_comp, &endian);
+ vtx.dst_sel_x = desc->swizzle[0];
+ } else {
+ format = FMT_32; /* raw SSBO atomics return a single dword */
+ num_format = 1;
+ format_comp = 0;
+ endian = 0;
+ vtx.dst_sel_x = 0;
+ }
+ vtx.op = FETCH_OP_VFETCH;
+ vtx.buffer_id = immed_base + inst->Src[0].Register.Index;
+ vtx.buffer_index_mode = rat_index_mode;
+ vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
+ vtx.src_gpr = ctx->thread_id_gpr; /* per-thread slot in the immediate return buffer */
+ vtx.src_sel_x = 1;
+ vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
+ vtx.dst_sel_y = 7; /* only .x carries the old value */
+ vtx.dst_sel_z = 7;
+ vtx.dst_sel_w = 7;
+ vtx.use_const_fields = 0;
+ vtx.srf_mode_all = 1;
+ vtx.data_format = format;
+ vtx.num_format_all = num_format;
+ vtx.format_comp_all = format_comp;
+ vtx.endian = endian;
+ vtx.offset = 0;
+ vtx.mega_fetch_count = 0xf;
+ r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx);
+ if (r)
+ return r;
+ cf = ctx->bc->cf_last;
+ cf->vpm = 1;
+ cf->barrier = 1;
+ return 0;
+}
- alu.op = ALU_OP1_LOG_IEEE;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
- r600_bytecode_src_set_abs(&alu.src[0]);
+static int get_gds_op(int opcode)
+{ /* Map a TGSI atomic opcode to its GDS fetch opcode; -1 if GDS has no equivalent. */
+ switch (opcode) {
+ case TGSI_OPCODE_ATOMUADD:
+ return FETCH_OP_GDS_ADD_RET;
+ case TGSI_OPCODE_ATOMAND:
+ return FETCH_OP_GDS_AND_RET;
+ case TGSI_OPCODE_ATOMOR:
+ return FETCH_OP_GDS_OR_RET;
+ case TGSI_OPCODE_ATOMXOR:
+ return FETCH_OP_GDS_XOR_RET;
+ case TGSI_OPCODE_ATOMUMIN:
+ return FETCH_OP_GDS_MIN_UINT_RET;
+ case TGSI_OPCODE_ATOMUMAX:
+ return FETCH_OP_GDS_MAX_UINT_RET;
+ case TGSI_OPCODE_ATOMXCHG:
+ return FETCH_OP_GDS_XCHG_RET;
+ case TGSI_OPCODE_ATOMCAS:
+ return FETCH_OP_GDS_CMP_XCHG_RET;
+ default:
+ return -1;
+ }
+}
- alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = i;
- if (i == 1)
- alu.dst.write = 1;
- if (i == 2)
- alu.last = 1;
+static int tgsi_atomic_op_gds(struct r600_shader_ctx *ctx)
+{ /* Atomic-counter op via GDS: stage address and operand(s) in temp_reg, emit the GDS atomic, old value returns in dst.x. */
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bytecode_gds gds;
+ struct r600_bytecode_alu alu;
+ int gds_op = get_gds_op(inst->Instruction.Opcode);
+ int r;
+ int uav_id = 0;
+ int uav_index_mode = 0;
+ bool is_cm = (ctx->bc->chip_class == CAYMAN);
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
- } else {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ if (gds_op == -1) {
+ fprintf(stderr, "unknown GDS op for opcode %d\n", inst->Instruction.Opcode);
+ return -1;
+ }
- alu.op = ALU_OP1_LOG_IEEE;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
- r600_bytecode_src_set_abs(&alu.src[0]);
+ r = tgsi_set_gds_temp(ctx, &uav_id, &uav_index_mode); /* counter address -> temp_reg (see tgsi_set_gds_temp) */
+ if (r)
+ return r;
+ if (gds_op == FETCH_OP_GDS_CMP_XCHG_RET) { /* cmpxchg also needs the swap value (Src[3]) staged */
+ if (inst->Src[3].Register.File == TGSI_FILE_IMMEDIATE) {
+ int value = (ctx->literals[4 * inst->Src[3].Register.Index + inst->Src[3].Register.SwizzleX]);
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = 1;
+ alu.dst.chan = is_cm ? 2 : 1; /* operand channel layout differs per chip */
+ alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[0].value = value;
+ alu.last = 1;
alu.dst.write = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ } else {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = is_cm ? 2 : 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[3], 0);
alu.last = 1;
-
+ alu.dst.write = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
}
-
+ }
+ if (inst->Src[2].Register.File == TGSI_FILE_IMMEDIATE) {
+ int value = (ctx->literals[4 * inst->Src[2].Register.Index + inst->Src[2].Register.SwizzleX]);
+ int abs_value = abs(value);
+ if (abs_value != value && gds_op == FETCH_OP_GDS_ADD_RET)
+ gds_op = FETCH_OP_GDS_SUB_RET; /* add of a negative immediate becomes a sub of its magnitude */
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-
- alu.op = ALU_OP1_FLOOR;
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = 1;
-
+ alu.op = ALU_OP1_MOV;
alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = 1;
- alu.dst.write = 1;
+ alu.dst.chan = is_cm ? 1 : 0;
+ alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[0].value = abs_value;
alu.last = 1;
-
+ alu.dst.write = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
+ } else {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = is_cm ? 1 : 0;
+ r600_bytecode_src(&alu.src[0], &ctx->src[2], 0); /* runtime operand from Src[2].x */
+ alu.last = 1;
+ alu.dst.write = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
- if (ctx->bc->chip_class == CAYMAN) {
- for (i = 0; i < 3; i++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_EXP_IEEE;
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = 1;
-
- alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = i;
- if (i == 1)
- alu.dst.write = 1;
- if (i == 2)
- alu.last = 1;
-
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
- } else {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_EXP_IEEE;
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = 1;
-
- alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = 1;
- alu.dst.write = 1;
- alu.last = 1;
-
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
-
- if (ctx->bc->chip_class == CAYMAN) {
- for (i = 0; i < 3; i++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_RECIP_IEEE;
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = 1;
- alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = i;
- if (i == 1)
- alu.dst.write = 1;
- if (i == 2)
- alu.last = 1;
+ memset(&gds, 0, sizeof(struct r600_bytecode_gds));
+ gds.op = gds_op;
+ gds.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
+ gds.uav_id = is_cm ? 0 : uav_id; /* Cayman addresses via src_gpr, not uav_id */
+ gds.uav_index_mode = is_cm ? 0 : uav_index_mode;
+ gds.src_gpr = ctx->temp_reg;
+ gds.src_gpr2 = 0;
+ gds.src_sel_x = is_cm ? 0 : 4; /* 4 selects the constant-zero source on EG */
+ gds.src_sel_y = is_cm ? 1 : 0;
+ if (gds_op == FETCH_OP_GDS_CMP_XCHG_RET)
+ gds.src_sel_z = is_cm ? 2 : 1;
+ else
+ gds.src_sel_z = 7; /* masked when there is no third operand */
+ gds.dst_sel_x = 0;
+ gds.dst_sel_y = 7;
+ gds.dst_sel_z = 7;
+ gds.dst_sel_w = 7;
+ gds.alloc_consume = !is_cm;
+
+ r = r600_bytecode_add_gds(ctx->bc, &gds);
+ if (r)
+ return r;
+ ctx->bc->cf_last->vpm = 1;
+ return 0;
+}
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
- } else {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_RECIP_IEEE;
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = 1;
+static int get_lds_op(int opcode)
+{
+ switch (opcode) {
+ case TGSI_OPCODE_ATOMUADD:
+ return LDS_OP2_LDS_ADD_RET;
+ case TGSI_OPCODE_ATOMAND:
+ return LDS_OP2_LDS_AND_RET;
+ case TGSI_OPCODE_ATOMOR:
+ return LDS_OP2_LDS_OR_RET;
+ case TGSI_OPCODE_ATOMXOR:
+ return LDS_OP2_LDS_XOR_RET;
+ case TGSI_OPCODE_ATOMUMIN:
+ return LDS_OP2_LDS_MIN_UINT_RET;
+ case TGSI_OPCODE_ATOMUMAX:
+ return LDS_OP2_LDS_MAX_UINT_RET;
+ case TGSI_OPCODE_ATOMIMIN:
+ return LDS_OP2_LDS_MIN_INT_RET;
+ case TGSI_OPCODE_ATOMIMAX:
+ return LDS_OP2_LDS_MAX_INT_RET;
+ case TGSI_OPCODE_ATOMXCHG:
+ return LDS_OP2_LDS_XCHG_RET;
+ case TGSI_OPCODE_ATOMCAS:
+ return LDS_OP3_LDS_CMP_XCHG_RET;
+ default:
+ return -1;
+ }
+}
- alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = 1;
- alu.dst.write = 1;
- alu.last = 1;
+static int tgsi_atomic_op_lds(struct r600_shader_ctx *ctx)
+{
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ int lds_op = get_lds_op(inst->Instruction.Opcode);
+ int r;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
+ struct r600_bytecode_alu alu;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = lds_op;
+ alu.is_lds_idx_op = true;
+ alu.last = 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[1], 0);
+ r600_bytecode_src(&alu.src[1], &ctx->src[2], 0);
+ if (lds_op == LDS_OP3_LDS_CMP_XCHG_RET)
+ r600_bytecode_src(&alu.src[2], &ctx->src[3], 0);
+ else
+ alu.src[2].sel = V_SQ_ALU_SRC_0;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ /* then read from LDS_OQ_A_POP */
+ memset(&alu, 0, sizeof(alu));
- alu.op = ALU_OP2_MUL;
+ alu.op = ALU_OP1_MOV;
+ alu.src[0].sel = EG_V_SQ_ALU_SRC_LDS_OQ_A_POP;
+ alu.src[0].chan = 0;
+ tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
- r600_bytecode_src_set_abs(&alu.src[0]);
+ return 0;
+}
- alu.src[1].sel = ctx->temp_reg;
- alu.src[1].chan = 1;
+static int tgsi_atomic_op(struct r600_shader_ctx *ctx)
+{
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ if (inst->Src[0].Register.File == TGSI_FILE_IMAGE)
+ return tgsi_atomic_op_rat(ctx);
+ if (inst->Src[0].Register.File == TGSI_FILE_HW_ATOMIC)
+ return tgsi_atomic_op_gds(ctx);
+ if (inst->Src[0].Register.File == TGSI_FILE_BUFFER)
+ return tgsi_atomic_op_rat(ctx);
+ if (inst->Src[0].Register.File == TGSI_FILE_MEMORY)
+ return tgsi_atomic_op_lds(ctx);
+ return 0;
+}
- alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = 1;
- alu.dst.write = 1;
- alu.last = 1;
+static int tgsi_resq(struct r600_shader_ctx *ctx)
+{
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ unsigned sampler_index_mode;
+ struct r600_bytecode_tex tex;
+ int r;
+ boolean has_txq_cube_array_z = false;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
+ if (inst->Src[0].Register.File == TGSI_FILE_BUFFER ||
+ (inst->Src[0].Register.File == TGSI_FILE_IMAGE && inst->Memory.Texture == TGSI_TEXTURE_BUFFER)) {
+ if (ctx->bc->chip_class < EVERGREEN)
+ ctx->shader->uses_tex_buffers = true;
+ unsigned eg_buffer_base = 0;
+ eg_buffer_base = R600_IMAGE_REAL_RESOURCE_OFFSET;
+ if (inst->Src[0].Register.File == TGSI_FILE_BUFFER)
+ eg_buffer_base += ctx->info.file_count[TGSI_FILE_IMAGE];
+ return r600_do_buffer_txq(ctx, 0, ctx->shader->image_size_const_offset, eg_buffer_base);
}
- /* result.z = log2(|src|);*/
- if ((inst->Dst[0].Register.WriteMask >> 2) & 1) {
- if (ctx->bc->chip_class == CAYMAN) {
- for (i = 0; i < 3; i++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-
- alu.op = ALU_OP1_LOG_IEEE;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
- r600_bytecode_src_set_abs(&alu.src[0]);
-
- alu.dst.sel = ctx->temp_reg;
- if (i == 2)
- alu.dst.write = 1;
- alu.dst.chan = i;
- if (i == 2)
- alu.last = 1;
-
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
- } else {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ if (inst->Memory.Texture == TGSI_TEXTURE_CUBE_ARRAY &&
+ inst->Dst[0].Register.WriteMask & 4) {
+ ctx->shader->has_txq_cube_array_z_comp = true;
+ has_txq_cube_array_z = true;
+ }
- alu.op = ALU_OP1_LOG_IEEE;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
- r600_bytecode_src_set_abs(&alu.src[0]);
+ sampler_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE
+ if (sampler_index_mode)
+ egcm_load_index_reg(ctx->bc, 1, false);
- alu.dst.sel = ctx->temp_reg;
- alu.dst.write = 1;
- alu.dst.chan = 2;
- alu.last = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
- }
+ /* does this shader want a num layers from TXQ for a cube array? */
+ if (has_txq_cube_array_z) {
+ int id = tgsi_tex_get_src_gpr(ctx, 0) + ctx->shader->image_size_const_offset;
+ struct r600_bytecode_alu alu;
- /* result.w = 1.0; */
- if ((inst->Dst[0].Register.WriteMask >> 3) & 1) {
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-
alu.op = ALU_OP1_MOV;
- alu.src[0].sel = V_SQ_ALU_SRC_1;
- alu.src[0].chan = 0;
- alu.dst.sel = ctx->temp_reg;
- alu.dst.chan = 3;
- alu.dst.write = 1;
+ alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL;
+ /* with eg each dword is either number of cubes */
+ alu.src[0].sel += id / 4;
+ alu.src[0].chan = id % 4;
+ alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER;
+ tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst);
alu.last = 1;
-
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
+ /* disable writemask from texture instruction */
+ inst->Dst[0].Register.WriteMask &= ~4;
}
+ memset(&tex, 0, sizeof(struct r600_bytecode_tex));
+ tex.op = ctx->inst_info->op;
+ tex.sampler_id = R600_IMAGE_REAL_RESOURCE_OFFSET + inst->Src[0].Register.Index;
+ tex.sampler_index_mode = sampler_index_mode;
+ tex.resource_id = tex.sampler_id;
+ tex.resource_index_mode = sampler_index_mode;
+ tex.src_sel_x = 4;
+ tex.src_sel_y = 4;
+ tex.src_sel_z = 4;
+ tex.src_sel_w = 4;
+ tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
+ tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
+ tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
+ tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
+ tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index;
+ r = r600_bytecode_add_tex(ctx->bc, &tex);
+ if (r)
+ return r;
- return tgsi_helper_copy(ctx, inst);
+ return 0;
}
-static int tgsi_eg_arl(struct r600_shader_ctx *ctx)
+static int tgsi_lrp(struct r600_shader_ctx *ctx)
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
struct r600_bytecode_alu alu;
+ unsigned lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+ struct r600_bytecode_alu_src srcs[2][4];
+ unsigned i;
int r;
- int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
- unsigned reg = get_address_file_reg(ctx, inst->Dst[0].Register.Index);
- assert(inst->Dst[0].Register.Index < 3);
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ /* optimize if it's just an equal balance */
+ if (ctx->src[0].sel == V_SQ_ALU_SRC_0_5) {
+ for (i = 0; i < lasti + 1; i++) {
+ if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
+ continue;
- switch (inst->Instruction.Opcode) {
- case TGSI_OPCODE_ARL:
- alu.op = ALU_OP1_FLT_TO_INT_FLOOR;
- break;
- case TGSI_OPCODE_ARR:
- alu.op = ALU_OP1_FLT_TO_INT;
- break;
- case TGSI_OPCODE_UARL:
- alu.op = ALU_OP1_MOV;
- break;
- default:
- assert(0);
- return -1;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_ADD;
+ r600_bytecode_src(&alu.src[0], &ctx->src[1], i);
+ r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
+ alu.omod = 3;
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ alu.dst.chan = i;
+ if (i == lasti) {
+ alu.last = 1;
+ }
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ return 0;
}
- for (i = 0; i <= lasti; ++i) {
+ /* 1 - src0 */
+ for (i = 0; i < lasti + 1; i++) {
if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
continue;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
- alu.last = i == lasti;
- alu.dst.sel = reg;
- alu.dst.chan = i;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_ADD;
+ alu.src[0].sel = V_SQ_ALU_SRC_1;
+ alu.src[0].chan = 0;
+ r600_bytecode_src(&alu.src[1], &ctx->src[0], i);
+ r600_bytecode_src_toggle_neg(&alu.src[1]);
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ if (i == lasti) {
+ alu.last = 1;
+ }
alu.dst.write = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
}
- if (inst->Dst[0].Register.Index > 0)
- ctx->bc->index_loaded[inst->Dst[0].Register.Index - 1] = 0;
- else
- ctx->bc->ar_loaded = 0;
+ /* (1 - src0) * src2 */
+ for (i = 0; i < lasti + 1; i++) {
+ if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
+ continue;
- return 0;
-}
-static int tgsi_r600_arl(struct r600_shader_ctx *ctx)
-{
- struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
- struct r600_bytecode_alu alu;
- int r;
- int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
-
- switch (inst->Instruction.Opcode) {
- case TGSI_OPCODE_ARL:
- memset(&alu, 0, sizeof(alu));
- alu.op = ALU_OP1_FLOOR;
- alu.dst.sel = ctx->bc->ar_reg;
- alu.dst.write = 1;
- for (i = 0; i <= lasti; ++i) {
- if (inst->Dst[0].Register.WriteMask & (1 << i)) {
- alu.dst.chan = i;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
- alu.last = i == lasti;
- if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
- return r;
- }
- }
-
- memset(&alu, 0, sizeof(alu));
- alu.op = ALU_OP1_FLT_TO_INT;
- alu.src[0].sel = ctx->bc->ar_reg;
- alu.dst.sel = ctx->bc->ar_reg;
- alu.dst.write = 1;
- /* FLT_TO_INT is trans-only on r600/r700 */
- alu.last = TRUE;
- for (i = 0; i <= lasti; ++i) {
- alu.dst.chan = i;
- alu.src[0].chan = i;
- if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
- return r;
- }
- break;
- case TGSI_OPCODE_ARR:
- memset(&alu, 0, sizeof(alu));
- alu.op = ALU_OP1_FLT_TO_INT;
- alu.dst.sel = ctx->bc->ar_reg;
- alu.dst.write = 1;
- /* FLT_TO_INT is trans-only on r600/r700 */
- alu.last = TRUE;
- for (i = 0; i <= lasti; ++i) {
- if (inst->Dst[0].Register.WriteMask & (1 << i)) {
- alu.dst.chan = i;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
- if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
- return r;
- }
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_MUL;
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = i;
+ r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ if (i == lasti) {
+ alu.last = 1;
}
- break;
- case TGSI_OPCODE_UARL:
- memset(&alu, 0, sizeof(alu));
- alu.op = ALU_OP1_MOV;
- alu.dst.sel = ctx->bc->ar_reg;
alu.dst.write = 1;
- for (i = 0; i <= lasti; ++i) {
- if (inst->Dst[0].Register.WriteMask & (1 << i)) {
- alu.dst.chan = i;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
- alu.last = i == lasti;
- if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
- return r;
- }
- }
- break;
- default:
- assert(0);
- return -1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
}
- ctx->bc->ar_loaded = 0;
- return 0;
-}
+ /* src0 * src1 + (1 - src0) * src2 */
-static int tgsi_opdst(struct r600_shader_ctx *ctx)
-{
- struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
- struct r600_bytecode_alu alu;
- int i, r = 0;
+ for (i = 0; i < 2; i++) {
+ r = tgsi_make_src_for_op3(ctx, inst->Dst[0].Register.WriteMask,
+ srcs[i], &ctx->src[i]);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < lasti + 1; i++) {
+ if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
+ continue;
- for (i = 0; i < 4; i++) {
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP3_MULADD;
+ alu.is_op3 = 1;
+ alu.src[0] = srcs[0][i];
+ alu.src[1] = srcs[1][i];
+ alu.src[2].sel = ctx->temp_reg;
+ alu.src[2].chan = i;
- alu.op = ALU_OP2_MUL;
tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-
- if (i == 0 || i == 3) {
- alu.src[0].sel = V_SQ_ALU_SRC_1;
- } else {
- r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
- }
-
- if (i == 0 || i == 2) {
- alu.src[1].sel = V_SQ_ALU_SRC_1;
- } else {
- r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
- }
- if (i == 3)
+ alu.dst.chan = i;
+ if (i == lasti) {
alu.last = 1;
+ }
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
return 0;
}
-static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode, int alu_type)
+static int tgsi_cmp(struct r600_shader_ctx *ctx)
{
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
struct r600_bytecode_alu alu;
- int r;
+ int i, r, j;
+ int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+ struct r600_bytecode_alu_src srcs[3][4];
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = opcode;
- alu.execute_mask = 1;
- alu.update_pred = 1;
+ unsigned op;
- alu.dst.sel = ctx->temp_reg;
- alu.dst.write = 1;
- alu.dst.chan = 0;
+ if (ctx->src[0].abs && ctx->src[0].neg) {
+ op = ALU_OP3_CNDE;
+ ctx->src[0].abs = 0;
+ ctx->src[0].neg = 0;
+ } else {
+ op = ALU_OP3_CNDGE;
+ }
- r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
- alu.src[1].sel = V_SQ_ALU_SRC_0;
- alu.src[1].chan = 0;
+ for (j = 0; j < inst->Instruction.NumSrcRegs; j++) {
+ r = tgsi_make_src_for_op3(ctx, inst->Dst[0].Register.WriteMask,
+ srcs[j], &ctx->src[j]);
+ if (r)
+ return r;
+ }
- alu.last = 1;
+ for (i = 0; i < lasti + 1; i++) {
+ if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
+ continue;
- r = r600_bytecode_add_alu_type(ctx->bc, &alu, alu_type);
- if (r)
- return r;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = op;
+ alu.src[0] = srcs[0][i];
+ alu.src[1] = srcs[2][i];
+ alu.src[2] = srcs[1][i];
+
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ alu.is_op3 = 1;
+ if (i == lasti)
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
return 0;
}
-static int pops(struct r600_shader_ctx *ctx, int pops)
+static int tgsi_ucmp(struct r600_shader_ctx *ctx)
{
- unsigned force_pop = ctx->bc->force_add_cf;
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bytecode_alu alu;
+ int i, r;
+ int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
- if (!force_pop) {
- int alu_pop = 3;
- if (ctx->bc->cf_last) {
- if (ctx->bc->cf_last->op == CF_OP_ALU)
- alu_pop = 0;
- else if (ctx->bc->cf_last->op == CF_OP_ALU_POP_AFTER)
- alu_pop = 1;
- }
- alu_pop += pops;
- if (alu_pop == 1) {
- ctx->bc->cf_last->op = CF_OP_ALU_POP_AFTER;
- ctx->bc->force_add_cf = 1;
- } else if (alu_pop == 2) {
- ctx->bc->cf_last->op = CF_OP_ALU_POP2_AFTER;
- ctx->bc->force_add_cf = 1;
- } else {
- force_pop = 1;
- }
- }
+ for (i = 0; i < lasti + 1; i++) {
+ if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
+ continue;
- if (force_pop) {
- r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP);
- ctx->bc->cf_last->pop_count = pops;
- ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP3_CNDE_INT;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
+ r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
+ r600_bytecode_src(&alu.src[2], &ctx->src[1], i);
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ alu.is_op3 = 1;
+ if (i == lasti)
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
}
-
return 0;
}
-static inline void callstack_update_max_depth(struct r600_shader_ctx *ctx,
- unsigned reason)
+static int tgsi_exp(struct r600_shader_ctx *ctx)
{
- struct r600_stack_info *stack = &ctx->bc->stack;
- unsigned elements, entries;
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bytecode_alu alu;
+ int r;
+ unsigned i;
- unsigned entry_size = stack->entry_size;
+ /* result.x = 2^floor(src); */
+ if (inst->Dst[0].Register.WriteMask & 1) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- elements = (stack->loop + stack->push_wqm ) * entry_size;
- elements += stack->push;
+ alu.op = ALU_OP1_FLOOR;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
- switch (ctx->bc->chip_class) {
- case R600:
- case R700:
- /* pre-r8xx: if any non-WQM PUSH instruction is invoked, 2 elements on
- * the stack must be reserved to hold the current active/continue
- * masks */
- if (reason == FC_PUSH_VPM) {
- elements += 2;
- }
- break;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 0;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
- case CAYMAN:
- /* r9xx: any stack operation on empty stack consumes 2 additional
- * elements */
- elements += 2;
+ if (ctx->bc->chip_class == CAYMAN) {
+ for (i = 0; i < 3; i++) {
+ alu.op = ALU_OP1_EXP_IEEE;
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = 0;
- /* fallthrough */
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ alu.dst.write = i == 0;
+ alu.last = i == 2;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ } else {
+ alu.op = ALU_OP1_EXP_IEEE;
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = 0;
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 0;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ }
+
+ /* result.y = tmp - floor(tmp); */
+ if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
+ alu.op = ALU_OP1_FRACT;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+
+ alu.dst.sel = ctx->temp_reg;
+#if 0
+ r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ if (r)
+ return r;
+#endif
+ alu.dst.write = 1;
+ alu.dst.chan = 1;
+
+ alu.last = 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+
+ /* result.z = RoughApprox2ToX(tmp);*/
+ if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) {
+ if (ctx->bc->chip_class == CAYMAN) {
+ for (i = 0; i < 3; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_EXP_IEEE;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ if (i == 2) {
+ alu.dst.write = 1;
+ alu.last = 1;
+ }
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ } else {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_EXP_IEEE;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.write = 1;
+ alu.dst.chan = 2;
+
+ alu.last = 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ }
+
+ /* result.w = 1.0;*/
+ if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
+ alu.op = ALU_OP1_MOV;
+ alu.src[0].sel = V_SQ_ALU_SRC_1;
+ alu.src[0].chan = 0;
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 3;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ return tgsi_helper_copy(ctx, inst);
+}
+
+static int tgsi_log(struct r600_shader_ctx *ctx)
+{
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bytecode_alu alu;
+ int r;
+ unsigned i;
+
+ /* result.x = floor(log2(|src|)); */
+ if (inst->Dst[0].Register.WriteMask & 1) {
+ if (ctx->bc->chip_class == CAYMAN) {
+ for (i = 0; i < 3; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
+ alu.op = ALU_OP1_LOG_IEEE;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ r600_bytecode_src_set_abs(&alu.src[0]);
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ if (i == 0)
+ alu.dst.write = 1;
+ if (i == 2)
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+
+ } else {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
+ alu.op = ALU_OP1_LOG_IEEE;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ r600_bytecode_src_set_abs(&alu.src[0]);
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 0;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+
+ alu.op = ALU_OP1_FLOOR;
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = 0;
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 0;
+ alu.dst.write = 1;
+ alu.last = 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+
+ /* result.y = |src.x| / (2 ^ floor(log2(|src.x|))); */
+ if ((inst->Dst[0].Register.WriteMask >> 1) & 1) {
+
+ if (ctx->bc->chip_class == CAYMAN) {
+ for (i = 0; i < 3; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
+ alu.op = ALU_OP1_LOG_IEEE;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ r600_bytecode_src_set_abs(&alu.src[0]);
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ if (i == 1)
+ alu.dst.write = 1;
+ if (i == 2)
+ alu.last = 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ } else {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
+ alu.op = ALU_OP1_LOG_IEEE;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ r600_bytecode_src_set_abs(&alu.src[0]);
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 1;
+ alu.dst.write = 1;
+ alu.last = 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
+ alu.op = ALU_OP1_FLOOR;
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = 1;
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 1;
+ alu.dst.write = 1;
+ alu.last = 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ if (ctx->bc->chip_class == CAYMAN) {
+ for (i = 0; i < 3; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_EXP_IEEE;
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = 1;
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ if (i == 1)
+ alu.dst.write = 1;
+ if (i == 2)
+ alu.last = 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ } else {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_EXP_IEEE;
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = 1;
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 1;
+ alu.dst.write = 1;
+ alu.last = 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+
+ if (ctx->bc->chip_class == CAYMAN) {
+ for (i = 0; i < 3; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_RECIP_IEEE;
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = 1;
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ if (i == 1)
+ alu.dst.write = 1;
+ if (i == 2)
+ alu.last = 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ } else {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_RECIP_IEEE;
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = 1;
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 1;
+ alu.dst.write = 1;
+ alu.last = 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
+ alu.op = ALU_OP2_MUL;
+
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ r600_bytecode_src_set_abs(&alu.src[0]);
+
+ alu.src[1].sel = ctx->temp_reg;
+ alu.src[1].chan = 1;
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 1;
+ alu.dst.write = 1;
+ alu.last = 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+
+ /* result.z = log2(|src|);*/
+ if ((inst->Dst[0].Register.WriteMask >> 2) & 1) {
+ if (ctx->bc->chip_class == CAYMAN) {
+ for (i = 0; i < 3; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
+ alu.op = ALU_OP1_LOG_IEEE;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ r600_bytecode_src_set_abs(&alu.src[0]);
+
+ alu.dst.sel = ctx->temp_reg;
+ if (i == 2)
+ alu.dst.write = 1;
+ alu.dst.chan = i;
+ if (i == 2)
+ alu.last = 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ } else {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
+ alu.op = ALU_OP1_LOG_IEEE;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ r600_bytecode_src_set_abs(&alu.src[0]);
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.write = 1;
+ alu.dst.chan = 2;
+ alu.last = 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ }
+
+ /* result.w = 1.0; */
+ if ((inst->Dst[0].Register.WriteMask >> 3) & 1) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
+ alu.op = ALU_OP1_MOV;
+ alu.src[0].sel = V_SQ_ALU_SRC_1;
+ alu.src[0].chan = 0;
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = 3;
+ alu.dst.write = 1;
+ alu.last = 1;
+
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+
+ return tgsi_helper_copy(ctx, inst);
+}
+
+static int tgsi_eg_arl(struct r600_shader_ctx *ctx)
+{
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bytecode_alu alu;
+ int r;
+ int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+ unsigned reg = get_address_file_reg(ctx, inst->Dst[0].Register.Index);
+
+ assert(inst->Dst[0].Register.Index < 3);
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_ARL:
+ alu.op = ALU_OP1_FLT_TO_INT_FLOOR;
+ break;
+ case TGSI_OPCODE_ARR:
+ alu.op = ALU_OP1_FLT_TO_INT;
+ break;
+ case TGSI_OPCODE_UARL:
+ alu.op = ALU_OP1_MOV;
+ break;
+ default:
+ assert(0);
+ return -1;
+ }
+
+ for (i = 0; i <= lasti; ++i) {
+ if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
+ continue;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
+ alu.last = i == lasti;
+ alu.dst.sel = reg;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+
+ if (inst->Dst[0].Register.Index > 0)
+ ctx->bc->index_loaded[inst->Dst[0].Register.Index - 1] = 0;
+ else
+ ctx->bc->ar_loaded = 0;
+
+ return 0;
+}
+static int tgsi_r600_arl(struct r600_shader_ctx *ctx)
+{
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bytecode_alu alu;
+ int r;
+ int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+
+ switch (inst->Instruction.Opcode) {
+ case TGSI_OPCODE_ARL:
+ memset(&alu, 0, sizeof(alu));
+ alu.op = ALU_OP1_FLOOR;
+ alu.dst.sel = ctx->bc->ar_reg;
+ alu.dst.write = 1;
+ for (i = 0; i <= lasti; ++i) {
+ if (inst->Dst[0].Register.WriteMask & (1 << i)) {
+ alu.dst.chan = i;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
+ alu.last = i == lasti;
+ if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
+ return r;
+ }
+ }
+
+ memset(&alu, 0, sizeof(alu));
+ alu.op = ALU_OP1_FLT_TO_INT;
+ alu.src[0].sel = ctx->bc->ar_reg;
+ alu.dst.sel = ctx->bc->ar_reg;
+ alu.dst.write = 1;
+ /* FLT_TO_INT is trans-only on r600/r700 */
+ alu.last = TRUE;
+ for (i = 0; i <= lasti; ++i) {
+ alu.dst.chan = i;
+ alu.src[0].chan = i;
+ if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
+ return r;
+ }
+ break;
+ case TGSI_OPCODE_ARR:
+ memset(&alu, 0, sizeof(alu));
+ alu.op = ALU_OP1_FLT_TO_INT;
+ alu.dst.sel = ctx->bc->ar_reg;
+ alu.dst.write = 1;
+ /* FLT_TO_INT is trans-only on r600/r700 */
+ alu.last = TRUE;
+ for (i = 0; i <= lasti; ++i) {
+ if (inst->Dst[0].Register.WriteMask & (1 << i)) {
+ alu.dst.chan = i;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
+ if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
+ return r;
+ }
+ }
+ break;
+ case TGSI_OPCODE_UARL:
+ memset(&alu, 0, sizeof(alu));
+ alu.op = ALU_OP1_MOV;
+ alu.dst.sel = ctx->bc->ar_reg;
+ alu.dst.write = 1;
+ for (i = 0; i <= lasti; ++i) {
+ if (inst->Dst[0].Register.WriteMask & (1 << i)) {
+ alu.dst.chan = i;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
+ alu.last = i == lasti;
+ if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
+ return r;
+ }
+ }
+ break;
+ default:
+ assert(0);
+ return -1;
+ }
+
+ ctx->bc->ar_loaded = 0;
+ return 0;
+}
+
+static int tgsi_opdst(struct r600_shader_ctx *ctx)
+{
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bytecode_alu alu;
+ int i, r = 0;
+
+ for (i = 0; i < 4; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
+ alu.op = ALU_OP2_MUL;
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+
+ if (i == 0 || i == 3) {
+ alu.src[0].sel = V_SQ_ALU_SRC_1;
+ } else {
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
+ }
+
+ if (i == 0 || i == 2) {
+ alu.src[1].sel = V_SQ_ALU_SRC_1;
+ } else {
+ r600_bytecode_src(&alu.src[1], &ctx->src[1], i);
+ }
+ if (i == 3)
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode, int alu_type,
+ struct r600_bytecode_alu_src *src)
+{
+ struct r600_bytecode_alu alu;
+ int r;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = opcode;
+ alu.execute_mask = 1;
+ alu.update_pred = 1;
+
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.write = 1;
+ alu.dst.chan = 0;
+
+ alu.src[0] = *src;
+ alu.src[1].sel = V_SQ_ALU_SRC_0;
+ alu.src[1].chan = 0;
+
+ alu.last = 1;
+
+ r = r600_bytecode_add_alu_type(ctx->bc, &alu, alu_type);
+ if (r)
+ return r;
+ return 0;
+}
+
+static int pops(struct r600_shader_ctx *ctx, int pops)
+{
+ unsigned force_pop = ctx->bc->force_add_cf;
+
+ if (!force_pop) {
+ int alu_pop = 3;
+ if (ctx->bc->cf_last) {
+ if (ctx->bc->cf_last->op == CF_OP_ALU)
+ alu_pop = 0;
+ else if (ctx->bc->cf_last->op == CF_OP_ALU_POP_AFTER)
+ alu_pop = 1;
+ }
+ alu_pop += pops;
+ if (alu_pop == 1) {
+ ctx->bc->cf_last->op = CF_OP_ALU_POP_AFTER;
+ ctx->bc->force_add_cf = 1;
+ } else if (alu_pop == 2) {
+ ctx->bc->cf_last->op = CF_OP_ALU_POP2_AFTER;
+ ctx->bc->force_add_cf = 1;
+ } else {
+ force_pop = 1;
+ }
+ }
+
+ if (force_pop) {
+ r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP);
+ ctx->bc->cf_last->pop_count = pops;
+ ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
+ }
+
+ return 0;
+}
+
+static inline int callstack_update_max_depth(struct r600_shader_ctx *ctx,
+ unsigned reason)
+{
+ struct r600_stack_info *stack = &ctx->bc->stack;
+ unsigned elements;
+ int entries;
+
+ unsigned entry_size = stack->entry_size;
+
+ elements = (stack->loop + stack->push_wqm ) * entry_size;
+ elements += stack->push;
+
+ switch (ctx->bc->chip_class) {
+ case R600:
+ case R700:
+ /* pre-r8xx: if any non-WQM PUSH instruction is invoked, 2 elements on
+ * the stack must be reserved to hold the current active/continue
+ * masks */
+ if (reason == FC_PUSH_VPM || stack->push > 0) {
+ elements += 2;
+ }
+ break;
+
+ case CAYMAN:
+ /* r9xx: any stack operation on empty stack consumes 2 additional
+ * elements */
+ elements += 2;
+
+ /* fallthrough */
/* FIXME: do the two elements added above cover the cases for the
* r8xx+ below? */
- case EVERGREEN:
- /* r8xx+: 2 extra elements are not always required, but one extra
- * element must be added for each of the following cases:
- * 1. There is an ALU_ELSE_AFTER instruction at the point of greatest
- * stack usage.
- * (Currently we don't use ALU_ELSE_AFTER.)
- * 2. There are LOOP/WQM frames on the stack when any flavor of non-WQM
- * PUSH instruction executed.
- *
- * NOTE: it seems we also need to reserve additional element in some
- * other cases, e.g. when we have 4 levels of PUSH_VPM in the shader,
- * then STACK_SIZE should be 2 instead of 1 */
- if (reason == FC_PUSH_VPM) {
- elements += 1;
+ case EVERGREEN:
+ /* r8xx+: 2 extra elements are not always required, but one extra
+ * element must be added for each of the following cases:
+ * 1. There is an ALU_ELSE_AFTER instruction at the point of greatest
+ * stack usage.
+ * (Currently we don't use ALU_ELSE_AFTER.)
+ * 2. There are LOOP/WQM frames on the stack when any flavor of non-WQM
+ * PUSH instruction executed.
+ *
+ * NOTE: it seems we also need to reserve additional element in some
+ * other cases, e.g. when we have 4 levels of PUSH_VPM in the shader,
+ * then STACK_SIZE should be 2 instead of 1 */
+ if (reason == FC_PUSH_VPM || stack->push > 0) {
+ elements += 1;
+ }
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+
+ /* NOTE: it seems STACK_SIZE is interpreted by hw as if entry_size is 4
+ * for all chips, so we use 4 in the final formula, not the real entry_size
+ * for the chip */
+ entry_size = 4;
+
+ entries = (elements + (entry_size - 1)) / entry_size;
+
+ if (entries > stack->max_entries)
+ stack->max_entries = entries;
+ return elements;
+}
+
+static inline void callstack_pop(struct r600_shader_ctx *ctx, unsigned reason)
+{
+ switch(reason) {
+ case FC_PUSH_VPM:
+ --ctx->bc->stack.push;
+ assert(ctx->bc->stack.push >= 0);
+ break;
+ case FC_PUSH_WQM:
+ --ctx->bc->stack.push_wqm;
+ assert(ctx->bc->stack.push_wqm >= 0);
+ break;
+ case FC_LOOP:
+ --ctx->bc->stack.loop;
+ assert(ctx->bc->stack.loop >= 0);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
+
+static inline int callstack_push(struct r600_shader_ctx *ctx, unsigned reason)
+{
+ switch (reason) {
+ case FC_PUSH_VPM:
+ ++ctx->bc->stack.push;
+ break;
+ case FC_PUSH_WQM:
+ ++ctx->bc->stack.push_wqm;
+ break;
+ case FC_LOOP:
+ ++ctx->bc->stack.loop;
+ break;
+ default:
+ assert(0);
+ }
+
+ return callstack_update_max_depth(ctx, reason);
+}
+
+static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp)
+{
+ struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp];
+
+ sp->mid = realloc((void *)sp->mid,
+ sizeof(struct r600_bytecode_cf *) * (sp->num_mid + 1));
+ sp->mid[sp->num_mid] = ctx->bc->cf_last;
+ sp->num_mid++;
+}
+
+static void fc_pushlevel(struct r600_shader_ctx *ctx, int type)
+{
+ assert(ctx->bc->fc_sp < ARRAY_SIZE(ctx->bc->fc_stack));
+ ctx->bc->fc_stack[ctx->bc->fc_sp].type = type;
+ ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last;
+ ctx->bc->fc_sp++;
+}
+
+static void fc_poplevel(struct r600_shader_ctx *ctx)
+{
+ struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp - 1];
+ free(sp->mid);
+ sp->mid = NULL;
+ sp->num_mid = 0;
+ sp->start = NULL;
+ sp->type = 0;
+ ctx->bc->fc_sp--;
+}
+
+#if 0
+static int emit_return(struct r600_shader_ctx *ctx)
+{
+ r600_bytecode_add_cfinst(ctx->bc, CF_OP_RETURN));
+ return 0;
+}
+
+static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
+{
+
+ r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP));
+ ctx->bc->cf_last->pop_count = pops;
+ /* XXX work out offset */
+ return 0;
+}
+
+static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
+{
+ return 0;
+}
+
+static void emit_testflag(struct r600_shader_ctx *ctx)
+{
+
+}
+
+static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
+{
+ emit_testflag(ctx);
+ emit_jump_to_offset(ctx, 1, 4);
+ emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
+ pops(ctx, ifidx + 1);
+ emit_return(ctx);
+}
+
+static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
+{
+ emit_testflag(ctx);
+
+ r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
+ ctx->bc->cf_last->pop_count = 1;
+
+ fc_set_mid(ctx, fc_sp);
+
+ pops(ctx, 1);
+}
+#endif
+
+static int emit_if(struct r600_shader_ctx *ctx, int opcode,
+ struct r600_bytecode_alu_src *src)
+{
+ int alu_type = CF_OP_ALU_PUSH_BEFORE;
+ bool needs_workaround = false;
+ int elems = callstack_push(ctx, FC_PUSH_VPM);
+
+ if (ctx->bc->chip_class == CAYMAN && ctx->bc->stack.loop > 1)
+ needs_workaround = true;
+
+ if (ctx->bc->chip_class == EVERGREEN && ctx_needs_stack_workaround_8xx(ctx)) {
+ unsigned dmod1 = (elems - 1) % ctx->bc->stack.entry_size;
+ unsigned dmod2 = (elems) % ctx->bc->stack.entry_size;
+
+ if (elems && (!dmod1 || !dmod2))
+ needs_workaround = true;
+ }
+
+ /* There is a hardware bug on Cayman where a BREAK/CONTINUE followed by
+ * LOOP_STARTxxx for nested loops may put the branch stack into a state
+ * such that ALU_PUSH_BEFORE doesn't work as expected. Workaround this
+ * by replacing the ALU_PUSH_BEFORE with a PUSH + ALU */
+ if (needs_workaround) {
+ r600_bytecode_add_cfinst(ctx->bc, CF_OP_PUSH);
+ ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
+ alu_type = CF_OP_ALU;
+ }
+
+ emit_logic_pred(ctx, opcode, alu_type, src);
+
+ r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
+
+ fc_pushlevel(ctx, FC_IF);
+
+ return 0;
+}
+
+static int tgsi_if(struct r600_shader_ctx *ctx)
+{
+ struct r600_bytecode_alu_src alu_src;
+ r600_bytecode_src(&alu_src, &ctx->src[0], 0);
+
+ return emit_if(ctx, ALU_OP2_PRED_SETNE, &alu_src);
+}
+
+static int tgsi_uif(struct r600_shader_ctx *ctx)
+{
+ struct r600_bytecode_alu_src alu_src;
+ r600_bytecode_src(&alu_src, &ctx->src[0], 0);
+ return emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
+}
+
+static int tgsi_else(struct r600_shader_ctx *ctx)
+{
+ r600_bytecode_add_cfinst(ctx->bc, CF_OP_ELSE);
+ ctx->bc->cf_last->pop_count = 1;
+
+ fc_set_mid(ctx, ctx->bc->fc_sp - 1);
+ ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id;
+ return 0;
+}
+
+static int tgsi_endif(struct r600_shader_ctx *ctx)
+{
+ int offset = 2;
+ pops(ctx, 1);
+ if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].type != FC_IF) {
+ R600_ERR("if/endif unbalanced in shader\n");
+ return -1;
+ }
+
+ /* ALU_EXTENDED needs 4 DWords instead of two, adjust jump target offset accordingly */
+ if (ctx->bc->cf_last->eg_alu_extended)
+ offset += 2;
+
+ if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid == NULL) {
+ ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id + offset;
+ ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->pop_count = 1;
+ } else {
+ ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid[0]->cf_addr = ctx->bc->cf_last->id + offset;
+ }
+ fc_poplevel(ctx);
+
+ callstack_pop(ctx, FC_PUSH_VPM);
+ return 0;
+}
+
+static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
+{
+ /* LOOP_START_DX10 ignores the LOOP_CONFIG* registers, so it is not
+ * limited to 4096 iterations, like the other LOOP_* instructions. */
+ r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_START_DX10);
+
+ fc_pushlevel(ctx, FC_LOOP);
+
+ /* check stack depth */
+ callstack_push(ctx, FC_LOOP);
+ return 0;
+}
+
+static int tgsi_endloop(struct r600_shader_ctx *ctx)
+{
+ int i;
+
+ r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_END);
+
+ if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].type != FC_LOOP) {
+ R600_ERR("loop/endloop in shader code are not paired.\n");
+ return -EINVAL;
+ }
+
+ /* fixup loop pointers - from r600isa
+ LOOP END points to CF after LOOP START,
+ LOOP START point to CF after LOOP END
+ BRK/CONT point to LOOP END CF
+ */
+ ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->id + 2;
+
+ ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id + 2;
+
+ for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp - 1].num_mid; i++) {
+ ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid[i]->cf_addr = ctx->bc->cf_last->id;
+ }
+ /* XXX add LOOPRET support */
+ fc_poplevel(ctx);
+ callstack_pop(ctx, FC_LOOP);
+ return 0;
+}
+
+static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
+{
+ unsigned int fscp;
+
+ for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
+ {
+ if (FC_LOOP == ctx->bc->fc_stack[fscp - 1].type)
+ break;
+ }
+
+ if (fscp == 0) {
+ R600_ERR("Break not inside loop/endloop pair\n");
+ return -EINVAL;
+ }
+
+ r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
+
+ fc_set_mid(ctx, fscp - 1);
+
+ return 0;
+}
+
+static int tgsi_gs_emit(struct r600_shader_ctx *ctx)
+{
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ int stream = ctx->literals[inst->Src[0].Register.Index * 4 + inst->Src[0].Register.SwizzleX];
+ int r;
+
+ if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
+ emit_gs_ring_writes(ctx, ctx->gs_stream_output_info, stream, TRUE);
+
+ r = r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
+ if (!r) {
+ ctx->bc->cf_last->count = stream; // Count field for CUT/EMIT_VERTEX indicates which stream
+ if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
+ return emit_inc_ring_offset(ctx, stream, TRUE);
+ }
+ return r;
+}
+
+static int tgsi_umad(struct r600_shader_ctx *ctx)
+{
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bytecode_alu alu;
+ int i, j, r;
+ int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+
+ /* src0 * src1 */
+ for (i = 0; i < lasti + 1; i++) {
+ if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
+ continue;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
+ alu.dst.chan = i;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.write = 1;
+
+ alu.op = ALU_OP2_MULLO_UINT;
+ for (j = 0; j < 2; j++) {
+ r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
}
- break;
- default:
- assert(0);
- break;
+ alu.last = 1;
+ r = emit_mul_int_op(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+
+
+ for (i = 0; i < lasti + 1; i++) {
+ if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
+ continue;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+
+ alu.op = ALU_OP2_ADD_INT;
+
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = i;
+
+ r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
+ if (i == lasti) {
+ alu.last = 1;
+ }
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
}
+ return 0;
+}
+
+static int tgsi_pk2h(struct r600_shader_ctx *ctx)
+{
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bytecode_alu alu;
+ int r, i;
+ int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+
+ /* temp.xy = f32_to_f16(src) */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_FLT32_TO_FLT16;
+ alu.dst.chan = 0;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.write = 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ alu.dst.chan = 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
- /* NOTE: it seems STACK_SIZE is interpreted by hw as if entry_size is 4
- * for all chips, so we use 4 in the final formula, not the real entry_size
- * for the chip */
- entry_size = 4;
+ /* dst.x = temp.y * 0x10000 + temp.x */
+ for (i = 0; i < lasti + 1; i++) {
+ if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
+ continue;
- entries = (elements + (entry_size - 1)) / entry_size;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP3_MULADD_UINT24;
+ alu.is_op3 = 1;
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ alu.last = i == lasti;
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = 1;
+ alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[1].value = 0x10000;
+ alu.src[2].sel = ctx->temp_reg;
+ alu.src[2].chan = 0;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
- if (entries > stack->max_entries)
- stack->max_entries = entries;
+ return 0;
}
-static inline void callstack_pop(struct r600_shader_ctx *ctx, unsigned reason)
+static int tgsi_up2h(struct r600_shader_ctx *ctx)
{
- switch(reason) {
- case FC_PUSH_VPM:
- --ctx->bc->stack.push;
- assert(ctx->bc->stack.push >= 0);
- break;
- case FC_PUSH_WQM:
- --ctx->bc->stack.push_wqm;
- assert(ctx->bc->stack.push_wqm >= 0);
- break;
- case FC_LOOP:
- --ctx->bc->stack.loop;
- assert(ctx->bc->stack.loop >= 0);
- break;
- default:
- assert(0);
- break;
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bytecode_alu alu;
+ int r, i;
+ int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+
+ /* temp.x = src.x */
+ /* note: no need to mask out the high bits */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ alu.dst.chan = 0;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.write = 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ /* temp.y = src.x >> 16 */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_LSHR_INT;
+ alu.dst.chan = 1;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.write = 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[1].value = 16;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ /* dst.wz = dst.xy = f16_to_f32(temp.xy) */
+ for (i = 0; i < lasti + 1; i++) {
+ if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
+ continue;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ alu.op = ALU_OP1_FLT16_TO_FLT32;
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = i % 2;
+ alu.last = i == lasti;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
}
+
+ return 0;
}
-static inline void callstack_push(struct r600_shader_ctx *ctx, unsigned reason)
+static int tgsi_bfe(struct r600_shader_ctx *ctx)
{
- switch (reason) {
- case FC_PUSH_VPM:
- ++ctx->bc->stack.push;
- break;
- case FC_PUSH_WQM:
- ++ctx->bc->stack.push_wqm;
- case FC_LOOP:
- ++ctx->bc->stack.loop;
- break;
- default:
- assert(0);
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bytecode_alu alu;
+ int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+ int r, i;
+ int dst = -1;
+
+ if ((inst->Src[0].Register.File == inst->Dst[0].Register.File &&
+ inst->Src[0].Register.Index == inst->Dst[0].Register.Index) ||
+ (inst->Src[2].Register.File == inst->Dst[0].Register.File &&
+ inst->Src[2].Register.Index == inst->Dst[0].Register.Index))
+ dst = r600_get_temp(ctx);
+
+ r = tgsi_op3_dst(ctx, dst);
+ if (r)
+ return r;
+
+ for (i = 0; i < lasti + 1; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_SETGE_INT;
+ r600_bytecode_src(&alu.src[0], &ctx->src[2], i);
+ alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[1].value = 32;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ if (i == lasti)
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
}
- callstack_update_max_depth(ctx, reason);
-}
+ for (i = 0; i < lasti + 1; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP3_CNDE_INT;
+ alu.is_op3 = 1;
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = i;
-static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp)
-{
- struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp];
+ tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ if (dst != -1)
+ alu.src[1].sel = dst;
+ else
+ alu.src[1].sel = alu.dst.sel;
+ alu.src[1].chan = i;
+ r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
+ alu.dst.write = 1;
+ if (i == lasti)
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
- sp->mid = realloc((void *)sp->mid,
- sizeof(struct r600_bytecode_cf *) * (sp->num_mid + 1));
- sp->mid[sp->num_mid] = ctx->bc->cf_last;
- sp->num_mid++;
+ return 0;
}
-static void fc_pushlevel(struct r600_shader_ctx *ctx, int type)
+static int tgsi_clock(struct r600_shader_ctx *ctx)
{
- assert(ctx->bc->fc_sp < ARRAY_SIZE(ctx->bc->fc_stack));
- ctx->bc->fc_stack[ctx->bc->fc_sp].type = type;
- ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last;
- ctx->bc->fc_sp++;
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bytecode_alu alu;
+ int r;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
+ alu.src[0].sel = EG_V_SQ_ALU_SRC_TIME_LO;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
+ alu.src[0].sel = EG_V_SQ_ALU_SRC_TIME_HI;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ return 0;
}
-static void fc_poplevel(struct r600_shader_ctx *ctx)
+static int emit_u64add(struct r600_shader_ctx *ctx, int op,
+ int treg,
+ int src0_sel, int src0_chan,
+ int src1_sel, int src1_chan)
{
- struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp - 1];
- free(sp->mid);
- sp->mid = NULL;
- sp->num_mid = 0;
- sp->start = NULL;
- sp->type = 0;
- ctx->bc->fc_sp--;
+ struct r600_bytecode_alu alu;
+ int r;
+ int opc;
+
+ if (op == ALU_OP2_ADD_INT)
+ opc = ALU_OP2_ADDC_UINT;
+ else
+ opc = ALU_OP2_SUBB_UINT;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = op; ;
+ alu.dst.sel = treg;
+ alu.dst.chan = 0;
+ alu.dst.write = 1;
+ alu.src[0].sel = src0_sel;
+ alu.src[0].chan = src0_chan + 0;
+ alu.src[1].sel = src1_sel;
+ alu.src[1].chan = src1_chan + 0;
+ alu.src[1].neg = 0;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = op;
+ alu.dst.sel = treg;
+ alu.dst.chan = 1;
+ alu.dst.write = 1;
+ alu.src[0].sel = src0_sel;
+ alu.src[0].chan = src0_chan + 1;
+ alu.src[1].sel = src1_sel;
+ alu.src[1].chan = src1_chan + 1;
+ alu.src[1].neg = 0;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = opc;
+ alu.dst.sel = treg;
+ alu.dst.chan = 2;
+ alu.dst.write = 1;
+ alu.last = 1;
+ alu.src[0].sel = src0_sel;
+ alu.src[0].chan = src0_chan + 0;
+ alu.src[1].sel = src1_sel;
+ alu.src[1].chan = src1_chan + 0;
+ alu.src[1].neg = 0;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = op;
+ alu.dst.sel = treg;
+ alu.dst.chan = 1;
+ alu.dst.write = 1;
+ alu.src[0].sel = treg;
+ alu.src[0].chan = 1;
+ alu.src[1].sel = treg;
+ alu.src[1].chan = 2;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ return 0;
}
-#if 0
-static int emit_return(struct r600_shader_ctx *ctx)
+static int egcm_u64add(struct r600_shader_ctx *ctx)
{
- r600_bytecode_add_cfinst(ctx->bc, CF_OP_RETURN));
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bytecode_alu alu;
+ int r;
+ int treg = ctx->temp_reg;
+ int op = ALU_OP2_ADD_INT, opc = ALU_OP2_ADDC_UINT;
+
+ if (ctx->src[1].neg) {
+ op = ALU_OP2_SUB_INT;
+ opc = ALU_OP2_SUBB_UINT;
+ }
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = op; ;
+ alu.dst.sel = treg;
+ alu.dst.chan = 0;
+ alu.dst.write = 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
+ alu.src[1].neg = 0;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = op;
+ alu.dst.sel = treg;
+ alu.dst.chan = 1;
+ alu.dst.write = 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
+ r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
+ alu.src[1].neg = 0;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = opc ;
+ alu.dst.sel = treg;
+ alu.dst.chan = 2;
+ alu.dst.write = 1;
+ alu.last = 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
+ alu.src[1].neg = 0;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = op;
+ tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
+ alu.src[0].sel = treg;
+ alu.src[0].chan = 1;
+ alu.src[1].sel = treg;
+ alu.src[1].chan = 2;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
+ alu.src[0].sel = treg;
+ alu.src[0].chan = 0;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
return 0;
}
-static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
+/* result.y = mul_high a, b
+ result.x = mul a,b
+ result.y += a.x * b.y + a.y * b.x;
+*/
+static int egcm_u64mul(struct r600_shader_ctx *ctx)
{
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+ struct r600_bytecode_alu alu;
+ int r;
+ int treg = ctx->temp_reg;
- r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP));
- ctx->bc->cf_last->pop_count = pops;
- /* XXX work out offset */
- return 0;
-}
+ /* temp.x = mul_lo a.x, b.x */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_MULLO_UINT;
+ alu.dst.sel = treg;
+ alu.dst.chan = 0;
+ alu.dst.write = 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
+ r = emit_mul_int_op(ctx->bc, &alu);
+ if (r)
+ return r;
-static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
-{
- return 0;
-}
+ /* temp.y = mul_hi a.x, b.x */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_MULHI_UINT;
+ alu.dst.sel = treg;
+ alu.dst.chan = 1;
+ alu.dst.write = 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
+ r = emit_mul_int_op(ctx->bc, &alu);
+ if (r)
+ return r;
-static void emit_testflag(struct r600_shader_ctx *ctx)
-{
+ /* temp.z = mul a.x, b.y */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_MULLO_UINT;
+ alu.dst.sel = treg;
+ alu.dst.chan = 2;
+ alu.dst.write = 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
+ r = emit_mul_int_op(ctx->bc, &alu);
+ if (r)
+ return r;
-}
+ /* temp.w = mul a.y, b.x */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_MULLO_UINT;
+ alu.dst.sel = treg;
+ alu.dst.chan = 3;
+ alu.dst.write = 1;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
+ r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
+ r = emit_mul_int_op(ctx->bc, &alu);
+ if (r)
+ return r;
-static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
-{
- emit_testflag(ctx);
- emit_jump_to_offset(ctx, 1, 4);
- emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
- pops(ctx, ifidx + 1);
- emit_return(ctx);
-}
+ /* temp.z = temp.z + temp.w */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_ADD_INT;
+ alu.dst.sel = treg;
+ alu.dst.chan = 2;
+ alu.dst.write = 1;
+ alu.src[0].sel = treg;
+ alu.src[0].chan = 2;
+ alu.src[1].sel = treg;
+ alu.src[1].chan = 3;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
-static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
-{
- emit_testflag(ctx);
+ /* temp.y = temp.y + temp.z */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_ADD_INT;
+ alu.dst.sel = treg;
+ alu.dst.chan = 1;
+ alu.dst.write = 1;
+ alu.src[0].sel = treg;
+ alu.src[0].chan = 1;
+ alu.src[1].sel = treg;
+ alu.src[1].chan = 2;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
- r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
- ctx->bc->cf_last->pop_count = 1;
+ /* dst.x = temp.x */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
+ alu.src[0].sel = treg;
+ alu.src[0].chan = 0;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
- fc_set_mid(ctx, fc_sp);
+ /* dst.y = temp.y */
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
+ alu.src[0].sel = treg;
+ alu.src[0].chan = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
- pops(ctx, 1);
+ return 0;
}
-#endif
-static int emit_if(struct r600_shader_ctx *ctx, int opcode)
+static int emit_u64sge(struct r600_shader_ctx *ctx,
+ int treg,
+ int src0_sel, int src0_base_chan,
+ int src1_sel, int src1_base_chan)
{
- int alu_type = CF_OP_ALU_PUSH_BEFORE;
-
- /* There is a hardware bug on Cayman where a BREAK/CONTINUE followed by
- * LOOP_STARTxxx for nested loops may put the branch stack into a state
- * such that ALU_PUSH_BEFORE doesn't work as expected. Workaround this
- * by replacing the ALU_PUSH_BEFORE with a PUSH + ALU */
- if (ctx->bc->chip_class == CAYMAN && ctx->bc->stack.loop > 1) {
- r600_bytecode_add_cfinst(ctx->bc, CF_OP_PUSH);
- ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
- alu_type = CF_OP_ALU;
- }
+ int r;
+ /* for 64-bit sge */
+ /* result = (src0.y > src1.y) || ((src0.y == src1.y) && (src0.x >= src1.x)) */
+ r = single_alu_op2(ctx, ALU_OP2_SETGT_UINT,
+ treg, 1,
+ src0_sel, src0_base_chan + 1,
+ src1_sel, src1_base_chan + 1);
+ if (r)
+ return r;
- emit_logic_pred(ctx, opcode, alu_type);
+ r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
+ treg, 0,
+ src0_sel, src0_base_chan,
+ src1_sel, src1_base_chan);
+ if (r)
+ return r;
- r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
+ r = single_alu_op2(ctx, ALU_OP2_SETE_INT,
+ treg, 2,
+ src0_sel, src0_base_chan + 1,
+ src1_sel, src1_base_chan + 1);
+ if (r)
+ return r;
- fc_pushlevel(ctx, FC_IF);
+ r = single_alu_op2(ctx, ALU_OP2_AND_INT,
+ treg, 0,
+ treg, 0,
+ treg, 2);
+ if (r)
+ return r;
- callstack_push(ctx, FC_PUSH_VPM);
+ r = single_alu_op2(ctx, ALU_OP2_OR_INT,
+ treg, 0,
+ treg, 0,
+ treg, 1);
+ if (r)
+ return r;
return 0;
}
-static int tgsi_if(struct r600_shader_ctx *ctx)
+/* this isn't a complete div it's just enough for qbo shader to work */
+static int egcm_u64div(struct r600_shader_ctx *ctx)
{
- return emit_if(ctx, ALU_OP2_PRED_SETNE);
-}
+ struct r600_bytecode_alu alu;
+ struct r600_bytecode_alu_src alu_num_hi, alu_num_lo, alu_denom_hi, alu_denom_lo, alu_src;
+ int r, i;
+ struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
-static int tgsi_uif(struct r600_shader_ctx *ctx)
-{
- return emit_if(ctx, ALU_OP2_PRED_SETNE_INT);
-}
+ /* make sure we are dividing by a const with 0 in the high bits */
+ if (ctx->src[1].sel != V_SQ_ALU_SRC_LITERAL)
+ return -1;
+ if (ctx->src[1].value[ctx->src[1].swizzle[1]] != 0)
+ return -1;
+ /* make sure we are doing one division */
+ if (inst->Dst[0].Register.WriteMask != 0x3)
+ return -1;
-static int tgsi_else(struct r600_shader_ctx *ctx)
-{
- r600_bytecode_add_cfinst(ctx->bc, CF_OP_ELSE);
- ctx->bc->cf_last->pop_count = 1;
+ /* emit_if uses ctx->temp_reg so we can't */
+ int treg = r600_get_temp(ctx);
+ int tmp_num = r600_get_temp(ctx);
+ int sub_tmp = r600_get_temp(ctx);
- fc_set_mid(ctx, ctx->bc->fc_sp - 1);
- ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id;
- return 0;
-}
+ /* the temporary quotient lives in tmp_num.zw */
+ r600_bytecode_src(&alu_num_lo, &ctx->src[0], 0);
+ r600_bytecode_src(&alu_num_hi, &ctx->src[0], 1);
+ r600_bytecode_src(&alu_denom_lo, &ctx->src[1], 0);
+ r600_bytecode_src(&alu_denom_hi, &ctx->src[1], 1);
-static int tgsi_endif(struct r600_shader_ctx *ctx)
-{
- pops(ctx, 1);
- if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].type != FC_IF) {
- R600_ERR("if/endif unbalanced in shader\n");
- return -1;
- }
+ /* MOV tmp_num.xy, numerator */
+ r = single_alu_op2(ctx, ALU_OP1_MOV,
+ tmp_num, 0,
+ alu_num_lo.sel, alu_num_lo.chan,
+ 0, 0);
+ if (r)
+ return r;
+ r = single_alu_op2(ctx, ALU_OP1_MOV,
+ tmp_num, 1,
+ alu_num_hi.sel, alu_num_hi.chan,
+ 0, 0);
+ if (r)
+ return r;
- if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid == NULL) {
- ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id + 2;
- ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->pop_count = 1;
- } else {
- ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid[0]->cf_addr = ctx->bc->cf_last->id + 2;
- }
- fc_poplevel(ctx);
+ r = single_alu_op2(ctx, ALU_OP1_MOV,
+ tmp_num, 2,
+ V_SQ_ALU_SRC_LITERAL, 0,
+ 0, 0);
+ if (r)
+ return r;
- callstack_pop(ctx, FC_PUSH_VPM);
- return 0;
-}
+ r = single_alu_op2(ctx, ALU_OP1_MOV,
+ tmp_num, 3,
+ V_SQ_ALU_SRC_LITERAL, 0,
+ 0, 0);
+ if (r)
+ return r;
-static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
-{
- /* LOOP_START_DX10 ignores the LOOP_CONFIG* registers, so it is not
- * limited to 4096 iterations, like the other LOOP_* instructions. */
- r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_START_DX10);
+ /* treg 0 is log2_denom */
+ /* normally this gets the MSB for the denom high value
+ - however we know this will always be 0 here. */
+ r = single_alu_op2(ctx,
+ ALU_OP1_MOV,
+ treg, 0,
+ V_SQ_ALU_SRC_LITERAL, 32,
+ 0, 0);
+ if (r)
+ return r;
- fc_pushlevel(ctx, FC_LOOP);
+ /* normally we would check denom hi for 0, but we know it is already 0 here */
+ /* t0.y = num_hi >= denom_lo */
+ r = single_alu_op2(ctx,
+ ALU_OP2_SETGE_UINT,
+ treg, 1,
+ alu_num_hi.sel, alu_num_hi.chan,
+ V_SQ_ALU_SRC_LITERAL, alu_denom_lo.value);
+ if (r)
+ return r;
- /* check stack depth */
- callstack_push(ctx, FC_LOOP);
- return 0;
-}
+ memset(&alu_src, 0, sizeof(alu_src));
+ alu_src.sel = treg;
+ alu_src.chan = 1;
+ r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
+ if (r)
+ return r;
-static int tgsi_endloop(struct r600_shader_ctx *ctx)
-{
- unsigned i;
+ /* for loops in here */
+ /* get msb t0.x = msb(src[1].x) first */
+ int msb_lo = util_last_bit(alu_denom_lo.value);
+ r = single_alu_op2(ctx, ALU_OP1_MOV,
+ treg, 0,
+ V_SQ_ALU_SRC_LITERAL, msb_lo,
+ 0, 0);
+ if (r)
+ return r;
- r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_END);
+ /* unroll the asm here */
+ for (i = 0; i < 31; i++) {
+ r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
+ treg, 2,
+ V_SQ_ALU_SRC_LITERAL, i,
+ treg, 0);
+ if (r)
+ return r;
- if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].type != FC_LOOP) {
- R600_ERR("loop/endloop in shader code are not paired.\n");
- return -EINVAL;
- }
+ /* we can do this on the CPU */
+ uint32_t denom_lo_shl = alu_denom_lo.value << (31 - i);
+ /* t0.y = tmp_num.y >= denom_lo_shl */
+ r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
+ treg, 1,
+ tmp_num, 1,
+ V_SQ_ALU_SRC_LITERAL, denom_lo_shl);
+ if (r)
+ return r;
- /* fixup loop pointers - from r600isa
- LOOP END points to CF after LOOP START,
- LOOP START point to CF after LOOP END
- BRK/CONT point to LOOP END CF
- */
- ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->id + 2;
+ r = single_alu_op2(ctx, ALU_OP2_AND_INT,
+ treg, 1,
+ treg, 1,
+ treg, 2);
+ if (r)
+ return r;
- ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id + 2;
+ memset(&alu_src, 0, sizeof(alu_src));
+ alu_src.sel = treg;
+ alu_src.chan = 1;
+ r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
+ if (r)
+ return r;
- for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp - 1].num_mid; i++) {
- ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid[i]->cf_addr = ctx->bc->cf_last->id;
- }
- /* XXX add LOOPRET support */
- fc_poplevel(ctx);
- callstack_pop(ctx, FC_LOOP);
- return 0;
-}
+ r = single_alu_op2(ctx, ALU_OP2_SUB_INT,
+ tmp_num, 1,
+ tmp_num, 1,
+ V_SQ_ALU_SRC_LITERAL, denom_lo_shl);
+ if (r)
+ return r;
-static int tgsi_loop_breakc(struct r600_shader_ctx *ctx)
-{
- int r;
- unsigned int fscp;
+ r = single_alu_op2(ctx, ALU_OP2_OR_INT,
+ tmp_num, 3,
+ tmp_num, 3,
+ V_SQ_ALU_SRC_LITERAL, 1U << (31 - i));
+ if (r)
+ return r;
- for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
- {
- if (FC_LOOP == ctx->bc->fc_stack[fscp - 1].type)
- break;
- }
- if (fscp == 0) {
- R600_ERR("BREAKC not inside loop/endloop pair\n");
- return -EINVAL;
+ r = tgsi_endif(ctx);
+ if (r)
+ return r;
}
- if (ctx->bc->chip_class == EVERGREEN &&
- ctx->bc->family != CHIP_CYPRESS &&
- ctx->bc->family != CHIP_JUNIPER) {
- /* HW bug: ALU_BREAK does not save the active mask correctly */
- r = tgsi_uif(ctx);
+ /* log2_denom is always <= 31, so manually peel the last loop
+ * iteration.
+ */
+ r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
+ treg, 1,
+ tmp_num, 1,
+ V_SQ_ALU_SRC_LITERAL, alu_denom_lo.value);
+ if (r)
+ return r;
+
+ memset(&alu_src, 0, sizeof(alu_src));
+ alu_src.sel = treg;
+ alu_src.chan = 1;
+ r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
+ if (r)
+ return r;
+
+ r = single_alu_op2(ctx, ALU_OP2_SUB_INT,
+ tmp_num, 1,
+ tmp_num, 1,
+ V_SQ_ALU_SRC_LITERAL, alu_denom_lo.value);
+ if (r)
+ return r;
+
+ r = single_alu_op2(ctx, ALU_OP2_OR_INT,
+ tmp_num, 3,
+ tmp_num, 3,
+ V_SQ_ALU_SRC_LITERAL, 1U);
+ if (r)
+ return r;
+ r = tgsi_endif(ctx);
+ if (r)
+ return r;
+
+ r = tgsi_endif(ctx);
+ if (r)
+ return r;
+
+ /* onto the second loop to unroll */
+ for (i = 0; i < 31; i++) {
+ r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
+ treg, 1,
+ V_SQ_ALU_SRC_LITERAL, (63 - (31 - i)),
+ treg, 0);
if (r)
return r;
- r = r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_BREAK);
+ uint64_t denom_shl = (uint64_t)alu_denom_lo.value << (31 - i);
+ r = single_alu_op2(ctx, ALU_OP1_MOV,
+ treg, 2,
+ V_SQ_ALU_SRC_LITERAL, (denom_shl & 0xffffffff),
+ 0, 0);
if (r)
return r;
- fc_set_mid(ctx, fscp - 1);
- return tgsi_endif(ctx);
- } else {
- r = emit_logic_pred(ctx, ALU_OP2_PRED_SETE_INT, CF_OP_ALU_BREAK);
+ r = single_alu_op2(ctx, ALU_OP1_MOV,
+ treg, 3,
+ V_SQ_ALU_SRC_LITERAL, (denom_shl >> 32),
+ 0, 0);
if (r)
return r;
- fc_set_mid(ctx, fscp - 1);
- }
-
- return 0;
-}
-
-static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
-{
- unsigned int fscp;
-
- for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
- {
- if (FC_LOOP == ctx->bc->fc_stack[fscp - 1].type)
- break;
- }
-
- if (fscp == 0) {
- R600_ERR("Break not inside loop/endloop pair\n");
- return -EINVAL;
- }
-
- r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
-
- fc_set_mid(ctx, fscp - 1);
-
- return 0;
-}
-
-static int tgsi_gs_emit(struct r600_shader_ctx *ctx)
-{
- struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
- int stream = ctx->literals[inst->Src[0].Register.Index * 4 + inst->Src[0].Register.SwizzleX];
- int r;
- if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
- emit_gs_ring_writes(ctx, ctx->gs_stream_output_info, stream, TRUE);
+ r = emit_u64sge(ctx, sub_tmp,
+ tmp_num, 0,
+ treg, 2);
+ if (r)
+ return r;
- r = r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
- if (!r) {
- ctx->bc->cf_last->count = stream; // Count field for CUT/EMIT_VERTEX indicates which stream
- if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
- return emit_inc_ring_offset(ctx, stream, TRUE);
- }
- return r;
-}
+ r = single_alu_op2(ctx, ALU_OP2_AND_INT,
+ treg, 1,
+ treg, 1,
+ sub_tmp, 0);
+ if (r)
+ return r;
-static int tgsi_umad(struct r600_shader_ctx *ctx)
-{
- struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
- struct r600_bytecode_alu alu;
- int i, j, k, r;
- int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+ memset(&alu_src, 0, sizeof(alu_src));
+ alu_src.sel = treg;
+ alu_src.chan = 1;
+ r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
+ if (r)
+ return r;
- /* src0 * src1 */
- for (i = 0; i < lasti + 1; i++) {
- if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
- continue;
- if (ctx->bc->chip_class == CAYMAN) {
- for (j = 0 ; j < 4; j++) {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ r = emit_u64add(ctx, ALU_OP2_SUB_INT,
+ sub_tmp,
+ tmp_num, 0,
+ treg, 2);
+ if (r)
+ return r;
- alu.op = ALU_OP2_MULLO_UINT;
- for (k = 0; k < inst->Instruction.NumSrcRegs; k++) {
- r600_bytecode_src(&alu.src[k], &ctx->src[k], i);
- }
- alu.dst.chan = j;
- alu.dst.sel = ctx->temp_reg;
- alu.dst.write = (j == i);
- if (j == 3)
- alu.last = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
- } else {
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ r = single_alu_op2(ctx, ALU_OP1_MOV,
+ tmp_num, 0,
+ sub_tmp, 0,
+ 0, 0);
+ if (r)
+ return r;
- alu.dst.chan = i;
- alu.dst.sel = ctx->temp_reg;
- alu.dst.write = 1;
+ r = single_alu_op2(ctx, ALU_OP1_MOV,
+ tmp_num, 1,
+ sub_tmp, 1,
+ 0, 0);
+ if (r)
+ return r;
- alu.op = ALU_OP2_MULLO_UINT;
- for (j = 0; j < 2; j++) {
- r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
- }
+ r = single_alu_op2(ctx, ALU_OP2_OR_INT,
+ tmp_num, 2,
+ tmp_num, 2,
+ V_SQ_ALU_SRC_LITERAL, 1U << (31 - i));
+ if (r)
+ return r;
- alu.last = 1;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
+ r = tgsi_endif(ctx);
+ if (r)
+ return r;
}
+ /* log2_denom is always <= 63, so manually peel the last loop
+ * iteration.
+ */
+ uint64_t denom_shl = (uint64_t)alu_denom_lo.value;
+ r = single_alu_op2(ctx, ALU_OP1_MOV,
+ treg, 2,
+ V_SQ_ALU_SRC_LITERAL, (denom_shl & 0xffffffff),
+ 0, 0);
+ if (r)
+ return r;
- for (i = 0; i < lasti + 1; i++) {
- if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
- continue;
-
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+ r = single_alu_op2(ctx, ALU_OP1_MOV,
+ treg, 3,
+ V_SQ_ALU_SRC_LITERAL, (denom_shl >> 32),
+ 0, 0);
+ if (r)
+ return r;
- alu.op = ALU_OP2_ADD_INT;
+ r = emit_u64sge(ctx, sub_tmp,
+ tmp_num, 0,
+ treg, 2);
+ if (r)
+ return r;
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = i;
+ memset(&alu_src, 0, sizeof(alu_src));
+ alu_src.sel = sub_tmp;
+ alu_src.chan = 0;
+ r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
+ if (r)
+ return r;
- r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
- if (i == lasti) {
- alu.last = 1;
- }
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
- return 0;
-}
+ r = emit_u64add(ctx, ALU_OP2_SUB_INT,
+ sub_tmp,
+ tmp_num, 0,
+ treg, 2);
+ if (r)
+ return r;
-static int tgsi_pk2h(struct r600_shader_ctx *ctx)
-{
- struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
- struct r600_bytecode_alu alu;
- int r, i;
- int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+ r = single_alu_op2(ctx, ALU_OP2_OR_INT,
+ tmp_num, 2,
+ tmp_num, 2,
+ V_SQ_ALU_SRC_LITERAL, 1U);
+ if (r)
+ return r;
+ r = tgsi_endif(ctx);
+ if (r)
+ return r;
- /* temp.xy = f32_to_f16(src) */
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_FLT32_TO_FLT16;
- alu.dst.chan = 0;
- alu.dst.sel = ctx->temp_reg;
- alu.dst.write = 1;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ alu.op = ALU_OP1_MOV;
+ tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
+ alu.src[0].sel = tmp_num;
+ alu.src[0].chan = 2;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
- alu.dst.chan = 1;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
+
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
+ alu.src[0].sel = tmp_num;
+ alu.src[0].chan = 3;
alu.last = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
-
- /* dst.x = temp.y * 0x10000 + temp.x */
- for (i = 0; i < lasti + 1; i++) {
- if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
- continue;
-
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP3_MULADD_UINT24;
- alu.is_op3 = 1;
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
- alu.last = i == lasti;
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = 1;
- alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
- alu.src[1].value = 0x10000;
- alu.src[2].sel = ctx->temp_reg;
- alu.src[2].chan = 0;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
-
return 0;
}
-static int tgsi_up2h(struct r600_shader_ctx *ctx)
+static int egcm_u64sne(struct r600_shader_ctx *ctx)
{
struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
struct r600_bytecode_alu alu;
- int r, i;
- int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+ int r;
+ int treg = ctx->temp_reg;
- /* temp.x = src.x */
- /* note: no need to mask out the high bits */
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP1_MOV;
+ alu.op = ALU_OP2_SETNE_INT;
+ alu.dst.sel = treg;
alu.dst.chan = 0;
- alu.dst.sel = ctx->temp_reg;
alu.dst.write = 1;
r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+ r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
- /* temp.y = src.x >> 16 */
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- alu.op = ALU_OP2_LSHR_INT;
+ alu.op = ALU_OP2_SETNE_INT;
+ alu.dst.sel = treg;
alu.dst.chan = 1;
- alu.dst.sel = ctx->temp_reg;
alu.dst.write = 1;
- r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
- alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
- alu.src[1].value = 16;
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
+ r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
alu.last = 1;
r = r600_bytecode_add_alu(ctx->bc, &alu);
if (r)
return r;
- /* dst.wz = dst.xy = f16_to_f32(temp.xy) */
- for (i = 0; i < lasti + 1; i++) {
- if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
- continue;
- memset(&alu, 0, sizeof(struct r600_bytecode_alu));
- tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
- alu.op = ALU_OP1_FLT16_TO_FLT32;
- alu.src[0].sel = ctx->temp_reg;
- alu.src[0].chan = i % 2;
- alu.last = i == lasti;
- r = r600_bytecode_add_alu(ctx->bc, &alu);
- if (r)
- return r;
- }
-
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_OR_INT;
+ tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
+ alu.src[0].sel = treg;
+ alu.src[0].chan = 0;
+ alu.src[1].sel = treg;
+ alu.src[1].chan = 1;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
return 0;
}
[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
- /* XXX:
- * For state trackers other than OpenGL, we'll want to use
- * _RECIP_IEEE instead.
- */
- [TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_CLAMPED, tgsi_trans_srcx_replicate},
+ [TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},
[TGSI_OPCODE_RSQ] = { ALU_OP0_NOP, tgsi_rsq},
[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
- [TGSI_OPCODE_MIN] = { ALU_OP2_MIN, tgsi_op2},
- [TGSI_OPCODE_MAX] = { ALU_OP2_MAX, tgsi_op2},
+ /* MIN_DX10 returns non-nan result if one src is NaN, MIN returns NaN */
+ [TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
+ [TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
[TGSI_OPCODE_FMA] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
- [TGSI_OPCODE_DP2A] = { ALU_OP0_NOP, tgsi_unsupported},
+ [21] = { ALU_OP0_NOP, tgsi_unsupported},
[22] = { ALU_OP0_NOP, tgsi_unsupported},
[23] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
[TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
- [TGSI_OPCODE_XPD] = { ALU_OP0_NOP, tgsi_xpd},
+ [31] = { ALU_OP0_NOP, tgsi_unsupported},
[32] = { ALU_OP0_NOP, tgsi_unsupported},
- [33] = { ALU_OP0_NOP, tgsi_unsupported},
+ [TGSI_OPCODE_CLOCK] = { ALU_OP0_NOP, tgsi_unsupported},
[34] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_DPH] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
+ [35] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
- [TGSI_OPCODE_SCS] = { ALU_OP0_NOP, tgsi_scs},
+ [67] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
[69] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
[TGSI_OPCODE_DDX_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_DDY_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_PUSHA] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_POPA] = { ALU_OP0_NOP, tgsi_unsupported},
+ [81] = { ALU_OP0_NOP, tgsi_unsupported},
+ [82] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
- [TGSI_OPCODE_SAD] = { ALU_OP0_NOP, tgsi_unsupported},
+ [93] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_TXQ_LZ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
+ [103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
[TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
[TGSI_OPCODE_RESQ] = { ALU_OP0_NOP, tgsi_unsupported},
[106] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
[TGSI_OPCODE_MEMBAR] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_CALLNZ] = { ALU_OP0_NOP, tgsi_unsupported},
+ [113] = { ALU_OP0_NOP, tgsi_unsupported},
[114] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_BREAKC] = { ALU_OP0_NOP, tgsi_loop_breakc},
+ [115] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
[TGSI_OPCODE_DFMA] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_MFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_LFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_SFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
+ [163] = { ALU_OP0_NOP, tgsi_unsupported},
+ [164] = { ALU_OP0_NOP, tgsi_unsupported},
+ [165] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_BARRIER] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},
- [TGSI_OPCODE_RSQ] = { ALU_OP1_RECIPSQRT_IEEE, tgsi_rsq},
+ [TGSI_OPCODE_RSQ] = { ALU_OP0_NOP, tgsi_rsq},
[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
[TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
- [TGSI_OPCODE_MIN] = { ALU_OP2_MIN, tgsi_op2},
- [TGSI_OPCODE_MAX] = { ALU_OP2_MAX, tgsi_op2},
+ [TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
+ [TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
[TGSI_OPCODE_FMA] = { ALU_OP3_FMA, tgsi_op3},
[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
- [TGSI_OPCODE_DP2A] = { ALU_OP0_NOP, tgsi_unsupported},
+ [21] = { ALU_OP0_NOP, tgsi_unsupported},
[22] = { ALU_OP0_NOP, tgsi_unsupported},
[23] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
[TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
- [TGSI_OPCODE_XPD] = { ALU_OP0_NOP, tgsi_xpd},
+ [31] = { ALU_OP0_NOP, tgsi_unsupported},
[32] = { ALU_OP0_NOP, tgsi_unsupported},
- [33] = { ALU_OP0_NOP, tgsi_unsupported},
+ [TGSI_OPCODE_CLOCK] = { ALU_OP0_NOP, tgsi_clock},
[34] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_DPH] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
+ [35] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
- [TGSI_OPCODE_SCS] = { ALU_OP0_NOP, tgsi_scs},
+ [67] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
[69] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
[TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
[TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
- [TGSI_OPCODE_PUSHA] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_POPA] = { ALU_OP0_NOP, tgsi_unsupported},
+ [82] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
- [TGSI_OPCODE_SAD] = { ALU_OP0_NOP, tgsi_unsupported},
+ [93] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_TXQ_LZ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
+ [103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
[TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
- [TGSI_OPCODE_RESQ] = { ALU_OP0_NOP, tgsi_unsupported},
+ [TGSI_OPCODE_RESQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_resq},
[106] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
- [TGSI_OPCODE_MEMBAR] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_CALLNZ] = { ALU_OP0_NOP, tgsi_unsupported},
+ [TGSI_OPCODE_MEMBAR] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
+ [113] = { ALU_OP0_NOP, tgsi_unsupported},
[114] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_BREAKC] = { ALU_OP0_NOP, tgsi_unsupported},
+ [115] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
/* Refer below for TGSI_OPCODE_DFMA */
[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
- [TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_MFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_LFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_SFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
+ [TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_load},
+ [TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_store},
+ [163] = { ALU_OP0_NOP, tgsi_unsupported},
+ [164] = { ALU_OP0_NOP, tgsi_unsupported},
+ [165] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_BARRIER] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
- [TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMCAS] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMAND] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMOR] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMXOR] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMUMIN] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMUMAX] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMIMIN] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMIMAX] = { ALU_OP0_NOP, tgsi_unsupported},
+ [TGSI_OPCODE_ATOMUADD] = { V_RAT_INST_ADD_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMXCHG] = { V_RAT_INST_XCHG_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMCAS] = { V_RAT_INST_CMPXCHG_INT_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMAND] = { V_RAT_INST_AND_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMOR] = { V_RAT_INST_OR_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMXOR] = { V_RAT_INST_XOR_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMUMIN] = { V_RAT_INST_MIN_UINT_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMUMAX] = { V_RAT_INST_MAX_UINT_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMIMIN] = { V_RAT_INST_MIN_INT_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMIMAX] = { V_RAT_INST_MAX_INT_RTN, tgsi_atomic_op},
[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
- [TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_op3},
- [TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_op3},
+ [TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_bfe},
+ [TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_bfe},
[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
[TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
[TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
[TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
+ [TGSI_OPCODE_U64SNE] = { ALU_OP0_NOP, egcm_u64sne },
+ [TGSI_OPCODE_U64ADD] = { ALU_OP0_NOP, egcm_u64add },
+ [TGSI_OPCODE_U64MUL] = { ALU_OP0_NOP, egcm_u64mul },
+ [TGSI_OPCODE_U64DIV] = { ALU_OP0_NOP, egcm_u64div },
[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
};
[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
- [TGSI_OPCODE_MIN] = { ALU_OP2_MIN, tgsi_op2},
- [TGSI_OPCODE_MAX] = { ALU_OP2_MAX, tgsi_op2},
+ [TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
+ [TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
[TGSI_OPCODE_FMA] = { ALU_OP3_FMA, tgsi_op3},
[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, cayman_emit_float_instr},
- [TGSI_OPCODE_DP2A] = { ALU_OP0_NOP, tgsi_unsupported},
+ [21] = { ALU_OP0_NOP, tgsi_unsupported},
[22] = { ALU_OP0_NOP, tgsi_unsupported},
[23] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, cayman_emit_float_instr},
[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, cayman_emit_float_instr},
[TGSI_OPCODE_POW] = { ALU_OP0_NOP, cayman_pow},
- [TGSI_OPCODE_XPD] = { ALU_OP0_NOP, tgsi_xpd},
+ [31] = { ALU_OP0_NOP, tgsi_unsupported},
[32] = { ALU_OP0_NOP, tgsi_unsupported},
- [33] = { ALU_OP0_NOP, tgsi_unsupported},
+ [TGSI_OPCODE_CLOCK] = { ALU_OP0_NOP, tgsi_clock},
[34] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_DPH] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
+ [35] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_COS] = { ALU_OP1_COS, cayman_trig},
[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
- [TGSI_OPCODE_SCS] = { ALU_OP0_NOP, tgsi_scs},
+ [67] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
[69] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
[TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
[TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
- [TGSI_OPCODE_PUSHA] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_POPA] = { ALU_OP0_NOP, tgsi_unsupported},
+ [82] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2},
[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
- [TGSI_OPCODE_SAD] = { ALU_OP0_NOP, tgsi_unsupported},
+ [93] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_TXQ_LZ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
+ [103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
[TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
- [TGSI_OPCODE_RESQ] = { ALU_OP0_NOP, tgsi_unsupported},
+ [TGSI_OPCODE_RESQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_resq},
[106] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
- [TGSI_OPCODE_MEMBAR] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_CALLNZ] = { ALU_OP0_NOP, tgsi_unsupported},
+ [TGSI_OPCODE_MEMBAR] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
+ [113] = { ALU_OP0_NOP, tgsi_unsupported},
[114] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_BREAKC] = { ALU_OP0_NOP, tgsi_unsupported},
+ [115] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill}, /* conditional kill */
[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end}, /* aka HALT */
/* Refer below for TGSI_OPCODE_DFMA */
[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
- [TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_MFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_LFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_SFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
+ [TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_load},
+ [TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_store},
+ [163] = { ALU_OP0_NOP, tgsi_unsupported},
+ [164] = { ALU_OP0_NOP, tgsi_unsupported},
+ [165] = { ALU_OP0_NOP, tgsi_unsupported},
[TGSI_OPCODE_BARRIER] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
- [TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMCAS] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMAND] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMOR] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMXOR] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMUMIN] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMUMAX] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMIMIN] = { ALU_OP0_NOP, tgsi_unsupported},
- [TGSI_OPCODE_ATOMIMAX] = { ALU_OP0_NOP, tgsi_unsupported},
+ [TGSI_OPCODE_ATOMUADD] = { V_RAT_INST_ADD_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMXCHG] = { V_RAT_INST_XCHG_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMCAS] = { V_RAT_INST_CMPXCHG_INT_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMAND] = { V_RAT_INST_AND_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMOR] = { V_RAT_INST_OR_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMXOR] = { V_RAT_INST_XOR_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMUMIN] = { V_RAT_INST_MIN_UINT_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMUMAX] = { V_RAT_INST_MAX_UINT_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMIMIN] = { V_RAT_INST_MIN_INT_RTN, tgsi_atomic_op},
+ [TGSI_OPCODE_ATOMIMAX] = { V_RAT_INST_MAX_INT_RTN, tgsi_atomic_op},
[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, cayman_mul_int_instr},
[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
- [TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_op3},
- [TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_op3},
+ [TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_bfe},
+ [TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_bfe},
[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
[TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
[TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
[TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
+ [TGSI_OPCODE_U64SNE] = { ALU_OP0_NOP, egcm_u64sne },
+ [TGSI_OPCODE_U64ADD] = { ALU_OP0_NOP, egcm_u64add },
+ [TGSI_OPCODE_U64MUL] = { ALU_OP0_NOP, egcm_u64mul },
+ [TGSI_OPCODE_U64DIV] = { ALU_OP0_NOP, egcm_u64div },
[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
};