if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
location = TGSI_INTERPOLATE_LOC_CENTER;
- inputs[1].enabled = true; /* needs SAMPLEID */
} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
location = TGSI_INTERPOLATE_LOC_CENTER;
/* Needs sample positions, currently those are always available */
tgsi_parse_free(&parse);
+ if (ctx->info.reads_samplemask &&
+ (ctx->info.uses_linear_sample || ctx->info.uses_persp_sample)) {
+ inputs[1].enabled = true;
+ }
+
+ if (ctx->bc->chip_class >= EVERGREEN) {
+ int num_baryc = 0;
+ /* assign gpr to each interpolator according to priority */
+ for (i = 0; i < ARRAY_SIZE(ctx->eg_interpolators); i++) {
+ if (ctx->eg_interpolators[i].enabled) {
+ ctx->eg_interpolators[i].ij_index = num_baryc;
+ num_baryc++;
+ }
+ }
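+ /* each barycentric pair (i, j) occupies two channels, so two interpolators share one GPR */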
+ num_baryc = (num_baryc + 1) >> 1;
+ gpr_offset += num_baryc;
+ }
+
for (i = 0; i < ARRAY_SIZE(inputs); i++) {
boolean enabled = inputs[i].enabled;
int *reg = inputs[i].reg;
* for evergreen we need to scan the shader to find the number of GPRs we need to
* reserve for interpolation and system values
*
- * we need to know if we are going to emit
- * any sample or centroid inputs
+ * we need to know if we are going to emit any sample or centroid inputs
* if perspective and linear are required
*/
static int evergreen_gpr_count(struct r600_shader_ctx *ctx)
{
unsigned i;
- int num_baryc;
- struct tgsi_parse_context parse;
memset(&ctx->eg_interpolators, 0, sizeof(ctx->eg_interpolators));
+ /*
+ * Could get this information from the shader info. But right now
+ * we interpolate all declared inputs, whereas the shader info will
+ * only contain the bits if the inputs are actually used, so it might
+ * not be safe...
+ */
for (i = 0; i < ctx->info.num_inputs; i++) {
int k;
/* skip position/face/mask/sampleid */
ctx->eg_interpolators[k].enabled = TRUE;
}
- if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
- return 0;
- }
-
- /* need to scan shader for system values and interpolateAtSample/Offset/Centroid */
- while (!tgsi_parse_end_of_tokens(&parse)) {
- tgsi_parse_token(&parse);
-
- if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) {
- const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction;
- if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE ||
- inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET ||
- inst->Instruction.Opcode == TGSI_OPCODE_INTERP_CENTROID)
- {
- int interpolate, location, k;
-
- if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
- location = TGSI_INTERPOLATE_LOC_CENTER;
- } else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
- location = TGSI_INTERPOLATE_LOC_CENTER;
- } else {
- location = TGSI_INTERPOLATE_LOC_CENTROID;
- }
-
- interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index];
- k = eg_get_interpolator_index(interpolate, location);
- if (k >= 0)
- ctx->eg_interpolators[k].enabled = true;
- }
- }
- }
-
- tgsi_parse_free(&parse);
-
- /* assign gpr to each interpolator according to priority */
- num_baryc = 0;
- for (i = 0; i < ARRAY_SIZE(ctx->eg_interpolators); i++) {
- if (ctx->eg_interpolators[i].enabled) {
- ctx->eg_interpolators[i].ij_index = num_baryc;
- num_baryc ++;
- }
- }
-
/* XXX PULL MODEL and LINE STIPPLE */
- num_baryc = (num_baryc + 1) >> 1;
- return allocate_system_value_inputs(ctx, num_baryc);
+ return allocate_system_value_inputs(ctx, 0);
}
/* sample_id_sel == NULL means fetch for current sample */
struct r600_bytecode_vtx vtx;
int r, t1;
- assert(ctx->fixed_pt_position_gpr != -1);
-
t1 = r600_get_temp(ctx);
memset(&vtx, 0, sizeof(struct r600_bytecode_vtx));
vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER;
vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET;
if (sample_id == NULL) {
+ assert(ctx->fixed_pt_position_gpr != -1);
+
vtx.src_gpr = ctx->fixed_pt_position_gpr; // SAMPLEID is in .w;
vtx.src_sel_x = 3;
}
ctx.file_offset[TGSI_FILE_CONSTANT] = 512;
ctx.file_offset[TGSI_FILE_IMMEDIATE] = V_SQ_ALU_SRC_LITERAL;
- ctx.bc->ar_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] +
- ctx.info.file_max[TGSI_FILE_TEMPORARY] + 1;
- ctx.bc->index_reg[0] = ctx.bc->ar_reg + 1;
- ctx.bc->index_reg[1] = ctx.bc->ar_reg + 2;
+
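+ /* regno tracks the last GPR reserved so far; each ++regno claims the next one */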
+ int regno = ctx.file_offset[TGSI_FILE_TEMPORARY] +
+ ctx.info.file_max[TGSI_FILE_TEMPORARY];
+ ctx.bc->ar_reg = ++regno;
+ ctx.bc->index_reg[0] = ++regno;
+ ctx.bc->index_reg[1] = ++regno;
if (ctx.type == PIPE_SHADER_TESS_CTRL) {
- ctx.tess_input_info = ctx.bc->ar_reg + 3;
- ctx.tess_output_info = ctx.bc->ar_reg + 4;
- ctx.temp_reg = ctx.bc->ar_reg + 5;
+ ctx.tess_input_info = ++regno;
+ ctx.tess_output_info = ++regno;
} else if (ctx.type == PIPE_SHADER_TESS_EVAL) {
ctx.tess_input_info = 0;
- ctx.tess_output_info = ctx.bc->ar_reg + 3;
- ctx.temp_reg = ctx.bc->ar_reg + 4;
+ ctx.tess_output_info = ++regno;
} else if (ctx.type == PIPE_SHADER_GEOMETRY) {
- ctx.gs_export_gpr_tregs[0] = ctx.bc->ar_reg + 3;
- ctx.gs_export_gpr_tregs[1] = ctx.bc->ar_reg + 4;
- ctx.gs_export_gpr_tregs[2] = ctx.bc->ar_reg + 5;
- ctx.gs_export_gpr_tregs[3] = ctx.bc->ar_reg + 6;
- ctx.temp_reg = ctx.bc->ar_reg + 7;
+ ctx.gs_export_gpr_tregs[0] = ++regno;
+ ctx.gs_export_gpr_tregs[1] = ++regno;
+ ctx.gs_export_gpr_tregs[2] = ++regno;
+ ctx.gs_export_gpr_tregs[3] = ++regno;
if (ctx.shader->gs_tri_strip_adj_fix) {
- ctx.gs_rotated_input[0] = ctx.bc->ar_reg + 7;
- ctx.gs_rotated_input[1] = ctx.bc->ar_reg + 8;
- ctx.temp_reg += 2;
+ ctx.gs_rotated_input[0] = ++regno;
+ ctx.gs_rotated_input[1] = ++regno;
} else {
ctx.gs_rotated_input[0] = 0;
ctx.gs_rotated_input[1] = 1;
}
- } else {
- ctx.temp_reg = ctx.bc->ar_reg + 3;
}
if (shader->uses_images) {
- ctx.thread_id_gpr = ctx.temp_reg++;
+ ctx.thread_id_gpr = ++regno;
ctx.thread_id_gpr_loaded = false;
}
+ ctx.temp_reg = ++regno;
shader->max_arrays = 0;
shader->num_arrays = 0;
r = eg_load_helper_invocation(&ctx);
if (r)
return r;
+ }
+ /*
+ * XXX this relies on fixed_pt_position_gpr only being present when
+ * this shader should be executed per sample. Should be the case for now...
+ */
+ if (ctx.fixed_pt_position_gpr != -1 && ctx.info.reads_samplemask) {
+ /*
+ * Fix up the sample mask. The hw always gives us the coverage mask
+ * for the whole pixel. However, for per-sample shading, we need the
+ * coverage for this shader invocation only.
+ * Also, with msaa disabled, only the first bit should be set
+ * (luckily the same fixup works for both problems).
+ * For now, we can only do it if we know this shader is always
+ * executed per sample (due to usage of bits in the shader
+ * forcing per-sample execution).
+ * If the fb is not multisampled, we'd do unnecessary work but
+ * it should still be correct.
+ * It will however do nothing for sample shading according
+ * to MinSampleShading.
+ */
+ struct r600_bytecode_alu alu;
+ int tmp = r600_get_temp(&ctx);
+ assert(ctx.face_gpr != -1);
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+
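+ /* tmp.x = 1 << sample_id (SAMPLEID lives in fixed_pt_position_gpr.w) */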
+ alu.op = ALU_OP2_LSHL_INT;
+ alu.src[0].sel = V_SQ_ALU_SRC_LITERAL;
+ alu.src[0].value = 0x1;
+ alu.src[1].sel = ctx.fixed_pt_position_gpr;
+ alu.src[1].chan = 3;
+ alu.dst.sel = tmp;
+ alu.dst.chan = 0;
+ alu.dst.write = 1;
+ alu.last = 1;
+ if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
+ return r;
+
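+ /* mask the coverage in face_gpr.z down to this invocation's sample bit */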
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_AND_INT;
+ alu.src[0].sel = tmp;
+ alu.src[1].sel = ctx.face_gpr;
+ alu.src[1].chan = 2;
+ alu.dst.sel = ctx.face_gpr;
+ alu.dst.chan = 2;
+ alu.dst.write = 1;
+ alu.last = 1;
+ if ((r = r600_bytecode_add_alu(ctx.bc, &alu)))
+ return r;
}
+
if (ctx.fragcoord_input >= 0) {
if (ctx.bc->chip_class == CAYMAN) {
for (j = 0 ; j < 4; j++) {
}
}
+ if (inst->Instruction.Opcode == TGSI_OPCODE_TG4) {
+ /* Gather4 should follow the same rules as bilinear filtering, but the hardware
+ * incorrectly forces nearest filtering if the texture format is integer.
+ * The only effect it has on Gather4, which always returns 4 texels for
+ * bilinear filtering, is that the final coordinates are off by 0.5 of
+ * the texel size.
+ *
+ * The workaround is to subtract 0.5 from the unnormalized coordinates,
+ * or (0.5 / size) from the normalized coordinates.
+ */
+ if (inst->Texture.ReturnType == TGSI_RETURN_TYPE_SINT ||
+ inst->Texture.ReturnType == TGSI_RETURN_TYPE_UINT) {
+ int treg = r600_get_temp(ctx);
+
+ /* move the array and comparison coordinate to temp_reg if needed */
+ if ((inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
+ inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
+ inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY) && !src_loaded) {
+ int end = inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ? 3 : 2;
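+ /* channel 2 holds the compare value (SHADOW2D) or array layer; shadow arrays also carry the compare value in channel 3 */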
+ for (i = 2; i <= end; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_MOV;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ alu.last = (i == end);
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ }
+
+ if (inst->Texture.Texture == TGSI_TEXTURE_RECT ||
+ inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT) {
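+ /* RECT textures use unnormalized coordinates, so subtract 0.5 directly */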
+ for (i = 0; i < 2; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP2_ADD;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ alu.last = i == 1;
+ if (src_loaded) {
+ alu.src[0].sel = ctx->temp_reg;
+ alu.src[0].chan = i;
+ } else
+ r600_bytecode_src(&alu.src[0], &ctx->src[0], i);
+ alu.src[1].sel = V_SQ_ALU_SRC_0_5;
+ alu.src[1].neg = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ } else {
+ /* execute a TXQ */
+ memset(&tex, 0, sizeof(struct r600_bytecode_tex));
+ tex.op = FETCH_OP_GET_TEXTURE_RESINFO;
+ tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg);
+ tex.sampler_index_mode = sampler_index_mode;
+ tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS;
+ tex.resource_index_mode = sampler_index_mode;
+ tex.dst_gpr = treg;
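+ /* fetch width/height into treg.xy; src_sel 4 selects constant 0 (base level), dst_sel 7 masks unused channels */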
+ tex.src_sel_x = 4;
+ tex.src_sel_y = 4;
+ tex.src_sel_z = 4;
+ tex.src_sel_w = 4;
+ tex.dst_sel_x = 0;
+ tex.dst_sel_y = 1;
+ tex.dst_sel_z = 7;
+ tex.dst_sel_w = 7;
+ r = r600_bytecode_add_tex(ctx->bc, &tex);
+ if (r)
+ return r;
+
+ /* coord.xy = -0.5 * (1.0/int_to_flt(size)) + coord.xy */
+ if (ctx->bc->chip_class == CAYMAN) {
+ /* RECIP_IEEE is a transcendental op; Cayman replicates it across three slots, only the matching one writes */
+ for (i = 0; i < 2; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_INT_TO_FLT;
+ alu.dst.sel = treg;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ alu.src[0].sel = treg;
+ alu.src[0].chan = i;
+ alu.last = (i == 1) ? 1 : 0;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ for (j = 0; j < 2; j++) {
+ for (i = 0; i < 3; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_RECIP_IEEE;
+ alu.src[0].sel = treg;
+ alu.src[0].chan = j;
+ alu.dst.sel = treg;
+ alu.dst.chan = i;
+ if (i == 2)
+ alu.last = 1;
+ if (i == j)
+ alu.dst.write = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ }
+ } else {
+ for (i = 0; i < 2; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_INT_TO_FLT;
+ alu.dst.sel = treg;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ alu.src[0].sel = treg;
+ alu.src[0].chan = i;
+ alu.last = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ for (i = 0; i < 2; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP1_RECIP_IEEE;
+ alu.src[0].sel = treg;
+ alu.src[0].chan = i;
+ alu.dst.sel = treg;
+ alu.dst.chan = i;
+ alu.last = 1;
+ alu.dst.write = 1;
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ }
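+ /* coord.xy = -0.5 * (1.0 / size) + coord.xy, as noted above */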
+ for (i = 0; i < 2; i++) {
+ memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+ alu.op = ALU_OP3_MULADD;
+ alu.is_op3 = 1;
+ alu.dst.sel = ctx->temp_reg;
+ alu.dst.chan = i;
+ alu.dst.write = 1;
+ alu.last = i == 1;
+ alu.src[0].sel = treg;
+ alu.src[0].chan = i;
+ alu.src[1].sel = V_SQ_ALU_SRC_0_5;
+ alu.src[1].neg = 1;
+ if (src_loaded) {
+ alu.src[2].sel = ctx->temp_reg;
+ alu.src[2].chan = i;
+ } else
+ r600_bytecode_src(&alu.src[2], &ctx->src[0], i);
+ r = r600_bytecode_add_alu(ctx->bc, &alu);
+ if (r)
+ return r;
+ }
+ }
+ src_loaded = TRUE;
+ src_gpr = ctx->temp_reg;
+ }
+ }
+
if (src_requires_loading && !src_loaded) {
for (i = 0; i < 4; i++) {
memset(&alu, 0, sizeof(struct r600_bytecode_alu));
tex.inst_mod = texture_component_select;
if (ctx->bc->chip_class == CAYMAN) {
- /* GATHER4 result order is different from TGSI TG4 */
- tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 0 : 7;
- tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 1 : 7;
- tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 2 : 7;
+ tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
+ tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
+ tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
} else {
- tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7;
- tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7;
- tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7;
+ /* GATHER4 result order is different from TGSI TG4 */
+ tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 1 : 7;
+ tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 2 : 7;
+ tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 0 : 7;
tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7;
}
}