diff --git a/src/gallium/drivers/r600/r600_shader.c b/src/gallium/drivers/r600/r600_shader.c
index ebe27445486..903a6630263 100644
--- a/src/gallium/drivers/r600/r600_shader.c
+++ b/src/gallium/drivers/r600/r600_shader.c
@@ -39,23 +39,23 @@
 #include <stdio.h>
 #include <errno.h>
 
-/* CAYMAN notes 
+/* CAYMAN notes
 Why CAYMAN got loops for lots of instructions is explained here.
 
 -These 8xx t-slot only ops are implemented in all vector slots.
 MUL_LIT, FLT_TO_UINT, INT_TO_FLT, UINT_TO_FLT
-These 8xx t-slot only opcodes become vector ops, with all four 
-slots expecting the arguments on sources a and b. Result is 
+These 8xx t-slot only opcodes become vector ops, with all four
+slots expecting the arguments on sources a and b. Result is
 broadcast to all channels.
 MULLO_INT, MULHI_INT, MULLO_UINT, MULHI_UINT, MUL_64
-These 8xx t-slot only opcodes become vector ops in the z, y, and 
+These 8xx t-slot only opcodes become vector ops in the z, y, and
 x slots.
 EXP_IEEE, LOG_IEEE/CLAMPED, RECIP_IEEE/CLAMPED/FF/INT/UINT/_64/CLAMPED_64
 RECIPSQRT_IEEE/CLAMPED/FF/_64/CLAMPED_64
 SQRT_IEEE/_64
 SIN/COS
-The w slot may have an independent co-issued operation, or if the 
-result is required to be in the w slot, the opcode above may be 
+The w slot may have an independent co-issued operation, or if the
+result is required to be in the w slot, the opcode above may be
 issued in the w slot as well.
 The compiler must issue the source argument to slots z, y, and x
 */
@@ -190,10 +190,15 @@ int r600_pipe_shader_create(struct pipe_context *ctx,
 	}
 	use_sb &= (shader->shader.processor_type != PIPE_SHADER_TESS_CTRL);
 	use_sb &= (shader->shader.processor_type != PIPE_SHADER_TESS_EVAL);
+	use_sb &= (shader->shader.processor_type != PIPE_SHADER_COMPUTE);
 
 	/* disable SB for shaders using doubles */
 	use_sb &= !shader->shader.uses_doubles;
 
+	use_sb &= !shader->shader.uses_atomics;
+	use_sb &= !shader->shader.uses_images;
+	use_sb &= !shader->shader.uses_helper_invocation;
+
+	/* Check if the bytecode has already been built. 
*/ if (!shader->shader.bc.bytecode) { r = r600_bytecode_build(&shader->shader.bc); @@ -276,6 +281,9 @@ int r600_pipe_shader_create(struct pipe_context *ctx, r600_update_ps_state(ctx, shader); } break; + case PIPE_SHADER_COMPUTE: + evergreen_update_ls_state(ctx, shader); + break; default: r = -EINVAL; goto error; @@ -287,7 +295,7 @@ error: return r; } -void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader *shader) +void r600_pipe_shader_destroy(struct pipe_context *ctx UNUSED, struct r600_pipe_shader *shader) { r600_resource_reference(&shader->bo, NULL); r600_bytecode_clear(&shader->shader.bc); @@ -339,17 +347,23 @@ struct r600_shader_ctx { boolean clip_vertex_write; unsigned cv_output; unsigned edgeflag_output; + int helper_invoc_reg; + int cs_block_size_reg; + int cs_grid_size_reg; + bool cs_block_size_loaded, cs_grid_size_loaded; int fragcoord_input; - int native_integers; int next_ring_offset; int gs_out_ring_offset; int gs_next_vertex; struct r600_shader *gs_for_vs; int gs_export_gpr_tregs[4]; + int gs_rotated_input[2]; const struct pipe_stream_output_info *gs_stream_output_info; unsigned enabled_stream_buffers_mask; unsigned tess_input_info; /* temp with tess input offsets */ unsigned tess_output_info; /* temp with tess input offsets */ + unsigned thread_id_gpr; /* temp with thread id calculated for images */ + bool thread_id_gpr_loaded; }; struct r600_shader_tgsi_instruction { @@ -374,7 +388,7 @@ static void r600_bytecode_src(struct r600_bytecode_alu_src *bc_src, const struct r600_shader_src *shader_src, unsigned chan); static int do_lds_fetch_values(struct r600_shader_ctx *ctx, unsigned temp_reg, - unsigned dst_reg); + unsigned dst_reg, unsigned mask); static int tgsi_last_instruction(unsigned writemask) { @@ -397,10 +411,6 @@ static int tgsi_is_supported(struct r600_shader_ctx *ctx) R600_ERR("too many dst (%d)\n", i->Instruction.NumDstRegs); return -EINVAL; } - if (i->Instruction.Predicate) { - R600_ERR("predicate unsupported\n"); - return -EINVAL; - } #if 0 if (i->Instruction.Label) { R600_ERR("label unsupported\n"); @@ -411,6 +421,7 @@ static int tgsi_is_supported(struct r600_shader_ctx *ctx) if (i->Src[j].Register.Dimension) { switch (i->Src[j].Register.File) { case TGSI_FILE_CONSTANT: + case TGSI_FILE_HW_ATOMIC: break; case TGSI_FILE_INPUT: if (ctx->type == PIPE_SHADER_GEOMETRY || @@ -761,7 +772,7 @@ static int single_alu_op3(struct r600_shader_ctx *ctx, int op, int r; /* validate this for other ops */ - assert(op == ALU_OP3_MULADD_UINT24); + assert(op == ALU_OP3_MULADD_UINT24 || op == ALU_OP3_CNDE_INT || op == ALU_OP3_BFE_UINT); memset(&alu, 0, sizeof(struct r600_bytecode_alu)); alu.op = op; alu.src[0].sel = src0_sel; @@ -915,8 +926,6 @@ static int tgsi_declaration(struct r600_shader_ctx *ctx) ctx->shader->output[i].spi_sid = r600_spi_sid(&ctx->shader->output[i]); switch (d->Semantic.Name) { case TGSI_SEMANTIC_CLIPDIST: - ctx->shader->clip_dist_write |= d->Declaration.UsageMask << - ((d->Semantic.Index + j) << 2); break; case TGSI_SEMANTIC_PSIZE: ctx->shader->vs_out_misc_write = 1; @@ -968,6 +977,20 @@ static int tgsi_declaration(struct r600_shader_ctx *ctx) case TGSI_FILE_SAMPLER: case TGSI_FILE_SAMPLER_VIEW: case TGSI_FILE_ADDRESS: + case TGSI_FILE_BUFFER: + case TGSI_FILE_IMAGE: + case TGSI_FILE_MEMORY: + break; + + case TGSI_FILE_HW_ATOMIC: + i = ctx->shader->nhwatomic_ranges; + ctx->shader->atomics[i].start = d->Range.First; + ctx->shader->atomics[i].end = d->Range.Last; + ctx->shader->atomics[i].hw_idx = ctx->shader->atomic_base + 
ctx->shader->nhwatomic;
+		ctx->shader->atomics[i].array_id = d->Array.ArrayID;
+		ctx->shader->atomics[i].buffer_id = d->Dim.Index2D;
+		ctx->shader->nhwatomic_ranges++;
+		ctx->shader->nhwatomic += count;
 		break;
 
 	case TGSI_FILE_SYSTEM_VALUE:
@@ -976,22 +999,6 @@ static int tgsi_declaration(struct r600_shader_ctx *ctx)
 			   d->Semantic.Name == TGSI_SEMANTIC_SAMPLEPOS) {
 			break; /* Already handled from allocate_system_value_inputs */
 		} else if (d->Semantic.Name == TGSI_SEMANTIC_INSTANCEID) {
-			if (!ctx->native_integers) {
-				struct r600_bytecode_alu alu;
-				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-
-				alu.op = ALU_OP1_INT_TO_FLT;
-				alu.src[0].sel = 0;
-				alu.src[0].chan = 3;
-
-				alu.dst.sel = 0;
-				alu.dst.chan = 3;
-				alu.dst.write = 1;
-				alu.last = 1;
-
-				if ((r = r600_bytecode_add_alu(ctx->bc, &alu)))
-					return r;
-			}
 			break;
 		} else if (d->Semantic.Name == TGSI_SEMANTIC_VERTEXID)
 			break;
@@ -1014,7 +1021,7 @@ static int tgsi_declaration(struct r600_shader_ctx *ctx)
 			if (r)
 				return r;
 
-			do_lds_fetch_values(ctx, temp_reg, dreg);
+			do_lds_fetch_values(ctx, temp_reg, dreg, 0xf);
 		} else if (d->Semantic.Name == TGSI_SEMANTIC_TESSCOORD) {
 			/* MOV r1.x, r0.x;
@@ -1083,7 +1090,8 @@ static int allocate_system_value_inputs(struct r600_shader_ctx *ctx, int gpr_off
 		{ false, &ctx->fixed_pt_position_gpr, TGSI_SEMANTIC_SAMPLEID, TGSI_SEMANTIC_SAMPLEPOS } /* SAMPLEID is in Fixed Point Position GPR.w */
 	};
-	int i, k, num_regs = 0;
+	int num_regs = 0;
+	unsigned k, i;
 
 	if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) {
 		return 0;
@@ -1103,7 +1111,6 @@ static int allocate_system_value_inputs(struct r600_shader_ctx *ctx, int gpr_off
 
 			if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) {
 				location = TGSI_INTERPOLATE_LOC_CENTER;
-				inputs[1].enabled = true; /* needs SAMPLEID */
 			} else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) {
 				location = TGSI_INTERPOLATE_LOC_CENTER;
 				/* Needs sample positions, currently those are always available */
@@ -1113,7 +1120,8 @@ static int allocate_system_value_inputs(struct r600_shader_ctx *ctx, int gpr_off
 
 			interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index];
 			k = eg_get_interpolator_index(interpolate, location);
-			ctx->eg_interpolators[k].enabled = true;
+			if (k >= 0)
+				ctx->eg_interpolators[k].enabled = true;
 		}
 	} else if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_DECLARATION) {
 		struct tgsi_full_declaration *d = &parse.FullToken.FullDeclaration;
@@ -1130,6 +1138,24 @@ static int allocate_system_value_inputs(struct r600_shader_ctx *ctx, int gpr_off
 
 	tgsi_parse_free(&parse);
 
+	if (ctx->info.reads_samplemask &&
+	    (ctx->info.uses_linear_sample || ctx->info.uses_persp_sample)) {
+		inputs[1].enabled = true;
+	}
+
+	if (ctx->bc->chip_class >= EVERGREEN) {
+		int num_baryc = 0;
+		/* assign gpr to each interpolator according to priority */
+		for (i = 0; i < ARRAY_SIZE(ctx->eg_interpolators); i++) {
+			if (ctx->eg_interpolators[i].enabled) {
+				ctx->eg_interpolators[i].ij_index = num_baryc;
+				num_baryc++;
+			}
+		}
+		num_baryc = (num_baryc + 1) >> 1;
+		gpr_offset += num_baryc;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(inputs); i++) {
 		boolean enabled = inputs[i].enabled;
 		int *reg = inputs[i].reg;
@@ -1137,9 +1163,10 @@ static int allocate_system_value_inputs(struct r600_shader_ctx *ctx, int gpr_off
 
 		if (enabled) {
 			int gpr = gpr_offset + num_regs++;
+			ctx->shader->nsys_inputs++;
 
 			// add to inputs, allocate a gpr
-			k = ctx->shader->ninput ++;
+			k = ctx->shader->ninput++;
 			ctx->shader->input[k].name = name;
 			ctx->shader->input[k].sid = 0;
 			ctx->shader->input[k].interpolate = 
TGSI_INTERPOLATE_CONSTANT; @@ -1155,18 +1182,21 @@ static int allocate_system_value_inputs(struct r600_shader_ctx *ctx, int gpr_off * for evergreen we need to scan the shader to find the number of GPRs we need to * reserve for interpolation and system values * - * we need to know if we are going to emit - * any sample or centroid inputs + * we need to know if we are going to emit any sample or centroid inputs * if perspective and linear are required */ static int evergreen_gpr_count(struct r600_shader_ctx *ctx) { unsigned i; - int num_baryc; - struct tgsi_parse_context parse; memset(&ctx->eg_interpolators, 0, sizeof(ctx->eg_interpolators)); + /* + * Could get this information from the shader info. But right now + * we interpolate all declared inputs, whereas the shader info will + * only contain the bits if the inputs are actually used, so it might + * not be safe... + */ for (i = 0; i < ctx->info.num_inputs; i++) { int k; /* skip position/face/mask/sampleid */ @@ -1183,52 +1213,9 @@ static int evergreen_gpr_count(struct r600_shader_ctx *ctx) ctx->eg_interpolators[k].enabled = TRUE; } - if (tgsi_parse_init(&parse, ctx->tokens) != TGSI_PARSE_OK) { - return 0; - } - - /* need to scan shader for system values and interpolateAtSample/Offset/Centroid */ - while (!tgsi_parse_end_of_tokens(&parse)) { - tgsi_parse_token(&parse); - - if (parse.FullToken.Token.Type == TGSI_TOKEN_TYPE_INSTRUCTION) { - const struct tgsi_full_instruction *inst = &parse.FullToken.FullInstruction; - if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE || - inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET || - inst->Instruction.Opcode == TGSI_OPCODE_INTERP_CENTROID) - { - int interpolate, location, k; - - if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) { - location = TGSI_INTERPOLATE_LOC_CENTER; - } else if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET) { - location = TGSI_INTERPOLATE_LOC_CENTER; - } else { - location = TGSI_INTERPOLATE_LOC_CENTROID; - } - - interpolate = ctx->info.input_interpolate[inst->Src[0].Register.Index]; - k = eg_get_interpolator_index(interpolate, location); - ctx->eg_interpolators[k].enabled = true; - } - } - } - - tgsi_parse_free(&parse); - - /* assign gpr to each interpolator according to priority */ - num_baryc = 0; - for (i = 0; i < ARRAY_SIZE(ctx->eg_interpolators); i++) { - if (ctx->eg_interpolators[i].enabled) { - ctx->eg_interpolators[i].ij_index = num_baryc; - num_baryc ++; - } - } - /* XXX PULL MODEL and LINE STIPPLE */ - num_baryc = (num_baryc + 1) >> 1; - return allocate_system_value_inputs(ctx, num_baryc); + return allocate_system_value_inputs(ctx, 0); } /* sample_id_sel == NULL means fetch for current sample */ @@ -1237,8 +1224,6 @@ static int load_sample_position(struct r600_shader_ctx *ctx, struct r600_shader_ struct r600_bytecode_vtx vtx; int r, t1; - assert(ctx->fixed_pt_position_gpr != -1); - t1 = r600_get_temp(ctx); memset(&vtx, 0, sizeof(struct r600_bytecode_vtx)); @@ -1246,6 +1231,8 @@ static int load_sample_position(struct r600_shader_ctx *ctx, struct r600_shader_ vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER; vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET; if (sample_id == NULL) { + assert(ctx->fixed_pt_position_gpr != -1); + vtx.src_gpr = ctx->fixed_pt_position_gpr; // SAMPLEID is in .w; vtx.src_sel_x = 3; } @@ -1275,7 +1262,126 @@ static int load_sample_position(struct r600_shader_ctx *ctx, struct r600_shader_ vtx.num_format_all = 2; vtx.format_comp_all = 1; vtx.use_const_fields = 0; - vtx.offset = 1; // first element is size of buffer + 
vtx.offset = 0; + vtx.endian = r600_endian_swap(32); + vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */ + + r = r600_bytecode_add_vtx(ctx->bc, &vtx); + if (r) + return r; + + return t1; +} + +static int eg_load_helper_invocation(struct r600_shader_ctx *ctx) +{ + int r; + struct r600_bytecode_alu alu; + + /* do a vtx fetch with wqm set on the vtx fetch */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + alu.dst.sel = ctx->helper_invoc_reg; + alu.dst.chan = 0; + alu.src[0].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[0].value = 0xffffffff; + alu.dst.write = 1; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + /* do a vtx fetch in VPM mode */ + struct r600_bytecode_vtx vtx; + memset(&vtx, 0, sizeof(vtx)); + vtx.op = FETCH_OP_GET_BUFFER_RESINFO; + vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER; + vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET; + vtx.src_gpr = 0; + vtx.mega_fetch_count = 16; /* no idea here really... */ + vtx.dst_gpr = ctx->helper_invoc_reg; + vtx.dst_sel_x = 4; + vtx.dst_sel_y = 7; /* SEL_Y */ + vtx.dst_sel_z = 7; /* SEL_Z */ + vtx.dst_sel_w = 7; /* SEL_W */ + vtx.data_format = FMT_32; + if ((r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx))) + return r; + ctx->bc->cf_last->vpm = 1; + return 0; +} + +static int cm_load_helper_invocation(struct r600_shader_ctx *ctx) +{ + int r; + struct r600_bytecode_alu alu; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + alu.dst.sel = ctx->helper_invoc_reg; + alu.dst.chan = 0; + alu.src[0].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[0].value = 0xffffffff; + alu.dst.write = 1; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + alu.dst.sel = ctx->helper_invoc_reg; + alu.dst.chan = 0; + alu.src[0].sel = V_SQ_ALU_SRC_0; + alu.dst.write = 1; + alu.last = 1; + r = r600_bytecode_add_alu_type(ctx->bc, &alu, CF_OP_ALU_VALID_PIXEL_MODE); + if (r) + return r; + + return ctx->helper_invoc_reg; +} + +static int load_block_grid_size(struct r600_shader_ctx *ctx, bool load_block) +{ + struct r600_bytecode_vtx vtx; + int r, t1; + + if (ctx->cs_block_size_loaded) + return ctx->cs_block_size_reg; + if (ctx->cs_grid_size_loaded) + return ctx->cs_grid_size_reg; + + t1 = load_block ? ctx->cs_block_size_reg : ctx->cs_grid_size_reg; + struct r600_bytecode_alu alu; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + alu.src[0].sel = V_SQ_ALU_SRC_0; + alu.dst.sel = t1; + alu.dst.write = 1; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + memset(&vtx, 0, sizeof(struct r600_bytecode_vtx)); + vtx.op = FETCH_OP_VFETCH; + vtx.buffer_id = R600_BUFFER_INFO_CONST_BUFFER; + vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET; + vtx.src_gpr = t1; + vtx.src_sel_x = 0; + + vtx.mega_fetch_count = 16; + vtx.dst_gpr = t1; + vtx.dst_sel_x = 0; + vtx.dst_sel_y = 1; + vtx.dst_sel_z = 2; + vtx.dst_sel_w = 7; + vtx.data_format = FMT_32_32_32_32; + vtx.num_format_all = 1; + vtx.format_comp_all = 0; + vtx.use_const_fields = 0; + vtx.offset = load_block ? 
0 : 16; // first element is size of buffer vtx.endian = r600_endian_swap(32); vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */ @@ -1283,6 +1389,10 @@ static int load_sample_position(struct r600_shader_ctx *ctx, struct r600_shader_ if (r) return r; + if (load_block) + ctx->cs_block_size_loaded = true; + else + ctx->cs_grid_size_loaded = true; return t1; } @@ -1343,6 +1453,10 @@ static void tgsi_src(struct r600_shader_ctx *ctx, r600_src->swizzle[2] = 0; r600_src->swizzle[3] = 0; r600_src->sel = 0; + } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_THREAD_ID) { + r600_src->sel = 0; + } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_BLOCK_ID) { + r600_src->sel = 1; } else if (ctx->type != PIPE_SHADER_TESS_CTRL && ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_INVOCATIONID) { r600_src->swizzle[0] = 3; r600_src->swizzle[1] = 3; @@ -1387,6 +1501,16 @@ static void tgsi_src(struct r600_shader_ctx *ctx, r600_src->swizzle[1] = 3; r600_src->swizzle[2] = 3; r600_src->swizzle[3] = 3; + } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_GRID_SIZE) { + r600_src->sel = load_block_grid_size(ctx, false); + } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_BLOCK_SIZE) { + r600_src->sel = load_block_grid_size(ctx, true); + } else if (ctx->info.system_value_semantic_name[tgsi_src->Register.Index] == TGSI_SEMANTIC_HELPER_INVOCATION) { + r600_src->sel = ctx->helper_invoc_reg; + r600_src->swizzle[0] = 0; + r600_src->swizzle[1] = 0; + r600_src->swizzle[2] = 0; + r600_src->swizzle[3] = 0; } } else { if (tgsi_src->Register.Indirect) @@ -1466,14 +1590,14 @@ static int fetch_gs_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_regi int r; unsigned index = src->Register.Index; unsigned vtx_id = src->Dimension.Index; - int offset_reg = vtx_id / 3; + int offset_reg = ctx->gs_rotated_input[vtx_id / 3]; int offset_chan = vtx_id % 3; int t2 = 0; /* offsets of per-vertex data in ESGS ring are passed to GS in R0.x, R0.y, * R0.w, R1.x, R1.y, R1.z (it seems R0.z is used for PrimitiveID) */ - if (offset_reg == 0 && offset_chan == 2) + if (offset_reg == ctx->gs_rotated_input[0] && offset_chan == 2) offset_chan = 3; if (src->Dimension.Indirect || src->Register.Indirect) @@ -1504,7 +1628,7 @@ static int fetch_gs_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_regi for (i = 0; i < 3; i++) { memset(&alu, 0, sizeof(struct r600_bytecode_alu)); alu.op = ALU_OP1_MOV; - alu.src[0].sel = 0; + alu.src[0].sel = ctx->gs_rotated_input[0]; alu.src[0].chan = i == 2 ? 
3 : i; alu.dst.sel = treg[i]; alu.dst.chan = 0; @@ -1728,14 +1852,19 @@ static int r600_get_byte_address(struct r600_shader_ctx *ctx, int temp_reg, } static int do_lds_fetch_values(struct r600_shader_ctx *ctx, unsigned temp_reg, - unsigned dst_reg) + unsigned dst_reg, unsigned mask) { struct r600_bytecode_alu alu; - int r, i; + int r, i, lasti; if ((ctx->bc->cf_last->ndw>>1) >= 0x60) ctx->bc->force_add_cf = 1; - for (i = 1; i < 4; i++) { + + lasti = tgsi_last_instruction(mask); + for (i = 1; i <= lasti; i++) { + if (!(mask & (1 << i))) + continue; + r = single_alu_op2(ctx, ALU_OP2_ADD_INT, temp_reg, i, temp_reg, 0, @@ -1743,7 +1872,10 @@ static int do_lds_fetch_values(struct r600_shader_ctx *ctx, unsigned temp_reg, if (r) return r; } - for (i = 0; i < 4; i++) { + for (i = 0; i <= lasti; i++) { + if (!(mask & (1 << i))) + continue; + /* emit an LDS_READ_RET */ memset(&alu, 0, sizeof(alu)); alu.op = LDS_OP1_LDS_READ_RET; @@ -1758,7 +1890,10 @@ static int do_lds_fetch_values(struct r600_shader_ctx *ctx, unsigned temp_reg, if (r) return r; } - for (i = 0; i < 4; i++) { + for (i = 0; i <= lasti; i++) { + if (!(mask & (1 << i))) + continue; + /* then read from LDS_OQ_A_POP */ memset(&alu, 0, sizeof(alu)); @@ -1776,6 +1911,16 @@ static int do_lds_fetch_values(struct r600_shader_ctx *ctx, unsigned temp_reg, return 0; } +static int fetch_mask(struct tgsi_src_register *reg) +{ + int mask = 0; + mask |= 1 << reg->SwizzleX; + mask |= 1 << reg->SwizzleY; + mask |= 1 << reg->SwizzleZ; + mask |= 1 << reg->SwizzleW; + return mask; +} + static int fetch_tes_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_register *src, unsigned int dst_reg) { int r; @@ -1792,7 +1937,7 @@ static int fetch_tes_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_reg if (r) return r; - r = do_lds_fetch_values(ctx, temp_reg, dst_reg); + r = do_lds_fetch_values(ctx, temp_reg, dst_reg, fetch_mask(&src->Register)); if (r) return r; return 0; @@ -1818,7 +1963,7 @@ static int fetch_tcs_input(struct r600_shader_ctx *ctx, struct tgsi_full_src_reg if (r) return r; - r = do_lds_fetch_values(ctx, temp_reg, dst_reg); + r = do_lds_fetch_values(ctx, temp_reg, dst_reg, fetch_mask(&src->Register)); if (r) return r; return 0; @@ -1840,7 +1985,7 @@ static int fetch_tcs_output(struct r600_shader_ctx *ctx, struct tgsi_full_src_re if (r) return r; - r = do_lds_fetch_values(ctx, temp_reg, dst_reg); + r = do_lds_fetch_values(ctx, temp_reg, dst_reg, fetch_mask(&src->Register)); if (r) return r; return 0; @@ -1983,11 +2128,12 @@ static int process_twoside_color_inputs(struct r600_shader_ctx *ctx) } static int emit_streamout(struct r600_shader_ctx *ctx, struct pipe_stream_output_info *so, - int stream, unsigned *stream_item_size) + int stream, unsigned *stream_item_size UNUSED) { unsigned so_gpr[PIPE_MAX_SHADER_OUTPUTS]; unsigned start_comp[PIPE_MAX_SHADER_OUTPUTS]; - int i, j, r; + int j, r; + unsigned i; /* Sanity checking. 
*/ if (so->num_outputs > PIPE_MAX_SO_OUTPUTS) { @@ -2043,7 +2189,7 @@ static int emit_streamout(struct r600_shader_ctx *ctx, struct pipe_stream_output for (i = 0; i < so->num_outputs; i++) { struct r600_bytecode_output output; - if (stream != -1 && stream != so->output[i].output_buffer) + if (stream != -1 && stream != so->output[i].stream) continue; memset(&output, 0, sizeof(struct r600_bytecode_output)); @@ -2139,13 +2285,14 @@ static int generate_gs_copy_shader(struct r600_context *rctx, struct r600_shader_ctx ctx = {}; struct r600_shader *gs_shader = &gs->shader; struct r600_pipe_shader *cshader; - int ocnt = gs_shader->noutput; + unsigned ocnt = gs_shader->noutput; struct r600_bytecode_alu alu; struct r600_bytecode_vtx vtx; struct r600_bytecode_output output; struct r600_bytecode_cf *cf_jump, *cf_pop, *last_exp_pos = NULL, *last_exp_param = NULL; - int i, j, next_clip_pos = 61, next_param = 0; + int next_clip_pos = 61, next_param = 0; + unsigned i, j; int ring; bool only_ring_0 = true; cshader = calloc(1, sizeof(struct r600_pipe_shader)); @@ -2355,6 +2502,8 @@ static int generate_gs_copy_shader(struct r600_context *rctx, /* spi_sid is 0 for clipdistance outputs that were generated * for clipvertex - we don't need to pass them to PS */ ctx.shader->clip_dist_write = gs->shader.clip_dist_write; + ctx.shader->cull_dist_write = gs->shader.cull_dist_write; + ctx.shader->cc_dist_mask = gs->shader.cc_dist_mask; if (out->spi_sid) { /* duplicate it as PARAM to pass to the pixel shader */ output.array_base = next_param++; @@ -2461,10 +2610,11 @@ static int emit_inc_ring_offset(struct r600_shader_ctx *ctx, int idx, bool ind) return 0; } -static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so, int stream, bool ind) +static int emit_gs_ring_writes(struct r600_shader_ctx *ctx, const struct pipe_stream_output_info *so UNUSED, int stream, bool ind) { struct r600_bytecode_output output; - int i, k, ring_offset; + int ring_offset; + unsigned i, k; int effective_stream = stream == -1 ? 
0 : stream;
 	int idx = 0;
@@ -2605,8 +2755,9 @@ static int r600_fetch_tess_io_info(struct r600_shader_ctx *ctx)
 
 static int emit_lds_vs_writes(struct r600_shader_ctx *ctx)
 {
-	int i, j, r;
+	int j, r;
 	int temp_reg;
+	unsigned i;
 
 	/* fetch tcs input values into input_vals */
 	ctx->tess_input_info = r600_get_temp(ctx);
@@ -2753,7 +2904,7 @@ static int r600_store_tcs_output(struct r600_shader_ctx *ctx)
 }
 
 static int r600_tess_factor_read(struct r600_shader_ctx *ctx,
-				 int output_idx)
+				 int output_idx, int nc)
 {
 	int param;
 	unsigned temp_reg = r600_get_temp(ctx);
@@ -2766,23 +2917,25 @@ static int r600_tess_factor_read(struct r600_shader_ctx *ctx,
 	if (r)
 		return r;
 
-	r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
-			   temp_reg, 0,
-			   temp_reg, 0,
-			   V_SQ_ALU_SRC_LITERAL, param * 16);
-	if (r)
-		return r;
+	if (param) {
+		r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
+				   temp_reg, 0,
+				   temp_reg, 0,
+				   V_SQ_ALU_SRC_LITERAL, param * 16);
+		if (r)
+			return r;
+	}
 
-	do_lds_fetch_values(ctx, temp_reg, dreg);
+	do_lds_fetch_values(ctx, temp_reg, dreg, ((1u << nc) - 1));
 	return 0;
 }
 
 static int r600_emit_tess_factor(struct r600_shader_ctx *ctx)
 {
-	unsigned i;
 	int stride, outer_comps, inner_comps;
 	int tessinner_idx = -1, tessouter_idx = -1;
-	int r;
+	int i, r;
+	unsigned j;
 	int temp_reg = r600_get_temp(ctx);
 	int treg[3] = {-1, -1, -1};
 	struct r600_bytecode_alu alu;
@@ -2829,11 +2982,11 @@
 
 	/* R0 is InvocationID, RelPatchID, PatchID, tf_base */
 	/* TF_WRITE takes index in R.x, value in R.y */
-	for (i = 0; i < ctx->shader->noutput; i++) {
-		if (ctx->shader->output[i].name == TGSI_SEMANTIC_TESSINNER)
-			tessinner_idx = i;
-		if (ctx->shader->output[i].name == TGSI_SEMANTIC_TESSOUTER)
-			tessouter_idx = i;
+	for (j = 0; j < ctx->shader->noutput; j++) {
+		if (ctx->shader->output[j].name == TGSI_SEMANTIC_TESSINNER)
+			tessinner_idx = j;
+		if (ctx->shader->output[j].name == TGSI_SEMANTIC_TESSOUTER)
+			tessouter_idx = j;
 	}
 
 	if (tessouter_idx == -1)
@@ -2843,13 +2996,13 @@
 		return -1;
 
 	if (tessouter_idx != -1) {
-		r = r600_tess_factor_read(ctx, tessouter_idx);
+		r = r600_tess_factor_read(ctx, tessouter_idx, outer_comps);
 		if (r)
 			return r;
 	}
 
 	if (tessinner_idx != -1) {
-		r = r600_tess_factor_read(ctx, tessinner_idx);
+		r = r600_tess_factor_read(ctx, tessinner_idx, inner_comps);
 		if (r)
 			return r;
 	}
@@ -2871,6 +3024,13 @@
 		int out_idx = i >= outer_comps ? tessinner_idx : tessouter_idx;
 		int out_comp = i >= outer_comps ? i - outer_comps : i;
 
+		if (ctx->shader->tcs_prim_mode == PIPE_PRIM_LINES) {
+			if (out_comp == 1)
+				out_comp = 0;
+			else if (out_comp == 0)
+				out_comp = 1;
+		}
+
 		r = single_alu_op2(ctx, ALU_OP2_ADD_INT,
 				   treg[i / 2], (2 * (i % 2)),
 				   temp_reg, 0,
@@ -2914,6 +3074,73 @@
 	return 0;
 }
 
+/*
+ * We have to work out the thread ID for load and atomic
+ * operations, which store the returned value to an index
+ * in an intermediate buffer.
+ * The index is calculated by taking the thread id,
+ * calculated from the MBCNT instructions.
+ * Then the shader engine ID is multiplied by 256,
+ * and the wave id is added.
+ * Then the result is multiplied by 64 and thread id is
+ * added. 
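+ *
+ * A hedged sketch of that arithmetic (the names below are
+ * illustrative, not taken from the patch itself):
+ *
+ *   lane = mbcnt(~0);                     (MBCNT_32LO/HI pair)
+ *   tid  = (se_id * 256 + wave_id) * 64 + lane;
+ *
+ * se_id and wave_id correspond to the EG_V_SQ_ALU_SRC_SE_ID and
+ * EG_V_SQ_ALU_SRC_HW_WAVE_ID inline constants used below.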
+ */ +static int load_thread_id_gpr(struct r600_shader_ctx *ctx) +{ + struct r600_bytecode_alu alu; + int r; + + if (ctx->thread_id_gpr_loaded) + return 0; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MBCNT_32LO_ACCUM_PREV_INT; + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 0; + alu.src[0].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[0].value = 0xffffffff; + alu.dst.write = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MBCNT_32HI_INT; + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 1; + alu.src[0].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[0].value = 0xffffffff; + alu.dst.write = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP3_MULADD_UINT24; + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 2; + alu.src[0].sel = EG_V_SQ_ALU_SRC_SE_ID; + alu.src[1].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[1].value = 256; + alu.src[2].sel = EG_V_SQ_ALU_SRC_HW_WAVE_ID; + alu.dst.write = 1; + alu.is_op3 = 1; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + r = single_alu_op3(ctx, ALU_OP3_MULADD_UINT24, + ctx->thread_id_gpr, 1, + ctx->temp_reg, 2, + V_SQ_ALU_SRC_LITERAL, 0x40, + ctx->temp_reg, 0); + if (r) + return r; + ctx->thread_id_gpr_loaded = true; + return 0; +} + static int r600_shader_from_tgsi(struct r600_context *rctx, struct r600_pipe_shader *pipeshader, union r600_shader_key key) @@ -2924,10 +3151,11 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, struct pipe_stream_output_info so = pipeshader->selector->so; struct tgsi_full_immediate *immediate; struct r600_shader_ctx ctx; - struct r600_bytecode_output output[32]; + struct r600_bytecode_output output[ARRAY_SIZE(shader->output)]; unsigned output_done, noutput; unsigned opcode; - int i, j, k, r = 0; + int j, k, r = 0; + unsigned i; int next_param_base = 0, next_clip_base; int max_color_exports = MAX2(key.ps.nr_cbufs, 1); bool indirect_gprs; @@ -2938,7 +3166,6 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, ctx.bc = &shader->bc; ctx.shader = shader; - ctx.native_integers = true; r600_bytecode_init(ctx.bc, rscreen->b.chip_class, rscreen->b.family, rscreen->has_compressed_msaa_texturing); @@ -2946,8 +3173,13 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, tgsi_scan_shader(tokens, &ctx.info); shader->indirect_files = ctx.info.indirect_files; + shader->uses_helper_invocation = false; shader->uses_doubles = ctx.info.uses_doubles; + shader->uses_atomics = ctx.info.file_mask[TGSI_FILE_HW_ATOMIC]; + shader->nsys_inputs = 0; + shader->uses_images = ctx.info.file_count[TGSI_FILE_IMAGE] > 0 || + ctx.info.file_count[TGSI_FILE_BUFFER] > 0; indirect_gprs = ctx.info.indirect_files & ~((1 << TGSI_FILE_CONSTANT) | (1 << TGSI_FILE_SAMPLER)); tgsi_parse_init(&ctx.parse, tokens); ctx.type = ctx.info.processor; @@ -2959,6 +3191,7 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, shader->vs_as_gs_a = key.vs.as_gs_a; shader->vs_as_es = key.vs.as_es; shader->vs_as_ls = key.vs.as_ls; + shader->atomic_base = key.vs.first_atomic_counter; if (shader->vs_as_es) ring_outputs = true; if (shader->vs_as_ls) @@ -2966,20 +3199,31 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, break; case PIPE_SHADER_GEOMETRY: ring_outputs = true; + shader->atomic_base = key.gs.first_atomic_counter; + shader->gs_tri_strip_adj_fix = key.gs.tri_strip_adj_fix; break; case PIPE_SHADER_TESS_CTRL: 
shader->tcs_prim_mode = key.tcs.prim_mode; + shader->atomic_base = key.tcs.first_atomic_counter; lds_outputs = true; lds_inputs = true; break; case PIPE_SHADER_TESS_EVAL: shader->tes_as_es = key.tes.as_es; + shader->atomic_base = key.tes.first_atomic_counter; lds_inputs = true; if (shader->tes_as_es) ring_outputs = true; break; case PIPE_SHADER_FRAGMENT: shader->two_side = key.ps.color_two_side; + shader->atomic_base = key.ps.first_atomic_counter; + shader->rat_base = key.ps.nr_cbufs; + shader->image_size_const_offset = key.ps.image_size_const_offset; + break; + case PIPE_SHADER_COMPUTE: + shader->rat_base = 0; + shader->image_size_const_offset = ctx.info.file_count[TGSI_FILE_SAMPLER]; break; default: break; @@ -3001,6 +3245,13 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, ctx.fragcoord_input = -1; ctx.colors_used = 0; ctx.clip_vertex_write = 0; + ctx.thread_id_gpr_loaded = false; + + ctx.helper_invoc_reg = -1; + ctx.cs_block_size_reg = -1; + ctx.cs_grid_size_reg = -1; + ctx.cs_block_size_loaded = false; + ctx.cs_grid_size_loaded = false; shader->nr_ps_color_exports = 0; shader->nr_ps_max_color_exports = 0; @@ -3031,15 +3282,24 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, ctx.file_offset[i] = 0; } - if (ctx.type == PIPE_SHADER_VERTEX) { + if (ctx.type == PIPE_SHADER_VERTEX) { + ctx.file_offset[TGSI_FILE_INPUT] = 1; - r600_bytecode_add_cfinst(ctx.bc, CF_OP_CALL_FS); + if (ctx.info.num_inputs) + r600_bytecode_add_cfinst(ctx.bc, CF_OP_CALL_FS); } if (ctx.type == PIPE_SHADER_FRAGMENT) { if (ctx.bc->chip_class >= EVERGREEN) ctx.file_offset[TGSI_FILE_INPUT] = evergreen_gpr_count(&ctx); else ctx.file_offset[TGSI_FILE_INPUT] = allocate_system_value_inputs(&ctx, ctx.file_offset[TGSI_FILE_INPUT]); + + for (i = 0; i < PIPE_MAX_SHADER_INPUTS; i++) { + if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_HELPER_INVOCATION) { + ctx.helper_invoc_reg = ctx.file_offset[TGSI_FILE_INPUT]++; + shader->uses_helper_invocation = true; + } + } } if (ctx.type == PIPE_SHADER_GEOMETRY) { /* FIXME 1 would be enough in some cases (3 or less input vertices) */ @@ -3063,6 +3323,15 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, if (add_tess_inout) ctx.file_offset[TGSI_FILE_INPUT]+=2; } + if (ctx.type == PIPE_SHADER_COMPUTE) { + ctx.file_offset[TGSI_FILE_INPUT] = 2; + for (i = 0; i < PIPE_MAX_SHADER_INPUTS; i++) { + if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_GRID_SIZE) + ctx.cs_grid_size_reg = ctx.file_offset[TGSI_FILE_INPUT]++; + if (ctx.info.system_value_semantic_name[i] == TGSI_SEMANTIC_BLOCK_SIZE) + ctx.cs_block_size_reg = ctx.file_offset[TGSI_FILE_INPUT]++; + } + } ctx.file_offset[TGSI_FILE_OUTPUT] = ctx.file_offset[TGSI_FILE_INPUT] + @@ -3075,28 +3344,38 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, ctx.file_offset[TGSI_FILE_CONSTANT] = 512; ctx.file_offset[TGSI_FILE_IMMEDIATE] = V_SQ_ALU_SRC_LITERAL; - ctx.bc->ar_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] + - ctx.info.file_max[TGSI_FILE_TEMPORARY] + 1; - ctx.bc->index_reg[0] = ctx.bc->ar_reg + 1; - ctx.bc->index_reg[1] = ctx.bc->ar_reg + 2; + + int regno = ctx.file_offset[TGSI_FILE_TEMPORARY] + + ctx.info.file_max[TGSI_FILE_TEMPORARY]; + ctx.bc->ar_reg = ++regno; + ctx.bc->index_reg[0] = ++regno; + ctx.bc->index_reg[1] = ++regno; if (ctx.type == PIPE_SHADER_TESS_CTRL) { - ctx.tess_input_info = ctx.bc->ar_reg + 3; - ctx.tess_output_info = ctx.bc->ar_reg + 4; - ctx.temp_reg = ctx.bc->ar_reg + 5; + ctx.tess_input_info = ++regno; + ctx.tess_output_info = ++regno; } else if 
(ctx.type == PIPE_SHADER_TESS_EVAL) { ctx.tess_input_info = 0; - ctx.tess_output_info = ctx.bc->ar_reg + 3; - ctx.temp_reg = ctx.bc->ar_reg + 4; + ctx.tess_output_info = ++regno; } else if (ctx.type == PIPE_SHADER_GEOMETRY) { - ctx.gs_export_gpr_tregs[0] = ctx.bc->ar_reg + 3; - ctx.gs_export_gpr_tregs[1] = ctx.bc->ar_reg + 4; - ctx.gs_export_gpr_tregs[2] = ctx.bc->ar_reg + 5; - ctx.gs_export_gpr_tregs[3] = ctx.bc->ar_reg + 6; - ctx.temp_reg = ctx.bc->ar_reg + 7; - } else { - ctx.temp_reg = ctx.bc->ar_reg + 3; + ctx.gs_export_gpr_tregs[0] = ++regno; + ctx.gs_export_gpr_tregs[1] = ++regno; + ctx.gs_export_gpr_tregs[2] = ++regno; + ctx.gs_export_gpr_tregs[3] = ++regno; + if (ctx.shader->gs_tri_strip_adj_fix) { + ctx.gs_rotated_input[0] = ++regno; + ctx.gs_rotated_input[1] = ++regno; + } else { + ctx.gs_rotated_input[0] = 0; + ctx.gs_rotated_input[1] = 1; + } + } + + if (shader->uses_images) { + ctx.thread_id_gpr = ++regno; + ctx.thread_id_gpr_loaded = false; } + ctx.temp_reg = ++regno; shader->max_arrays = 0; shader->num_arrays = 0; @@ -3118,12 +3397,22 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, ctx.nliterals = 0; ctx.literals = NULL; + ctx.max_driver_temp_used = 0; shader->fs_write_all = ctx.info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS] && ctx.info.colors_written == 1; shader->vs_position_window_space = ctx.info.properties[TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION]; shader->ps_conservative_z = (uint8_t)ctx.info.properties[TGSI_PROPERTY_FS_DEPTH_LAYOUT]; + if (ctx.type == PIPE_SHADER_VERTEX || + ctx.type == PIPE_SHADER_GEOMETRY || + ctx.type == PIPE_SHADER_TESS_EVAL) { + shader->cc_dist_mask = (1 << (ctx.info.properties[TGSI_PROPERTY_NUM_CULLDIST_ENABLED] + + ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED])) - 1; + shader->clip_dist_write = (1 << ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED]) - 1; + shader->cull_dist_write = ((1 << ctx.info.properties[TGSI_PROPERTY_NUM_CULLDIST_ENABLED]) - 1) << ctx.info.properties[TGSI_PROPERTY_NUM_CLIPDIST_ENABLED]; + } + if (shader->vs_as_gs_a) vs_add_primid_output(&ctx, key.vs.prim_id_out); @@ -3160,7 +3449,7 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, goto out_err; } } - + shader->ring_item_sizes[0] = ctx.next_ring_offset; shader->ring_item_sizes[1] = 0; shader->ring_item_sizes[2] = 0; @@ -3209,9 +3498,67 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, if (shader->fs_write_all && rscreen->b.chip_class >= EVERGREEN) shader->nr_ps_max_color_exports = 8; - if (ctx.fragcoord_input >= 0) { - if (ctx.bc->chip_class == CAYMAN) { - for (j = 0 ; j < 4; j++) { + if (ctx.shader->uses_helper_invocation) { + if (ctx.bc->chip_class == CAYMAN) + r = cm_load_helper_invocation(&ctx); + else + r = eg_load_helper_invocation(&ctx); + if (r) + return r; + } + + /* + * XXX this relies on fixed_pt_position_gpr only being present when + * this shader should be executed per sample. Should be the case for now... + */ + if (ctx.fixed_pt_position_gpr != -1 && ctx.info.reads_samplemask) { + /* + * Fix up sample mask. The hw always gives us coverage mask for + * the pixel. However, for per-sample shading, we need the + * coverage for the shader invocation only. + * Also, with disabled msaa, only the first bit should be set + * (luckily the same fixup works for both problems). + * For now, we can only do it if we know this shader is always + * executed per sample (due to usage of bits in the shader + * forcing per-sample execution). 
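+	 * (A hedged example of the fixup below: with 4x msaa and this
+	 * invocation shading sample 2, LSHL computes 1 << 2 = 0x4 and
+	 * AND reduces a full coverage mask of 0xf to just that bit.)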
+ * If the fb is not multisampled, we'd do unnecessary work but + * it should still be correct. + * It will however do nothing for sample shading according + * to MinSampleShading. + */ + struct r600_bytecode_alu alu; + int tmp = r600_get_temp(&ctx); + assert(ctx.face_gpr != -1); + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + + alu.op = ALU_OP2_LSHL_INT; + alu.src[0].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[0].value = 0x1; + alu.src[1].sel = ctx.fixed_pt_position_gpr; + alu.src[1].chan = 3; + alu.dst.sel = tmp; + alu.dst.chan = 0; + alu.dst.write = 1; + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx.bc, &alu))) + return r; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_AND_INT; + alu.src[0].sel = tmp; + alu.src[1].sel = ctx.face_gpr; + alu.src[1].chan = 2; + alu.dst.sel = ctx.face_gpr; + alu.dst.chan = 2; + alu.dst.write = 1; + alu.last = 1; + if ((r = r600_bytecode_add_alu(ctx.bc, &alu))) + return r; + } + + if (ctx.fragcoord_input >= 0) { + if (ctx.bc->chip_class == CAYMAN) { + for (j = 0 ; j < 4; j++) { struct r600_bytecode_alu alu; memset(&alu, 0, sizeof(struct r600_bytecode_alu)); alu.op = ALU_OP1_RECIP_IEEE; @@ -3261,6 +3608,36 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, if (r) return r; } + + if (ctx.shader->gs_tri_strip_adj_fix) { + r = single_alu_op2(&ctx, ALU_OP2_AND_INT, + ctx.gs_rotated_input[0], 2, + 0, 2, + V_SQ_ALU_SRC_LITERAL, 1); + if (r) + return r; + + for (i = 0; i < 6; i++) { + int rotated = (i + 4) % 6; + int offset_reg = i / 3; + int offset_chan = i % 3; + int rotated_offset_reg = rotated / 3; + int rotated_offset_chan = rotated % 3; + + if (offset_reg == 0 && offset_chan == 2) + offset_chan = 3; + if (rotated_offset_reg == 0 && rotated_offset_chan == 2) + rotated_offset_chan = 3; + + r = single_alu_op3(&ctx, ALU_OP3_CNDE_INT, + ctx.gs_rotated_input[offset_reg], offset_chan, + ctx.gs_rotated_input[0], 2, + offset_reg, offset_chan, + rotated_offset_reg, rotated_offset_chan); + if (r) + return r; + } + } } if (ctx.type == PIPE_SHADER_TESS_CTRL) @@ -3342,6 +3719,7 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, shader->output[ctx.cv_output].spi_sid = 0; shader->clip_dist_write = 0xFF; + shader->cc_dist_mask = 0xFF; for (i = 0; i < 8; i++) { int oreg = i >> 2; @@ -3413,7 +3791,7 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, output[j].swizzle_z = 2; output[j].swizzle_w = 3; output[j].burst_count = 1; - output[j].type = -1; + output[j].type = 0xffffffff; output[j].op = CF_OP_EXPORT; switch (ctx.type) { case PIPE_SHADER_VERTEX: @@ -3519,6 +3897,17 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, output[j].array_base = shader->output[i].sid; output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL; shader->nr_ps_color_exports++; + shader->ps_color_export_mask |= (0xf << (shader->output[i].sid * 4)); + + /* If the i-th target format is set, all previous target formats must + * be non-zero to avoid hangs. - from radeonsi, seems to apply to eg as well. 
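+			 * (A hedged illustration: if only color1 is written,
+			 * the code below sets 0xf << 4 for target 1 and then
+			 * bit (1 << 0), so target 0 still counts as written.)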
+ */ + if (shader->output[i].sid > 0) + for (unsigned x = 0; x < shader->output[i].sid; x++) + shader->ps_color_export_mask |= (1 << (x*4)); + + if (shader->output[i].sid > shader->ps_export_highest) + shader->ps_export_highest = shader->output[i].sid; if (shader->fs_write_all && (rscreen->b.chip_class >= EVERGREEN)) { for (k = 1; k < max_color_exports; k++) { j++; @@ -3534,6 +3923,7 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, output[j].op = CF_OP_EXPORT; output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL; shader->nr_ps_color_exports++; + shader->ps_color_export_mask |= (0xf << (j * 4)); } } } else if (shader->output[i].name == TGSI_SEMANTIC_POSITION) { @@ -3569,7 +3959,7 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, goto out_err; } - if (output[j].type==-1) { + if (output[j].type == 0xffffffff) { output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM; output[j].array_base = next_param_base++; } @@ -3622,15 +4012,16 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, output[j].op = CF_OP_EXPORT; j++; shader->nr_ps_color_exports++; + shader->ps_color_export_mask = 0xf; } noutput = j; /* set export done on last export of each type */ - for (i = noutput - 1, output_done = 0; i >= 0; i--) { - if (!(output_done & (1 << output[i].type))) { - output_done |= (1 << output[i].type); - output[i].op = CF_OP_EXPORT_DONE; + for (k = noutput - 1, output_done = 0; k >= 0; k--) { + if (!(output_done & (1 << output[k].type))) { + output_done |= (1 << output[k].type); + output[k].op = CF_OP_EXPORT_DONE; } } /* add output to bytecode */ @@ -3651,7 +4042,7 @@ static int r600_shader_from_tgsi(struct r600_context *rctx, last = r600_isa_cf(ctx.bc->cf_last->op); /* alu clause instructions don't have EOP bit, so add NOP */ - if (!last || last->flags & CF_ALU || ctx.bc->cf_last->op == CF_OP_LOOP_END || ctx.bc->cf_last->op == CF_OP_CALL_FS || ctx.bc->cf_last->op == CF_OP_POP || ctx.bc->cf_last->op == CF_OP_GDS) + if (!last || last->flags & CF_ALU || ctx.bc->cf_last->op == CF_OP_LOOP_END || ctx.bc->cf_last->op == CF_OP_POP) r600_bytecode_add_cfinst(ctx.bc, CF_OP_NOP); ctx.bc->cf_last->end_of_program = 1; @@ -3688,7 +4079,7 @@ static int tgsi_unsupported(struct r600_shader_ctx *ctx) return -EINVAL; } -static int tgsi_end(struct r600_shader_ctx *ctx) +static int tgsi_end(struct r600_shader_ctx *ctx UNUSED) { return 0; } @@ -3742,29 +4133,48 @@ static void tgsi_dst(struct r600_shader_ctx *ctx, } -static int tgsi_op2_64_params(struct r600_shader_ctx *ctx, bool singledest, bool swap) +static int tgsi_op2_64_params(struct r600_shader_ctx *ctx, bool singledest, bool swap, int dest_temp, int op_override) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; unsigned write_mask = inst->Dst[0].Register.WriteMask; struct r600_bytecode_alu alu; int i, j, r, lasti = tgsi_last_instruction(write_mask); int use_tmp = 0; + int swizzle_x = inst->Src[0].Register.SwizzleX; if (singledest) { switch (write_mask) { case 0x1: - write_mask = 0x3; + if (swizzle_x == 2) { + write_mask = 0xc; + use_tmp = 3; + } else + write_mask = 0x3; break; case 0x2: - use_tmp = 1; - write_mask = 0x3; + if (swizzle_x == 2) { + write_mask = 0xc; + use_tmp = 3; + } else { + write_mask = 0x3; + use_tmp = 1; + } break; case 0x4: - write_mask = 0xc; + if (swizzle_x == 0) { + write_mask = 0x3; + use_tmp = 1; + } else + write_mask = 0xc; break; case 0x8: - write_mask = 0xc; - use_tmp = 3; + if (swizzle_x == 0) { + write_mask = 0x3; + use_tmp = 1; + } else { + write_mask = 0xc; + 
use_tmp = 3; + } break; } } @@ -3778,18 +4188,19 @@ static int tgsi_op2_64_params(struct r600_shader_ctx *ctx, bool singledest, bool memset(&alu, 0, sizeof(struct r600_bytecode_alu)); if (singledest) { - tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - if (use_tmp) { - alu.dst.sel = ctx->temp_reg; + if (use_tmp || dest_temp) { + alu.dst.sel = use_tmp ? ctx->temp_reg : dest_temp; alu.dst.chan = i; alu.dst.write = 1; + } else { + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); } if (i == 1 || i == 3) alu.dst.write = 0; } else tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - alu.op = ctx->inst_info->op; + alu.op = op_override ? op_override : ctx->inst_info->op; if (ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DABS) { r600_bytecode_src(&alu.src[0], &ctx->src[0], i); } else if (!swap) { @@ -3822,6 +4233,7 @@ static int tgsi_op2_64_params(struct r600_shader_ctx *ctx, bool singledest, bool if (use_tmp) { write_mask = inst->Dst[0].Register.WriteMask; + lasti = tgsi_last_instruction(write_mask); /* move result from temp to dst */ for (i = 0; i <= lasti; i++) { if (!(write_mask & (1 << i))) @@ -3829,7 +4241,13 @@ static int tgsi_op2_64_params(struct r600_shader_ctx *ctx, bool singledest, bool memset(&alu, 0, sizeof(struct r600_bytecode_alu)); alu.op = ALU_OP1_MOV; - tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + + if (dest_temp) { + alu.dst.sel = dest_temp; + alu.dst.chan = i; + alu.dst.write = 1; + } else + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); alu.src[0].sel = ctx->temp_reg; alu.src[0].chan = use_tmp - 1; alu.last = (i == lasti); @@ -3852,17 +4270,17 @@ static int tgsi_op2_64(struct r600_shader_ctx *ctx) fprintf(stderr, "illegal writemask for 64-bit: 0x%x\n", write_mask); return -1; } - return tgsi_op2_64_params(ctx, false, false); + return tgsi_op2_64_params(ctx, false, false, 0, 0); } static int tgsi_op2_64_single_dest(struct r600_shader_ctx *ctx) { - return tgsi_op2_64_params(ctx, true, false); + return tgsi_op2_64_params(ctx, true, false, 0, 0); } static int tgsi_op2_64_single_dest_s(struct r600_shader_ctx *ctx) { - return tgsi_op2_64_params(ctx, true, true); + return tgsi_op2_64_params(ctx, true, true, 0, 0); } static int tgsi_op3_64(struct r600_shader_ctx *ctx) @@ -3906,6 +4324,11 @@ static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap, int trans_only) int i, j, r, lasti = tgsi_last_instruction(write_mask); /* use temp register if trans_only and more than one dst component */ int use_tmp = trans_only && (write_mask ^ (1 << lasti)); + unsigned op = ctx->inst_info->op; + + if (op == ALU_OP2_MUL_IEEE && + ctx->info.properties[TGSI_PROPERTY_MUL_ZERO_WINS]) + op = ALU_OP2_MUL; for (i = 0; i <= lasti; i++) { if (!(write_mask & (1 << i))) @@ -3919,7 +4342,7 @@ static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap, int trans_only) } else tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - alu.op = ctx->inst_info->op; + alu.op = op; if (!swap) { for (j = 0; j < inst->Instruction.NumSrcRegs; j++) { r600_bytecode_src(&alu.src[j], &ctx->src[j], i); @@ -4040,7 +4463,6 @@ static int tgsi_dfracexp(struct r600_shader_ctx *ctx) struct r600_bytecode_alu alu; unsigned write_mask = inst->Dst[0].Register.WriteMask; int i, j, r; - int firsti = write_mask == 0xc ? 2 : 0; for (i = 0; i <= 3; i++) { memset(&alu, 0, sizeof(struct r600_bytecode_alu)); @@ -4061,15 +4483,18 @@ static int tgsi_dfracexp(struct r600_shader_ctx *ctx) return r; } - /* MOV first two channels to writemask dst0 */ - for (i = 0; i <= 1; i++) { + /* Replicate significand result across channels. 
*/ + for (i = 0; i <= 3; i++) { + if (!(write_mask & (1 << i))) + continue; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); alu.op = ALU_OP1_MOV; - alu.src[0].chan = i + 2; + alu.src[0].chan = (i & 1) + 2; alu.src[0].sel = ctx->temp_reg; - tgsi_dst(ctx, &inst->Dst[0], firsti + i, &alu.dst); - alu.dst.write = (inst->Dst[0].Register.WriteMask >> (firsti + i)) & 1; + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + alu.dst.write = 1; alu.last = 1; r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) @@ -4100,44 +4525,109 @@ static int egcm_int_to_double(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; struct r600_bytecode_alu alu; - int i, r; - int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); + int i, c, r; + int write_mask = inst->Dst[0].Register.WriteMask; + int temp_reg = r600_get_temp(ctx); assert(inst->Instruction.Opcode == TGSI_OPCODE_I2D || inst->Instruction.Opcode == TGSI_OPCODE_U2D); - for (i = 0; i <= (lasti+1)/2; i++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ctx->inst_info->op; - - r600_bytecode_src(&alu.src[0], &ctx->src[0], i); - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = i; - alu.dst.write = 1; - alu.last = 1; + for (c = 0; c < 2; c++) { + int dchan = c * 2; + if (write_mask & (0x3 << dchan)) { + /* split into 24-bit int and 8-bit int */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_AND_INT; + alu.dst.sel = temp_reg; + alu.dst.chan = dchan; + r600_bytecode_src(&alu.src[0], &ctx->src[0], c); + alu.src[1].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[1].value = 0xffffff00; + alu.dst.write = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_AND_INT; + alu.dst.sel = temp_reg; + alu.dst.chan = dchan + 1; + r600_bytecode_src(&alu.src[0], &ctx->src[0], c); + alu.src[1].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[1].value = 0xff; + alu.dst.write = 1; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } } - for (i = 0; i <= lasti; i++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP1_FLT32_TO_FLT64; + for (c = 0; c < 2; c++) { + int dchan = c * 2; + if (write_mask & (0x3 << dchan)) { + for (i = dchan; i <= dchan + 1; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = i == dchan ? 
ctx->inst_info->op : ALU_OP1_UINT_TO_FLT; - alu.src[0].chan = i/2; - if (i%2 == 0) - alu.src[0].sel = ctx->temp_reg; - else { - alu.src[0].sel = V_SQ_ALU_SRC_LITERAL; - alu.src[0].value = 0x0; + alu.src[0].sel = temp_reg; + alu.src[0].chan = i; + alu.dst.sel = temp_reg; + alu.dst.chan = i; + alu.dst.write = 1; + if (ctx->bc->chip_class == CAYMAN) + alu.last = i == dchan + 1; + else + alu.last = 1; /* trans only ops on evergreen */ + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } } - tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - alu.last = i == lasti; + } - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; + for (c = 0; c < 2; c++) { + int dchan = c * 2; + if (write_mask & (0x3 << dchan)) { + for (i = 0; i < 4; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_FLT32_TO_FLT64; + + alu.src[0].chan = dchan + (i / 2); + if (i == 0 || i == 2) + alu.src[0].sel = temp_reg; + else { + alu.src[0].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[0].value = 0x0; + } + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = i; + alu.last = i == 3; + alu.dst.write = 1; + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + for (i = 0; i <= 1; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_ADD_64; + + alu.src[0].chan = fp64_switch(i); + alu.src[0].sel = ctx->temp_reg; + + alu.src[1].chan = fp64_switch(i + 2); + alu.src[1].sel = ctx->temp_reg; + tgsi_dst(ctx, &inst->Dst[0], dchan + i, &alu.dst); + alu.last = i == 1; + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + } } return 0; @@ -4149,35 +4639,62 @@ static int egcm_double_to_int(struct r600_shader_ctx *ctx) struct r600_bytecode_alu alu; int i, r; int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); - + int treg = r600_get_temp(ctx); assert(inst->Instruction.Opcode == TGSI_OPCODE_D2I || inst->Instruction.Opcode == TGSI_OPCODE_D2U); + /* do a 64->32 into a temp register */ + r = tgsi_op2_64_params(ctx, true, false, treg, ALU_OP1_FLT64_TO_FLT32); + if (r) + return r; + for (i = 0; i <= lasti; i++) { + if (!(inst->Dst[0].Register.WriteMask & (1 << i))) + continue; memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP1_FLT64_TO_FLT32; + alu.op = ctx->inst_info->op; - r600_bytecode_src(&alu.src[0], &ctx->src[0], fp64_switch(i)); - alu.dst.chan = i; - alu.dst.sel = ctx->temp_reg; - alu.dst.write = i%2 == 0; - alu.last = i == lasti; + alu.src[0].chan = i; + alu.src[0].sel = treg; + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + alu.last = (i == lasti); r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; } - for (i = 0; i <= (lasti+1)/2; i++) { + return 0; +} + +static int cayman_emit_unary_double_raw(struct r600_bytecode *bc, + unsigned op, + int dst_reg, + struct r600_shader_src *src, + bool abs) +{ + struct r600_bytecode_alu alu; + const int last_slot = 3; + int r; + + /* these have to write the result to X/Y by the looks of it */ + for (int i = 0 ; i < last_slot; i++) { memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ctx->inst_info->op; + alu.op = op; - alu.src[0].chan = i*2; - alu.src[0].sel = ctx->temp_reg; - tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst); - alu.last = 1; + r600_bytecode_src(&alu.src[0], src, 1); + r600_bytecode_src(&alu.src[1], src, 0); - r = r600_bytecode_add_alu(ctx->bc, &alu); + if (abs) + r600_bytecode_src_set_abs(&alu.src[1]); + + alu.dst.sel = dst_reg; + alu.dst.chan = i; + alu.dst.write = (i == 0 || i == 1); + + if (bc->chip_class != CAYMAN || i == last_slot - 1) + 
alu.last = 1; + r = r600_bytecode_add_alu(bc, &alu); if (r) return r; } @@ -4190,36 +4707,23 @@ static int cayman_emit_double_instr(struct r600_shader_ctx *ctx) struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; int i, r; struct r600_bytecode_alu alu; - int last_slot = 3; int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); int t1 = ctx->temp_reg; - /* these have to write the result to X/Y by the looks of it */ - for (i = 0 ; i < last_slot; i++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ctx->inst_info->op; + /* should only be one src regs */ + assert(inst->Instruction.NumSrcRegs == 1); - /* should only be one src regs */ - assert (inst->Instruction.NumSrcRegs == 1); - - r600_bytecode_src(&alu.src[0], &ctx->src[0], 1); - r600_bytecode_src(&alu.src[1], &ctx->src[0], 0); - - /* RSQ should take the absolute value of src */ - if (ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DRSQ || - ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DSQRT) { - r600_bytecode_src_set_abs(&alu.src[1]); - } - alu.dst.sel = t1; - alu.dst.chan = i; - alu.dst.write = (i == 0 || i == 1); + /* only support one double at a time */ + assert(inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY || + inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_ZW); - if (ctx->bc->chip_class != CAYMAN || i == last_slot - 1) - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } + r = cayman_emit_unary_double_raw( + ctx->bc, ctx->inst_info->op, t1, + &ctx->src[0], + ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DRSQ || + ctx->parse.FullToken.FullInstruction.Instruction.Opcode == TGSI_OPCODE_DSQRT); + if (r) + return r; for (i = 0 ; i <= lasti; i++) { if (!(inst->Dst[0].Register.WriteMask & (1 << i))) @@ -4245,7 +4749,7 @@ static int cayman_emit_float_instr(struct r600_shader_ctx *ctx) int i, j, r; struct r600_bytecode_alu alu; int last_slot = (inst->Dst[0].Register.WriteMask & 0x8) ? 4 : 3; - + for (i = 0 ; i < last_slot; i++) { memset(&alu, 0, sizeof(struct r600_bytecode_alu)); alu.op = ctx->inst_info->op; @@ -4326,25 +4830,27 @@ static int cayman_mul_double_instr(struct r600_shader_ctx *ctx) int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); int t1 = ctx->temp_reg; - for (k = 0; k < 2; k++) { - if (!(inst->Dst[0].Register.WriteMask & (0x3 << (k * 2)))) - continue; + /* t1 would get overwritten below if we actually tried to + * multiply two pairs of doubles at a time. */ + assert(inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY || + inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_ZW); - for (i = 0; i < 4; i++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ctx->inst_info->op; - for (j = 0; j < inst->Instruction.NumSrcRegs; j++) { - r600_bytecode_src(&alu.src[j], &ctx->src[j], k * 2 + ((i == 3) ? 0 : 1)); - } - alu.dst.sel = t1; - alu.dst.chan = i; - alu.dst.write = 1; - if (i == 3) - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; + k = inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ? 0 : 1; + + for (i = 0; i < 4; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ctx->inst_info->op; + for (j = 0; j < inst->Instruction.NumSrcRegs; j++) { + r600_bytecode_src(&alu.src[j], &ctx->src[j], k * 2 + ((i == 3) ? 
0 : 1)); } + alu.dst.sel = t1; + alu.dst.chan = i; + alu.dst.write = 1; + if (i == 3) + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; } for (i = 0; i <= lasti; i++) { @@ -4366,6 +4872,63 @@ static int cayman_mul_double_instr(struct r600_shader_ctx *ctx) return 0; } +/* + * Emit RECIP_64 + MUL_64 to implement division. + */ +static int cayman_ddiv_instr(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + int r; + struct r600_bytecode_alu alu; + int t1 = ctx->temp_reg; + int k; + + /* Only support one double at a time. This is the same constraint as + * in DMUL lowering. */ + assert(inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY || + inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_ZW); + + k = inst->Dst[0].Register.WriteMask == TGSI_WRITEMASK_XY ? 0 : 1; + + r = cayman_emit_unary_double_raw(ctx->bc, ALU_OP2_RECIP_64, t1, &ctx->src[1], false); + if (r) + return r; + + for (int i = 0; i < 4; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_MUL_64; + + r600_bytecode_src(&alu.src[0], &ctx->src[0], k * 2 + ((i == 3) ? 0 : 1)); + + alu.src[1].sel = t1; + alu.src[1].chan = (i == 3) ? 0 : 1; + + alu.dst.sel = t1; + alu.dst.chan = i; + alu.dst.write = 1; + if (i == 3) + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + for (int i = 0; i < 2; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + alu.src[0].sel = t1; + alu.src[0].chan = i; + tgsi_dst(ctx, &inst->Dst[0], k * 2 + i, &alu.dst); + alu.dst.write = 1; + if (i == 1) + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + return 0; +} + /* * r600 - trunc to -PI..PI range * r700 - normalize by dividing by 2PI @@ -4516,127 +5079,6 @@ static int tgsi_trig(struct r600_shader_ctx *ctx) return 0; } -static int tgsi_scs(struct r600_shader_ctx *ctx) -{ - struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bytecode_alu alu; - int i, r; - - /* We'll only need the trig stuff if we are going to write to the - * X or Y components of the destination vector. 
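
/*
 * Illustrative sketch of the DDIV lowering in cayman_ddiv_instr() above:
 * RECIP_64 approximates 1/b and MUL_64 forms a * (1/b), and the asserts
 * state that DDIV shares DMUL's one-double-at-a-time constraint. Because
 * the hardware reciprocal is an approximation, the result may differ from
 * a correctly rounded IEEE division in the last ulps; this CPU-side model
 * only shows the shape of the computation:
 */
static double ddiv_model(double a, double b)
{
	double recip = 1.0 / b;	/* stands in for RECIP_64 into t1 */
	return a * recip;	/* stands in for the MUL_64 loop   */
}
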
- */ - if (likely(inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XY)) { - r = tgsi_setup_trig(ctx); - if (r) - return r; - } - - /* dst.x = COS */ - if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) { - if (ctx->bc->chip_class == CAYMAN) { - for (i = 0 ; i < 3; i++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP1_COS; - tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - - if (i == 0) - alu.dst.write = 1; - else - alu.dst.write = 0; - alu.src[0].sel = ctx->temp_reg; - alu.src[0].chan = 0; - if (i == 2) - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - } else { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP1_COS; - tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst); - - alu.src[0].sel = ctx->temp_reg; - alu.src[0].chan = 0; - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - } - - /* dst.y = SIN */ - if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) { - if (ctx->bc->chip_class == CAYMAN) { - for (i = 0 ; i < 3; i++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP1_SIN; - tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - if (i == 1) - alu.dst.write = 1; - else - alu.dst.write = 0; - alu.src[0].sel = ctx->temp_reg; - alu.src[0].chan = 0; - if (i == 2) - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - } else { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP1_SIN; - tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst); - - alu.src[0].sel = ctx->temp_reg; - alu.src[0].chan = 0; - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - } - - /* dst.z = 0.0; */ - if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Z) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - - alu.op = ALU_OP1_MOV; - - tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst); - - alu.src[0].sel = V_SQ_ALU_SRC_0; - alu.src[0].chan = 0; - - alu.last = 1; - - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - - /* dst.w = 1.0; */ - if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_W) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - - alu.op = ALU_OP1_MOV; - - tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst); - - alu.src[0].sel = V_SQ_ALU_SRC_1; - alu.src[0].chan = 0; - - alu.last = 1; - - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - - return 0; -} - static int tgsi_kill(struct r600_shader_ctx *ctx) { const struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; @@ -4713,7 +5155,7 @@ static int tgsi_lit(struct r600_shader_ctx *ctx) alu.last = 1; } else alu.dst.write = 0; - + r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; @@ -4829,11 +5271,7 @@ static int tgsi_rsq(struct r600_shader_ctx *ctx) memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - /* XXX: - * For state trackers other than OpenGL, we'll want to use - * _RECIPSQRT_IEEE instead. 
- */ - alu.op = ALU_OP1_RECIPSQRT_CLAMPED; + alu.op = ALU_OP1_RECIPSQRT_IEEE; for (i = 0; i < inst->Instruction.NumSrcRegs; i++) { r600_bytecode_src(&alu.src[i], &ctx->src[i], 0); @@ -4981,6 +5419,31 @@ static int tgsi_pow(struct r600_shader_ctx *ctx) return tgsi_helper_tempx_replicate(ctx); } +static int emit_mul_int_op(struct r600_bytecode *bc, + struct r600_bytecode_alu *alu_src) +{ + struct r600_bytecode_alu alu; + int i, r; + alu = *alu_src; + if (bc->chip_class == CAYMAN) { + for (i = 0; i < 4; i++) { + alu.dst.chan = i; + alu.dst.write = (i == alu_src->dst.chan); + alu.last = (i == 3); + + r = r600_bytecode_add_alu(bc, &alu); + if (r) + return r; + } + } else { + alu.last = 1; + r = r600_bytecode_add_alu(bc, &alu); + if (r) + return r; + } + return 0; +} + static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; @@ -5189,7 +5652,7 @@ static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op) memset(&alu, 0, sizeof(struct r600_bytecode_alu)); alu.op = ALU_OP1_FLT_TO_UINT; - + alu.dst.sel = tmp0; alu.dst.chan = 0; alu.dst.write = 1; @@ -5222,50 +5685,25 @@ static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op) } /* 2. tmp0.z = lo (tmp0.x * src2) */ - if (ctx->bc->chip_class == CAYMAN) { - for (j = 0 ; j < 4; j++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_MULLO_UINT; - - alu.dst.sel = tmp0; - alu.dst.chan = j; - alu.dst.write = (j == 2); + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_MULLO_UINT; - alu.src[0].sel = tmp0; - alu.src[0].chan = 0; - if (signed_op) { - alu.src[1].sel = tmp2; - alu.src[1].chan = 1; - } else { - r600_bytecode_src(&alu.src[1], &ctx->src[1], i); - } + alu.dst.sel = tmp0; + alu.dst.chan = 2; + alu.dst.write = 1; - alu.last = (j == 3); - if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) - return r; - } + alu.src[0].sel = tmp0; + alu.src[0].chan = 0; + if (signed_op) { + alu.src[1].sel = tmp2; + alu.src[1].chan = 1; } else { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_MULLO_UINT; - - alu.dst.sel = tmp0; - alu.dst.chan = 2; - alu.dst.write = 1; - - alu.src[0].sel = tmp0; - alu.src[0].chan = 0; - if (signed_op) { - alu.src[1].sel = tmp2; - alu.src[1].chan = 1; - } else { - r600_bytecode_src(&alu.src[1], &ctx->src[1], i); - } - - alu.last = 1; - if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) - return r; + r600_bytecode_src(&alu.src[1], &ctx->src[1], i); } + if ((r = emit_mul_int_op(ctx->bc, &alu))) + return r; + /* 3. tmp0.w = -tmp0.z */ memset(&alu, 0, sizeof(struct r600_bytecode_alu)); alu.op = ALU_OP2_SUB_INT; @@ -5283,51 +5721,26 @@ static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op) return r; /* 4. 
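
/*
 * Illustrative sketch: emit_mul_int_op() above folds the CAYMAN special
 * case into one place -- on CAYMAN the integer multiplies are vector ops
 * that must occupy all four slots, with only the requested channel
 * written, while other chips issue a single t-slot instruction with
 * .last set. A minimal caller (hypothetical helper name, mirroring the
 * MULLO_UINT uses in this function):
 */
static int emit_mullo_uint_sketch(struct r600_shader_ctx *ctx,
				  int dst_sel, int dst_chan,
				  int src_sel, int src_chan, unsigned factor)
{
	struct r600_bytecode_alu alu;

	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
	alu.op = ALU_OP2_MULLO_UINT;
	alu.src[0].sel = src_sel;
	alu.src[0].chan = src_chan;
	alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
	alu.src[1].value = factor;
	alu.dst.sel = dst_sel;
	alu.dst.chan = dst_chan;
	alu.dst.write = 1;
	return emit_mul_int_op(ctx->bc, &alu);
}
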
tmp0.y = hi (tmp0.x * src2) */ - if (ctx->bc->chip_class == CAYMAN) { - for (j = 0 ; j < 4; j++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_MULHI_UINT; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_MULHI_UINT; - alu.dst.sel = tmp0; - alu.dst.chan = j; - alu.dst.write = (j == 1); + alu.dst.sel = tmp0; + alu.dst.chan = 1; + alu.dst.write = 1; - alu.src[0].sel = tmp0; - alu.src[0].chan = 0; + alu.src[0].sel = tmp0; + alu.src[0].chan = 0; - if (signed_op) { - alu.src[1].sel = tmp2; - alu.src[1].chan = 1; - } else { - r600_bytecode_src(&alu.src[1], &ctx->src[1], i); - } - alu.last = (j == 3); - if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) - return r; - } + if (signed_op) { + alu.src[1].sel = tmp2; + alu.src[1].chan = 1; } else { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_MULHI_UINT; - - alu.dst.sel = tmp0; - alu.dst.chan = 1; - alu.dst.write = 1; - - alu.src[0].sel = tmp0; - alu.src[0].chan = 0; - - if (signed_op) { - alu.src[1].sel = tmp2; - alu.src[1].chan = 1; - } else { - r600_bytecode_src(&alu.src[1], &ctx->src[1], i); - } - - alu.last = 1; - if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) - return r; + r600_bytecode_src(&alu.src[1], &ctx->src[1], i); } + if ((r = emit_mul_int_op(ctx->bc, &alu))) + return r; + /* 5. tmp0.z = (tmp0.y == 0 ? tmp0.w : tmp0.z) = abs(lo(rcp*src)) */ memset(&alu, 0, sizeof(struct r600_bytecode_alu)); alu.op = ALU_OP3_CNDE_INT; @@ -5349,43 +5762,21 @@ static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op) return r; /* 6. tmp0.w = hi (tmp0.z * tmp0.x) = e, rounding error */ - if (ctx->bc->chip_class == CAYMAN) { - for (j = 0 ; j < 4; j++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_MULHI_UINT; - - alu.dst.sel = tmp0; - alu.dst.chan = j; - alu.dst.write = (j == 3); - - alu.src[0].sel = tmp0; - alu.src[0].chan = 2; - - alu.src[1].sel = tmp0; - alu.src[1].chan = 0; - - alu.last = (j == 3); - if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) - return r; - } - } else { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_MULHI_UINT; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_MULHI_UINT; - alu.dst.sel = tmp0; - alu.dst.chan = 3; - alu.dst.write = 1; + alu.dst.sel = tmp0; + alu.dst.chan = 3; + alu.dst.write = 1; - alu.src[0].sel = tmp0; - alu.src[0].chan = 2; + alu.src[0].sel = tmp0; + alu.src[0].chan = 2; - alu.src[1].sel = tmp0; - alu.src[1].chan = 0; + alu.src[1].sel = tmp0; + alu.src[1].chan = 0; - alu.last = 1; - if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + if ((r = emit_mul_int_op(ctx->bc, &alu))) return r; - } /* 7. tmp1.x = tmp0.x - tmp0.w */ memset(&alu, 0, sizeof(struct r600_bytecode_alu)); @@ -5442,98 +5833,46 @@ static int tgsi_divmod(struct r600_shader_ctx *ctx, int mod, int signed_op) return r; /* 10. 
tmp0.z = hi(tmp0.x * src1) = q */ - if (ctx->bc->chip_class == CAYMAN) { - for (j = 0 ; j < 4; j++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_MULHI_UINT; - - alu.dst.sel = tmp0; - alu.dst.chan = j; - alu.dst.write = (j == 2); + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_MULHI_UINT; - alu.src[0].sel = tmp0; - alu.src[0].chan = 0; + alu.dst.sel = tmp0; + alu.dst.chan = 2; + alu.dst.write = 1; - if (signed_op) { - alu.src[1].sel = tmp2; - alu.src[1].chan = 0; - } else { - r600_bytecode_src(&alu.src[1], &ctx->src[0], i); - } + alu.src[0].sel = tmp0; + alu.src[0].chan = 0; - alu.last = (j == 3); - if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) - return r; - } + if (signed_op) { + alu.src[1].sel = tmp2; + alu.src[1].chan = 0; } else { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_MULHI_UINT; - - alu.dst.sel = tmp0; - alu.dst.chan = 2; - alu.dst.write = 1; - - alu.src[0].sel = tmp0; - alu.src[0].chan = 0; - - if (signed_op) { - alu.src[1].sel = tmp2; - alu.src[1].chan = 0; - } else { - r600_bytecode_src(&alu.src[1], &ctx->src[0], i); - } - - alu.last = 1; - if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) - return r; + r600_bytecode_src(&alu.src[1], &ctx->src[0], i); } - /* 11. tmp0.y = lo (src2 * tmp0.z) = src2*q = src1 - r */ - if (ctx->bc->chip_class == CAYMAN) { - for (j = 0 ; j < 4; j++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_MULLO_UINT; - - alu.dst.sel = tmp0; - alu.dst.chan = j; - alu.dst.write = (j == 1); + if ((r = emit_mul_int_op(ctx->bc, &alu))) + return r; - if (signed_op) { - alu.src[0].sel = tmp2; - alu.src[0].chan = 1; - } else { - r600_bytecode_src(&alu.src[0], &ctx->src[1], i); - } + /* 11. tmp0.y = lo (src2 * tmp0.z) = src2*q = src1 - r */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_MULLO_UINT; - alu.src[1].sel = tmp0; - alu.src[1].chan = 2; + alu.dst.sel = tmp0; + alu.dst.chan = 1; + alu.dst.write = 1; - alu.last = (j == 3); - if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) - return r; - } + if (signed_op) { + alu.src[0].sel = tmp2; + alu.src[0].chan = 1; } else { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_MULLO_UINT; - - alu.dst.sel = tmp0; - alu.dst.chan = 1; - alu.dst.write = 1; + r600_bytecode_src(&alu.src[0], &ctx->src[1], i); + } - if (signed_op) { - alu.src[0].sel = tmp2; - alu.src[0].chan = 1; - } else { - r600_bytecode_src(&alu.src[0], &ctx->src[1], i); - } - - alu.src[1].sel = tmp0; - alu.src[1].chan = 2; + alu.src[1].sel = tmp0; + alu.src[1].chan = 2; - alu.last = 1; - if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) - return r; - } + if ((r = emit_mul_int_op(ctx->bc, &alu))) + return r; /* 12. 
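
/*
 * Illustrative sketch of the unsigned divide core (steps 1-11 above; the
 * remainder and fix-up steps continue below, and the signed variants take
 * absolute values into tmp2 first and patch signs afterwards): the
 * hardware builds an approximate 2^32/src2 reciprocal from a float RECIP,
 * refines it by one error-correction round, then derives quotient and
 * remainder with MULHI/MULLO. A CPU-side reference using 64-bit
 * arithmetic in place of the MULHI/MULLO pairs (src2 == 0 not handled):
 */
#include <stdint.h>

static void udivmod_model(uint32_t src1, uint32_t src2,
			  uint32_t *quo, uint32_t *rem)
{
	/* exact floor(2^32 / src2); steps 1-9 above build an
	 * approximation of this and correct it */
	uint64_t rcp = ((uint64_t)1 << 32) / src2;

	uint32_t q = (uint32_t)((rcp * src1) >> 32);	/* step 10     */
	uint32_t r = src1 - q * src2;			/* steps 11-12 */

	/* the SETGE/ADD/SUB tail below: a floored reciprocal can only
	 * leave the candidate quotient low, here by at most one */
	while (r >= src2) {
		r -= src2;
		q++;
	}
	*quo = q;
	*rem = r;
}
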
tmp0.w = src1 - tmp0.y = r */ memset(&alu, 0, sizeof(struct r600_bytecode_alu)); @@ -6075,7 +6414,25 @@ static int tgsi_bfi(struct r600_shader_ctx *ctx) unsigned write_mask = inst->Dst[0].Register.WriteMask; int last_inst = tgsi_last_instruction(write_mask); - t1 = ctx->temp_reg; + t1 = r600_get_temp(ctx); + + for (i = 0; i < 4; i++) { + if (!(write_mask & (1<src[3], i); + alu.src[1].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[1].value = 32; + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = i; + alu.dst.write = 1; + alu.last = i == last_inst; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } for (i = 0; i < 4; i++) { if (!(write_mask & (1<temp_reg; + alu.src[0].chan = i; + r600_bytecode_src(&alu.src[2], &ctx->src[1], i); + + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + + alu.src[1].sel = alu.dst.sel; + alu.src[1].chan = i; + + alu.last = i == last_inst; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } return 0; } @@ -6238,12 +6615,10 @@ static int tgsi_interp_egcm(struct r600_shader_ctx *ctx) struct r600_bytecode_alu alu; int r, i = 0, k, interp_gpr, interp_base_chan, tmp, lasti; unsigned location; - int input; + const int input = inst->Src[0].Register.Index + ctx->shader->nsys_inputs; assert(inst->Src[0].Register.File == TGSI_FILE_INPUT); - input = inst->Src[0].Register.Index; - /* Interpolators have been marked for use already by allocate_system_value_inputs */ if (inst->Instruction.Opcode == TGSI_OPCODE_INTERP_OFFSET || inst->Instruction.Opcode == TGSI_OPCODE_INTERP_SAMPLE) { @@ -6455,13 +6830,18 @@ static int tgsi_make_src_for_op3(struct r600_shader_ctx *ctx, return 0; } -static int tgsi_op3(struct r600_shader_ctx *ctx) +static int tgsi_op3_dst(struct r600_shader_ctx *ctx, int dst) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; struct r600_bytecode_alu alu; int i, j, r; int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); int temp_regs[4]; + unsigned op = ctx->inst_info->op; + + if (op == ALU_OP3_MULADD_IEEE && + ctx->info.properties[TGSI_PROPERTY_MUL_ZERO_WINS]) + op = ALU_OP3_MULADD; for (j = 0; j < inst->Instruction.NumSrcRegs; j++) { temp_regs[j] = 0; @@ -6473,14 +6853,18 @@ static int tgsi_op3(struct r600_shader_ctx *ctx) continue; memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ctx->inst_info->op; + alu.op = op; for (j = 0; j < inst->Instruction.NumSrcRegs; j++) { r = tgsi_make_src_for_op3(ctx, temp_regs[j], i, &alu.src[j], &ctx->src[j]); if (r) return r; } - tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + if (dst == -1) { + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + } else { + alu.dst.sel = dst; + } alu.dst.chan = i; alu.dst.write = 1; alu.is_op3 = 1; @@ -6494,15 +6878,24 @@ static int tgsi_op3(struct r600_shader_ctx *ctx) return 0; } +static int tgsi_op3(struct r600_shader_ctx *ctx) +{ + return tgsi_op3_dst(ctx, -1); +} + static int tgsi_dp(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; struct r600_bytecode_alu alu; int i, j, r; + unsigned op = ctx->inst_info->op; + if (op == ALU_OP2_DOT4_IEEE && + ctx->info.properties[TGSI_PROPERTY_MUL_ZERO_WINS]) + op = ALU_OP2_DOT4; for (i = 0; i < 4; i++) { memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ctx->inst_info->op; + alu.op = op; for (j = 0; j < inst->Instruction.NumSrcRegs; j++) { r600_bytecode_src(&alu.src[j], &ctx->src[j], i); } @@ -6524,13 +6917,6 @@ static int tgsi_dp(struct r600_shader_ctx *ctx) alu.src[0].chan = alu.src[1].chan = 0; } break; - case 
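
/*
 * Illustrative sketch of why tgsi_op3_dst() and tgsi_dp() above swap
 * MULADD_IEEE/DOT4_IEEE for their non-IEEE forms when
 * TGSI_PROPERTY_MUL_ZERO_WINS is set: the legacy multiply defines
 * 0 * x == 0 even for x == Inf or NaN, while the IEEE form yields NaN
 * for 0 * Inf. CPU-side model of the difference:
 */
static float mul_zero_wins(float a, float b)
{
	if (a == 0.0f || b == 0.0f)
		return 0.0f;	/* zero wins over Inf and NaN */
	return a * b;		/* otherwise an ordinary multiply */
}
/* mul_zero_wins(0.0f, INFINITY) == 0.0f, whereas 0.0f * INFINITY is NaN. */
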
TGSI_OPCODE_DPH: - if (i == 3) { - alu.src[0].sel = V_SQ_ALU_SRC_1; - alu.src[0].chan = 0; - alu.src[0].neg = 0; - } - break; default: break; } @@ -6569,6 +6955,7 @@ static int do_vtx_fetch_inst(struct r600_shader_ctx *ctx, boolean src_requires_l struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; int src_gpr, r, i; int id = tgsi_tex_get_src_gpr(ctx, 1); + int sampler_index_mode = inst->Src[1].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE src_gpr = tgsi_tex_get_src_gpr(ctx, 0); if (src_requires_loading) { @@ -6600,6 +6987,7 @@ static int do_vtx_fetch_inst(struct r600_shader_ctx *ctx, boolean src_requires_l vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; /* SEL_Z */ vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; /* SEL_W */ vtx.use_const_fields = 1; + vtx.buffer_index_mode = sampler_index_mode; if ((r = r600_bytecode_add_vtx(ctx->bc, &vtx))) return r; @@ -6657,34 +7045,51 @@ static int do_vtx_fetch_inst(struct r600_shader_ctx *ctx, boolean src_requires_l return 0; } -static int r600_do_buffer_txq(struct r600_shader_ctx *ctx) +static int r600_do_buffer_txq(struct r600_shader_ctx *ctx, int reg_idx, int offset, int eg_buffer_base) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bytecode_alu alu; int r; - int id = tgsi_tex_get_src_gpr(ctx, 1); + int id = tgsi_tex_get_src_gpr(ctx, reg_idx) + offset; + int sampler_index_mode = inst->Src[reg_idx].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP1_MOV; - alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL; - if (ctx->bc->chip_class >= EVERGREEN) { - /* channel 0 or 2 of each word */ - alu.src[0].sel += (id / 2); - alu.src[0].chan = (id % 2) * 2; - } else { + if (ctx->bc->chip_class < EVERGREEN) { + struct r600_bytecode_alu alu; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL; /* r600 we have them at channel 2 of the second dword */ alu.src[0].sel += (id * 2) + 1; alu.src[0].chan = 1; + alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER; + tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst); + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + return 0; + } else { + struct r600_bytecode_vtx vtx; + memset(&vtx, 0, sizeof(vtx)); + vtx.op = FETCH_OP_GET_BUFFER_RESINFO; + vtx.buffer_id = id + eg_buffer_base; + vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET; + vtx.src_gpr = 0; + vtx.mega_fetch_count = 16; /* no idea here really... */ + vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index; + vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */ + vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 4 : 7; /* SEL_Y */ + vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 4 : 7; /* SEL_Z */ + vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 
4 : 7; /* SEL_W */ + vtx.data_format = FMT_32_32_32_32; + vtx.buffer_index_mode = sampler_index_mode; + + if ((r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx))) + return r; + return 0; } - alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER; - tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst); - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - return 0; } + static int tgsi_tex(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; @@ -6704,13 +7109,12 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) /* Texture fetch instructions can only use gprs as source. * Also they cannot negate the source or take the absolute value */ - const boolean src_requires_loading = (inst->Instruction.Opcode != TGSI_OPCODE_TXQ_LZ && - inst->Instruction.Opcode != TGSI_OPCODE_TXQS && + const boolean src_requires_loading = (inst->Instruction.Opcode != TGSI_OPCODE_TXQS && tgsi_tex_src_requires_loading(ctx, 0)) || read_compressed_msaa || txf_add_offsets; boolean src_loaded = FALSE; - unsigned sampler_src_reg = inst->Instruction.Opcode == TGSI_OPCODE_TXQ_LZ ? 0 : 1; + unsigned sampler_src_reg = 1; int8_t offset_x = 0, offset_y = 0, offset_z = 0; boolean has_txq_cube_array_z = false; unsigned sampler_index_mode; @@ -6739,8 +7143,9 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) if (inst->Texture.Texture == TGSI_TEXTURE_BUFFER) { if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ) { - ctx->shader->uses_tex_buffers = true; - return r600_do_buffer_txq(ctx); + if (ctx->bc->chip_class < EVERGREEN) + ctx->shader->uses_tex_buffers = true; + return r600_do_buffer_txq(ctx, 1, 0, R600_MAX_CONST_BUFFERS); } else if (inst->Instruction.Opcode == TGSI_OPCODE_TXF) { if (ctx->bc->chip_class < EVERGREEN) @@ -6818,8 +7223,7 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) inst->Texture.Texture == TGSI_TEXTURE_CUBE_ARRAY || inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE || inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) && - inst->Instruction.Opcode != TGSI_OPCODE_TXQ && - inst->Instruction.Opcode != TGSI_OPCODE_TXQ_LZ) { + inst->Instruction.Opcode != TGSI_OPCODE_TXQ) { static const unsigned src0_swizzle[] = {2, 2, 0, 1}; static const unsigned src1_swizzle[] = {1, 0, 2, 2}; @@ -6919,7 +7323,7 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; - /* write initial compare value into Z component + /* write initial compare value into Z component - W src 0 for shadow cube - X src 1 for shadow cube array */ if (inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE || @@ -6997,7 +7401,7 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; - + r = r600_bytecode_add_tex(ctx->bc, &tex); if (r) return r; @@ -7093,6 +7497,168 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) } } + if (inst->Instruction.Opcode == TGSI_OPCODE_TG4) { + /* Gather4 should follow the same rules as bilinear filtering, but the hardware + * incorrectly forces nearest filtering if the texture format is integer. + * The only effect it has on Gather4, which always returns 4 texels for + * bilinear filtering, is that the final coordinates are off by 0.5 of + * the texel size. + * + * The workaround is to subtract 0.5 from the unnormalized coordinates, + * or (0.5 / size) from the normalized coordinates. 
+ */ + if (inst->Texture.ReturnType == TGSI_RETURN_TYPE_SINT || + inst->Texture.ReturnType == TGSI_RETURN_TYPE_UINT) { + int treg = r600_get_temp(ctx); + + /* mov array and comparison oordinate to temp_reg if needed */ + if ((inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D || + inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY || + inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY) && !src_loaded) { + int end = inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ? 3 : 2; + for (i = 2; i <= end; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = i; + alu.dst.write = 1; + alu.last = (i == end); + r600_bytecode_src(&alu.src[0], &ctx->src[0], i); + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + } + + if (inst->Texture.Texture == TGSI_TEXTURE_RECT || + inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT) { + for (i = 0; i < 2; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_ADD; + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = i; + alu.dst.write = 1; + alu.last = i == 1; + if (src_loaded) { + alu.src[0].sel = ctx->temp_reg; + alu.src[0].chan = i; + } else + r600_bytecode_src(&alu.src[0], &ctx->src[0], i); + alu.src[1].sel = V_SQ_ALU_SRC_0_5; + alu.src[1].neg = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + } else { + /* execute a TXQ */ + memset(&tex, 0, sizeof(struct r600_bytecode_tex)); + tex.op = FETCH_OP_GET_TEXTURE_RESINFO; + tex.sampler_id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg); + tex.sampler_index_mode = sampler_index_mode; + tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS; + tex.resource_index_mode = sampler_index_mode; + tex.dst_gpr = treg; + tex.src_sel_x = 4; + tex.src_sel_y = 4; + tex.src_sel_z = 4; + tex.src_sel_w = 4; + tex.dst_sel_x = 0; + tex.dst_sel_y = 1; + tex.dst_sel_z = 7; + tex.dst_sel_w = 7; + r = r600_bytecode_add_tex(ctx->bc, &tex); + if (r) + return r; + + /* coord.xy = -0.5 * (1.0/int_to_flt(size)) + coord.xy */ + if (ctx->bc->chip_class == CAYMAN) { + /* */ + for (i = 0; i < 2; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_INT_TO_FLT; + alu.dst.sel = treg; + alu.dst.chan = i; + alu.dst.write = 1; + alu.src[0].sel = treg; + alu.src[0].chan = i; + alu.last = (i == 1) ? 
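
/*
 * Illustrative sketch of the half-texel workaround being emitted here,
 * matching the comment above: RECT targets use unnormalized coordinates,
 * so 0.5 is subtracted directly (the ADD with -0.5), while other targets
 * query the size with GET_TEXTURE_RESINFO and subtract 0.5 / size via
 * INT_TO_FLT + RECIP_IEEE + MULADD:
 */
static float tg4_fixup_unnormalized(float coord)
{
	return coord - 0.5f;
}

static float tg4_fixup_normalized(float coord, int size)
{
	/* coord + (-0.5) * (1.0 / size), as the MULADD below computes */
	return coord - 0.5f / (float)size;
}
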
1 : 0; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + for (j = 0; j < 2; j++) { + for (i = 0; i < 3; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_RECIP_IEEE; + alu.src[0].sel = treg; + alu.src[0].chan = j; + alu.dst.sel = treg; + alu.dst.chan = i; + if (i == 2) + alu.last = 1; + if (i == j) + alu.dst.write = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + } + } else { + for (i = 0; i < 2; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_INT_TO_FLT; + alu.dst.sel = treg; + alu.dst.chan = i; + alu.dst.write = 1; + alu.src[0].sel = treg; + alu.src[0].chan = i; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + for (i = 0; i < 2; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_RECIP_IEEE; + alu.src[0].sel = treg; + alu.src[0].chan = i; + alu.dst.sel = treg; + alu.dst.chan = i; + alu.last = 1; + alu.dst.write = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + } + for (i = 0; i < 2; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP3_MULADD; + alu.is_op3 = 1; + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = i; + alu.dst.write = 1; + alu.last = i == 1; + alu.src[0].sel = treg; + alu.src[0].chan = i; + alu.src[1].sel = V_SQ_ALU_SRC_0_5; + alu.src[1].neg = 1; + if (src_loaded) { + alu.src[2].sel = ctx->temp_reg; + alu.src[2].chan = i; + } else + r600_bytecode_src(&alu.src[2], &ctx->src[0], i); + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + } + src_loaded = TRUE; + src_gpr = ctx->temp_reg; + } + } + if (src_requires_loading && !src_loaded) { for (i = 0; i < 4; i++) { memset(&alu, 0, sizeof(struct r600_bytecode_alu)); @@ -7240,38 +7806,18 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) return r; /* temp.x = sample_index*4 */ - if (ctx->bc->chip_class == CAYMAN) { - for (i = 0 ; i < 4; i++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_MULLO_INT; - alu.src[0].sel = src_gpr; - alu.src[0].chan = sample_chan; - alu.src[1].sel = V_SQ_ALU_SRC_LITERAL; - alu.src[1].value = 4; - alu.dst.sel = temp; - alu.dst.chan = i; - alu.dst.write = i == 0; - if (i == 3) - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - } else { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_MULLO_INT; - alu.src[0].sel = src_gpr; - alu.src[0].chan = sample_chan; - alu.src[1].sel = V_SQ_ALU_SRC_LITERAL; - alu.src[1].value = 4; - alu.dst.sel = temp; - alu.dst.chan = 0; - alu.dst.write = 1; - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_MULLO_INT; + alu.src[0].sel = src_gpr; + alu.src[0].chan = sample_chan; + alu.src[1].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[1].value = 4; + alu.dst.sel = temp; + alu.dst.chan = 0; + alu.dst.write = 1; + r = emit_mul_int_op(ctx->bc, &alu); + if (r) + return r; /* sample_index = temp.w >> temp.x */ memset(&alu, 0, sizeof(struct r600_bytecode_alu)); @@ -7324,15 +7870,15 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) /* does this shader want a num layers from TXQ for a cube array? 
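
/*
 * Illustrative sketch of the compressed-MSAA decode above: the FMASK
 * word fetched into temp.w holds one fragment index per sample, and the
 * MULLO (sample * 4) plus LSHR select the field, so each field is
 * evidently 4 bits wide. The explicit 0xf mask is this sketch's
 * assumption; the shader code relies on later stages using only the low
 * bits:
 */
#include <stdint.h>

static unsigned fmask_fragment_index(uint32_t fmask, unsigned sample)
{
	return (fmask >> (sample * 4)) & 0xf;
}
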
*/ if (has_txq_cube_array_z) { int id = tgsi_tex_get_src_gpr(ctx, sampler_src_reg); - + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); alu.op = ALU_OP1_MOV; alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL; if (ctx->bc->chip_class >= EVERGREEN) { - /* channel 1 or 3 of each word */ - alu.src[0].sel += (id / 2); - alu.src[0].chan = ((id % 2) * 2) + 1; + /* with eg each dword is number of cubes */ + alu.src[0].sel += id / 4; + alu.src[0].chan = id % 4; } else { /* r600 we have them at channel 2 of the second dword */ alu.src[0].sel += (id * 2) + 1; @@ -7430,15 +7976,15 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) tex.inst_mod = texture_component_select; if (ctx->bc->chip_class == CAYMAN) { - /* GATHER4 result order is different from TGSI TG4 */ - tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 0 : 7; - tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 1 : 7; - tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 2 : 7; + tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; + tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; + tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; } else { - tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; - tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; - tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; + /* GATHER4 result order is different from TGSI TG4 */ + tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 1 : 7; + tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 2 : 7; + tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 0 : 7; tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; } } @@ -7462,8 +8008,7 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) } - if (inst->Instruction.Opcode == TGSI_OPCODE_TXQ_LZ || - inst->Instruction.Opcode == TGSI_OPCODE_TXQS) { + if (inst->Instruction.Opcode == TGSI_OPCODE_TXQS) { tex.src_sel_x = 4; tex.src_sel_y = 4; tex.src_sel_z = 4; @@ -7569,191 +8114,198 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) return 0; } -static int tgsi_lrp(struct r600_shader_ctx *ctx) +static int find_hw_atomic_counter(struct r600_shader_ctx *ctx, + struct tgsi_full_src_register *src) +{ + unsigned i; + + if (src->Register.Indirect) { + for (i = 0; i < ctx->shader->nhwatomic_ranges; i++) { + if (src->Indirect.ArrayID == ctx->shader->atomics[i].array_id) + return ctx->shader->atomics[i].hw_idx; + } + } else { + uint32_t index = src->Register.Index; + for (i = 0; i < ctx->shader->nhwatomic_ranges; i++) { + if (ctx->shader->atomics[i].buffer_id != (unsigned)src->Dimension.Index) + continue; + if (index > ctx->shader->atomics[i].end) + continue; + if (index < ctx->shader->atomics[i].start) + continue; + uint32_t offset = (index - ctx->shader->atomics[i].start); + return ctx->shader->atomics[i].hw_idx + offset; + } + } + assert(0); + return -1; +} + +static int tgsi_set_gds_temp(struct r600_shader_ctx *ctx, + int *uav_id_p, int *uav_index_mode_p) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bytecode_alu alu; - int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); - unsigned i, temp_regs[2]; + int uav_id, uav_index_mode = 0; int r; + bool is_cm = (ctx->bc->chip_class == CAYMAN); - /* optimize if it's just an equal balance */ - if (ctx->src[0].sel == V_SQ_ALU_SRC_0_5) { - for (i = 0; i < lasti + 1; i++) { - if (!(inst->Dst[0].Register.WriteMask & (1 << i))) - continue; + uav_id = 
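
/*
 * Illustrative sketch of the mapping find_hw_atomic_counter() above
 * performs: each declared HW_ATOMIC range occupies consecutive hardware
 * slots starting at its hw_idx, so a direct (non-indirect) reference
 * resolves as below. The struct is a stand-in for the atomics[] element
 * type, with only the fields the lookup uses:
 */
struct atomic_range_model {
	unsigned buffer_id, start, end, hw_idx;
};

static int hw_atomic_slot_model(const struct atomic_range_model *ranges,
				unsigned nranges, unsigned buffer_id,
				unsigned index)
{
	unsigned i;
	for (i = 0; i < nranges; i++) {
		if (ranges[i].buffer_id != buffer_id)
			continue;
		if (index >= ranges[i].start && index <= ranges[i].end)
			return ranges[i].hw_idx + (index - ranges[i].start);
	}
	return -1;	/* the real lookup asserts instead */
}
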
find_hw_atomic_counter(ctx, &inst->Src[0]); + if (inst->Src[0].Register.Indirect) { + if (is_cm) { + struct r600_bytecode_alu alu; memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_ADD; - r600_bytecode_src(&alu.src[0], &ctx->src[1], i); - r600_bytecode_src(&alu.src[1], &ctx->src[2], i); - alu.omod = 3; - tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - alu.dst.chan = i; - if (i == lasti) { - alu.last = 1; - } + alu.op = ALU_OP2_LSHL_INT; + alu.src[0].sel = get_address_file_reg(ctx, inst->Src[0].Indirect.Index); + alu.src[0].chan = 0; + alu.src[1].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[1].value = 2; + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 0; + alu.dst.write = 1; + alu.last = 1; r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; - } - return 0; - } - - /* 1 - src0 */ - for (i = 0; i < lasti + 1; i++) { - if (!(inst->Dst[0].Register.WriteMask & (1 << i))) - continue; - - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_ADD; - alu.src[0].sel = V_SQ_ALU_SRC_1; - alu.src[0].chan = 0; - r600_bytecode_src(&alu.src[1], &ctx->src[0], i); - r600_bytecode_src_toggle_neg(&alu.src[1]); - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = i; - if (i == lasti) { - alu.last = 1; - } - alu.dst.write = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - - /* (1 - src0) * src2 */ - for (i = 0; i < lasti + 1; i++) { - if (!(inst->Dst[0].Register.WriteMask & (1 << i))) - continue; - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_MUL; - alu.src[0].sel = ctx->temp_reg; - alu.src[0].chan = i; - r600_bytecode_src(&alu.src[1], &ctx->src[2], i); - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = i; - if (i == lasti) { - alu.last = 1; - } - alu.dst.write = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); + r = single_alu_op2(ctx, ALU_OP2_ADD_INT, + ctx->temp_reg, 0, + ctx->temp_reg, 0, + V_SQ_ALU_SRC_LITERAL, uav_id * 4); + if (r) + return r; + } else + uav_index_mode = 2; + } else if (is_cm) { + r = single_alu_op2(ctx, ALU_OP1_MOV, + ctx->temp_reg, 0, + V_SQ_ALU_SRC_LITERAL, uav_id * 4, + 0, 0); if (r) return r; } + *uav_id_p = uav_id; + *uav_index_mode_p = uav_index_mode; + return 0; +} - /* src0 * src1 + (1 - src0) * src2 */ - if (ctx->src[0].abs) - temp_regs[0] = r600_get_temp(ctx); - else - temp_regs[0] = 0; - if (ctx->src[1].abs) - temp_regs[1] = r600_get_temp(ctx); - else - temp_regs[1] = 0; +static int tgsi_load_gds(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + int r; + struct r600_bytecode_gds gds; + int uav_id = 0; + int uav_index_mode = 0; + bool is_cm = (ctx->bc->chip_class == CAYMAN); - for (i = 0; i < lasti + 1; i++) { - if (!(inst->Dst[0].Register.WriteMask & (1 << i))) - continue; + r = tgsi_set_gds_temp(ctx, &uav_id, &uav_index_mode); + if (r) + return r; - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP3_MULADD; - alu.is_op3 = 1; - r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]); - if (r) - return r; - r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[1], &ctx->src[1]); - if (r) - return r; - alu.src[2].sel = ctx->temp_reg; - alu.src[2].chan = i; + memset(&gds, 0, sizeof(struct r600_bytecode_gds)); + gds.op = FETCH_OP_GDS_READ_RET; + gds.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index; + gds.uav_id = is_cm ? 0 : uav_id; + gds.uav_index_mode = is_cm ? 0 : uav_index_mode; + gds.src_gpr = ctx->temp_reg; + gds.src_sel_x = (is_cm) ? 
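
/*
 * Illustrative sketch of the address math in tgsi_set_gds_temp() above:
 * each counter is one dword in GDS, so counter slot N sits at byte N * 4;
 * for indirect access the relative index from the address register is
 * shifted left by 2 (the LSHL_INT) before the base counter's byte offset
 * is added:
 */
#include <stdint.h>

static uint32_t gds_counter_byte_offset(uint32_t base_uav_id,
					uint32_t indirect_index)
{
	return (indirect_index << 2) + base_uav_id * 4;
}
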
0 : 4; + gds.src_sel_y = 4; + gds.src_sel_z = 4; + gds.dst_sel_x = 0; + gds.dst_sel_y = 7; + gds.dst_sel_z = 7; + gds.dst_sel_w = 7; + gds.src_gpr2 = 0; + gds.alloc_consume = !is_cm; + r = r600_bytecode_add_gds(ctx->bc, &gds); + if (r) + return r; - tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - alu.dst.chan = i; - if (i == lasti) { - alu.last = 1; - } - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } + ctx->bc->cf_last->vpm = 1; return 0; } -static int tgsi_cmp(struct r600_shader_ctx *ctx) +/* this fixes up 1D arrays properly */ +static int load_index_src(struct r600_shader_ctx *ctx, int src_index, int *idx_gpr) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + int r, i; struct r600_bytecode_alu alu; - int i, r, j; - int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); - int temp_regs[3]; - unsigned op; + int temp_reg = r600_get_temp(ctx); - if (ctx->src[0].abs && ctx->src[0].neg) { - op = ALU_OP3_CNDE; - ctx->src[0].abs = 0; - ctx->src[0].neg = 0; - } else { - op = ALU_OP3_CNDGE; - } + for (i = 0; i < 4; i++) { + bool def_val = true, write_zero = false; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + alu.dst.sel = temp_reg; + alu.dst.chan = i; - for (j = 0; j < inst->Instruction.NumSrcRegs; j++) { - temp_regs[j] = 0; - if (ctx->src[j].abs) - temp_regs[j] = r600_get_temp(ctx); - } + switch (inst->Memory.Texture) { + case TGSI_TEXTURE_BUFFER: + case TGSI_TEXTURE_1D: + if (i == 1 || i == 2 || i == 3) { + write_zero = true; + } + break; + case TGSI_TEXTURE_1D_ARRAY: + if (i == 1 || i == 3) + write_zero = true; + else if (i == 2) { + r600_bytecode_src(&alu.src[0], &ctx->src[src_index], 1); + def_val = false; + } + break; + case TGSI_TEXTURE_2D: + if (i == 2 || i == 3) + write_zero = true; + break; + default: + if (i == 3) + write_zero = true; + break; + } - for (i = 0; i < lasti + 1; i++) { - if (!(inst->Dst[0].Register.WriteMask & (1 << i))) - continue; + if (write_zero) { + alu.src[0].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[0].value = 0; + } else if (def_val) { + r600_bytecode_src(&alu.src[0], &ctx->src[src_index], i); + } - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = op; - r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]); - if (r) - return r; - r = tgsi_make_src_for_op3(ctx, temp_regs[2], i, &alu.src[1], &ctx->src[2]); - if (r) - return r; - r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[2], &ctx->src[1]); - if (r) - return r; - tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - alu.dst.chan = i; - alu.dst.write = 1; - alu.is_op3 = 1; - if (i == lasti) + if (i == 3) alu.last = 1; + alu.dst.write = 1; r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; } + *idx_gpr = temp_reg; return 0; } -static int tgsi_ucmp(struct r600_shader_ctx *ctx) +static int load_buffer_coord(struct r600_shader_ctx *ctx, int src_idx, + int temp_reg) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bytecode_alu alu; - int i, r; - int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); - - for (i = 0; i < lasti + 1; i++) { - if (!(inst->Dst[0].Register.WriteMask & (1 << i))) - continue; - + int r; + if (inst->Src[src_idx].Register.File == TGSI_FILE_IMMEDIATE) { + int value = (ctx->literals[4 * inst->Src[src_idx].Register.Index + inst->Src[src_idx].Register.SwizzleX]); + r = single_alu_op2(ctx, ALU_OP1_MOV, + temp_reg, 0, + V_SQ_ALU_SRC_LITERAL, value >> 2, + 0, 0); + if (r) + return r; + } else { + struct 
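
/*
 * Illustrative sketch of what load_index_src() above produces: unused
 * index channels are zeroed and, for 1D arrays, the layer index moves
 * from TGSI .y into hardware .z. Plain-C model, using the same
 * TGSI_TEXTURE_* targets switched on above:
 */
static void image_index_model(unsigned target, const int src[4], int out[4])
{
	out[0] = src[0];
	out[1] = out[2] = out[3] = 0;
	switch (target) {
	case TGSI_TEXTURE_BUFFER:
	case TGSI_TEXTURE_1D:
		break;			/* x only */
	case TGSI_TEXTURE_1D_ARRAY:
		out[2] = src[1];	/* layer: .y -> .z */
		break;
	case TGSI_TEXTURE_2D:
		out[1] = src[1];	/* x, y */
		break;
	default:
		out[1] = src[1];
		out[2] = src[2];	/* x, y, z; .w stays 0 */
		break;
	}
}
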
r600_bytecode_alu alu; memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP3_CNDE_INT; - r600_bytecode_src(&alu.src[0], &ctx->src[0], i); - r600_bytecode_src(&alu.src[1], &ctx->src[2], i); - r600_bytecode_src(&alu.src[2], &ctx->src[1], i); - tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - alu.dst.chan = i; + alu.op = ALU_OP2_LSHR_INT; + r600_bytecode_src(&alu.src[0], &ctx->src[src_idx], 0); + alu.src[1].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[1].value = 2; + alu.dst.sel = temp_reg; alu.dst.write = 1; - alu.is_op3 = 1; - if (i == lasti) - alu.last = 1; + alu.last = 1; r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; @@ -7761,615 +8313,910 @@ static int tgsi_ucmp(struct r600_shader_ctx *ctx) return 0; } -static int tgsi_xpd(struct r600_shader_ctx *ctx) +static int tgsi_load_buffer(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - static const unsigned int src0_swizzle[] = {2, 0, 1}; - static const unsigned int src1_swizzle[] = {1, 2, 0}; - struct r600_bytecode_alu alu; - uint32_t use_temp = 0; - int i, r; - - if (inst->Dst[0].Register.WriteMask != 0xf) - use_temp = 1; + /* have to work out the offset into the RAT immediate return buffer */ + struct r600_bytecode_vtx vtx; + struct r600_bytecode_cf *cf; + int r; + int temp_reg = r600_get_temp(ctx); + unsigned rat_index_mode; + unsigned base; - for (i = 0; i < 4; i++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP2_MUL; - if (i < 3) { - r600_bytecode_src(&alu.src[0], &ctx->src[0], src0_swizzle[i]); - r600_bytecode_src(&alu.src[1], &ctx->src[1], src1_swizzle[i]); - } else { - alu.src[0].sel = V_SQ_ALU_SRC_0; - alu.src[0].chan = i; - alu.src[1].sel = V_SQ_ALU_SRC_0; - alu.src[1].chan = i; - } + rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE + base = R600_IMAGE_REAL_RESOURCE_OFFSET + ctx->info.file_count[TGSI_FILE_IMAGE]; - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = i; - alu.dst.write = 1; + r = load_buffer_coord(ctx, 1, temp_reg); + if (r) + return r; + ctx->bc->cf_last->barrier = 1; + memset(&vtx, 0, sizeof(struct r600_bytecode_vtx)); + vtx.op = FETCH_OP_VFETCH; + vtx.buffer_id = inst->Src[0].Register.Index + base; + vtx.buffer_index_mode = rat_index_mode; + vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET; + vtx.src_gpr = temp_reg; + vtx.src_sel_x = 0; + vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index; + vtx.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; /* SEL_X */ + vtx.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; /* SEL_Y */ + vtx.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; /* SEL_Z */ + vtx.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 
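
/*
 * Illustrative note on load_buffer_coord() above: BUFFER accesses address
 * 4-byte elements, so the byte offset coming from TGSI is divided by
 * four -- folded into the literal for immediate offsets, or emitted as an
 * LSHR_INT at runtime:
 */
#include <stdint.h>

static uint32_t buffer_element_index(uint32_t byte_offset)
{
	return byte_offset >> 2;	/* one dword per element */
}
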
3 : 7; /* SEL_W */ + vtx.num_format_all = 1; + vtx.format_comp_all = 1; + vtx.srf_mode_all = 0; - if (i == 3) - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; + if (inst->Dst[0].Register.WriteMask & 8) { + vtx.data_format = FMT_32_32_32_32; + vtx.use_const_fields = 0; + } else if (inst->Dst[0].Register.WriteMask & 4) { + vtx.data_format = FMT_32_32_32; + vtx.use_const_fields = 0; + } else if (inst->Dst[0].Register.WriteMask & 2) { + vtx.data_format = FMT_32_32; + vtx.use_const_fields = 0; + } else { + vtx.data_format = FMT_32; + vtx.use_const_fields = 0; } - for (i = 0; i < 4; i++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP3_MULADD; - - if (i < 3) { - r600_bytecode_src(&alu.src[0], &ctx->src[0], src1_swizzle[i]); - r600_bytecode_src(&alu.src[1], &ctx->src[1], src0_swizzle[i]); - } else { - alu.src[0].sel = V_SQ_ALU_SRC_0; - alu.src[0].chan = i; - alu.src[1].sel = V_SQ_ALU_SRC_0; - alu.src[1].chan = i; - } - - alu.src[2].sel = ctx->temp_reg; - alu.src[2].neg = 1; - alu.src[2].chan = i; - - if (use_temp) - alu.dst.sel = ctx->temp_reg; - else - tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - alu.dst.chan = i; - alu.dst.write = 1; - alu.is_op3 = 1; - if (i == 3) - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - if (use_temp) - return tgsi_helper_copy(ctx, inst); + r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx); + if (r) + return r; + cf = ctx->bc->cf_last; + cf->barrier = 1; return 0; } -static int tgsi_exp(struct r600_shader_ctx *ctx) +static int tgsi_load_rat(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bytecode_alu alu; + /* have to work out the offset into the RAT immediate return buffer */ + struct r600_bytecode_vtx vtx; + struct r600_bytecode_cf *cf; int r; - unsigned i; + int idx_gpr; + unsigned format, num_format, format_comp, endian; + const struct util_format_description *desc; + unsigned rat_index_mode; + unsigned immed_base; - /* result.x = 2^floor(src); */ - if (inst->Dst[0].Register.WriteMask & 1) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + r = load_thread_id_gpr(ctx); + if (r) + return r; - alu.op = ALU_OP1_FLOOR; - r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 
2 : 0; // CF_INDEX_1 : CF_INDEX_NONE - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = 0; - alu.dst.write = 1; - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; + immed_base = R600_IMAGE_IMMED_RESOURCE_OFFSET; + r = load_index_src(ctx, 1, &idx_gpr); + if (r) + return r; - if (ctx->bc->chip_class == CAYMAN) { - for (i = 0; i < 3; i++) { - alu.op = ALU_OP1_EXP_IEEE; - alu.src[0].sel = ctx->temp_reg; - alu.src[0].chan = 0; + if (rat_index_mode) + egcm_load_index_reg(ctx->bc, 1, false); + + r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT); + cf = ctx->bc->cf_last; + + cf->rat.id = ctx->shader->rat_base + inst->Src[0].Register.Index; + cf->rat.inst = V_RAT_INST_NOP_RTN; + cf->rat.index_mode = rat_index_mode; + cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ_IND; + cf->output.gpr = ctx->thread_id_gpr; + cf->output.index_gpr = idx_gpr; + cf->output.comp_mask = 0xf; + cf->output.burst_count = 1; + cf->vpm = 1; + cf->barrier = 1; + cf->mark = 1; + cf->output.elem_size = 0; + + r600_bytecode_add_cfinst(ctx->bc, CF_OP_WAIT_ACK); + cf = ctx->bc->cf_last; + cf->barrier = 1; + + desc = util_format_description(inst->Memory.Format); + r600_vertex_data_type(inst->Memory.Format, + &format, &num_format, &format_comp, &endian); + memset(&vtx, 0, sizeof(struct r600_bytecode_vtx)); + vtx.op = FETCH_OP_VFETCH; + vtx.buffer_id = immed_base + inst->Src[0].Register.Index; + vtx.buffer_index_mode = rat_index_mode; + vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET; + vtx.src_gpr = ctx->thread_id_gpr; + vtx.src_sel_x = 1; + vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index; + vtx.dst_sel_x = desc->swizzle[0]; + vtx.dst_sel_y = desc->swizzle[1]; + vtx.dst_sel_z = desc->swizzle[2]; + vtx.dst_sel_w = desc->swizzle[3]; + vtx.srf_mode_all = 1; + vtx.data_format = format; + vtx.num_format_all = num_format; + vtx.format_comp_all = format_comp; + vtx.endian = endian; + vtx.offset = 0; + vtx.mega_fetch_count = 3; + r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx); + if (r) + return r; + cf = ctx->bc->cf_last; + cf->barrier = 1; + return 0; +} - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = i; - alu.dst.write = i == 0; - alu.last = i == 2; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - } else { - alu.op = ALU_OP1_EXP_IEEE; - alu.src[0].sel = ctx->temp_reg; - alu.src[0].chan = 0; +static int tgsi_load_lds(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_alu alu; + int r; + int temp_reg = r600_get_temp(ctx); + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + r600_bytecode_src(&alu.src[0], &ctx->src[1], 0); + alu.dst.sel = temp_reg; + alu.dst.write = 1; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + r = do_lds_fetch_values(ctx, temp_reg, + ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index, inst->Dst[0].Register.WriteMask); + if (r) + return r; + return 0; +} - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = 0; - alu.dst.write = 1; - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - } +static int tgsi_load(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) + return tgsi_load_rat(ctx); + if (inst->Src[0].Register.File == TGSI_FILE_HW_ATOMIC) + return tgsi_load_gds(ctx); + if (inst->Src[0].Register.File == 
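
/*
 * Illustrative sketch (an interpretation, not authoritative) of the
 * three-step image read emitted by tgsi_load_rat() above: MEM_RAT with
 * NOP_RTN asks the RAT to deposit the addressed texel in this thread's
 * slot of the immediate return buffer, WAIT_ACK orders against that
 * write, and the VFETCH then reads the slot back via the thread id:
 */
#include <stdint.h>

static void rat_read_model(const uint32_t *image, uint32_t elem,
			   uint32_t *return_buf, uint32_t thread_id,
			   uint32_t *texel)
{
	return_buf[thread_id] = image[elem];	/* MEM_RAT, NOP_RTN        */
						/* WAIT_ACK: write visible */
	*texel = return_buf[thread_id];		/* VFETCH via thread id    */
}
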
TGSI_FILE_BUFFER) + return tgsi_load_buffer(ctx); + if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) + return tgsi_load_lds(ctx); + return 0; +} - /* result.y = tmp - floor(tmp); */ - if ((inst->Dst[0].Register.WriteMask >> 1) & 1) { +static int tgsi_store_buffer_rat(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_cf *cf; + int r, i; + unsigned rat_index_mode; + int lasti; + int temp_reg = r600_get_temp(ctx), treg2 = r600_get_temp(ctx); + + r = load_buffer_coord(ctx, 0, treg2); + if (r) + return r; + + rat_index_mode = inst->Dst[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE + if (rat_index_mode) + egcm_load_index_reg(ctx->bc, 1, false); + + for (i = 0; i <= 3; i++) { + struct r600_bytecode_alu alu; memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + alu.dst.sel = temp_reg; + alu.dst.chan = i; + alu.src[0].sel = V_SQ_ALU_SRC_0; + alu.last = (i == 3); + alu.dst.write = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } - alu.op = ALU_OP1_FRACT; - r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); + for (i = 0; i <= lasti; i++) { + struct r600_bytecode_alu alu; + if (!((1 << i) & inst->Dst[0].Register.WriteMask)) + continue; - alu.dst.sel = ctx->temp_reg; -#if 0 - r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + r = single_alu_op2(ctx, ALU_OP2_ADD_INT, + temp_reg, 0, + treg2, 0, + V_SQ_ALU_SRC_LITERAL, i); if (r) return r; -#endif - alu.dst.write = 1; - alu.dst.chan = 1; - alu.last = 1; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 0; + r600_bytecode_src(&alu.src[0], &ctx->src[1], i); + alu.last = 1; + alu.dst.write = 1; r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; + + r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT); + cf = ctx->bc->cf_last; + + cf->rat.id = ctx->shader->rat_base + inst->Dst[0].Register.Index + ctx->info.file_count[TGSI_FILE_IMAGE]; + cf->rat.inst = V_RAT_INST_STORE_TYPED; + cf->rat.index_mode = rat_index_mode; + cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND; + cf->output.gpr = ctx->temp_reg; + cf->output.index_gpr = temp_reg; + cf->output.comp_mask = 1; + cf->output.burst_count = 1; + cf->vpm = 1; + cf->barrier = 1; + cf->output.elem_size = 0; } + return 0; +} - /* result.z = RoughApprox2ToX(tmp);*/ - if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) { - if (ctx->bc->chip_class == CAYMAN) { - for (i = 0; i < 3; i++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP1_EXP_IEEE; - r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); +static int tgsi_store_rat(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_cf *cf; + bool src_requires_loading = false; + int val_gpr, idx_gpr; + int r, i; + unsigned rat_index_mode; - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = i; - if (i == 2) { - alu.dst.write = 1; - alu.last = 1; - } + rat_index_mode = inst->Dst[0].Indirect.Index == 2 ? 
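
/*
 * Illustrative sketch of the store loop in tgsi_store_buffer_rat() above:
 * each enabled component becomes its own MEM_RAT with comp_mask == 1, and
 * its element index is the base element plus the component position (the
 * ADD_INT on treg2). Resulting memory image, CPU-side:
 */
#include <stdint.h>

static void buffer_store_model(uint32_t *buf, uint32_t base_elem,
			       const uint32_t value[4], unsigned write_mask)
{
	unsigned i;
	for (i = 0; i < 4; i++) {
		if (write_mask & (1u << i))
			buf[base_elem + i] = value[i];
	}
}
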
2 : 0; // CF_INDEX_1 : CF_INDEX_NONE - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - } else { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP1_EXP_IEEE; - r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + r = load_index_src(ctx, 0, &idx_gpr); + if (r) + return r; - alu.dst.sel = ctx->temp_reg; - alu.dst.write = 1; - alu.dst.chan = 2; + if (inst->Src[1].Register.File != TGSI_FILE_TEMPORARY) + src_requires_loading = true; - alu.last = 1; + if (src_requires_loading) { + struct r600_bytecode_alu alu; + for (i = 0; i < 4; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = i; + r600_bytecode_src(&alu.src[0], &ctx->src[1], i); + if (i == 3) + alu.last = 1; + alu.dst.write = 1; r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; } - } - - /* result.w = 1.0;*/ - if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - - alu.op = ALU_OP1_MOV; - alu.src[0].sel = V_SQ_ALU_SRC_1; - alu.src[0].chan = 0; - - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = 3; - alu.dst.write = 1; - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - return tgsi_helper_copy(ctx, inst); + val_gpr = ctx->temp_reg; + } else + val_gpr = tgsi_tex_get_src_gpr(ctx, 1); + if (rat_index_mode) + egcm_load_index_reg(ctx->bc, 1, false); + + r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT); + cf = ctx->bc->cf_last; + + cf->rat.id = ctx->shader->rat_base + inst->Dst[0].Register.Index; + cf->rat.inst = V_RAT_INST_STORE_TYPED; + cf->rat.index_mode = rat_index_mode; + cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_WRITE_IND; + cf->output.gpr = val_gpr; + cf->output.index_gpr = idx_gpr; + cf->output.comp_mask = 0xf; + cf->output.burst_count = 1; + cf->vpm = 1; + cf->barrier = 1; + cf->output.elem_size = 0; + return 0; } -static int tgsi_log(struct r600_shader_ctx *ctx) +static int tgsi_store_lds(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; struct r600_bytecode_alu alu; - int r; - unsigned i; + int r, i, lasti; + int write_mask = inst->Dst[0].Register.WriteMask; + int temp_reg = r600_get_temp(ctx); - /* result.x = floor(log2(|src|)); */ - if (inst->Dst[0].Register.WriteMask & 1) { - if (ctx->bc->chip_class == CAYMAN) { - for (i = 0; i < 3; i++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + /* LDS write */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + alu.dst.sel = temp_reg; + alu.dst.write = 1; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; - alu.op = ALU_OP1_LOG_IEEE; - r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); - r600_bytecode_src_set_abs(&alu.src[0]); - - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = i; - if (i == 0) - alu.dst.write = 1; - if (i == 2) - alu.last = 1; - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } + lasti = tgsi_last_instruction(write_mask); + for (i = 1; i <= lasti; i++) { + if (!(write_mask & (1 << i))) + continue; + r = single_alu_op2(ctx, ALU_OP2_ADD_INT, + temp_reg, i, + temp_reg, 0, + V_SQ_ALU_SRC_LITERAL, 4 * i); + if (r) + return r; + } + for (i = 0; i <= lasti; i++) { + if (!(write_mask & (1 << i))) + continue; - } else { + if ((i == 0 && ((write_mask & 3) == 3)) || + (i == 2 && ((write_mask & 0xc) == 0xc))) { memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = 
LDS_OP3_LDS_WRITE_REL; - alu.op = ALU_OP1_LOG_IEEE; - r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); - r600_bytecode_src_set_abs(&alu.src[0]); - - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = 0; - alu.dst.write = 1; + alu.src[0].sel = temp_reg; + alu.src[0].chan = i; + r600_bytecode_src(&alu.src[1], &ctx->src[1], i); + r600_bytecode_src(&alu.src[2], &ctx->src[1], i + 1); alu.last = 1; + alu.is_lds_idx_op = true; + alu.lds_idx = 1; r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; + i += 1; + continue; } + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = LDS_OP2_LDS_WRITE; - alu.op = ALU_OP1_FLOOR; - alu.src[0].sel = ctx->temp_reg; - alu.src[0].chan = 0; + alu.src[0].sel = temp_reg; + alu.src[0].chan = i; + r600_bytecode_src(&alu.src[1], &ctx->src[1], i); - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = 0; - alu.dst.write = 1; alu.last = 1; + alu.is_lds_idx_op = true; r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; } + return 0; +} - /* result.y = |src.x| / (2 ^ floor(log2(|src.x|))); */ - if ((inst->Dst[0].Register.WriteMask >> 1) & 1) { +static int tgsi_store(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + if (inst->Dst[0].Register.File == TGSI_FILE_BUFFER) + return tgsi_store_buffer_rat(ctx); + else if (inst->Dst[0].Register.File == TGSI_FILE_MEMORY) + return tgsi_store_lds(ctx); + else + return tgsi_store_rat(ctx); +} - if (ctx->bc->chip_class == CAYMAN) { - for (i = 0; i < 3; i++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); +static int tgsi_atomic_op_rat(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + /* have to work out the offset into the RAT immediate return buffer */ + struct r600_bytecode_alu alu; + struct r600_bytecode_vtx vtx; + struct r600_bytecode_cf *cf; + int r; + int idx_gpr; + unsigned format, num_format, format_comp, endian; + const struct util_format_description *desc; + unsigned rat_index_mode; + unsigned immed_base; + unsigned rat_base; - alu.op = ALU_OP1_LOG_IEEE; - r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); - r600_bytecode_src_set_abs(&alu.src[0]); + immed_base = R600_IMAGE_IMMED_RESOURCE_OFFSET; + rat_base = ctx->shader->rat_base; - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = i; - if (i == 1) - alu.dst.write = 1; - if (i == 2) - alu.last = 1; - - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - } else { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + r = load_thread_id_gpr(ctx); + if (r) + return r; - alu.op = ALU_OP1_LOG_IEEE; - r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); - r600_bytecode_src_set_abs(&alu.src[0]); + if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) { + immed_base += ctx->info.file_count[TGSI_FILE_IMAGE]; + rat_base += ctx->info.file_count[TGSI_FILE_IMAGE]; - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = 1; - alu.dst.write = 1; - alu.last = 1; + r = load_buffer_coord(ctx, 1, ctx->temp_reg); + if (r) + return r; + idx_gpr = ctx->temp_reg; + } else { + r = load_index_src(ctx, 1, &idx_gpr); + if (r) + return r; + } - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } + rat_index_mode = inst->Src[0].Indirect.Index == 2 ? 
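
/*
 * Illustrative sketch of the LDS store above: component i of the value
 * goes to byte address base + 4*i (the ADD_INT chain on temp_reg), and
 * when two adjacent enabled components start a pair the LDS_WRITE_REL
 * form appears to write both dwords in one op (the lds_idx = 1 path; an
 * assumption based on the paired sources above). CPU-side model of the
 * resulting memory image:
 */
#include <stdint.h>

static void lds_store_model(uint32_t *lds_words, uint32_t base_byte_addr,
			    const uint32_t value[4], unsigned write_mask)
{
	unsigned i;
	for (i = 0; i < 4; i++) {
		if (write_mask & (1u << i))
			lds_words[base_byte_addr / 4 + i] = value[i];
	}
}
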
2 : 0; // CF_INDEX_1 : CF_INDEX_NONE + if (ctx->inst_info->op == V_RAT_INST_CMPXCHG_INT_RTN) { memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - - alu.op = ALU_OP1_FLOOR; - alu.src[0].sel = ctx->temp_reg; - alu.src[0].chan = 1; - - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = 1; + alu.op = ALU_OP1_MOV; + alu.dst.sel = ctx->thread_id_gpr; + alu.dst.chan = 0; alu.dst.write = 1; + r600_bytecode_src(&alu.src[0], &ctx->src[3], 0); alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + alu.dst.sel = ctx->thread_id_gpr; + if (ctx->bc->chip_class == CAYMAN) + alu.dst.chan = 2; + else + alu.dst.chan = 3; + alu.dst.write = 1; + r600_bytecode_src(&alu.src[0], &ctx->src[2], 0); + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } else { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + alu.dst.sel = ctx->thread_id_gpr; + alu.dst.chan = 0; + alu.dst.write = 1; + r600_bytecode_src(&alu.src[0], &ctx->src[2], 0); + alu.last = 1; r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; + } - if (ctx->bc->chip_class == CAYMAN) { - for (i = 0; i < 3; i++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP1_EXP_IEEE; - alu.src[0].sel = ctx->temp_reg; - alu.src[0].chan = 1; + if (rat_index_mode) + egcm_load_index_reg(ctx->bc, 1, false); + r600_bytecode_add_cfinst(ctx->bc, CF_OP_MEM_RAT); + cf = ctx->bc->cf_last; + + cf->rat.id = rat_base + inst->Src[0].Register.Index; + cf->rat.inst = ctx->inst_info->op; + cf->rat.index_mode = rat_index_mode; + cf->output.type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_READ_IND; + cf->output.gpr = ctx->thread_id_gpr; + cf->output.index_gpr = idx_gpr; + cf->output.comp_mask = 0xf; + cf->output.burst_count = 1; + cf->vpm = 1; + cf->barrier = 1; + cf->mark = 1; + cf->output.elem_size = 0; + r600_bytecode_add_cfinst(ctx->bc, CF_OP_WAIT_ACK); + cf = ctx->bc->cf_last; + cf->barrier = 1; + cf->cf_addr = 1; - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = i; - if (i == 1) - alu.dst.write = 1; - if (i == 2) - alu.last = 1; + memset(&vtx, 0, sizeof(struct r600_bytecode_vtx)); + if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) { + desc = util_format_description(inst->Memory.Format); + r600_vertex_data_type(inst->Memory.Format, + &format, &num_format, &format_comp, &endian); + vtx.dst_sel_x = desc->swizzle[0]; + } else { + format = FMT_32; + num_format = 1; + format_comp = 0; + endian = 0; + vtx.dst_sel_x = 0; + } + vtx.op = FETCH_OP_VFETCH; + vtx.buffer_id = immed_base + inst->Src[0].Register.Index; + vtx.buffer_index_mode = rat_index_mode; + vtx.fetch_type = SQ_VTX_FETCH_NO_INDEX_OFFSET; + vtx.src_gpr = ctx->thread_id_gpr; + vtx.src_sel_x = 1; + vtx.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index; + vtx.dst_sel_y = 7; + vtx.dst_sel_z = 7; + vtx.dst_sel_w = 7; + vtx.use_const_fields = 0; + vtx.srf_mode_all = 1; + vtx.data_format = format; + vtx.num_format_all = num_format; + vtx.format_comp_all = format_comp; + vtx.endian = endian; + vtx.offset = 0; + vtx.mega_fetch_count = 0xf; + r = r600_bytecode_add_vtx_tc(ctx->bc, &vtx); + if (r) + return r; + cf = ctx->bc->cf_last; + cf->vpm = 1; + cf->barrier = 1; + return 0; +} - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - } else { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP1_EXP_IEEE; - alu.src[0].sel = ctx->temp_reg; - alu.src[0].chan = 1; +static int get_gds_op(int 
opcode) +{ + switch (opcode) { + case TGSI_OPCODE_ATOMUADD: + return FETCH_OP_GDS_ADD_RET; + case TGSI_OPCODE_ATOMAND: + return FETCH_OP_GDS_AND_RET; + case TGSI_OPCODE_ATOMOR: + return FETCH_OP_GDS_OR_RET; + case TGSI_OPCODE_ATOMXOR: + return FETCH_OP_GDS_XOR_RET; + case TGSI_OPCODE_ATOMUMIN: + return FETCH_OP_GDS_MIN_UINT_RET; + case TGSI_OPCODE_ATOMUMAX: + return FETCH_OP_GDS_MAX_UINT_RET; + case TGSI_OPCODE_ATOMXCHG: + return FETCH_OP_GDS_XCHG_RET; + case TGSI_OPCODE_ATOMCAS: + return FETCH_OP_GDS_CMP_XCHG_RET; + default: + return -1; + } +} - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = 1; - alu.dst.write = 1; - alu.last = 1; +static int tgsi_atomic_op_gds(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_gds gds; + struct r600_bytecode_alu alu; + int gds_op = get_gds_op(inst->Instruction.Opcode); + int r; + int uav_id = 0; + int uav_index_mode = 0; + bool is_cm = (ctx->bc->chip_class == CAYMAN); - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } + if (gds_op == -1) { + fprintf(stderr, "unknown GDS op for opcode %d\n", inst->Instruction.Opcode); + return -1; + } - if (ctx->bc->chip_class == CAYMAN) { - for (i = 0; i < 3; i++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP1_RECIP_IEEE; - alu.src[0].sel = ctx->temp_reg; - alu.src[0].chan = 1; + r = tgsi_set_gds_temp(ctx, &uav_id, &uav_index_mode); + if (r) + return r; - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = i; - if (i == 1) - alu.dst.write = 1; - if (i == 2) - alu.last = 1; - - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - } else { + if (gds_op == FETCH_OP_GDS_CMP_XCHG_RET) { + if (inst->Src[3].Register.File == TGSI_FILE_IMMEDIATE) { + int value = (ctx->literals[4 * inst->Src[3].Register.Index + inst->Src[3].Register.SwizzleX]); memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP1_RECIP_IEEE; - alu.src[0].sel = ctx->temp_reg; - alu.src[0].chan = 1; - + alu.op = ALU_OP1_MOV; alu.dst.sel = ctx->temp_reg; - alu.dst.chan = 1; - alu.dst.write = 1; + alu.dst.chan = is_cm ? 2 : 1; + alu.src[0].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[0].value = value; alu.last = 1; - + alu.dst.write = 1; r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; - } - - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - - alu.op = ALU_OP2_MUL; - - r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); - r600_bytecode_src_set_abs(&alu.src[0]); - - alu.src[1].sel = ctx->temp_reg; - alu.src[1].chan = 1; - - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = 1; - alu.dst.write = 1; - alu.last = 1; - - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } - - /* result.z = log2(|src|);*/ - if ((inst->Dst[0].Register.WriteMask >> 2) & 1) { - if (ctx->bc->chip_class == CAYMAN) { - for (i = 0; i < 3; i++) { - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - - alu.op = ALU_OP1_LOG_IEEE; - r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); - r600_bytecode_src_set_abs(&alu.src[0]); - - alu.dst.sel = ctx->temp_reg; - if (i == 2) - alu.dst.write = 1; - alu.dst.chan = i; - if (i == 2) - alu.last = 1; - - r = r600_bytecode_add_alu(ctx->bc, &alu); - if (r) - return r; - } } else { memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - - alu.op = ALU_OP1_LOG_IEEE; - r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); - r600_bytecode_src_set_abs(&alu.src[0]); - + alu.op = ALU_OP1_MOV; alu.dst.sel = ctx->temp_reg; - alu.dst.write = 1; - alu.dst.chan = 2; + alu.dst.chan = is_cm ? 
2 : 1; + r600_bytecode_src(&alu.src[0], &ctx->src[3], 0); alu.last = 1; - + alu.dst.write = 1; r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; } } - - /* result.w = 1.0; */ - if ((inst->Dst[0].Register.WriteMask >> 3) & 1) { + if (inst->Src[2].Register.File == TGSI_FILE_IMMEDIATE) { + int value = (ctx->literals[4 * inst->Src[2].Register.Index + inst->Src[2].Register.SwizzleX]); + int abs_value = abs(value); + if (abs_value != value && gds_op == FETCH_OP_GDS_ADD_RET) + gds_op = FETCH_OP_GDS_SUB_RET; memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = ALU_OP1_MOV; - alu.src[0].sel = V_SQ_ALU_SRC_1; - alu.src[0].chan = 0; - alu.dst.sel = ctx->temp_reg; - alu.dst.chan = 3; + alu.dst.chan = is_cm ? 1 : 0; + alu.src[0].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[0].value = abs_value; + alu.last = 1; alu.dst.write = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } else { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = is_cm ? 1 : 0; + r600_bytecode_src(&alu.src[0], &ctx->src[2], 0); alu.last = 1; - + alu.dst.write = 1; r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; } - return tgsi_helper_copy(ctx, inst); + + memset(&gds, 0, sizeof(struct r600_bytecode_gds)); + gds.op = gds_op; + gds.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index; + gds.uav_id = is_cm ? 0 : uav_id; + gds.uav_index_mode = is_cm ? 0 : uav_index_mode; + gds.src_gpr = ctx->temp_reg; + gds.src_gpr2 = 0; + gds.src_sel_x = is_cm ? 0 : 4; + gds.src_sel_y = is_cm ? 1 : 0; + if (gds_op == FETCH_OP_GDS_CMP_XCHG_RET) + gds.src_sel_z = is_cm ? 2 : 1; + else + gds.src_sel_z = 7; + gds.dst_sel_x = 0; + gds.dst_sel_y = 7; + gds.dst_sel_z = 7; + gds.dst_sel_w = 7; + gds.alloc_consume = !is_cm; + + r = r600_bytecode_add_gds(ctx->bc, &gds); + if (r) + return r; + ctx->bc->cf_last->vpm = 1; + return 0; } -static int tgsi_eg_arl(struct r600_shader_ctx *ctx) +static int get_lds_op(int opcode) +{ + switch (opcode) { + case TGSI_OPCODE_ATOMUADD: + return LDS_OP2_LDS_ADD_RET; + case TGSI_OPCODE_ATOMAND: + return LDS_OP2_LDS_AND_RET; + case TGSI_OPCODE_ATOMOR: + return LDS_OP2_LDS_OR_RET; + case TGSI_OPCODE_ATOMXOR: + return LDS_OP2_LDS_XOR_RET; + case TGSI_OPCODE_ATOMUMIN: + return LDS_OP2_LDS_MIN_UINT_RET; + case TGSI_OPCODE_ATOMUMAX: + return LDS_OP2_LDS_MAX_UINT_RET; + case TGSI_OPCODE_ATOMIMIN: + return LDS_OP2_LDS_MIN_INT_RET; + case TGSI_OPCODE_ATOMIMAX: + return LDS_OP2_LDS_MAX_INT_RET; + case TGSI_OPCODE_ATOMXCHG: + return LDS_OP2_LDS_XCHG_RET; + case TGSI_OPCODE_ATOMCAS: + return LDS_OP3_LDS_CMP_XCHG_RET; + default: + return -1; + } +} + +static int tgsi_atomic_op_lds(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bytecode_alu alu; + int lds_op = get_lds_op(inst->Instruction.Opcode); int r; - int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); - unsigned reg = get_address_file_reg(ctx, inst->Dst[0].Register.Index); - assert(inst->Dst[0].Register.Index < 3); + struct r600_bytecode_alu alu; memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = lds_op; + alu.is_lds_idx_op = true; + alu.last = 1; + r600_bytecode_src(&alu.src[0], &ctx->src[1], 0); + r600_bytecode_src(&alu.src[1], &ctx->src[2], 0); + if (lds_op == LDS_OP3_LDS_CMP_XCHG_RET) + r600_bytecode_src(&alu.src[2], &ctx->src[3], 0); + else + alu.src[2].sel = V_SQ_ALU_SRC_0; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return 
r; - switch (inst->Instruction.Opcode) { - case TGSI_OPCODE_ARL: - alu.op = ALU_OP1_FLT_TO_INT_FLOOR; - break; - case TGSI_OPCODE_ARR: - alu.op = ALU_OP1_FLT_TO_INT; - break; - case TGSI_OPCODE_UARL: - alu.op = ALU_OP1_MOV; - break; - default: - assert(0); - return -1; + /* then read from LDS_OQ_A_POP */ + memset(&alu, 0, sizeof(alu)); + + alu.op = ALU_OP1_MOV; + alu.src[0].sel = EG_V_SQ_ALU_SRC_LDS_OQ_A_POP; + alu.src[0].chan = 0; + tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst); + alu.dst.write = 1; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + return 0; +} + +static int tgsi_atomic_op(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + if (inst->Src[0].Register.File == TGSI_FILE_IMAGE) + return tgsi_atomic_op_rat(ctx); + if (inst->Src[0].Register.File == TGSI_FILE_HW_ATOMIC) + return tgsi_atomic_op_gds(ctx); + if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) + return tgsi_atomic_op_rat(ctx); + if (inst->Src[0].Register.File == TGSI_FILE_MEMORY) + return tgsi_atomic_op_lds(ctx); + return 0; +} + +static int tgsi_resq(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + unsigned sampler_index_mode; + struct r600_bytecode_tex tex; + int r; + boolean has_txq_cube_array_z = false; + + if (inst->Src[0].Register.File == TGSI_FILE_BUFFER || + (inst->Src[0].Register.File == TGSI_FILE_IMAGE && inst->Memory.Texture == TGSI_TEXTURE_BUFFER)) { + if (ctx->bc->chip_class < EVERGREEN) + ctx->shader->uses_tex_buffers = true; + unsigned eg_buffer_base = 0; + eg_buffer_base = R600_IMAGE_REAL_RESOURCE_OFFSET; + if (inst->Src[0].Register.File == TGSI_FILE_BUFFER) + eg_buffer_base += ctx->info.file_count[TGSI_FILE_IMAGE]; + return r600_do_buffer_txq(ctx, 0, ctx->shader->image_size_const_offset, eg_buffer_base); } - for (i = 0; i <= lasti; ++i) { - if (!(inst->Dst[0].Register.WriteMask & (1 << i))) - continue; - r600_bytecode_src(&alu.src[0], &ctx->src[0], i); - alu.last = i == lasti; - alu.dst.sel = reg; - alu.dst.chan = i; - alu.dst.write = 1; + if (inst->Memory.Texture == TGSI_TEXTURE_CUBE_ARRAY && + inst->Dst[0].Register.WriteMask & 4) { + ctx->shader->has_txq_cube_array_z_comp = true; + has_txq_cube_array_z = true; + } + + sampler_index_mode = inst->Src[0].Indirect.Index == 2 ? 2 : 0; // CF_INDEX_1 : CF_INDEX_NONE + if (sampler_index_mode) + egcm_load_index_reg(ctx->bc, 1, false); + + + /* does this shader want a num layers from TXQ for a cube array? 
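(if so, the Z component is loaded from the buffer-info constants below instead, and masked out of the texture instruction's writemask)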
*/ + if (has_txq_cube_array_z) { + int id = tgsi_tex_get_src_gpr(ctx, 0) + ctx->shader->image_size_const_offset; + struct r600_bytecode_alu alu; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + + alu.src[0].sel = R600_SHADER_BUFFER_INFO_SEL; + /* with eg each dword holds the number of cubes for one resource */ + alu.src[0].sel += id / 4; + alu.src[0].chan = id % 4; + alu.src[0].kc_bank = R600_BUFFER_INFO_CONST_BUFFER; + tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst); + alu.last = 1; r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; + /* disable writemask from texture instruction */ + inst->Dst[0].Register.WriteMask &= ~4; } - - if (inst->Dst[0].Register.Index > 0) - ctx->bc->index_loaded[inst->Dst[0].Register.Index - 1] = 0; - else - ctx->bc->ar_loaded = 0; + memset(&tex, 0, sizeof(struct r600_bytecode_tex)); + tex.op = ctx->inst_info->op; + tex.sampler_id = R600_IMAGE_REAL_RESOURCE_OFFSET + inst->Src[0].Register.Index; + tex.sampler_index_mode = sampler_index_mode; + tex.resource_id = tex.sampler_id; + tex.resource_index_mode = sampler_index_mode; + tex.src_sel_x = 4; + tex.src_sel_y = 4; + tex.src_sel_z = 4; + tex.src_sel_w = 4; + tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 0 : 7; + tex.dst_sel_y = (inst->Dst[0].Register.WriteMask & 2) ? 1 : 7; + tex.dst_sel_z = (inst->Dst[0].Register.WriteMask & 4) ? 2 : 7; + tex.dst_sel_w = (inst->Dst[0].Register.WriteMask & 8) ? 3 : 7; + tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index; + r = r600_bytecode_add_tex(ctx->bc, &tex); + if (r) + return r; return 0; } -static int tgsi_r600_arl(struct r600_shader_ctx *ctx) + +static int tgsi_lrp(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; struct r600_bytecode_alu alu; + unsigned lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); + unsigned i, temp_regs[2]; int r; - int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); - switch (inst->Instruction.Opcode) { - case TGSI_OPCODE_ARL: - memset(&alu, 0, sizeof(alu)); - alu.op = ALU_OP1_FLOOR; - alu.dst.sel = ctx->bc->ar_reg; - alu.dst.write = 1; - for (i = 0; i <= lasti; ++i) { - if (inst->Dst[0].Register.WriteMask & (1 << i)) { - alu.dst.chan = i; - r600_bytecode_src(&alu.src[0], &ctx->src[0], i); - alu.last = i == lasti; - if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) - return r; - } - } + /* optimize if it's just an equal balance */ + if (ctx->src[0].sel == V_SQ_ALU_SRC_0_5) { + for (i = 0; i < lasti + 1; i++) { + if (!(inst->Dst[0].Register.WriteMask & (1 << i))) + continue; - memset(&alu, 0, sizeof(alu)); - alu.op = ALU_OP1_FLT_TO_INT; - alu.src[0].sel = ctx->bc->ar_reg; - alu.dst.sel = ctx->bc->ar_reg; - alu.dst.write = 1; - /* FLT_TO_INT is trans-only on r600/r700 */ - alu.last = TRUE; - for (i = 0; i <= lasti; ++i) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_ADD; + r600_bytecode_src(&alu.src[0], &ctx->src[1], i); + r600_bytecode_src(&alu.src[1], &ctx->src[2], i); + alu.omod = 3; + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); alu.dst.chan = i; - alu.src[0].chan = i; - if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + if (i == lasti) { + alu.last = 1; + } + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) return r; } - break; - case TGSI_OPCODE_ARR: - memset(&alu, 0, sizeof(alu)); - alu.op = ALU_OP1_FLT_TO_INT; - alu.dst.sel = ctx->bc->ar_reg; - alu.dst.write = 1; - /* FLT_TO_INT is trans-only on r600/r700 */ - alu.last = TRUE; - for (i = 0; i <= lasti; ++i) { - if 
(inst->Dst[0].Register.WriteMask & (1 << i)) { - alu.dst.chan = i; - r600_bytecode_src(&alu.src[0], &ctx->src[0], i); - if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) - return r; - } + return 0; + } + + /* 1 - src0 */ + for (i = 0; i < lasti + 1; i++) { + if (!(inst->Dst[0].Register.WriteMask & (1 << i))) + continue; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_ADD; + alu.src[0].sel = V_SQ_ALU_SRC_1; + alu.src[0].chan = 0; + r600_bytecode_src(&alu.src[1], &ctx->src[0], i); + r600_bytecode_src_toggle_neg(&alu.src[1]); + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = i; + if (i == lasti) { + alu.last = 1; } - break; - case TGSI_OPCODE_UARL: - memset(&alu, 0, sizeof(alu)); - alu.op = ALU_OP1_MOV; - alu.dst.sel = ctx->bc->ar_reg; alu.dst.write = 1; - for (i = 0; i <= lasti; ++i) { - if (inst->Dst[0].Register.WriteMask & (1 << i)) { - alu.dst.chan = i; - r600_bytecode_src(&alu.src[0], &ctx->src[0], i); - alu.last = i == lasti; - if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) - return r; - } + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + /* (1 - src0) * src2 */ + for (i = 0; i < lasti + 1; i++) { + if (!(inst->Dst[0].Register.WriteMask & (1 << i))) + continue; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_MUL; + alu.src[0].sel = ctx->temp_reg; + alu.src[0].chan = i; + r600_bytecode_src(&alu.src[1], &ctx->src[2], i); + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = i; + if (i == lasti) { + alu.last = 1; } - break; - default: - assert(0); - return -1; + alu.dst.write = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; } - ctx->bc->ar_loaded = 0; - return 0; -} + /* src0 * src1 + (1 - src0) * src2 */ + if (ctx->src[0].abs) + temp_regs[0] = r600_get_temp(ctx); + else + temp_regs[0] = 0; + if (ctx->src[1].abs) + temp_regs[1] = r600_get_temp(ctx); + else + temp_regs[1] = 0; -static int tgsi_opdst(struct r600_shader_ctx *ctx) -{ - struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bytecode_alu alu; - int i, r = 0; + for (i = 0; i < lasti + 1; i++) { + if (!(inst->Dst[0].Register.WriteMask & (1 << i))) + continue; - for (i = 0; i < 4; i++) { memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP3_MULADD; + alu.is_op3 = 1; + r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]); + if (r) + return r; + r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[1], &ctx->src[1]); + if (r) + return r; + alu.src[2].sel = ctx->temp_reg; + alu.src[2].chan = i; - alu.op = ALU_OP2_MUL; tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - - if (i == 0 || i == 3) { - alu.src[0].sel = V_SQ_ALU_SRC_1; - } else { - r600_bytecode_src(&alu.src[0], &ctx->src[0], i); - } - - if (i == 0 || i == 2) { - alu.src[1].sel = V_SQ_ALU_SRC_1; - } else { - r600_bytecode_src(&alu.src[1], &ctx->src[1], i); - } - if (i == 3) + alu.dst.chan = i; + if (i == lasti) { alu.last = 1; + } r = r600_bytecode_add_alu(ctx->bc, &alu); if (r) return r; @@ -8377,597 +9224,1932 @@ static int tgsi_opdst(struct r600_shader_ctx *ctx) return 0; } -static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode, int alu_type) +static int tgsi_cmp(struct r600_shader_ctx *ctx) { + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; struct r600_bytecode_alu alu; - int r; - - memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - alu.op = opcode; - alu.execute_mask = 1; - alu.update_pred = 1; + int i, r, j; + int lasti = 
tgsi_last_instruction(inst->Dst[0].Register.WriteMask); + int temp_regs[3]; + unsigned op; - alu.dst.sel = ctx->temp_reg; - alu.dst.write = 1; - alu.dst.chan = 0; + if (ctx->src[0].abs && ctx->src[0].neg) { + op = ALU_OP3_CNDE; + ctx->src[0].abs = 0; + ctx->src[0].neg = 0; + } else { + op = ALU_OP3_CNDGE; + } - r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); - alu.src[1].sel = V_SQ_ALU_SRC_0; - alu.src[1].chan = 0; + for (j = 0; j < inst->Instruction.NumSrcRegs; j++) { + temp_regs[j] = 0; + if (ctx->src[j].abs) + temp_regs[j] = r600_get_temp(ctx); + } - alu.last = 1; + for (i = 0; i < lasti + 1; i++) { + if (!(inst->Dst[0].Register.WriteMask & (1 << i))) + continue; - r = r600_bytecode_add_alu_type(ctx->bc, &alu, alu_type); - if (r) - return r; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = op; + r = tgsi_make_src_for_op3(ctx, temp_regs[0], i, &alu.src[0], &ctx->src[0]); + if (r) + return r; + r = tgsi_make_src_for_op3(ctx, temp_regs[2], i, &alu.src[1], &ctx->src[2]); + if (r) + return r; + r = tgsi_make_src_for_op3(ctx, temp_regs[1], i, &alu.src[2], &ctx->src[1]); + if (r) + return r; + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + alu.dst.chan = i; + alu.dst.write = 1; + alu.is_op3 = 1; + if (i == lasti) + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } return 0; } -static int pops(struct r600_shader_ctx *ctx, int pops) +static int tgsi_ucmp(struct r600_shader_ctx *ctx) { - unsigned force_pop = ctx->bc->force_add_cf; + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_alu alu; + int i, r; + int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); - if (!force_pop) { - int alu_pop = 3; - if (ctx->bc->cf_last) { - if (ctx->bc->cf_last->op == CF_OP_ALU) - alu_pop = 0; - else if (ctx->bc->cf_last->op == CF_OP_ALU_POP_AFTER) - alu_pop = 1; - } - alu_pop += pops; - if (alu_pop == 1) { - ctx->bc->cf_last->op = CF_OP_ALU_POP_AFTER; - ctx->bc->force_add_cf = 1; - } else if (alu_pop == 2) { - ctx->bc->cf_last->op = CF_OP_ALU_POP2_AFTER; - ctx->bc->force_add_cf = 1; - } else { - force_pop = 1; - } - } + for (i = 0; i < lasti + 1; i++) { + if (!(inst->Dst[0].Register.WriteMask & (1 << i))) + continue; - if (force_pop) { - r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP); - ctx->bc->cf_last->pop_count = pops; - ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP3_CNDE_INT; + r600_bytecode_src(&alu.src[0], &ctx->src[0], i); + r600_bytecode_src(&alu.src[1], &ctx->src[2], i); + r600_bytecode_src(&alu.src[2], &ctx->src[1], i); + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + alu.dst.chan = i; + alu.dst.write = 1; + alu.is_op3 = 1; + if (i == lasti) + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; } - return 0; } -static inline void callstack_update_max_depth(struct r600_shader_ctx *ctx, - unsigned reason) +static int tgsi_exp(struct r600_shader_ctx *ctx) { - struct r600_stack_info *stack = &ctx->bc->stack; - unsigned elements, entries; + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_alu alu; + int r; + unsigned i; - unsigned entry_size = stack->entry_size; + /* result.x = 2^floor(src); */ + if (inst->Dst[0].Register.WriteMask & 1) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - elements = (stack->loop + stack->push_wqm ) * entry_size; - elements += stack->push; + alu.op = ALU_OP1_FLOOR; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); 
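+	/* uses 2^src = 2^floor(src) * 2^(src - floor(src)): the floor(src) computed here feeds the EXP_IEEE below, which yields the 2^floor(src) term for result.x */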
- switch (ctx->bc->chip_class) { - case R600: - case R700: - /* pre-r8xx: if any non-WQM PUSH instruction is invoked, 2 elements on - * the stack must be reserved to hold the current active/continue - * masks */ - if (reason == FC_PUSH_VPM) { - elements += 2; - } - break; + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 0; + alu.dst.write = 1; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; - case CAYMAN: - /* r9xx: any stack operation on empty stack consumes 2 additional - * elements */ - elements += 2; + if (ctx->bc->chip_class == CAYMAN) { + for (i = 0; i < 3; i++) { + alu.op = ALU_OP1_EXP_IEEE; + alu.src[0].sel = ctx->temp_reg; + alu.src[0].chan = 0; - /* fallthrough */ - /* FIXME: do the two elements added above cover the cases for the - * r8xx+ below? */ + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = i; + alu.dst.write = i == 0; + alu.last = i == 2; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + } else { + alu.op = ALU_OP1_EXP_IEEE; + alu.src[0].sel = ctx->temp_reg; + alu.src[0].chan = 0; - case EVERGREEN: - /* r8xx+: 2 extra elements are not always required, but one extra - * element must be added for each of the following cases: - * 1. There is an ALU_ELSE_AFTER instruction at the point of greatest - * stack usage. - * (Currently we don't use ALU_ELSE_AFTER.) - * 2. There are LOOP/WQM frames on the stack when any flavor of non-WQM - * PUSH instruction executed. - * - * NOTE: it seems we also need to reserve additional element in some - * other cases, e.g. when we have 4 levels of PUSH_VPM in the shader, - * then STACK_SIZE should be 2 instead of 1 */ - if (reason == FC_PUSH_VPM) { - elements += 1; + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 0; + alu.dst.write = 1; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; } - break; - - default: - assert(0); - break; } - /* NOTE: it seems STACK_SIZE is interpreted by hw as if entry_size is 4 - * for all chips, so we use 4 in the final formula, not the real entry_size - * for the chip */ - entry_size = 4; + /* result.y = tmp - floor(tmp); */ + if ((inst->Dst[0].Register.WriteMask >> 1) & 1) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); - entries = (elements + (entry_size - 1)) / entry_size; + alu.op = ALU_OP1_FRACT; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); - if (entries > stack->max_entries) - stack->max_entries = entries; -} + alu.dst.sel = ctx->temp_reg; +#if 0 + r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + if (r) + return r; +#endif + alu.dst.write = 1; + alu.dst.chan = 1; -static inline void callstack_pop(struct r600_shader_ctx *ctx, unsigned reason) -{ - switch(reason) { - case FC_PUSH_VPM: - --ctx->bc->stack.push; - assert(ctx->bc->stack.push >= 0); - break; - case FC_PUSH_WQM: - --ctx->bc->stack.push_wqm; - assert(ctx->bc->stack.push_wqm >= 0); - break; - case FC_LOOP: - --ctx->bc->stack.loop; - assert(ctx->bc->stack.loop >= 0); - break; - default: - assert(0); - break; - } -} + alu.last = 1; -static inline void callstack_push(struct r600_shader_ctx *ctx, unsigned reason) -{ - switch (reason) { - case FC_PUSH_VPM: - ++ctx->bc->stack.push; - break; - case FC_PUSH_WQM: - ++ctx->bc->stack.push_wqm; - case FC_LOOP: - ++ctx->bc->stack.loop; - break; - default: - assert(0); + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; } - callstack_update_max_depth(ctx, reason); -} + /* result.z = RoughApprox2ToX(tmp);*/ + if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) { + if (ctx->bc->chip_class == CAYMAN) { + for (i = 
0; i < 3; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_EXP_IEEE; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); -static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp) -{ - struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp]; + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = i; + if (i == 2) { + alu.dst.write = 1; + alu.last = 1; + } - sp->mid = realloc((void *)sp->mid, - sizeof(struct r600_bytecode_cf *) * (sp->num_mid + 1)); - sp->mid[sp->num_mid] = ctx->bc->cf_last; - sp->num_mid++; -} + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + } else { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_EXP_IEEE; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + + alu.dst.sel = ctx->temp_reg; + alu.dst.write = 1; + alu.dst.chan = 2; + + alu.last = 1; + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + } + + /* result.w = 1.0;*/ + if ((inst->Dst[0].Register.WriteMask >> 3) & 0x1) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + + alu.op = ALU_OP1_MOV; + alu.src[0].sel = V_SQ_ALU_SRC_1; + alu.src[0].chan = 0; + + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 3; + alu.dst.write = 1; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + return tgsi_helper_copy(ctx, inst); +} + +static int tgsi_log(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_alu alu; + int r; + unsigned i; + + /* result.x = floor(log2(|src|)); */ + if (inst->Dst[0].Register.WriteMask & 1) { + if (ctx->bc->chip_class == CAYMAN) { + for (i = 0; i < 3; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + + alu.op = ALU_OP1_LOG_IEEE; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + r600_bytecode_src_set_abs(&alu.src[0]); + + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = i; + if (i == 0) + alu.dst.write = 1; + if (i == 2) + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + } else { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + + alu.op = ALU_OP1_LOG_IEEE; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + r600_bytecode_src_set_abs(&alu.src[0]); + + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 0; + alu.dst.write = 1; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + alu.op = ALU_OP1_FLOOR; + alu.src[0].sel = ctx->temp_reg; + alu.src[0].chan = 0; + + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 0; + alu.dst.write = 1; + alu.last = 1; + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + /* result.y = |src.x| / (2 ^ floor(log2(|src.x|))); */ + if ((inst->Dst[0].Register.WriteMask >> 1) & 1) { + + if (ctx->bc->chip_class == CAYMAN) { + for (i = 0; i < 3; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + + alu.op = ALU_OP1_LOG_IEEE; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + r600_bytecode_src_set_abs(&alu.src[0]); + + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = i; + if (i == 1) + alu.dst.write = 1; + if (i == 2) + alu.last = 1; + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + } else { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + + alu.op = ALU_OP1_LOG_IEEE; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + r600_bytecode_src_set_abs(&alu.src[0]); + + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 1; + alu.dst.write = 1; + alu.last = 1; + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + memset(&alu, 
0, sizeof(struct r600_bytecode_alu)); + + alu.op = ALU_OP1_FLOOR; + alu.src[0].sel = ctx->temp_reg; + alu.src[0].chan = 1; + + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 1; + alu.dst.write = 1; + alu.last = 1; + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + if (ctx->bc->chip_class == CAYMAN) { + for (i = 0; i < 3; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_EXP_IEEE; + alu.src[0].sel = ctx->temp_reg; + alu.src[0].chan = 1; + + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = i; + if (i == 1) + alu.dst.write = 1; + if (i == 2) + alu.last = 1; + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + } else { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_EXP_IEEE; + alu.src[0].sel = ctx->temp_reg; + alu.src[0].chan = 1; + + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 1; + alu.dst.write = 1; + alu.last = 1; + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + if (ctx->bc->chip_class == CAYMAN) { + for (i = 0; i < 3; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_RECIP_IEEE; + alu.src[0].sel = ctx->temp_reg; + alu.src[0].chan = 1; + + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = i; + if (i == 1) + alu.dst.write = 1; + if (i == 2) + alu.last = 1; + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + } else { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_RECIP_IEEE; + alu.src[0].sel = ctx->temp_reg; + alu.src[0].chan = 1; + + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 1; + alu.dst.write = 1; + alu.last = 1; + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + + alu.op = ALU_OP2_MUL; + + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + r600_bytecode_src_set_abs(&alu.src[0]); + + alu.src[1].sel = ctx->temp_reg; + alu.src[1].chan = 1; + + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 1; + alu.dst.write = 1; + alu.last = 1; + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + /* result.z = log2(|src|);*/ + if ((inst->Dst[0].Register.WriteMask >> 2) & 1) { + if (ctx->bc->chip_class == CAYMAN) { + for (i = 0; i < 3; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + + alu.op = ALU_OP1_LOG_IEEE; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + r600_bytecode_src_set_abs(&alu.src[0]); + + alu.dst.sel = ctx->temp_reg; + if (i == 2) + alu.dst.write = 1; + alu.dst.chan = i; + if (i == 2) + alu.last = 1; + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + } else { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + + alu.op = ALU_OP1_LOG_IEEE; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + r600_bytecode_src_set_abs(&alu.src[0]); + + alu.dst.sel = ctx->temp_reg; + alu.dst.write = 1; + alu.dst.chan = 2; + alu.last = 1; + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + } + + /* result.w = 1.0; */ + if ((inst->Dst[0].Register.WriteMask >> 3) & 1) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + + alu.op = ALU_OP1_MOV; + alu.src[0].sel = V_SQ_ALU_SRC_1; + alu.src[0].chan = 0; + + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = 3; + alu.dst.write = 1; + alu.last = 1; + + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + return tgsi_helper_copy(ctx, inst); +} + +static int tgsi_eg_arl(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_alu alu; 
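+	/* ARL, ARR and UARL all load an address register; they differ only in the conversion selected below: floor (FLT_TO_INT_FLOOR) for ARL, rounding (FLT_TO_INT) for ARR, and a plain integer move for UARL */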
+ int r; + int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); + unsigned reg = get_address_file_reg(ctx, inst->Dst[0].Register.Index); + + assert(inst->Dst[0].Register.Index < 3); + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + + switch (inst->Instruction.Opcode) { + case TGSI_OPCODE_ARL: + alu.op = ALU_OP1_FLT_TO_INT_FLOOR; + break; + case TGSI_OPCODE_ARR: + alu.op = ALU_OP1_FLT_TO_INT; + break; + case TGSI_OPCODE_UARL: + alu.op = ALU_OP1_MOV; + break; + default: + assert(0); + return -1; + } + + for (i = 0; i <= lasti; ++i) { + if (!(inst->Dst[0].Register.WriteMask & (1 << i))) + continue; + r600_bytecode_src(&alu.src[0], &ctx->src[0], i); + alu.last = i == lasti; + alu.dst.sel = reg; + alu.dst.chan = i; + alu.dst.write = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + if (inst->Dst[0].Register.Index > 0) + ctx->bc->index_loaded[inst->Dst[0].Register.Index - 1] = 0; + else + ctx->bc->ar_loaded = 0; + + return 0; +} +static int tgsi_r600_arl(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_alu alu; + int r; + int i, lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); + + switch (inst->Instruction.Opcode) { + case TGSI_OPCODE_ARL: + memset(&alu, 0, sizeof(alu)); + alu.op = ALU_OP1_FLOOR; + alu.dst.sel = ctx->bc->ar_reg; + alu.dst.write = 1; + for (i = 0; i <= lasti; ++i) { + if (inst->Dst[0].Register.WriteMask & (1 << i)) { + alu.dst.chan = i; + r600_bytecode_src(&alu.src[0], &ctx->src[0], i); + alu.last = i == lasti; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + } + + memset(&alu, 0, sizeof(alu)); + alu.op = ALU_OP1_FLT_TO_INT; + alu.src[0].sel = ctx->bc->ar_reg; + alu.dst.sel = ctx->bc->ar_reg; + alu.dst.write = 1; + /* FLT_TO_INT is trans-only on r600/r700 */ + alu.last = TRUE; + for (i = 0; i <= lasti; ++i) { + alu.dst.chan = i; + alu.src[0].chan = i; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + break; + case TGSI_OPCODE_ARR: + memset(&alu, 0, sizeof(alu)); + alu.op = ALU_OP1_FLT_TO_INT; + alu.dst.sel = ctx->bc->ar_reg; + alu.dst.write = 1; + /* FLT_TO_INT is trans-only on r600/r700 */ + alu.last = TRUE; + for (i = 0; i <= lasti; ++i) { + if (inst->Dst[0].Register.WriteMask & (1 << i)) { + alu.dst.chan = i; + r600_bytecode_src(&alu.src[0], &ctx->src[0], i); + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + } + break; + case TGSI_OPCODE_UARL: + memset(&alu, 0, sizeof(alu)); + alu.op = ALU_OP1_MOV; + alu.dst.sel = ctx->bc->ar_reg; + alu.dst.write = 1; + for (i = 0; i <= lasti; ++i) { + if (inst->Dst[0].Register.WriteMask & (1 << i)) { + alu.dst.chan = i; + r600_bytecode_src(&alu.src[0], &ctx->src[0], i); + alu.last = i == lasti; + if ((r = r600_bytecode_add_alu(ctx->bc, &alu))) + return r; + } + } + break; + default: + assert(0); + return -1; + } + + ctx->bc->ar_loaded = 0; + return 0; +} + +static int tgsi_opdst(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_alu alu; + int i, r = 0; + + for (i = 0; i < 4; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + + alu.op = ALU_OP2_MUL; + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + + if (i == 0 || i == 3) { + alu.src[0].sel = V_SQ_ALU_SRC_1; + } else { + r600_bytecode_src(&alu.src[0], &ctx->src[0], i); + } + + if (i == 0 || i == 2) { + alu.src[1].sel = V_SQ_ALU_SRC_1; + } else { + r600_bytecode_src(&alu.src[1], &ctx->src[1], i); + } + if 
(i == 3) + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + return 0; +} + +static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode, int alu_type, + struct r600_bytecode_alu_src *src) +{ + struct r600_bytecode_alu alu; + int r; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = opcode; + alu.execute_mask = 1; + alu.update_pred = 1; + + alu.dst.sel = ctx->temp_reg; + alu.dst.write = 1; + alu.dst.chan = 0; + + alu.src[0] = *src; + alu.src[1].sel = V_SQ_ALU_SRC_0; + alu.src[1].chan = 0; + + alu.last = 1; + + r = r600_bytecode_add_alu_type(ctx->bc, &alu, alu_type); + if (r) + return r; + return 0; +} + +static int pops(struct r600_shader_ctx *ctx, int pops) +{ + unsigned force_pop = ctx->bc->force_add_cf; + + if (!force_pop) { + int alu_pop = 3; + if (ctx->bc->cf_last) { + if (ctx->bc->cf_last->op == CF_OP_ALU) + alu_pop = 0; + else if (ctx->bc->cf_last->op == CF_OP_ALU_POP_AFTER) + alu_pop = 1; + } + alu_pop += pops; + if (alu_pop == 1) { + ctx->bc->cf_last->op = CF_OP_ALU_POP_AFTER; + ctx->bc->force_add_cf = 1; + } else if (alu_pop == 2) { + ctx->bc->cf_last->op = CF_OP_ALU_POP2_AFTER; + ctx->bc->force_add_cf = 1; + } else { + force_pop = 1; + } + } + + if (force_pop) { + r600_bytecode_add_cfinst(ctx->bc, CF_OP_POP); + ctx->bc->cf_last->pop_count = pops; + ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2; + } + + return 0; +} + +static inline void callstack_update_max_depth(struct r600_shader_ctx *ctx, + unsigned reason) +{ + struct r600_stack_info *stack = &ctx->bc->stack; + unsigned elements; + int entries; + + unsigned entry_size = stack->entry_size; + + elements = (stack->loop + stack->push_wqm ) * entry_size; + elements += stack->push; + + switch (ctx->bc->chip_class) { + case R600: + case R700: + /* pre-r8xx: if any non-WQM PUSH instruction is invoked, 2 elements on + * the stack must be reserved to hold the current active/continue + * masks */ + if (reason == FC_PUSH_VPM) { + elements += 2; + } + break; + + case CAYMAN: + /* r9xx: any stack operation on empty stack consumes 2 additional + * elements */ + elements += 2; + + /* fallthrough */ + /* FIXME: do the two elements added above cover the cases for the + * r8xx+ below? */ + + case EVERGREEN: + /* r8xx+: 2 extra elements are not always required, but one extra + * element must be added for each of the following cases: + * 1. There is an ALU_ELSE_AFTER instruction at the point of greatest + * stack usage. + * (Currently we don't use ALU_ELSE_AFTER.) + * 2. There are LOOP/WQM frames on the stack when any flavor of non-WQM + * PUSH instruction executed. + * + * NOTE: it seems we also need to reserve additional element in some + * other cases, e.g. 
when we have 4 levels of PUSH_VPM in the shader, + * then STACK_SIZE should be 2 instead of 1 */ + if (reason == FC_PUSH_VPM) { + elements += 1; + } + break; + + default: + assert(0); + break; + } + + /* NOTE: it seems STACK_SIZE is interpreted by hw as if entry_size is 4 + * for all chips, so we use 4 in the final formula, not the real entry_size + * for the chip */ + entry_size = 4; + + entries = (elements + (entry_size - 1)) / entry_size; + + if (entries > stack->max_entries) + stack->max_entries = entries; +} + +static inline void callstack_pop(struct r600_shader_ctx *ctx, unsigned reason) +{ + switch(reason) { + case FC_PUSH_VPM: + --ctx->bc->stack.push; + assert(ctx->bc->stack.push >= 0); + break; + case FC_PUSH_WQM: + --ctx->bc->stack.push_wqm; + assert(ctx->bc->stack.push_wqm >= 0); + break; + case FC_LOOP: + --ctx->bc->stack.loop; + assert(ctx->bc->stack.loop >= 0); + break; + default: + assert(0); + break; + } +} + +static inline void callstack_push(struct r600_shader_ctx *ctx, unsigned reason) +{ + switch (reason) { + case FC_PUSH_VPM: + ++ctx->bc->stack.push; + break; + case FC_PUSH_WQM: + ++ctx->bc->stack.push_wqm; + case FC_LOOP: + ++ctx->bc->stack.loop; + break; + default: + assert(0); + } + + callstack_update_max_depth(ctx, reason); +} + +static void fc_set_mid(struct r600_shader_ctx *ctx, int fc_sp) +{ + struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[fc_sp]; + + sp->mid = realloc((void *)sp->mid, + sizeof(struct r600_bytecode_cf *) * (sp->num_mid + 1)); + sp->mid[sp->num_mid] = ctx->bc->cf_last; + sp->num_mid++; +} + +static void fc_pushlevel(struct r600_shader_ctx *ctx, int type) +{ + assert(ctx->bc->fc_sp < ARRAY_SIZE(ctx->bc->fc_stack)); + ctx->bc->fc_stack[ctx->bc->fc_sp].type = type; + ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last; + ctx->bc->fc_sp++; +} + +static void fc_poplevel(struct r600_shader_ctx *ctx) +{ + struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp - 1]; + free(sp->mid); + sp->mid = NULL; + sp->num_mid = 0; + sp->start = NULL; + sp->type = 0; + ctx->bc->fc_sp--; +} + +#if 0 +static int emit_return(struct r600_shader_ctx *ctx) +{ + r600_bytecode_add_cfinst(ctx->bc, CF_OP_RETURN)); + return 0; +} + +static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset) +{ + + r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP)); + ctx->bc->cf_last->pop_count = pops; + /* XXX work out offset */ + return 0; +} + +static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value) +{ + return 0; +} + +static void emit_testflag(struct r600_shader_ctx *ctx) +{ + +} + +static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx) +{ + emit_testflag(ctx); + emit_jump_to_offset(ctx, 1, 4); + emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0); + pops(ctx, ifidx + 1); + emit_return(ctx); +} + +static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp) +{ + emit_testflag(ctx); + + r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op); + ctx->bc->cf_last->pop_count = 1; + + fc_set_mid(ctx, fc_sp); + + pops(ctx, 1); +} +#endif + +static int emit_if(struct r600_shader_ctx *ctx, int opcode, + struct r600_bytecode_alu_src *src) +{ + int alu_type = CF_OP_ALU_PUSH_BEFORE; + + /* There is a hardware bug on Cayman where a BREAK/CONTINUE followed by + * LOOP_STARTxxx for nested loops may put the branch stack into a state + * such that ALU_PUSH_BEFORE doesn't work as expected. 
Workaround this + * by replacing the ALU_PUSH_BEFORE with a PUSH + ALU */ + if (ctx->bc->chip_class == CAYMAN && ctx->bc->stack.loop > 1) { + r600_bytecode_add_cfinst(ctx->bc, CF_OP_PUSH); + ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2; + alu_type = CF_OP_ALU; + } + + emit_logic_pred(ctx, opcode, alu_type, src); + + r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP); + + fc_pushlevel(ctx, FC_IF); + + callstack_push(ctx, FC_PUSH_VPM); + return 0; +} + +static int tgsi_if(struct r600_shader_ctx *ctx) +{ + struct r600_bytecode_alu_src alu_src; + r600_bytecode_src(&alu_src, &ctx->src[0], 0); + + return emit_if(ctx, ALU_OP2_PRED_SETNE, &alu_src); +} + +static int tgsi_uif(struct r600_shader_ctx *ctx) +{ + struct r600_bytecode_alu_src alu_src; + r600_bytecode_src(&alu_src, &ctx->src[0], 0); + return emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src); +} + +static int tgsi_else(struct r600_shader_ctx *ctx) +{ + r600_bytecode_add_cfinst(ctx->bc, CF_OP_ELSE); + ctx->bc->cf_last->pop_count = 1; + + fc_set_mid(ctx, ctx->bc->fc_sp - 1); + ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id; + return 0; +} + +static int tgsi_endif(struct r600_shader_ctx *ctx) +{ + pops(ctx, 1); + if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].type != FC_IF) { + R600_ERR("if/endif unbalanced in shader\n"); + return -1; + } + + if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid == NULL) { + ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id + 2; + ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->pop_count = 1; + } else { + ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid[0]->cf_addr = ctx->bc->cf_last->id + 2; + } + fc_poplevel(ctx); + + callstack_pop(ctx, FC_PUSH_VPM); + return 0; +} + +static int tgsi_bgnloop(struct r600_shader_ctx *ctx) +{ + /* LOOP_START_DX10 ignores the LOOP_CONFIG* registers, so it is not + * limited to 4096 iterations, like the other LOOP_* instructions. 
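+	 * It relies only on the control-flow stack, whose depth is accounted for via callstack_push(FC_LOOP) below.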
*/ + r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_START_DX10); + + fc_pushlevel(ctx, FC_LOOP); + + /* check stack depth */ + callstack_push(ctx, FC_LOOP); + return 0; +} + +static int tgsi_endloop(struct r600_shader_ctx *ctx) +{ + int i; + + r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_END); + + if (ctx->bc->fc_stack[ctx->bc->fc_sp - 1].type != FC_LOOP) { + R600_ERR("loop/endloop in shader code are not paired.\n"); + return -EINVAL; + } + + /* fixup loop pointers - from r600isa + LOOP END points to CF after LOOP START, + LOOP START point to CF after LOOP END + BRK/CONT point to LOOP END CF + */ + ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->id + 2; + + ctx->bc->fc_stack[ctx->bc->fc_sp - 1].start->cf_addr = ctx->bc->cf_last->id + 2; + + for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp - 1].num_mid; i++) { + ctx->bc->fc_stack[ctx->bc->fc_sp - 1].mid[i]->cf_addr = ctx->bc->cf_last->id; + } + /* XXX add LOOPRET support */ + fc_poplevel(ctx); + callstack_pop(ctx, FC_LOOP); + return 0; +} + +static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx) +{ + unsigned int fscp; + + for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--) + { + if (FC_LOOP == ctx->bc->fc_stack[fscp - 1].type) + break; + } + + if (fscp == 0) { + R600_ERR("Break not inside loop/endloop pair\n"); + return -EINVAL; + } + + r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op); + + fc_set_mid(ctx, fscp - 1); + + return 0; +} + +static int tgsi_gs_emit(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + int stream = ctx->literals[inst->Src[0].Register.Index * 4 + inst->Src[0].Register.SwizzleX]; + int r; + + if (ctx->inst_info->op == CF_OP_EMIT_VERTEX) + emit_gs_ring_writes(ctx, ctx->gs_stream_output_info, stream, TRUE); + + r = r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op); + if (!r) { + ctx->bc->cf_last->count = stream; // Count field for CUT/EMIT_VERTEX indicates which stream + if (ctx->inst_info->op == CF_OP_EMIT_VERTEX) + return emit_inc_ring_offset(ctx, stream, TRUE); + } + return r; +} + +static int tgsi_umad(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_alu alu; + int i, j, r; + int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); + + /* src0 * src1 */ + for (i = 0; i < lasti + 1; i++) { + if (!(inst->Dst[0].Register.WriteMask & (1 << i))) + continue; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + + alu.dst.chan = i; + alu.dst.sel = ctx->temp_reg; + alu.dst.write = 1; + + alu.op = ALU_OP2_MULLO_UINT; + for (j = 0; j < 2; j++) { + r600_bytecode_src(&alu.src[j], &ctx->src[j], i); + } + + alu.last = 1; + r = emit_mul_int_op(ctx->bc, &alu); + if (r) + return r; + } + + + for (i = 0; i < lasti + 1; i++) { + if (!(inst->Dst[0].Register.WriteMask & (1 << i))) + continue; -static void fc_pushlevel(struct r600_shader_ctx *ctx, int type) + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + + alu.op = ALU_OP2_ADD_INT; + + alu.src[0].sel = ctx->temp_reg; + alu.src[0].chan = i; + + r600_bytecode_src(&alu.src[1], &ctx->src[2], i); + if (i == lasti) { + alu.last = 1; + } + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + return 0; +} + +static int tgsi_pk2h(struct r600_shader_ctx *ctx) { - ctx->bc->fc_sp++; - ctx->bc->fc_stack[ctx->bc->fc_sp].type = type; - ctx->bc->fc_stack[ctx->bc->fc_sp].start = ctx->bc->cf_last; + struct tgsi_full_instruction *inst = 
&ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_alu alu; + int r, i; + int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); + + /* temp.xy = f32_to_f16(src) */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_FLT32_TO_FLT16; + alu.dst.chan = 0; + alu.dst.sel = ctx->temp_reg; + alu.dst.write = 1; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + alu.dst.chan = 1; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 1); + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + /* dst.x = temp.y * 0x10000 + temp.x */ + for (i = 0; i < lasti + 1; i++) { + if (!(inst->Dst[0].Register.WriteMask & (1 << i))) + continue; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP3_MULADD_UINT24; + alu.is_op3 = 1; + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + alu.last = i == lasti; + alu.src[0].sel = ctx->temp_reg; + alu.src[0].chan = 1; + alu.src[1].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[1].value = 0x10000; + alu.src[2].sel = ctx->temp_reg; + alu.src[2].chan = 0; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + return 0; } -static void fc_poplevel(struct r600_shader_ctx *ctx) +static int tgsi_up2h(struct r600_shader_ctx *ctx) { - struct r600_cf_stack_entry *sp = &ctx->bc->fc_stack[ctx->bc->fc_sp]; - free(sp->mid); - sp->mid = NULL; - sp->num_mid = 0; - sp->start = NULL; - sp->type = 0; - ctx->bc->fc_sp--; + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_alu alu; + int r, i; + int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); + + /* temp.x = src.x */ + /* note: no need to mask out the high bits */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + alu.dst.chan = 0; + alu.dst.sel = ctx->temp_reg; + alu.dst.write = 1; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + /* temp.y = src.x >> 16 */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_LSHR_INT; + alu.dst.chan = 1; + alu.dst.sel = ctx->temp_reg; + alu.dst.write = 1; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + alu.src[1].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[1].value = 16; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + /* dst.wz = dst.xy = f16_to_f32(temp.xy) */ + for (i = 0; i < lasti + 1; i++) { + if (!(inst->Dst[0].Register.WriteMask & (1 << i))) + continue; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + alu.op = ALU_OP1_FLT16_TO_FLT32; + alu.src[0].sel = ctx->temp_reg; + alu.src[0].chan = i % 2; + alu.last = i == lasti; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + return 0; +} + +static int tgsi_bfe(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_alu alu; + int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); + int r, i; + int dst = -1; + + if ((inst->Src[0].Register.File == inst->Dst[0].Register.File && + inst->Src[0].Register.Index == inst->Dst[0].Register.Index) || + (inst->Src[2].Register.File == inst->Dst[0].Register.File && + inst->Src[2].Register.Index == inst->Dst[0].Register.Index)) + dst = r600_get_temp(ctx); + + r = tgsi_op3_dst(ctx, dst); + if (r) + return r; + + for (i = 0; i < lasti + 1; i++) { + memset(&alu, 0, sizeof(struct 
r600_bytecode_alu)); + alu.op = ALU_OP2_SETGE_INT; + r600_bytecode_src(&alu.src[0], &ctx->src[2], i); + alu.src[1].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[1].value = 32; + alu.dst.sel = ctx->temp_reg; + alu.dst.chan = i; + alu.dst.write = 1; + if (i == lasti) + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + for (i = 0; i < lasti + 1; i++) { + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP3_CNDE_INT; + alu.is_op3 = 1; + alu.src[0].sel = ctx->temp_reg; + alu.src[0].chan = i; + + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); + if (dst != -1) + alu.src[1].sel = dst; + else + alu.src[1].sel = alu.dst.sel; + alu.src[1].chan = i; + r600_bytecode_src(&alu.src[2], &ctx->src[0], i); + alu.dst.write = 1; + if (i == lasti) + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + } + + return 0; +} + +static int tgsi_clock(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_alu alu; + int r; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst); + alu.src[0].sel = EG_V_SQ_ALU_SRC_TIME_LO; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst); + alu.src[0].sel = EG_V_SQ_ALU_SRC_TIME_HI; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + return 0; +} + +static int emit_u64add(struct r600_shader_ctx *ctx, int op, + int treg, + int src0_sel, int src0_chan, + int src1_sel, int src1_chan) +{ + struct r600_bytecode_alu alu; + int r; + int opc; + + if (op == ALU_OP2_ADD_INT) + opc = ALU_OP2_ADDC_UINT; + else + opc = ALU_OP2_SUBB_UINT; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = op; ; + alu.dst.sel = treg; + alu.dst.chan = 0; + alu.dst.write = 1; + alu.src[0].sel = src0_sel; + alu.src[0].chan = src0_chan + 0; + alu.src[1].sel = src1_sel; + alu.src[1].chan = src1_chan + 0; + alu.src[1].neg = 0; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = op; + alu.dst.sel = treg; + alu.dst.chan = 1; + alu.dst.write = 1; + alu.src[0].sel = src0_sel; + alu.src[0].chan = src0_chan + 1; + alu.src[1].sel = src1_sel; + alu.src[1].chan = src1_chan + 1; + alu.src[1].neg = 0; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = opc; + alu.dst.sel = treg; + alu.dst.chan = 2; + alu.dst.write = 1; + alu.last = 1; + alu.src[0].sel = src0_sel; + alu.src[0].chan = src0_chan + 0; + alu.src[1].sel = src1_sel; + alu.src[1].chan = src1_chan + 0; + alu.src[1].neg = 0; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = op; + alu.dst.sel = treg; + alu.dst.chan = 1; + alu.dst.write = 1; + alu.src[0].sel = treg; + alu.src[0].chan = 1; + alu.src[1].sel = treg; + alu.src[1].chan = 2; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + return 0; } -#if 0 -static int emit_return(struct r600_shader_ctx *ctx) -{ - r600_bytecode_add_cfinst(ctx->bc, CF_OP_RETURN)); +static int egcm_u64add(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_alu alu; + int r; + int treg = ctx->temp_reg; + int op = ALU_OP2_ADD_INT, 
opc = ALU_OP2_ADDC_UINT; + + if (ctx->src[1].neg) { + op = ALU_OP2_SUB_INT; + opc = ALU_OP2_SUBB_UINT; + } + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = op; ; + alu.dst.sel = treg; + alu.dst.chan = 0; + alu.dst.write = 1; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + r600_bytecode_src(&alu.src[1], &ctx->src[1], 0); + alu.src[1].neg = 0; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = op; + alu.dst.sel = treg; + alu.dst.chan = 1; + alu.dst.write = 1; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 1); + r600_bytecode_src(&alu.src[1], &ctx->src[1], 1); + alu.src[1].neg = 0; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = opc ; + alu.dst.sel = treg; + alu.dst.chan = 2; + alu.dst.write = 1; + alu.last = 1; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + r600_bytecode_src(&alu.src[1], &ctx->src[1], 0); + alu.src[1].neg = 0; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = op; + tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst); + alu.src[0].sel = treg; + alu.src[0].chan = 1; + alu.src[1].sel = treg; + alu.src[1].chan = 2; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP1_MOV; + tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst); + alu.src[0].sel = treg; + alu.src[0].chan = 0; + alu.last = 1; + r = r600_bytecode_add_alu(ctx->bc, &alu); + if (r) + return r; return 0; } -static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset) +/* result.y = mul_high a, b + result.x = mul a,b + result.y += a.x * b.y + a.y * b.x; +*/ +static int egcm_u64mul(struct r600_shader_ctx *ctx) { + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bytecode_alu alu; + int r; + int treg = ctx->temp_reg; - r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP)); - ctx->bc->cf_last->pop_count = pops; - /* XXX work out offset */ - return 0; -} + /* temp.x = mul_lo a.x, b.x */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_MULLO_UINT; + alu.dst.sel = treg; + alu.dst.chan = 0; + alu.dst.write = 1; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + r600_bytecode_src(&alu.src[1], &ctx->src[1], 0); + r = emit_mul_int_op(ctx->bc, &alu); + if (r) + return r; -static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value) -{ - return 0; -} + /* temp.y = mul_hi a.x, b.x */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_MULHI_UINT; + alu.dst.sel = treg; + alu.dst.chan = 1; + alu.dst.write = 1; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + r600_bytecode_src(&alu.src[1], &ctx->src[1], 0); + r = emit_mul_int_op(ctx->bc, &alu); + if (r) + return r; -static void emit_testflag(struct r600_shader_ctx *ctx) -{ + /* temp.z = mul a.x, b.y */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_MULLO_UINT; + alu.dst.sel = treg; + alu.dst.chan = 2; + alu.dst.write = 1; + r600_bytecode_src(&alu.src[0], &ctx->src[0], 0); + r600_bytecode_src(&alu.src[1], &ctx->src[1], 1); + r = emit_mul_int_op(ctx->bc, &alu); + if (r) + return r; -} + /* temp.w = mul a.y, b.x */ + memset(&alu, 0, sizeof(struct r600_bytecode_alu)); + alu.op = ALU_OP2_MULLO_UINT; + alu.dst.sel = treg; + alu.dst.chan = 3; + alu.dst.write = 1; + r600_bytecode_src(&alu.src[0], 
+
+static int emit_u64add(struct r600_shader_ctx *ctx, int op,
+		       int treg,
+		       int src0_sel, int src0_chan,
+		       int src1_sel, int src1_chan)
+{
+	struct r600_bytecode_alu alu;
+	int r;
+	int opc;
+
+	if (op == ALU_OP2_ADD_INT)
+		opc = ALU_OP2_ADDC_UINT;
+	else
+		opc = ALU_OP2_SUBB_UINT;
+
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = op;
+	alu.dst.sel = treg;
+	alu.dst.chan = 0;
+	alu.dst.write = 1;
+	alu.src[0].sel = src0_sel;
+	alu.src[0].chan = src0_chan + 0;
+	alu.src[1].sel = src1_sel;
+	alu.src[1].chan = src1_chan + 0;
+	alu.src[1].neg = 0;
+	r = r600_bytecode_add_alu(ctx->bc, &alu);
+	if (r)
+		return r;
+
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = op;
+	alu.dst.sel = treg;
+	alu.dst.chan = 1;
+	alu.dst.write = 1;
+	alu.src[0].sel = src0_sel;
+	alu.src[0].chan = src0_chan + 1;
+	alu.src[1].sel = src1_sel;
+	alu.src[1].chan = src1_chan + 1;
+	alu.src[1].neg = 0;
+	r = r600_bytecode_add_alu(ctx->bc, &alu);
+	if (r)
+		return r;
+
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = opc;
+	alu.dst.sel = treg;
+	alu.dst.chan = 2;
+	alu.dst.write = 1;
+	alu.last = 1;
+	alu.src[0].sel = src0_sel;
+	alu.src[0].chan = src0_chan + 0;
+	alu.src[1].sel = src1_sel;
+	alu.src[1].chan = src1_chan + 0;
+	alu.src[1].neg = 0;
+	r = r600_bytecode_add_alu(ctx->bc, &alu);
+	if (r)
+		return r;
+
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = op;
+	alu.dst.sel = treg;
+	alu.dst.chan = 1;
+	alu.dst.write = 1;
+	alu.src[0].sel = treg;
+	alu.src[0].chan = 1;
+	alu.src[1].sel = treg;
+	alu.src[1].chan = 2;
+	alu.last = 1;
+	r = r600_bytecode_add_alu(ctx->bc, &alu);
+	if (r)
+		return r;
+	return 0;
 }
 
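emit_u64add builds a 64-bit add (or subtract) out of 32-bit ALU ops: the low halves are combined first, ADDC_UINT recomputes the same operation to capture the carry into a spare channel, and the carry is then folded into the high half. A plain-C sketch of the same carry chain, with a u64_pair type introduced here purely for illustration:

	#include <stdint.h>

	struct u64_pair { uint32_t lo, hi; };

	/* the chain emit_u64add builds:
	 * treg.x = lo sum, treg.z = carry (ADDC_UINT), treg.y = hi sum + carry */
	static struct u64_pair u64_add(struct u64_pair a, struct u64_pair b)
	{
		struct u64_pair r;
		r.lo = a.lo + b.lo;            /* ADD_INT -> treg.x */
		uint32_t carry = r.lo < a.lo;  /* ADDC_UINT -> treg.z */
		r.hi = a.hi + b.hi + carry;    /* ADD_INT twice -> treg.y */
		return r;
	}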
-#if 0
-static int emit_return(struct r600_shader_ctx *ctx)
-{
-	r600_bytecode_add_cfinst(ctx->bc, CF_OP_RETURN));
+static int egcm_u64add(struct r600_shader_ctx *ctx)
+{
+	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+	struct r600_bytecode_alu alu;
+	int r;
+	int treg = ctx->temp_reg;
+	int op = ALU_OP2_ADD_INT, opc = ALU_OP2_ADDC_UINT;
+
+	if (ctx->src[1].neg) {
+		op = ALU_OP2_SUB_INT;
+		opc = ALU_OP2_SUBB_UINT;
+	}
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = op;
+	alu.dst.sel = treg;
+	alu.dst.chan = 0;
+	alu.dst.write = 1;
+	r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+	r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
+	alu.src[1].neg = 0;
+	r = r600_bytecode_add_alu(ctx->bc, &alu);
+	if (r)
+		return r;
+
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = op;
+	alu.dst.sel = treg;
+	alu.dst.chan = 1;
+	alu.dst.write = 1;
+	r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
+	r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
+	alu.src[1].neg = 0;
+	r = r600_bytecode_add_alu(ctx->bc, &alu);
+	if (r)
+		return r;
+
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = opc;
+	alu.dst.sel = treg;
+	alu.dst.chan = 2;
+	alu.dst.write = 1;
+	alu.last = 1;
+	r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+	r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
+	alu.src[1].neg = 0;
+	r = r600_bytecode_add_alu(ctx->bc, &alu);
+	if (r)
+		return r;
+
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = op;
+	tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
+	alu.src[0].sel = treg;
+	alu.src[0].chan = 1;
+	alu.src[1].sel = treg;
+	alu.src[1].chan = 2;
+	alu.last = 1;
+	r = r600_bytecode_add_alu(ctx->bc, &alu);
+	if (r)
+		return r;
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = ALU_OP1_MOV;
+	tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
+	alu.src[0].sel = treg;
+	alu.src[0].chan = 0;
+	alu.last = 1;
+	r = r600_bytecode_add_alu(ctx->bc, &alu);
+	if (r)
+		return r;
 	return 0;
 }
 
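egcm_u64add reuses the same pattern and flips to SUB_INT/SUBB_UINT when TGSI marks src[1] as negated, clearing the neg bit so the raw operand values flow through. The borrow chain, reusing the u64_pair type from the sketch above and assuming SUBB_UINT yields the borrow bit (which matches how treg.z is consumed here):

	/* the SUB_INT/SUBB_UINT path taken when src[1] carries the neg flag */
	static struct u64_pair u64_sub(struct u64_pair a, struct u64_pair b)
	{
		struct u64_pair r;
		r.lo = a.lo - b.lo;             /* SUB_INT -> treg.x */
		uint32_t borrow = a.lo < b.lo;  /* SUBB_UINT -> treg.z */
		r.hi = a.hi - b.hi - borrow;    /* SUB_INT twice -> treg.y */
		return r;
	}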
-static int emit_jump_to_offset(struct r600_shader_ctx *ctx, int pops, int offset)
+/* result.y = mul_high a, b
+   result.x = mul a, b
+   result.y += a.x * b.y + a.y * b.x;
+*/
+static int egcm_u64mul(struct r600_shader_ctx *ctx)
 {
+	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
+	struct r600_bytecode_alu alu;
+	int r;
+	int treg = ctx->temp_reg;
 
-	r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP));
-	ctx->bc->cf_last->pop_count = pops;
-	/* XXX work out offset */
-	return 0;
-}
+	/* temp.x = mul_lo a.x, b.x */
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = ALU_OP2_MULLO_UINT;
+	alu.dst.sel = treg;
+	alu.dst.chan = 0;
+	alu.dst.write = 1;
+	r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+	r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
+	r = emit_mul_int_op(ctx->bc, &alu);
+	if (r)
+		return r;
 
-static int emit_setret_in_loop_flag(struct r600_shader_ctx *ctx, unsigned flag_value)
-{
-	return 0;
-}
+	/* temp.y = mul_hi a.x, b.x */
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = ALU_OP2_MULHI_UINT;
+	alu.dst.sel = treg;
+	alu.dst.chan = 1;
+	alu.dst.write = 1;
+	r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+	r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
+	r = emit_mul_int_op(ctx->bc, &alu);
+	if (r)
+		return r;
 
-static void emit_testflag(struct r600_shader_ctx *ctx)
-{
+	/* temp.z = mul a.x, b.y */
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = ALU_OP2_MULLO_UINT;
+	alu.dst.sel = treg;
+	alu.dst.chan = 2;
+	alu.dst.write = 1;
+	r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+	r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
+	r = emit_mul_int_op(ctx->bc, &alu);
+	if (r)
+		return r;
 
-}
+	/* temp.w = mul a.y, b.x */
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = ALU_OP2_MULLO_UINT;
+	alu.dst.sel = treg;
+	alu.dst.chan = 3;
+	alu.dst.write = 1;
+	r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
+	r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
+	r = emit_mul_int_op(ctx->bc, &alu);
+	if (r)
+		return r;
 
-static void emit_return_on_flag(struct r600_shader_ctx *ctx, unsigned ifidx)
-{
-	emit_testflag(ctx);
-	emit_jump_to_offset(ctx, 1, 4);
-	emit_setret_in_loop_flag(ctx, V_SQ_ALU_SRC_0);
-	pops(ctx, ifidx + 1);
-	emit_return(ctx);
-}
+	/* temp.z = temp.z + temp.w */
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = ALU_OP2_ADD_INT;
+	alu.dst.sel = treg;
+	alu.dst.chan = 2;
+	alu.dst.write = 1;
+	alu.src[0].sel = treg;
+	alu.src[0].chan = 2;
+	alu.src[1].sel = treg;
+	alu.src[1].chan = 3;
+	alu.last = 1;
+	r = r600_bytecode_add_alu(ctx->bc, &alu);
+	if (r)
+		return r;
 
-static void break_loop_on_flag(struct r600_shader_ctx *ctx, unsigned fc_sp)
-{
-	emit_testflag(ctx);
+	/* temp.y = temp.y + temp.z */
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = ALU_OP2_ADD_INT;
+	alu.dst.sel = treg;
+	alu.dst.chan = 1;
+	alu.dst.write = 1;
+	alu.src[0].sel = treg;
+	alu.src[0].chan = 1;
+	alu.src[1].sel = treg;
+	alu.src[1].chan = 2;
+	alu.last = 1;
+	r = r600_bytecode_add_alu(ctx->bc, &alu);
+	if (r)
+		return r;
 
-	r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
-	ctx->bc->cf_last->pop_count = 1;
+	/* dst.x = temp.x */
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = ALU_OP1_MOV;
+	tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
+	alu.src[0].sel = treg;
+	alu.src[0].chan = 0;
+	r = r600_bytecode_add_alu(ctx->bc, &alu);
+	if (r)
+		return r;
 
-	fc_set_mid(ctx, fc_sp);
+	/* dst.y = temp.y */
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = ALU_OP1_MOV;
+	tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
+	alu.src[0].sel = treg;
+	alu.src[0].chan = 1;
+	alu.last = 1;
+	r = r600_bytecode_add_alu(ctx->bc, &alu);
+	if (r)
+		return r;
 
-	pops(ctx, 1);
+	return 0;
 }
-#endif
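egcm_u64mul decomposes a 64 x 64 -> 64 multiply into 32-bit MULLO/MULHI pieces, exactly as the header comment describes: only the low product needs its high half, and the two cross terms contribute to the result's high word. A plain-C model of the temp.x/y/z/w usage:

	#include <stdint.h>

	/* mirrors the shader's temps: x = lo, y = running hi, z/w = cross terms */
	static void u64_mul(uint32_t a_lo, uint32_t a_hi,
			    uint32_t b_lo, uint32_t b_hi,
			    uint32_t *r_lo, uint32_t *r_hi)
	{
		uint64_t ll = (uint64_t)a_lo * b_lo;
		uint32_t lo = (uint32_t)ll;          /* MULLO_UINT -> temp.x */
		uint32_t hi = (uint32_t)(ll >> 32);  /* MULHI_UINT -> temp.y */
		uint32_t z  = a_lo * b_hi;           /* MULLO_UINT -> temp.z */
		uint32_t w  = a_hi * b_lo;           /* MULLO_UINT -> temp.w */
		hi += z + w;                         /* the two ADD_INTs     */
		*r_lo = lo;
		*r_hi = hi;
	}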
 
-static int emit_if(struct r600_shader_ctx *ctx, int opcode)
+static int emit_u64sge(struct r600_shader_ctx *ctx,
+		       int treg,
+		       int src0_sel, int src0_base_chan,
+		       int src1_sel, int src1_base_chan)
 {
-	int alu_type = CF_OP_ALU_PUSH_BEFORE;
-
-	/* There is a hardware bug on Cayman where a BREAK/CONTINUE followed by
-	 * LOOP_STARTxxx for nested loops may put the branch stack into a state
-	 * such that ALU_PUSH_BEFORE doesn't work as expected. Workaround this
-	 * by replacing the ALU_PUSH_BEFORE with a PUSH + ALU */
-	if (ctx->bc->chip_class == CAYMAN && ctx->bc->stack.loop > 1) {
-		r600_bytecode_add_cfinst(ctx->bc, CF_OP_PUSH);
-		ctx->bc->cf_last->cf_addr = ctx->bc->cf_last->id + 2;
-		alu_type = CF_OP_ALU;
-	}
+	int r;
+	/* for 64-bit sge */
+	/* result = (src0.y > src1.y) || ((src0.y == src1.y) && (src0.x >= src1.x)) */
+	r = single_alu_op2(ctx, ALU_OP2_SETGT_UINT,
+			   treg, 1,
+			   src0_sel, src0_base_chan + 1,
+			   src1_sel, src1_base_chan + 1);
+	if (r)
+		return r;
 
-	emit_logic_pred(ctx, opcode, alu_type);
+	r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
+			   treg, 0,
+			   src0_sel, src0_base_chan,
+			   src1_sel, src1_base_chan);
+	if (r)
+		return r;
 
-	r600_bytecode_add_cfinst(ctx->bc, CF_OP_JUMP);
+	r = single_alu_op2(ctx, ALU_OP2_SETE_INT,
+			   treg, 2,
+			   src0_sel, src0_base_chan + 1,
+			   src1_sel, src1_base_chan + 1);
+	if (r)
+		return r;
 
-	fc_pushlevel(ctx, FC_IF);
+	r = single_alu_op2(ctx, ALU_OP2_AND_INT,
+			   treg, 0,
+			   treg, 0,
+			   treg, 2);
+	if (r)
+		return r;
 
-	callstack_push(ctx, FC_PUSH_VPM);
+	r = single_alu_op2(ctx, ALU_OP2_OR_INT,
+			   treg, 0,
+			   treg, 0,
+			   treg, 1);
+	if (r)
+		return r;
 
 	return 0;
 }
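emit_u64sge lowers a 64-bit unsigned >= to three 32-bit compares plus AND/OR combining, following the comment's identity. The same logic in plain C:

	#include <stdint.h>
	#include <stdbool.h>

	/* 64-bit unsigned >= from 32-bit compares, as emit_u64sge wires it */
	static bool u64_sge(uint32_t lo0, uint32_t hi0, uint32_t lo1, uint32_t hi1)
	{
		bool gt_hi = hi0 > hi1;            /* SETGT_UINT -> treg.y */
		bool ge_lo = lo0 >= lo1;           /* SETGE_UINT -> treg.x */
		bool eq_hi = hi0 == hi1;           /* SETE_INT   -> treg.z */
		return gt_hi || (eq_hi && ge_lo);  /* AND_INT then OR_INT  */
	}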
 
-static int tgsi_if(struct r600_shader_ctx *ctx)
+/* this isn't a complete div, it's just enough for the qbo shader to work */
+static int egcm_u64div(struct r600_shader_ctx *ctx)
 {
-	return emit_if(ctx, ALU_OP2_PRED_SETNE);
-}
+	struct r600_bytecode_alu alu;
+	struct r600_bytecode_alu_src alu_num_hi, alu_num_lo, alu_denom_hi, alu_denom_lo, alu_src;
+	int r, i;
+	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
 
-static int tgsi_uif(struct r600_shader_ctx *ctx)
-{
-	return emit_if(ctx, ALU_OP2_PRED_SETNE_INT);
-}
+	/* make sure we are dividing by a const with 0 in the high bits */
+	if (ctx->src[1].sel != V_SQ_ALU_SRC_LITERAL)
+		return -1;
+	if (ctx->src[1].value[ctx->src[1].swizzle[1]] != 0)
+		return -1;
+	/* make sure we are doing one division */
+	if (inst->Dst[0].Register.WriteMask != 0x3)
+		return -1;
 
-static int tgsi_else(struct r600_shader_ctx *ctx)
-{
-	r600_bytecode_add_cfinst(ctx->bc, CF_OP_ELSE);
-	ctx->bc->cf_last->pop_count = 1;
+	/* emit_if uses ctx->temp_reg so we can't */
+	int treg = r600_get_temp(ctx);
+	int tmp_num = r600_get_temp(ctx);
+	int sub_tmp = r600_get_temp(ctx);
 
-	fc_set_mid(ctx, ctx->bc->fc_sp);
-	ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id;
-	return 0;
-}
+	/* the quotient temps are tmp_num.zw */
+	r600_bytecode_src(&alu_num_lo, &ctx->src[0], 0);
+	r600_bytecode_src(&alu_num_hi, &ctx->src[0], 1);
+	r600_bytecode_src(&alu_denom_lo, &ctx->src[1], 0);
+	r600_bytecode_src(&alu_denom_hi, &ctx->src[1], 1);
 
-static int tgsi_endif(struct r600_shader_ctx *ctx)
-{
-	pops(ctx, 1);
-	if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_IF) {
-		R600_ERR("if/endif unbalanced in shader\n");
-		return -1;
-	}
+	/* MOV tmp_num.xy, numerator */
+	r = single_alu_op2(ctx, ALU_OP1_MOV,
+			   tmp_num, 0,
+			   alu_num_lo.sel, alu_num_lo.chan,
+			   0, 0);
+	if (r)
+		return r;
+	r = single_alu_op2(ctx, ALU_OP1_MOV,
+			   tmp_num, 1,
+			   alu_num_hi.sel, alu_num_hi.chan,
+			   0, 0);
+	if (r)
+		return r;
 
-	if (ctx->bc->fc_stack[ctx->bc->fc_sp].mid == NULL) {
-		ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
-		ctx->bc->fc_stack[ctx->bc->fc_sp].start->pop_count = 1;
-	} else {
-		ctx->bc->fc_stack[ctx->bc->fc_sp].mid[0]->cf_addr = ctx->bc->cf_last->id + 2;
-	}
-	fc_poplevel(ctx);
+	r = single_alu_op2(ctx, ALU_OP1_MOV,
+			   tmp_num, 2,
+			   V_SQ_ALU_SRC_LITERAL, 0,
+			   0, 0);
+	if (r)
+		return r;
 
-	callstack_pop(ctx, FC_PUSH_VPM);
-	return 0;
-}
+	r = single_alu_op2(ctx, ALU_OP1_MOV,
+			   tmp_num, 3,
+			   V_SQ_ALU_SRC_LITERAL, 0,
+			   0, 0);
+	if (r)
+		return r;
 
-static int tgsi_bgnloop(struct r600_shader_ctx *ctx)
-{
-	/* LOOP_START_DX10 ignores the LOOP_CONFIG* registers, so it is not
-	 * limited to 4096 iterations, like the other LOOP_* instructions. */
-	r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_START_DX10);
+	/* treg 0 is log2_denom */
+	/* normally this gets the MSB for the denom high value
+	   - however we know this will always be 0 here. */
+	r = single_alu_op2(ctx,
+			   ALU_OP1_MOV,
+			   treg, 0,
+			   V_SQ_ALU_SRC_LITERAL, 32,
+			   0, 0);
+	if (r)
+		return r;
 
-	fc_pushlevel(ctx, FC_LOOP);
+	/* normally check denom hi for 0, but we know it is already */
+	/* t0.y = num_hi >= denom_lo */
+	r = single_alu_op2(ctx,
+			   ALU_OP2_SETGE_UINT,
+			   treg, 1,
+			   alu_num_hi.sel, alu_num_hi.chan,
+			   V_SQ_ALU_SRC_LITERAL, alu_denom_lo.value);
+	if (r)
+		return r;
 
-	/* check stack depth */
-	callstack_push(ctx, FC_LOOP);
-	return 0;
-}
+	memset(&alu_src, 0, sizeof(alu_src));
+	alu_src.sel = treg;
+	alu_src.chan = 1;
+	r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
+	if (r)
+		return r;
 
-static int tgsi_endloop(struct r600_shader_ctx *ctx)
-{
-	unsigned i;
+	/* for loops in here */
+	/* get msb t0.x = msb(src[1].x) first */
+	int msb_lo = util_last_bit(alu_denom_lo.value);
+	r = single_alu_op2(ctx, ALU_OP1_MOV,
+			   treg, 0,
+			   V_SQ_ALU_SRC_LITERAL, msb_lo,
+			   0, 0);
+	if (r)
+		return r;
 
-	r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_END);
+	/* unroll the asm here */
+	for (i = 0; i < 31; i++) {
+		r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
+				   treg, 2,
+				   V_SQ_ALU_SRC_LITERAL, i,
+				   treg, 0);
+		if (r)
+			return r;
 
-	if (ctx->bc->fc_stack[ctx->bc->fc_sp].type != FC_LOOP) {
-		R600_ERR("loop/endloop in shader code are not paired.\n");
-		return -EINVAL;
-	}
+		/* we can do this on the CPU */
+		uint32_t denom_lo_shl = alu_denom_lo.value << (31 - i);
+		/* t0.y = tmp_num.y >= denom_lo_shl */
+		r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
+				   treg, 1,
+				   tmp_num, 1,
+				   V_SQ_ALU_SRC_LITERAL, denom_lo_shl);
+		if (r)
+			return r;
 
-	/* fixup loop pointers - from r600isa
-	   LOOP END points to CF after LOOP START,
-	   LOOP START point to CF after LOOP END
-	   BRK/CONT point to LOOP END CF
-	*/
-	ctx->bc->cf_last->cf_addr = ctx->bc->fc_stack[ctx->bc->fc_sp].start->id + 2;
+		r = single_alu_op2(ctx, ALU_OP2_AND_INT,
+				   treg, 1,
+				   treg, 1,
+				   treg, 2);
+		if (r)
+			return r;
+
+		memset(&alu_src, 0, sizeof(alu_src));
+		alu_src.sel = treg;
+		alu_src.chan = 1;
+		r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
+		if (r)
+			return r;
+
+		r = single_alu_op2(ctx, ALU_OP2_SUB_INT,
+				   tmp_num, 1,
+				   tmp_num, 1,
+				   V_SQ_ALU_SRC_LITERAL, denom_lo_shl);
+		if (r)
+			return r;
 
-	ctx->bc->fc_stack[ctx->bc->fc_sp].start->cf_addr = ctx->bc->cf_last->id + 2;
+		r = single_alu_op2(ctx, ALU_OP2_OR_INT,
+				   tmp_num, 3,
+				   tmp_num, 3,
+				   V_SQ_ALU_SRC_LITERAL, 1U << (31 - i));
+		if (r)
+			return r;
 
-	for (i = 0; i < ctx->bc->fc_stack[ctx->bc->fc_sp].num_mid; i++) {
-		ctx->bc->fc_stack[ctx->bc->fc_sp].mid[i]->cf_addr = ctx->bc->cf_last->id;
+		r = tgsi_endif(ctx);
+		if (r)
+			return r;
 	}
-	/* XXX add LOOPRET support */
-	fc_poplevel(ctx);
-	callstack_pop(ctx, FC_LOOP);
-	return 0;
-}
 
-static int tgsi_loop_breakc(struct r600_shader_ctx *ctx)
-{
-	int r;
-	unsigned int fscp;
+	/* log2_denom is always <= 31, so manually peel the last loop
+	 * iteration.
+	 */
+	r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
+			   treg, 1,
+			   tmp_num, 1,
+			   V_SQ_ALU_SRC_LITERAL, alu_denom_lo.value);
+	if (r)
+		return r;
 
-	for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
-	{
-		if (FC_LOOP == ctx->bc->fc_stack[fscp].type)
-			break;
-	}
-	if (fscp == 0) {
-		R600_ERR("BREAKC not inside loop/endloop pair\n");
-		return -EINVAL;
-	}
+	memset(&alu_src, 0, sizeof(alu_src));
+	alu_src.sel = treg;
+	alu_src.chan = 1;
+	r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
+	if (r)
+		return r;
+
+	r = single_alu_op2(ctx, ALU_OP2_SUB_INT,
+			   tmp_num, 1,
+			   tmp_num, 1,
+			   V_SQ_ALU_SRC_LITERAL, alu_denom_lo.value);
+	if (r)
+		return r;
 
-	if (ctx->bc->chip_class == EVERGREEN &&
-	    ctx->bc->family != CHIP_CYPRESS &&
-	    ctx->bc->family != CHIP_JUNIPER) {
-		/* HW bug: ALU_BREAK does not save the active mask correctly */
-		r = tgsi_uif(ctx);
+	r = single_alu_op2(ctx, ALU_OP2_OR_INT,
+			   tmp_num, 3,
+			   tmp_num, 3,
+			   V_SQ_ALU_SRC_LITERAL, 1U);
+	if (r)
+		return r;
+	r = tgsi_endif(ctx);
+	if (r)
+		return r;
+
+	r = tgsi_endif(ctx);
+	if (r)
+		return r;
+
+	/* onto the second loop to unroll */
+	for (i = 0; i < 31; i++) {
+		r = single_alu_op2(ctx, ALU_OP2_SETGE_UINT,
+				   treg, 1,
+				   V_SQ_ALU_SRC_LITERAL, (63 - (31 - i)),
+				   treg, 0);
 		if (r)
 			return r;
-		r = r600_bytecode_add_cfinst(ctx->bc, CF_OP_LOOP_BREAK);
+		uint64_t denom_shl = (uint64_t)alu_denom_lo.value << (31 - i);
+		r = single_alu_op2(ctx, ALU_OP1_MOV,
+				   treg, 2,
+				   V_SQ_ALU_SRC_LITERAL, (denom_shl & 0xffffffff),
+				   0, 0);
 		if (r)
 			return r;
-		fc_set_mid(ctx, fscp);
-		return tgsi_endif(ctx);
-	} else {
-		r = emit_logic_pred(ctx, ALU_OP2_PRED_SETE_INT, CF_OP_ALU_BREAK);
+		r = single_alu_op2(ctx, ALU_OP1_MOV,
+				   treg, 3,
+				   V_SQ_ALU_SRC_LITERAL, (denom_shl >> 32),
+				   0, 0);
 		if (r)
 			return r;
-		fc_set_mid(ctx, fscp);
-	}
-
-	return 0;
-}
-
-static int tgsi_loop_brk_cont(struct r600_shader_ctx *ctx)
-{
-	unsigned int fscp;
-
-	for (fscp = ctx->bc->fc_sp; fscp > 0; fscp--)
-	{
-		if (FC_LOOP == ctx->bc->fc_stack[fscp].type)
-			break;
-	}
-
-	if (fscp == 0) {
-		R600_ERR("Break not inside loop/endloop pair\n");
-		return -EINVAL;
-	}
-
-	r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
-
-	fc_set_mid(ctx, fscp);
-
-	return 0;
-}
-
-static int tgsi_gs_emit(struct r600_shader_ctx *ctx)
-{
-	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
-	int stream = ctx->literals[inst->Src[0].Register.Index * 4 + inst->Src[0].Register.SwizzleX];
-	int r;
-	if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
-		emit_gs_ring_writes(ctx, ctx->gs_stream_output_info, stream, TRUE);
+		r = emit_u64sge(ctx, sub_tmp,
+				tmp_num, 0,
+				treg, 2);
+		if (r)
+			return r;
 
-	r = r600_bytecode_add_cfinst(ctx->bc, ctx->inst_info->op);
-	if (!r) {
-		ctx->bc->cf_last->count = stream; // Count field for CUT/EMIT_VERTEX indicates which stream
-		if (ctx->inst_info->op == CF_OP_EMIT_VERTEX)
-			return emit_inc_ring_offset(ctx, stream, TRUE);
-	}
-	return r;
-}
+		r = single_alu_op2(ctx, ALU_OP2_AND_INT,
+				   treg, 1,
+				   treg, 1,
+				   sub_tmp, 0);
+		if (r)
+			return r;
 
-static int tgsi_umad(struct r600_shader_ctx *ctx)
-{
-	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
-	struct r600_bytecode_alu alu;
-	int i, j, k, r;
-	int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+		memset(&alu_src, 0, sizeof(alu_src));
+		alu_src.sel = treg;
+		alu_src.chan = 1;
+		r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
+		if (r)
+			return r;
 
-	/* src0 * src1 */
-	for (i = 0; i < lasti + 1; i++) {
-		if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
-			continue;
-		if (ctx->bc->chip_class == CAYMAN) {
-			for (j = 0 ; j < 4; j++) {
-				memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+		r = emit_u64add(ctx, ALU_OP2_SUB_INT,
+				sub_tmp,
+				tmp_num, 0,
+				treg, 2);
+		if (r)
+			return r;
 
-				alu.op = ALU_OP2_MULLO_UINT;
-				for (k = 0; k < inst->Instruction.NumSrcRegs; k++) {
-					r600_bytecode_src(&alu.src[k], &ctx->src[k], i);
-				}
-				alu.dst.chan = j;
-				alu.dst.sel = ctx->temp_reg;
-				alu.dst.write = (j == i);
-				if (j == 3)
-					alu.last = 1;
-				r = r600_bytecode_add_alu(ctx->bc, &alu);
-				if (r)
-					return r;
-			}
-		} else {
-			memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+		r = single_alu_op2(ctx, ALU_OP1_MOV,
+				   tmp_num, 0,
+				   sub_tmp, 0,
+				   0, 0);
+		if (r)
+			return r;
 
-			alu.dst.chan = i;
-			alu.dst.sel = ctx->temp_reg;
-			alu.dst.write = 1;
+		r = single_alu_op2(ctx, ALU_OP1_MOV,
+				   tmp_num, 1,
+				   sub_tmp, 1,
+				   0, 0);
+		if (r)
+			return r;
 
-			alu.op = ALU_OP2_MULLO_UINT;
-			for (j = 0; j < 2; j++) {
-				r600_bytecode_src(&alu.src[j], &ctx->src[j], i);
-			}
+		r = single_alu_op2(ctx, ALU_OP2_OR_INT,
+				   tmp_num, 2,
+				   tmp_num, 2,
+				   V_SQ_ALU_SRC_LITERAL, 1U << (31 - i));
+		if (r)
+			return r;
 
-			alu.last = 1;
-			r = r600_bytecode_add_alu(ctx->bc, &alu);
-			if (r)
-				return r;
-		}
+		r = tgsi_endif(ctx);
+		if (r)
+			return r;
 	}
 
+	/* log2_denom is always <= 63, so manually peel the last loop
+	 * iteration.
+	 */
+	uint64_t denom_shl = (uint64_t)alu_denom_lo.value;
+	r = single_alu_op2(ctx, ALU_OP1_MOV,
+			   treg, 2,
+			   V_SQ_ALU_SRC_LITERAL, (denom_shl & 0xffffffff),
+			   0, 0);
+	if (r)
+		return r;
 
-	for (i = 0; i < lasti + 1; i++) {
-		if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
-			continue;
+	r = single_alu_op2(ctx, ALU_OP1_MOV,
+			   treg, 3,
+			   V_SQ_ALU_SRC_LITERAL, (denom_shl >> 32),
+			   0, 0);
+	if (r)
+		return r;
 
-		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-		tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
+	r = emit_u64sge(ctx, sub_tmp,
+			tmp_num, 0,
+			treg, 2);
+	if (r)
+		return r;
 
-		alu.op = ALU_OP2_ADD_INT;
+	memset(&alu_src, 0, sizeof(alu_src));
+	alu_src.sel = sub_tmp;
+	alu_src.chan = 0;
+	r = emit_if(ctx, ALU_OP2_PRED_SETNE_INT, &alu_src);
+	if (r)
+		return r;
 
-		alu.src[0].sel = ctx->temp_reg;
-		alu.src[0].chan = i;
-
-		r600_bytecode_src(&alu.src[1], &ctx->src[2], i);
-		if (i == lasti) {
-			alu.last = 1;
-		}
-		r = r600_bytecode_add_alu(ctx->bc, &alu);
-		if (r)
-			return r;
-	}
-	return 0;
-}
+	r = emit_u64add(ctx, ALU_OP2_SUB_INT,
+			sub_tmp,
+			tmp_num, 0,
+			treg, 2);
+	if (r)
+		return r;
 
-static int tgsi_pk2h(struct r600_shader_ctx *ctx)
-{
-	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
-	struct r600_bytecode_alu alu;
-	int r, i;
-	int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+	r = single_alu_op2(ctx, ALU_OP2_OR_INT,
+			   tmp_num, 2,
+			   tmp_num, 2,
+			   V_SQ_ALU_SRC_LITERAL, 1U);
+	if (r)
+		return r;
+	r = tgsi_endif(ctx);
+	if (r)
+		return r;
 
-	/* temp.xy = f32_to_f16(src) */
 	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-	alu.op = ALU_OP1_FLT32_TO_FLT16;
-	alu.dst.chan = 0;
-	alu.dst.sel = ctx->temp_reg;
-	alu.dst.write = 1;
-	r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+	alu.op = ALU_OP1_MOV;
+	tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
+	alu.src[0].sel = tmp_num;
+	alu.src[0].chan = 2;
 	r = r600_bytecode_add_alu(ctx->bc, &alu);
 	if (r)
 		return r;
-	alu.dst.chan = 1;
-	r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
+
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = ALU_OP1_MOV;
+	tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst);
+	alu.src[0].sel = tmp_num;
+	alu.src[0].chan = 3;
 	alu.last = 1;
 	r = r600_bytecode_add_alu(ctx->bc, &alu);
 	if (r)
 		return r;
-
-	/* dst.x = temp.y * 0x10000 + temp.x */
-	for (i = 0; i < lasti + 1; i++) {
-		if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
-			continue;
-
-		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-		alu.op = ALU_OP3_MULADD_UINT24;
-		alu.is_op3 = 1;
-		tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-		alu.last = i == lasti;
-		alu.src[0].sel = ctx->temp_reg;
-		alu.src[0].chan = 1;
-		alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
-		alu.src[1].value = 0x10000;
-		alu.src[2].sel = ctx->temp_reg;
-		alu.src[2].chan = 0;
-		r = r600_bytecode_add_alu(ctx->bc, &alu);
-		if (r)
-			return r;
-	}
-
 	return 0;
 }
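egcm_u64div unrolls classic restoring long division: for each quotient bit from high to low, if the shifted denominator still fits into the remainder, subtract it and set that bit. Because the denominator is a 32-bit literal, every shifted value (denom_lo_shl / denom_shl) is folded on the CPU at compile time, and the msb guard skips bit positions where the shift would overflow. A compact C model of the loop being unrolled, a sketch under the same assumptions (nonzero 32-bit denominator; __builtin_clz is the GCC/Clang intrinsic standing in for util_last_bit):

	#include <stdint.h>

	/* restoring division of a 64-bit numerator by a nonzero 32-bit constant,
	 * i.e. the loop the shader unrolls with one predicated block per bit */
	static uint64_t u64_div_by_const32(uint64_t num, uint32_t denom)
	{
		int msb = 31 - __builtin_clz(denom);     /* ~ util_last_bit() - 1 */
		uint64_t rem = num, quot = 0;

		for (int i = 63; i >= 0; i--) {
			if (i > 63 - msb)                /* shifted denom would overflow */
				continue;
			uint64_t d = (uint64_t)denom << i;   /* folded on the CPU */
			if (rem >= d) {                  /* the emit_u64sge test */
				rem -= d;                /* emit_u64add SUB path */
				quot |= 1ull << i;       /* OR_INT sets the bit  */
			}
		}
		return quot;                             /* tmp_num.zw in the shader */
	}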
 
-static int tgsi_up2h(struct r600_shader_ctx *ctx)
+static int egcm_u64sne(struct r600_shader_ctx *ctx)
 {
 	struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction;
 	struct r600_bytecode_alu alu;
-	int r, i;
-	int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask);
+	int r;
+	int treg = ctx->temp_reg;
 
-	/* temp.x = src.x */
-	/* note: no need to mask out the high bits */
 	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-	alu.op = ALU_OP1_MOV;
+	alu.op = ALU_OP2_SETNE_INT;
+	alu.dst.sel = treg;
 	alu.dst.chan = 0;
-	alu.dst.sel = ctx->temp_reg;
 	alu.dst.write = 1;
 	r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
+	r600_bytecode_src(&alu.src[1], &ctx->src[1], 0);
 	r = r600_bytecode_add_alu(ctx->bc, &alu);
 	if (r)
 		return r;
 
-	/* temp.y = src.x >> 16 */
 	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-	alu.op = ALU_OP2_LSHR_INT;
+	alu.op = ALU_OP2_SETNE_INT;
+	alu.dst.sel = treg;
 	alu.dst.chan = 1;
-	alu.dst.sel = ctx->temp_reg;
 	alu.dst.write = 1;
-	r600_bytecode_src(&alu.src[0], &ctx->src[0], 0);
-	alu.src[1].sel = V_SQ_ALU_SRC_LITERAL;
-	alu.src[1].value = 16;
+	r600_bytecode_src(&alu.src[0], &ctx->src[0], 1);
+	r600_bytecode_src(&alu.src[1], &ctx->src[1], 1);
 	alu.last = 1;
 	r = r600_bytecode_add_alu(ctx->bc, &alu);
 	if (r)
 		return r;
 
-	/* dst.wz = dst.xy = f16_to_f32(temp.xy) */
-	for (i = 0; i < lasti + 1; i++) {
-		if (!(inst->Dst[0].Register.WriteMask & (1 << i)))
-			continue;
-		memset(&alu, 0, sizeof(struct r600_bytecode_alu));
-		tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst);
-		alu.op = ALU_OP1_FLT16_TO_FLT32;
-		alu.src[0].sel = ctx->temp_reg;
-		alu.src[0].chan = i % 2;
-		alu.last = i == lasti;
-		r = r600_bytecode_add_alu(ctx->bc, &alu);
-		if (r)
-			return r;
-	}
-
+	memset(&alu, 0, sizeof(struct r600_bytecode_alu));
+	alu.op = ALU_OP2_OR_INT;
+	tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst);
+	alu.src[0].sel = treg;
+	alu.src[0].chan = 0;
+	alu.src[1].sel = treg;
+	alu.src[1].chan = 1;
+	alu.last = 1;
+	r = r600_bytecode_add_alu(ctx->bc, &alu);
+	if (r)
+		return r;
 	return 0;
 }
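egcm_u64sne compares the two halves independently and ORs the resulting masks, avoiding any carry logic. The equivalent in plain C:

	#include <stdint.h>

	/* 64-bit "not equal" as the shader computes it */
	static uint32_t u64_sne(uint32_t lo0, uint32_t hi0, uint32_t lo1, uint32_t hi1)
	{
		uint32_t ne_lo = (lo0 != lo1) ? ~0u : 0;  /* SETNE_INT -> treg.x */
		uint32_t ne_hi = (hi0 != hi1) ? ~0u : 0;  /* SETNE_INT -> treg.y */
		return ne_lo | ne_hi;                     /* OR_INT -> dst.x */
	}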
@@ -8976,43 +11158,40 @@ static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[]
 	[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
 	[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
-	/* XXX:
-	 * For state trackers other than OpenGL, we'll want to use
-	 * _RECIP_IEEE instead.
-	 */
-	[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_CLAMPED, tgsi_trans_srcx_replicate},
+	[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},
 	[TGSI_OPCODE_RSQ] = { ALU_OP0_NOP, tgsi_rsq},
 	[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
 	[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
-	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL, tgsi_op2},
+	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
 	[TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
-	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4, tgsi_dp},
-	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4, tgsi_dp},
+	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
+	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
 	[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
-	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN, tgsi_op2},
-	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX, tgsi_op2},
+	/* MIN_DX10 returns non-nan result if one src is NaN, MIN returns NaN */
+	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
+	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
 	[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
 	[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
-	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD, tgsi_op3},
+	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
 	[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
 	[TGSI_OPCODE_FMA] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
-	[TGSI_OPCODE_DP2A] = { ALU_OP0_NOP, tgsi_unsupported},
+	[21] = { ALU_OP0_NOP, tgsi_unsupported},
 	[22] = { ALU_OP0_NOP, tgsi_unsupported},
 	[23] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
-	[TGSI_OPCODE_CLAMP] = { ALU_OP0_NOP, tgsi_unsupported},
+	[25] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
 	[TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
 	[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
 	[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
 	[TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
-	[TGSI_OPCODE_XPD] = { ALU_OP0_NOP, tgsi_xpd},
+	[31] = { ALU_OP0_NOP, tgsi_unsupported},
 	[32] = { ALU_OP0_NOP, tgsi_unsupported},
-	[33] = { ALU_OP0_NOP, tgsi_unsupported},
+	[TGSI_OPCODE_CLOCK] = { ALU_OP0_NOP, tgsi_unsupported},
 	[34] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_DPH] = { ALU_OP2_DOT4, tgsi_dp},
+	[35] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
 	[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
 	[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
@@ -9044,11 +11223,11 @@ static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[]
 	[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
 	[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
-	[TGSI_OPCODE_SCS] = { ALU_OP0_NOP, tgsi_scs},
+	[67] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
 	[69] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4, tgsi_dp},
+	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
 	[TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
 	[TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
 	[TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
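The switch from MIN/MAX to the _DX10 variants changes only the NaN rule stated in the comment above: the DX10 ops return the non-NaN operand when exactly one input is NaN, while the plain ops propagate the NaN. A C sketch of the difference, written from that comment rather than from hardware documentation:

	#include <math.h>

	static float min_legacy(float a, float b)
	{
		/* plain MIN: a NaN input yields NaN */
		return (isnan(a) || isnan(b)) ? NAN : (a < b ? a : b);
	}

	static float min_dx10(float a, float b)
	{
		/* MIN_DX10: prefer the non-NaN operand */
		if (isnan(a)) return b;
		if (isnan(b)) return a;
		return a < b ? a : b;
	}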
@@ -9058,8 +11237,8 @@ static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[]
 	[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
 	[TGSI_OPCODE_DDX_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_DDY_FINE] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_PUSHA] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_POPA] = { ALU_OP0_NOP, tgsi_unsupported},
+	[81] = { ALU_OP0_NOP, tgsi_unsupported},
+	[82] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
 	[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
 	[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
@@ -9070,7 +11249,7 @@ static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[]
 	[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
 	[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
 	[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
-	[TGSI_OPCODE_SAD] = { ALU_OP0_NOP, tgsi_unsupported},
+	[93] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
 	[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
 	[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
@@ -9080,7 +11259,7 @@ static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[]
 	[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
 	[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_TXQ_LZ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
+	[103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
 	[TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
 	[TGSI_OPCODE_RESQ] = { ALU_OP0_NOP, tgsi_unsupported},
 	[106] = { ALU_OP0_NOP, tgsi_unsupported},
@@ -9090,9 +11269,9 @@ static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[]
 	[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
 	[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
 	[TGSI_OPCODE_MEMBAR] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_CALLNZ] = { ALU_OP0_NOP, tgsi_unsupported},
+	[113] = { ALU_OP0_NOP, tgsi_unsupported},
 	[114] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_BREAKC] = { ALU_OP0_NOP, tgsi_loop_breakc},
+	[115] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill},  /* conditional kill */
 	[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end},  /* aka HALT */
 	[TGSI_OPCODE_DFMA] = { ALU_OP0_NOP, tgsi_unsupported},
@@ -9140,9 +11319,9 @@ static const struct r600_shader_tgsi_instruction r600_shader_tgsi_instruction[]
 	[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
 	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_MFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_LFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_SFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
+	[163] = { ALU_OP0_NOP, tgsi_unsupported},
+	[164] = { ALU_OP0_NOP, tgsi_unsupported},
+	[165] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_BARRIER] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
@@ -9180,37 +11359,37 @@ static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_MOV] = { ALU_OP1_MOV, tgsi_op2},
 	[TGSI_OPCODE_LIT] = { ALU_OP0_NOP, tgsi_lit},
 	[TGSI_OPCODE_RCP] = { ALU_OP1_RECIP_IEEE, tgsi_trans_srcx_replicate},
-	[TGSI_OPCODE_RSQ] = { ALU_OP1_RECIPSQRT_IEEE, tgsi_rsq},
+	[TGSI_OPCODE_RSQ] = { ALU_OP0_NOP, tgsi_rsq},
 	[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
 	[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
-	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL, tgsi_op2},
+	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
 	[TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
-	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4, tgsi_dp},
-	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4, tgsi_dp},
+	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
+	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
 	[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
-	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN, tgsi_op2},
-	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX, tgsi_op2},
+	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
+	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
 	[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
 	[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
-	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD, tgsi_op3},
+	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
 	[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
 	[TGSI_OPCODE_FMA] = { ALU_OP3_FMA, tgsi_op3},
 	[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, tgsi_trans_srcx_replicate},
-	[TGSI_OPCODE_DP2A] = { ALU_OP0_NOP, tgsi_unsupported},
+	[21] = { ALU_OP0_NOP, tgsi_unsupported},
 	[22] = { ALU_OP0_NOP, tgsi_unsupported},
 	[23] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
-	[TGSI_OPCODE_CLAMP] = { ALU_OP0_NOP, tgsi_unsupported},
+	[25] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
 	[TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
 	[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, tgsi_trans_srcx_replicate},
 	[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, tgsi_trans_srcx_replicate},
 	[TGSI_OPCODE_POW] = { ALU_OP0_NOP, tgsi_pow},
-	[TGSI_OPCODE_XPD] = { ALU_OP0_NOP, tgsi_xpd},
+	[31] = { ALU_OP0_NOP, tgsi_unsupported},
 	[32] = { ALU_OP0_NOP, tgsi_unsupported},
-	[33] = { ALU_OP0_NOP, tgsi_unsupported},
+	[TGSI_OPCODE_CLOCK] = { ALU_OP0_NOP, tgsi_clock},
 	[34] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_DPH] = { ALU_OP2_DOT4, tgsi_dp},
+	[35] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_COS] = { ALU_OP1_COS, tgsi_trig},
 	[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
 	[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
@@ -9242,11 +11421,11 @@ static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
 	[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
-	[TGSI_OPCODE_SCS] = { ALU_OP0_NOP, tgsi_scs},
+	[67] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
 	[69] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4, tgsi_dp},
+	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
 	[TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
 	[TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
 	[TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
@@ -9256,8 +11435,7 @@ static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
 	[TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
 	[TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
-	[TGSI_OPCODE_PUSHA] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_POPA] = { ALU_OP0_NOP, tgsi_unsupported},
+	[82] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
 	[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2_trans},
 	[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
@@ -9268,7 +11446,7 @@ static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
 	[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
 	[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
-	[TGSI_OPCODE_SAD] = { ALU_OP0_NOP, tgsi_unsupported},
+	[93] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
 	[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
 	[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
@@ -9278,19 +11456,19 @@ static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
 	[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_TXQ_LZ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
+	[103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
 	[TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
-	[TGSI_OPCODE_RESQ] = { ALU_OP0_NOP, tgsi_unsupported},
+	[TGSI_OPCODE_RESQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_resq},
 	[106] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
 	[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
 	[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
 	[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
-	[TGSI_OPCODE_MEMBAR] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_CALLNZ] = { ALU_OP0_NOP, tgsi_unsupported},
+	[TGSI_OPCODE_MEMBAR] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
+	[113] = { ALU_OP0_NOP, tgsi_unsupported},
 	[114] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_BREAKC] = { ALU_OP0_NOP, tgsi_unsupported},
+	[115] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill},  /* conditional kill */
 	[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end},  /* aka HALT */
 	/* Refer below for TGSI_OPCODE_DFMA */
@@ -9336,22 +11514,22 @@ static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
 	[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
 	[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
-	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_MFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_LFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_SFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
+	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_load},
+	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_store},
+	[163] = { ALU_OP0_NOP, tgsi_unsupported},
+	[164] = { ALU_OP0_NOP, tgsi_unsupported},
+	[165] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_BARRIER] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
-	[TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMCAS] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMAND] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMOR] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMXOR] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMUMIN] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMUMAX] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMIMIN] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMIMAX] = { ALU_OP0_NOP, tgsi_unsupported},
+	[TGSI_OPCODE_ATOMUADD] = { V_RAT_INST_ADD_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMXCHG] = { V_RAT_INST_XCHG_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMCAS] = { V_RAT_INST_CMPXCHG_INT_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMAND] = { V_RAT_INST_AND_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMOR] = { V_RAT_INST_OR_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMXOR] = { V_RAT_INST_XOR_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMUMIN] = { V_RAT_INST_MIN_UINT_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMUMAX] = { V_RAT_INST_MAX_UINT_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMIMIN] = { V_RAT_INST_MIN_INT_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMIMAX] = { V_RAT_INST_MAX_INT_RTN, tgsi_atomic_op},
 	[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
 	[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
 	[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
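Each TGSI atomic now lowers to the corresponding RAT (Random Access Target) instruction; the _RTN forms return the value the memory location held before the operation, which is what makes the opcodes usable as read-modify-write primitives. As a reference point only, the C11 equivalents of the semantics being mapped (standard atomics, not driver code):

	#include <stdatomic.h>
	#include <stdint.h>

	/* what the _RTN forms deliver: the previous memory value */
	static uint32_t atom_add(_Atomic uint32_t *p, uint32_t v)
	{
		return atomic_fetch_add(p, v);  /* ATOMUADD -> ADD_RTN */
	}

	static uint32_t atom_cas(_Atomic uint32_t *p, uint32_t cmp, uint32_t v)
	{
		/* ATOMCAS -> CMPXCHG_INT_RTN; cmp holds the old value on return */
		atomic_compare_exchange_strong(p, &cmp, v);
		return cmp;
	}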
@@ -9359,8 +11537,8 @@ static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, tgsi_op2_trans},
 	[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
 	[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
-	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_op3},
-	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_op3},
+	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_bfe},
+	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_bfe},
 	[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
 	[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
 	[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
@@ -9376,6 +11554,7 @@ static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_DNEG] = { ALU_OP2_ADD_64, tgsi_dneg},
 	[TGSI_OPCODE_DADD] = { ALU_OP2_ADD_64, tgsi_op2_64},
 	[TGSI_OPCODE_DMUL] = { ALU_OP2_MUL_64, cayman_mul_double_instr},
+	[TGSI_OPCODE_DDIV] = { 0, cayman_ddiv_instr },
 	[TGSI_OPCODE_DMAX] = { ALU_OP2_MAX_64, tgsi_op2_64},
 	[TGSI_OPCODE_DMIN] = { ALU_OP2_MIN_64, tgsi_op2_64},
 	[TGSI_OPCODE_DSLT] = { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
@@ -9394,6 +11573,10 @@ static const struct r600_shader_tgsi_instruction eg_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
 	[TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
 	[TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
+	[TGSI_OPCODE_U64SNE] = { ALU_OP0_NOP, egcm_u64sne },
+	[TGSI_OPCODE_U64ADD] = { ALU_OP0_NOP, egcm_u64add },
+	[TGSI_OPCODE_U64MUL] = { ALU_OP0_NOP, egcm_u64mul },
+	[TGSI_OPCODE_U64DIV] = { ALU_OP0_NOP, egcm_u64div },
 	[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
 };
@@ -9405,34 +11588,34 @@ static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_RSQ] = { ALU_OP1_RECIPSQRT_IEEE, cayman_emit_float_instr},
 	[TGSI_OPCODE_EXP] = { ALU_OP0_NOP, tgsi_exp},
 	[TGSI_OPCODE_LOG] = { ALU_OP0_NOP, tgsi_log},
-	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL, tgsi_op2},
+	[TGSI_OPCODE_MUL] = { ALU_OP2_MUL_IEEE, tgsi_op2},
 	[TGSI_OPCODE_ADD] = { ALU_OP2_ADD, tgsi_op2},
-	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4, tgsi_dp},
-	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4, tgsi_dp},
+	[TGSI_OPCODE_DP3] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
+	[TGSI_OPCODE_DP4] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
 	[TGSI_OPCODE_DST] = { ALU_OP0_NOP, tgsi_opdst},
-	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN, tgsi_op2},
-	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX, tgsi_op2},
+	[TGSI_OPCODE_MIN] = { ALU_OP2_MIN_DX10, tgsi_op2},
+	[TGSI_OPCODE_MAX] = { ALU_OP2_MAX_DX10, tgsi_op2},
 	[TGSI_OPCODE_SLT] = { ALU_OP2_SETGT, tgsi_op2_swap},
 	[TGSI_OPCODE_SGE] = { ALU_OP2_SETGE, tgsi_op2},
-	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD, tgsi_op3},
+	[TGSI_OPCODE_MAD] = { ALU_OP3_MULADD_IEEE, tgsi_op3},
 	[TGSI_OPCODE_LRP] = { ALU_OP0_NOP, tgsi_lrp},
 	[TGSI_OPCODE_FMA] = { ALU_OP3_FMA, tgsi_op3},
 	[TGSI_OPCODE_SQRT] = { ALU_OP1_SQRT_IEEE, cayman_emit_float_instr},
-	[TGSI_OPCODE_DP2A] = { ALU_OP0_NOP, tgsi_unsupported},
+	[21] = { ALU_OP0_NOP, tgsi_unsupported},
 	[22] = { ALU_OP0_NOP, tgsi_unsupported},
 	[23] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_FRC] = { ALU_OP1_FRACT, tgsi_op2},
-	[TGSI_OPCODE_CLAMP] = { ALU_OP0_NOP, tgsi_unsupported},
+	[25] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_FLR] = { ALU_OP1_FLOOR, tgsi_op2},
 	[TGSI_OPCODE_ROUND] = { ALU_OP1_RNDNE, tgsi_op2},
 	[TGSI_OPCODE_EX2] = { ALU_OP1_EXP_IEEE, cayman_emit_float_instr},
 	[TGSI_OPCODE_LG2] = { ALU_OP1_LOG_IEEE, cayman_emit_float_instr},
 	[TGSI_OPCODE_POW] = { ALU_OP0_NOP, cayman_pow},
-	[TGSI_OPCODE_XPD] = { ALU_OP0_NOP, tgsi_xpd},
+	[31] = { ALU_OP0_NOP, tgsi_unsupported},
 	[32] = { ALU_OP0_NOP, tgsi_unsupported},
-	[33] = { ALU_OP0_NOP, tgsi_unsupported},
+	[TGSI_OPCODE_CLOCK] = { ALU_OP0_NOP, tgsi_clock},
 	[34] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_DPH] = { ALU_OP2_DOT4, tgsi_dp},
+	[35] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_COS] = { ALU_OP1_COS, cayman_trig},
 	[TGSI_OPCODE_DDX] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
 	[TGSI_OPCODE_DDY] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
@@ -9464,11 +11647,11 @@ static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_RET] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_SSG] = { ALU_OP0_NOP, tgsi_ssg},
 	[TGSI_OPCODE_CMP] = { ALU_OP0_NOP, tgsi_cmp},
-	[TGSI_OPCODE_SCS] = { ALU_OP0_NOP, tgsi_scs},
+	[67] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_TXB] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
 	[69] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_DIV] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4, tgsi_dp},
+	[TGSI_OPCODE_DP2] = { ALU_OP2_DOT4_IEEE, tgsi_dp},
 	[TGSI_OPCODE_TXL] = { FETCH_OP_SAMPLE_L, tgsi_tex},
 	[TGSI_OPCODE_BRK] = { CF_OP_LOOP_BREAK, tgsi_loop_brk_cont},
 	[TGSI_OPCODE_IF] = { ALU_OP0_NOP, tgsi_if},
@@ -9478,8 +11661,7 @@ static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_ENDIF] = { ALU_OP0_NOP, tgsi_endif},
 	[TGSI_OPCODE_DDX_FINE] = { FETCH_OP_GET_GRADIENTS_H, tgsi_tex},
 	[TGSI_OPCODE_DDY_FINE] = { FETCH_OP_GET_GRADIENTS_V, tgsi_tex},
-	[TGSI_OPCODE_PUSHA] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_POPA] = { ALU_OP0_NOP, tgsi_unsupported},
+	[82] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_CEIL] = { ALU_OP1_CEIL, tgsi_op2},
 	[TGSI_OPCODE_I2F] = { ALU_OP1_INT_TO_FLT, tgsi_op2},
 	[TGSI_OPCODE_NOT] = { ALU_OP1_NOT_INT, tgsi_op2},
@@ -9490,7 +11672,7 @@ static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_OR] = { ALU_OP2_OR_INT, tgsi_op2},
 	[TGSI_OPCODE_MOD] = { ALU_OP0_NOP, tgsi_imod},
 	[TGSI_OPCODE_XOR] = { ALU_OP2_XOR_INT, tgsi_op2},
-	[TGSI_OPCODE_SAD] = { ALU_OP0_NOP, tgsi_unsupported},
+	[93] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_TXF] = { FETCH_OP_LD, tgsi_tex},
 	[TGSI_OPCODE_TXQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
 	[TGSI_OPCODE_CONT] = { CF_OP_LOOP_CONTINUE, tgsi_loop_brk_cont},
@@ -9500,19 +11682,19 @@ static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_BGNSUB] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_ENDLOOP] = { ALU_OP0_NOP, tgsi_endloop},
 	[TGSI_OPCODE_ENDSUB] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_TXQ_LZ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
+	[103] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_tex},
 	[TGSI_OPCODE_TXQS] = { FETCH_OP_GET_NUMBER_OF_SAMPLES, tgsi_tex},
-	[TGSI_OPCODE_RESQ] = { ALU_OP0_NOP, tgsi_unsupported},
+	[TGSI_OPCODE_RESQ] = { FETCH_OP_GET_TEXTURE_RESINFO, tgsi_resq},
 	[106] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_NOP] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_FSEQ] = { ALU_OP2_SETE_DX10, tgsi_op2},
 	[TGSI_OPCODE_FSGE] = { ALU_OP2_SETGE_DX10, tgsi_op2},
 	[TGSI_OPCODE_FSLT] = { ALU_OP2_SETGT_DX10, tgsi_op2_swap},
 	[TGSI_OPCODE_FSNE] = { ALU_OP2_SETNE_DX10, tgsi_op2_swap},
-	[TGSI_OPCODE_MEMBAR] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_CALLNZ] = { ALU_OP0_NOP, tgsi_unsupported},
+	[TGSI_OPCODE_MEMBAR] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
+	[113] = { ALU_OP0_NOP, tgsi_unsupported},
 	[114] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_BREAKC] = { ALU_OP0_NOP, tgsi_unsupported},
+	[115] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_KILL_IF] = { ALU_OP2_KILLGT, tgsi_kill},  /* conditional kill */
 	[TGSI_OPCODE_END] = { ALU_OP0_NOP, tgsi_end},  /* aka HALT */
 	/* Refer below for TGSI_OPCODE_DFMA */
@@ -9558,22 +11740,22 @@ static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_UCMP] = { ALU_OP0_NOP, tgsi_ucmp},
 	[TGSI_OPCODE_IABS] = { 0, tgsi_iabs},
 	[TGSI_OPCODE_ISSG] = { 0, tgsi_issg},
-	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_MFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_LFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_SFENCE] = { ALU_OP0_NOP, tgsi_unsupported},
+	[TGSI_OPCODE_LOAD] = { ALU_OP0_NOP, tgsi_load},
+	[TGSI_OPCODE_STORE] = { ALU_OP0_NOP, tgsi_store},
+	[163] = { ALU_OP0_NOP, tgsi_unsupported},
+	[164] = { ALU_OP0_NOP, tgsi_unsupported},
+	[165] = { ALU_OP0_NOP, tgsi_unsupported},
 	[TGSI_OPCODE_BARRIER] = { ALU_OP0_GROUP_BARRIER, tgsi_barrier},
-	[TGSI_OPCODE_ATOMUADD] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMXCHG] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMCAS] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMAND] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMOR] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMXOR] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMUMIN] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMUMAX] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMIMIN] = { ALU_OP0_NOP, tgsi_unsupported},
-	[TGSI_OPCODE_ATOMIMAX] = { ALU_OP0_NOP, tgsi_unsupported},
+	[TGSI_OPCODE_ATOMUADD] = { V_RAT_INST_ADD_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMXCHG] = { V_RAT_INST_XCHG_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMCAS] = { V_RAT_INST_CMPXCHG_INT_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMAND] = { V_RAT_INST_AND_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMOR] = { V_RAT_INST_OR_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMXOR] = { V_RAT_INST_XOR_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMUMIN] = { V_RAT_INST_MIN_UINT_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMUMAX] = { V_RAT_INST_MAX_UINT_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMIMIN] = { V_RAT_INST_MIN_INT_RTN, tgsi_atomic_op},
+	[TGSI_OPCODE_ATOMIMAX] = { V_RAT_INST_MAX_INT_RTN, tgsi_atomic_op},
 	[TGSI_OPCODE_TEX2] = { FETCH_OP_SAMPLE, tgsi_tex},
 	[TGSI_OPCODE_TXB2] = { FETCH_OP_SAMPLE_LB, tgsi_tex},
 	[TGSI_OPCODE_TXL2] = { FETCH_OP_SAMPLE_L, tgsi_tex},
@@ -9581,8 +11763,8 @@ static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_UMUL_HI] = { ALU_OP2_MULHI_UINT, cayman_mul_int_instr},
 	[TGSI_OPCODE_TG4] = { FETCH_OP_GATHER4, tgsi_tex},
 	[TGSI_OPCODE_LODQ] = { FETCH_OP_GET_LOD, tgsi_tex},
-	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_op3},
-	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_op3},
+	[TGSI_OPCODE_IBFE] = { ALU_OP3_BFE_INT, tgsi_bfe},
+	[TGSI_OPCODE_UBFE] = { ALU_OP3_BFE_UINT, tgsi_bfe},
 	[TGSI_OPCODE_BFI] = { ALU_OP0_NOP, tgsi_bfi},
 	[TGSI_OPCODE_BREV] = { ALU_OP1_BFREV_INT, tgsi_op2},
 	[TGSI_OPCODE_POPC] = { ALU_OP1_BCNT_INT, tgsi_op2},
@@ -9598,6 +11780,7 @@ static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_DNEG] = { ALU_OP2_ADD_64, tgsi_dneg},
 	[TGSI_OPCODE_DADD] = { ALU_OP2_ADD_64, tgsi_op2_64},
 	[TGSI_OPCODE_DMUL] = { ALU_OP2_MUL_64, cayman_mul_double_instr},
+	[TGSI_OPCODE_DDIV] = { 0, cayman_ddiv_instr },
 	[TGSI_OPCODE_DMAX] = { ALU_OP2_MAX_64, tgsi_op2_64},
 	[TGSI_OPCODE_DMIN] = { ALU_OP2_MIN_64, tgsi_op2_64},
 	[TGSI_OPCODE_DSLT] = { ALU_OP2_SETGT_64, tgsi_op2_64_single_dest_s},
@@ -9616,5 +11799,9 @@ static const struct r600_shader_tgsi_instruction cm_shader_tgsi_instruction[] =
 	[TGSI_OPCODE_D2U] = { ALU_OP1_FLT_TO_UINT, egcm_double_to_int},
 	[TGSI_OPCODE_U2D] = { ALU_OP1_UINT_TO_FLT, egcm_int_to_double},
 	[TGSI_OPCODE_DRSQ] = { ALU_OP2_RECIPSQRT_64, cayman_emit_double_instr},
+	[TGSI_OPCODE_U64SNE] = { ALU_OP0_NOP, egcm_u64sne },
+	[TGSI_OPCODE_U64ADD] = { ALU_OP0_NOP, egcm_u64add },
+	[TGSI_OPCODE_U64MUL] = { ALU_OP0_NOP, egcm_u64mul },
+	[TGSI_OPCODE_U64DIV] = { ALU_OP0_NOP, egcm_u64div },
 	[TGSI_OPCODE_LAST] = { ALU_OP0_NOP, tgsi_unsupported},
 };